summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSimon MacMullen <simon@rabbitmq.com>2014-09-29 13:58:15 +0100
committerSimon MacMullen <simon@rabbitmq.com>2014-09-29 13:58:15 +0100
commit44230b0cab0fbc6626b15a18d2a2bd4b9eb47f5e (patch)
tree4b825dc642cb6eb9a060e54bf8d69288fbee4904
parentbec334aa7c26e7d6fd479aefbb129053a824cb5b (diff)
parentda290d755b89e5629fb8cb8d4998c68c4aae6450 (diff)
downloadrabbitmq-server-44230b0cab0fbc6626b15a18d2a2bd4b9eb47f5e.tar.gz
Junk bug25739
-rw-r--r--.hgignore33
-rw-r--r--INSTALL2
-rw-r--r--LICENSE8
-rw-r--r--LICENSE-MIT-Mochi9
-rw-r--r--LICENSE-MPL-RabbitMQ455
-rw-r--r--Makefile378
-rw-r--r--README1
-rwxr-xr-xcalculate-relative45
-rwxr-xr-xcheck_xref287
-rw-r--r--codegen.py591
-rw-r--r--docs/examples-to-end.xsl93
-rw-r--r--docs/html-to-website-xml.xsl90
-rw-r--r--docs/rabbitmq-echopid.xml71
-rw-r--r--docs/rabbitmq-env.conf.5.xml83
-rw-r--r--docs/rabbitmq-plugins.1.xml182
-rw-r--r--docs/rabbitmq-server.1.xml132
-rw-r--r--docs/rabbitmq-service.xml218
-rw-r--r--docs/rabbitmqctl.1.xml1777
-rw-r--r--docs/remove-namespaces.xsl18
-rw-r--r--docs/usage.xsl74
-rw-r--r--ebin/rabbit_app.in73
-rw-r--r--generate_app16
-rw-r--r--generate_deps57
-rw-r--r--include/gm_specs.hrl29
-rw-r--r--include/rabbit.hrl110
-rw-r--r--include/rabbit_msg_store.hrl25
-rw-r--r--packaging/RPMS/Fedora/Makefile58
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.init179
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.logrotate12
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.spec244
-rw-r--r--packaging/common/LICENSE.head5
-rw-r--r--packaging/common/LICENSE.tail516
-rw-r--r--packaging/common/rabbitmq-script-wrapper44
-rwxr-xr-xpackaging/common/rabbitmq-server.ocf371
-rw-r--r--packaging/debs/Debian/Makefile42
-rwxr-xr-xpackaging/debs/Debian/check-changelog.sh29
-rw-r--r--packaging/debs/Debian/debian/changelog246
-rw-r--r--packaging/debs/Debian/debian/compat1
-rw-r--r--packaging/debs/Debian/debian/control17
-rw-r--r--packaging/debs/Debian/debian/dirs9
-rw-r--r--packaging/debs/Debian/debian/postinst60
-rw-r--r--packaging/debs/Debian/debian/postrm.in65
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.default9
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.init187
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.logrotate12
-rw-r--r--packaging/debs/Debian/debian/rules22
-rw-r--r--packaging/debs/Debian/debian/watch4
-rw-r--r--packaging/debs/apt-repository/Makefile28
-rw-r--r--packaging/debs/apt-repository/README17
-rw-r--r--packaging/debs/apt-repository/README-real-repository130
-rw-r--r--packaging/debs/apt-repository/distributions7
-rw-r--r--packaging/debs/apt-repository/dupload.conf16
-rw-r--r--packaging/generic-unix/Makefile30
-rw-r--r--packaging/macports/Makefile58
-rw-r--r--packaging/macports/Portfile.in123
-rwxr-xr-xpackaging/macports/make-checksums.sh14
-rwxr-xr-xpackaging/macports/make-port-diff.sh29
-rw-r--r--packaging/macports/patch-org.macports.rabbitmq-server.plist.diff10
-rw-r--r--packaging/standalone/Makefile82
-rw-r--r--packaging/standalone/erl.diff5
-rw-r--r--packaging/standalone/src/rabbit_release.erl154
-rw-r--r--packaging/windows-exe/Makefile16
-rw-r--r--packaging/windows-exe/rabbitmq.icobin4286 -> 0 bytes
-rw-r--r--packaging/windows-exe/rabbitmq_nsi.in239
-rw-r--r--packaging/windows/Makefile38
-rwxr-xr-xquickcheck37
-rw-r--r--scripts/rabbitmq-defaults36
-rw-r--r--scripts/rabbitmq-echopid.bat49
-rwxr-xr-xscripts/rabbitmq-env55
-rwxr-xr-xscripts/rabbitmq-plugins38
-rwxr-xr-xscripts/rabbitmq-plugins.bat57
-rwxr-xr-xscripts/rabbitmq-server128
-rwxr-xr-xscripts/rabbitmq-server.bat153
-rwxr-xr-xscripts/rabbitmq-service.bat230
-rwxr-xr-xscripts/rabbitmqctl38
-rwxr-xr-xscripts/rabbitmqctl.bat49
-rw-r--r--src/app_utils.erl138
-rw-r--r--src/background_gc.erl81
-rw-r--r--src/credit_flow.erl141
-rw-r--r--src/delegate.erl176
-rw-r--r--src/delegate_sup.erl59
-rw-r--r--src/dtree.erl163
-rw-r--r--src/file_handle_cache.erl1227
-rw-r--r--src/gatherer.erl145
-rw-r--r--src/gen_server2.erl1249
-rw-r--r--src/gm.erl1493
-rw-r--r--src/gm_soak_test.erl133
-rw-r--r--src/gm_speed_test.erl83
-rw-r--r--src/gm_tests.erl186
-rw-r--r--src/lqueue.erl90
-rw-r--r--src/mirrored_supervisor.erl505
-rw-r--r--src/mirrored_supervisor_tests.erl339
-rw-r--r--src/mnesia_sync.erl77
-rw-r--r--src/mochijson2.erl893
-rw-r--r--src/mochinum.erl358
-rw-r--r--src/pg2_fixed.erl400
-rw-r--r--src/pg_local.erl213
-rw-r--r--src/pmon.erl70
-rw-r--r--src/priority_queue.erl194
-rw-r--r--src/rabbit.erl776
-rw-r--r--src/rabbit_access_control.erl108
-rw-r--r--src/rabbit_alarm.erl238
-rw-r--r--src/rabbit_amqqueue.erl727
-rw-r--r--src/rabbit_amqqueue_process.erl1462
-rw-r--r--src/rabbit_amqqueue_sup.erl52
-rw-r--r--src/rabbit_auth_backend.erl72
-rw-r--r--src/rabbit_auth_backend_internal.erl331
-rw-r--r--src/rabbit_auth_mechanism.erl56
-rw-r--r--src/rabbit_auth_mechanism_amqplain.erl55
-rw-r--r--src/rabbit_auth_mechanism_cr_demo.erl57
-rw-r--r--src/rabbit_auth_mechanism_plain.erl73
-rw-r--r--src/rabbit_autoheal.erl199
-rw-r--r--src/rabbit_backing_queue.erl237
-rw-r--r--src/rabbit_backing_queue_qc.erl453
-rw-r--r--src/rabbit_basic.erl276
-rw-r--r--src/rabbit_binary_generator.erl242
-rw-r--r--src/rabbit_binary_parser.erl101
-rw-r--r--src/rabbit_binding.erl530
-rw-r--r--src/rabbit_channel.erl1656
-rw-r--r--src/rabbit_channel_sup.erl90
-rw-r--r--src/rabbit_channel_sup_sup.erl48
-rw-r--r--src/rabbit_client_sup.erl56
-rw-r--r--src/rabbit_command_assembler.erl137
-rw-r--r--src/rabbit_connection_sup.erl70
-rw-r--r--src/rabbit_control_main.erl728
-rw-r--r--src/rabbit_direct.erl108
-rw-r--r--src/rabbit_disk_monitor.erl198
-rw-r--r--src/rabbit_error_logger.erl95
-rw-r--r--src/rabbit_error_logger_file_h.erl95
-rw-r--r--src/rabbit_event.erl148
-rw-r--r--src/rabbit_exchange.erl475
-rw-r--r--src/rabbit_exchange_decorator.erl106
-rw-r--r--src/rabbit_exchange_type.erl81
-rw-r--r--src/rabbit_exchange_type_direct.erl51
-rw-r--r--src/rabbit_exchange_type_fanout.erl50
-rw-r--r--src/rabbit_exchange_type_headers.erl127
-rw-r--r--src/rabbit_exchange_type_invalid.erl52
-rw-r--r--src/rabbit_exchange_type_topic.erl267
-rw-r--r--src/rabbit_file.erl311
-rw-r--r--src/rabbit_framing.erl49
-rw-r--r--src/rabbit_guid.erl177
-rw-r--r--src/rabbit_heartbeat.erl132
-rw-r--r--src/rabbit_intermediate_sup.erl39
-rw-r--r--src/rabbit_limiter.erl435
-rw-r--r--src/rabbit_log.erl110
-rw-r--r--src/rabbit_memory_monitor.erl257
-rw-r--r--src/rabbit_mirror_queue_coordinator.erl427
-rw-r--r--src/rabbit_mirror_queue_master.erl475
-rw-r--r--src/rabbit_mirror_queue_misc.erl347
-rw-r--r--src/rabbit_mirror_queue_mode.erl57
-rw-r--r--src/rabbit_mirror_queue_mode_all.erl41
-rw-r--r--src/rabbit_mirror_queue_mode_exactly.erl56
-rw-r--r--src/rabbit_mirror_queue_mode_nodes.erl70
-rw-r--r--src/rabbit_mirror_queue_slave.erl865
-rw-r--r--src/rabbit_mirror_queue_slave_sup.erl37
-rw-r--r--src/rabbit_mirror_queue_sync.erl260
-rw-r--r--src/rabbit_misc.erl1118
-rw-r--r--src/rabbit_mnesia.erl889
-rw-r--r--src/rabbit_msg_file.erl125
-rw-r--r--src/rabbit_msg_store.erl2066
-rw-r--r--src/rabbit_msg_store_ets_index.erl79
-rw-r--r--src/rabbit_msg_store_gc.erl137
-rw-r--r--src/rabbit_msg_store_index.erl59
-rw-r--r--src/rabbit_net.erl232
-rw-r--r--src/rabbit_networking.erl470
-rw-r--r--src/rabbit_node_monitor.erl476
-rw-r--r--src/rabbit_nodes.erl109
-rw-r--r--src/rabbit_parameter_validation.erl87
-rw-r--r--src/rabbit_plugins.erl226
-rw-r--r--src/rabbit_plugins_main.erl287
-rw-r--r--src/rabbit_policy.erl259
-rw-r--r--src/rabbit_policy_validator.erl39
-rw-r--r--src/rabbit_prelaunch.erl72
-rw-r--r--src/rabbit_queue_collector.erl90
-rw-r--r--src/rabbit_queue_index.erl1119
-rw-r--r--src/rabbit_reader.erl1059
-rw-r--r--src/rabbit_registry.erl163
-rw-r--r--src/rabbit_restartable_sup.erl43
-rw-r--r--src/rabbit_router.erl83
-rw-r--r--src/rabbit_runtime_parameter.erl42
-rw-r--r--src/rabbit_runtime_parameters.erl221
-rw-r--r--src/rabbit_runtime_parameters_test.erl64
-rw-r--r--src/rabbit_sasl_report_file_h.erl93
-rw-r--r--src/rabbit_ssl.erl302
-rw-r--r--src/rabbit_sup.erl95
-rw-r--r--src/rabbit_table.erl311
-rw-r--r--src/rabbit_tests.erl2870
-rw-r--r--src/rabbit_tests_event_receiver.erl58
-rw-r--r--src/rabbit_trace.erl119
-rw-r--r--src/rabbit_types.erl159
-rw-r--r--src/rabbit_upgrade.erl281
-rw-r--r--src/rabbit_upgrade_functions.erl327
-rw-r--r--src/rabbit_variable_queue.erl1792
-rw-r--r--src/rabbit_version.erl175
-rw-r--r--src/rabbit_vhost.erl142
-rw-r--r--src/rabbit_vm.erl228
-rw-r--r--src/rabbit_writer.erl296
-rw-r--r--src/supervised_lifecycle.erl68
-rw-r--r--src/supervisor2.erl1232
-rw-r--r--src/supervisor2_tests.erl70
-rw-r--r--src/tcp_acceptor.erl105
-rw-r--r--src/tcp_acceptor_sup.erl43
-rw-r--r--src/tcp_listener.erl98
-rw-r--r--src/tcp_listener_sup.erl70
-rw-r--r--src/test_sup.erl93
-rw-r--r--src/vm_memory_monitor.erl382
-rw-r--r--src/worker_pool.erl142
-rw-r--r--src/worker_pool_sup.erl53
-rw-r--r--src/worker_pool_worker.erl106
-rw-r--r--version.mk1
210 files changed, 0 insertions, 51421 deletions
diff --git a/.hgignore b/.hgignore
deleted file mode 100644
index 912b4a56..00000000
--- a/.hgignore
+++ /dev/null
@@ -1,33 +0,0 @@
-syntax: glob
-*.beam
-*~
-*.swp
-*.patch
-erl_crash.dump
-deps.mk
-
-syntax: regexp
-^cover/
-^dist/
-^include/rabbit_framing\.hrl$
-^include/rabbit_framing_spec\.hrl$
-^src/rabbit_framing_amqp.*\.erl$
-^src/.*\_usage.erl$
-^rabbit\.plt$
-^basic.plt$
-^ebin/rabbit\.(app|rel|boot|script)$
-^plugins/
-^priv/plugins/
-
-^packaging/RPMS/Fedora/(BUILD|RPMS|SOURCES|SPECS|SRPMS)$
-^packaging/debs/Debian/rabbitmq-server_.*\.(dsc|(diff|tar)\.gz|deb|changes)$
-^packaging/debs/apt-repository/debian$
-^packaging/macports/macports$
-^packaging/generic-unix/rabbitmq-server-generic-unix-.*\.tar\.gz$
-^packaging/windows/rabbitmq-server-windows-.*\.zip$
-^packaging/windows-exe/rabbitmq_server-.*$
-^packaging/windows-exe/rabbitmq-.*\.nsi$
-^packaging/windows-exe/rabbitmq-server-.*\.exe$
-
-^docs/.*\.[15]\.gz$
-^docs/.*\.man\.xml$
diff --git a/INSTALL b/INSTALL
deleted file mode 100644
index be34498e..00000000
--- a/INSTALL
+++ /dev/null
@@ -1,2 +0,0 @@
-Please see http://www.rabbitmq.com/download.html for links to guides
-to installing RabbitMQ.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 9feeceac..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-This package, the RabbitMQ server is licensed under the MPL. For the
-MPL, please see LICENSE-MPL-RabbitMQ.
-
-The files `mochijson2.erl' and `mochinum.erl' are (c) 2007 Mochi Media, Inc and
-licensed under a MIT license, see LICENSE-MIT-Mochi.
-
-If you have any questions regarding licensing, please contact us at
-info@rabbitmq.com.
diff --git a/LICENSE-MIT-Mochi b/LICENSE-MIT-Mochi
deleted file mode 100644
index c85b65a4..00000000
--- a/LICENSE-MIT-Mochi
+++ /dev/null
@@ -1,9 +0,0 @@
-This is the MIT license.
-
-Copyright (c) 2007 Mochi Media, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/LICENSE-MPL-RabbitMQ b/LICENSE-MPL-RabbitMQ
deleted file mode 100644
index 549d0f1c..00000000
--- a/LICENSE-MPL-RabbitMQ
+++ /dev/null
@@ -1,455 +0,0 @@
- MOZILLA PUBLIC LICENSE
- Version 1.1
-
- ---------------
-
-1. Definitions.
-
- 1.0.1. "Commercial Use" means distribution or otherwise making the
- Covered Code available to a third party.
-
- 1.1. "Contributor" means each entity that creates or contributes to
- the creation of Modifications.
-
- 1.2. "Contributor Version" means the combination of the Original
- Code, prior Modifications used by a Contributor, and the Modifications
- made by that particular Contributor.
-
- 1.3. "Covered Code" means the Original Code or Modifications or the
- combination of the Original Code and Modifications, in each case
- including portions thereof.
-
- 1.4. "Electronic Distribution Mechanism" means a mechanism generally
- accepted in the software development community for the electronic
- transfer of data.
-
- 1.5. "Executable" means Covered Code in any form other than Source
- Code.
-
- 1.6. "Initial Developer" means the individual or entity identified
- as the Initial Developer in the Source Code notice required by Exhibit
- A.
-
- 1.7. "Larger Work" means a work which combines Covered Code or
- portions thereof with code not governed by the terms of this License.
-
- 1.8. "License" means this document.
-
- 1.8.1. "Licensable" means having the right to grant, to the maximum
- extent possible, whether at the time of the initial grant or
- subsequently acquired, any and all of the rights conveyed herein.
-
- 1.9. "Modifications" means any addition to or deletion from the
- substance or structure of either the Original Code or any previous
- Modifications. When Covered Code is released as a series of files, a
- Modification is:
- A. Any addition to or deletion from the contents of a file
- containing Original Code or previous Modifications.
-
- B. Any new file that contains any part of the Original Code or
- previous Modifications.
-
- 1.10. "Original Code" means Source Code of computer software code
- which is described in the Source Code notice required by Exhibit A as
- Original Code, and which, at the time of its release under this
- License is not already Covered Code governed by this License.
-
- 1.10.1. "Patent Claims" means any patent claim(s), now owned or
- hereafter acquired, including without limitation, method, process,
- and apparatus claims, in any patent Licensable by grantor.
-
- 1.11. "Source Code" means the preferred form of the Covered Code for
- making modifications to it, including all modules it contains, plus
- any associated interface definition files, scripts used to control
- compilation and installation of an Executable, or source code
- differential comparisons against either the Original Code or another
- well known, available Covered Code of the Contributor's choice. The
- Source Code can be in a compressed or archival form, provided the
- appropriate decompression or de-archiving software is widely available
- for no charge.
-
- 1.12. "You" (or "Your") means an individual or a legal entity
- exercising rights under, and complying with all of the terms of, this
- License or a future version of this License issued under Section 6.1.
- For legal entities, "You" includes any entity which controls, is
- controlled by, or is under common control with You. For purposes of
- this definition, "control" means (a) the power, direct or indirect,
- to cause the direction or management of such entity, whether by
- contract or otherwise, or (b) ownership of more than fifty percent
- (50%) of the outstanding shares or beneficial ownership of such
- entity.
-
-2. Source Code License.
-
- 2.1. The Initial Developer Grant.
- The Initial Developer hereby grants You a world-wide, royalty-free,
- non-exclusive license, subject to third party intellectual property
- claims:
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Initial Developer to use, reproduce,
- modify, display, perform, sublicense and distribute the Original
- Code (or portions thereof) with or without Modifications, and/or
- as part of a Larger Work; and
-
- (b) under Patents Claims infringed by the making, using or
- selling of Original Code, to make, have made, use, practice,
- sell, and offer for sale, and/or otherwise dispose of the
- Original Code (or portions thereof).
-
- (c) the licenses granted in this Section 2.1(a) and (b) are
- effective on the date Initial Developer first distributes
- Original Code under the terms of this License.
-
- (d) Notwithstanding Section 2.1(b) above, no patent license is
- granted: 1) for code that You delete from the Original Code; 2)
- separate from the Original Code; or 3) for infringements caused
- by: i) the modification of the Original Code or ii) the
- combination of the Original Code with other software or devices.
-
- 2.2. Contributor Grant.
- Subject to third party intellectual property claims, each Contributor
- hereby grants You a world-wide, royalty-free, non-exclusive license
-
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Contributor, to use, reproduce, modify,
- display, perform, sublicense and distribute the Modifications
- created by such Contributor (or portions thereof) either on an
- unmodified basis, with other Modifications, as Covered Code
- and/or as part of a Larger Work; and
-
- (b) under Patent Claims infringed by the making, using, or
- selling of Modifications made by that Contributor either alone
- and/or in combination with its Contributor Version (or portions
- of such combination), to make, use, sell, offer for sale, have
- made, and/or otherwise dispose of: 1) Modifications made by that
- Contributor (or portions thereof); and 2) the combination of
- Modifications made by that Contributor with its Contributor
- Version (or portions of such combination).
-
- (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
- effective on the date Contributor first makes Commercial Use of
- the Covered Code.
-
- (d) Notwithstanding Section 2.2(b) above, no patent license is
- granted: 1) for any code that Contributor has deleted from the
- Contributor Version; 2) separate from the Contributor Version;
- 3) for infringements caused by: i) third party modifications of
- Contributor Version or ii) the combination of Modifications made
- by that Contributor with other software (except as part of the
- Contributor Version) or other devices; or 4) under Patent Claims
- infringed by Covered Code in the absence of Modifications made by
- that Contributor.
-
-3. Distribution Obligations.
-
- 3.1. Application of License.
- The Modifications which You create or to which You contribute are
- governed by the terms of this License, including without limitation
- Section 2.2. The Source Code version of Covered Code may be
- distributed only under the terms of this License or a future version
- of this License released under Section 6.1, and You must include a
- copy of this License with every copy of the Source Code You
- distribute. You may not offer or impose any terms on any Source Code
- version that alters or restricts the applicable version of this
- License or the recipients' rights hereunder. However, You may include
- an additional document offering the additional rights described in
- Section 3.5.
-
- 3.2. Availability of Source Code.
- Any Modification which You create or to which You contribute must be
- made available in Source Code form under the terms of this License
- either on the same media as an Executable version or via an accepted
- Electronic Distribution Mechanism to anyone to whom you made an
- Executable version available; and if made available via Electronic
- Distribution Mechanism, must remain available for at least twelve (12)
- months after the date it initially became available, or at least six
- (6) months after a subsequent version of that particular Modification
- has been made available to such recipients. You are responsible for
- ensuring that the Source Code version remains available even if the
- Electronic Distribution Mechanism is maintained by a third party.
-
- 3.3. Description of Modifications.
- You must cause all Covered Code to which You contribute to contain a
- file documenting the changes You made to create that Covered Code and
- the date of any change. You must include a prominent statement that
- the Modification is derived, directly or indirectly, from Original
- Code provided by the Initial Developer and including the name of the
- Initial Developer in (a) the Source Code, and (b) in any notice in an
- Executable version or related documentation in which You describe the
- origin or ownership of the Covered Code.
-
- 3.4. Intellectual Property Matters
- (a) Third Party Claims.
- If Contributor has knowledge that a license under a third party's
- intellectual property rights is required to exercise the rights
- granted by such Contributor under Sections 2.1 or 2.2,
- Contributor must include a text file with the Source Code
- distribution titled "LEGAL" which describes the claim and the
- party making the claim in sufficient detail that a recipient will
- know whom to contact. If Contributor obtains such knowledge after
- the Modification is made available as described in Section 3.2,
- Contributor shall promptly modify the LEGAL file in all copies
- Contributor makes available thereafter and shall take other steps
- (such as notifying appropriate mailing lists or newsgroups)
- reasonably calculated to inform those who received the Covered
- Code that new knowledge has been obtained.
-
- (b) Contributor APIs.
- If Contributor's Modifications include an application programming
- interface and Contributor has knowledge of patent licenses which
- are reasonably necessary to implement that API, Contributor must
- also include this information in the LEGAL file.
-
- (c) Representations.
- Contributor represents that, except as disclosed pursuant to
- Section 3.4(a) above, Contributor believes that Contributor's
- Modifications are Contributor's original creation(s) and/or
- Contributor has sufficient rights to grant the rights conveyed by
- this License.
-
- 3.5. Required Notices.
- You must duplicate the notice in Exhibit A in each file of the Source
- Code. If it is not possible to put such notice in a particular Source
- Code file due to its structure, then You must include such notice in a
- location (such as a relevant directory) where a user would be likely
- to look for such a notice. If You created one or more Modification(s)
- You may add your name as a Contributor to the notice described in
- Exhibit A. You must also duplicate this License in any documentation
- for the Source Code where You describe recipients' rights or ownership
- rights relating to Covered Code. You may choose to offer, and to
- charge a fee for, warranty, support, indemnity or liability
- obligations to one or more recipients of Covered Code. However, You
- may do so only on Your own behalf, and not on behalf of the Initial
- Developer or any Contributor. You must make it absolutely clear than
- any such warranty, support, indemnity or liability obligation is
- offered by You alone, and You hereby agree to indemnify the Initial
- Developer and every Contributor for any liability incurred by the
- Initial Developer or such Contributor as a result of warranty,
- support, indemnity or liability terms You offer.
-
- 3.6. Distribution of Executable Versions.
- You may distribute Covered Code in Executable form only if the
- requirements of Section 3.1-3.5 have been met for that Covered Code,
- and if You include a notice stating that the Source Code version of
- the Covered Code is available under the terms of this License,
- including a description of how and where You have fulfilled the
- obligations of Section 3.2. The notice must be conspicuously included
- in any notice in an Executable version, related documentation or
- collateral in which You describe recipients' rights relating to the
- Covered Code. You may distribute the Executable version of Covered
- Code or ownership rights under a license of Your choice, which may
- contain terms different from this License, provided that You are in
- compliance with the terms of this License and that the license for the
- Executable version does not attempt to limit or alter the recipient's
- rights in the Source Code version from the rights set forth in this
- License. If You distribute the Executable version under a different
- license You must make it absolutely clear that any terms which differ
- from this License are offered by You alone, not by the Initial
- Developer or any Contributor. You hereby agree to indemnify the
- Initial Developer and every Contributor for any liability incurred by
- the Initial Developer or such Contributor as a result of any such
- terms You offer.
-
- 3.7. Larger Works.
- You may create a Larger Work by combining Covered Code with other code
- not governed by the terms of this License and distribute the Larger
- Work as a single product. In such a case, You must make sure the
- requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
- If it is impossible for You to comply with any of the terms of this
- License with respect to some or all of the Covered Code due to
- statute, judicial order, or regulation then You must: (a) comply with
- the terms of this License to the maximum extent possible; and (b)
- describe the limitations and the code they affect. Such description
- must be included in the LEGAL file described in Section 3.4 and must
- be included with all distributions of the Source Code. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Application of this License.
-
- This License applies to code to which the Initial Developer has
- attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
- 6.1. New Versions.
- Netscape Communications Corporation ("Netscape") may publish revised
- and/or new versions of the License from time to time. Each version
- will be given a distinguishing version number.
-
- 6.2. Effect of New Versions.
- Once Covered Code has been published under a particular version of the
- License, You may always continue to use it under the terms of that
- version. You may also choose to use such Covered Code under the terms
- of any subsequent version of the License published by Netscape. No one
- other than Netscape has the right to modify the terms applicable to
- Covered Code created under this License.
-
- 6.3. Derivative Works.
- If You create or use a modified version of this License (which you may
- only do in order to apply it to code which is not already Covered Code
- governed by this License), You must (a) rename Your license so that
- the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
- "MPL", "NPL" or any confusingly similar phrase do not appear in your
- license (except to note that your license differs from this License)
- and (b) otherwise make it clear that Your version of the license
- contains terms which differ from the Mozilla Public License and
- Netscape Public License. (Filling in the name of the Initial
- Developer, Original Code or Contributor in the notice described in
- Exhibit A shall not of themselves be deemed to be modifications of
- this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
- COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
- WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
- WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
- DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
- THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
- IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
- YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
- COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
- OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
- ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
- 8.1. This License and the rights granted hereunder will terminate
- automatically if You fail to comply with terms herein and fail to cure
- such breach within 30 days of becoming aware of the breach. All
- sublicenses to the Covered Code which are properly granted shall
- survive any termination of this License. Provisions which, by their
- nature, must remain in effect beyond the termination of this License
- shall survive.
-
- 8.2. If You initiate litigation by asserting a patent infringement
- claim (excluding declatory judgment actions) against Initial Developer
- or a Contributor (the Initial Developer or Contributor against whom
- You file such action is referred to as "Participant") alleging that:
-
- (a) such Participant's Contributor Version directly or indirectly
- infringes any patent, then any and all rights granted by such
- Participant to You under Sections 2.1 and/or 2.2 of this License
- shall, upon 60 days notice from Participant terminate prospectively,
- unless if within 60 days after receipt of notice You either: (i)
- agree in writing to pay Participant a mutually agreeable reasonable
- royalty for Your past and future use of Modifications made by such
- Participant, or (ii) withdraw Your litigation claim with respect to
- the Contributor Version against such Participant. If within 60 days
- of notice, a reasonable royalty and payment arrangement are not
- mutually agreed upon in writing by the parties or the litigation claim
- is not withdrawn, the rights granted by Participant to You under
- Sections 2.1 and/or 2.2 automatically terminate at the expiration of
- the 60 day notice period specified above.
-
- (b) any software, hardware, or device, other than such Participant's
- Contributor Version, directly or indirectly infringes any patent, then
- any rights granted to You by such Participant under Sections 2.1(b)
- and 2.2(b) are revoked effective as of the date You first made, used,
- sold, distributed, or had made, Modifications made by that
- Participant.
-
- 8.3. If You assert a patent infringement claim against Participant
- alleging that such Participant's Contributor Version directly or
- indirectly infringes any patent where such claim is resolved (such as
- by license or settlement) prior to the initiation of patent
- infringement litigation, then the reasonable value of the licenses
- granted by such Participant under Sections 2.1 or 2.2 shall be taken
- into account in determining the amount or value of any payment or
- license.
-
- 8.4. In the event of termination under Sections 8.1 or 8.2 above,
- all end user license agreements (excluding distributors and resellers)
- which have been validly granted by You or any distributor hereunder
- prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
- UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
- (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
- DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
- OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
- ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
- CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
- WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
- COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
- INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
- LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
- RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
- PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
- EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
- THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
- The Covered Code is a "commercial item," as that term is defined in
- 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
- software" and "commercial computer software documentation," as such
- terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
- C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
- all U.S. Government End Users acquire Covered Code with only those
- rights set forth herein.
-
-11. MISCELLANEOUS.
-
- This License represents the complete agreement concerning subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. This License shall be governed by
- California law provisions (except to the extent applicable law, if
- any, provides otherwise), excluding its conflict-of-law provisions.
- With respect to disputes in which at least one party is a citizen of,
- or an entity chartered or registered to do business in the United
- States of America, any litigation relating to this License shall be
- subject to the jurisdiction of the Federal Courts of the Northern
- District of California, with venue lying in Santa Clara County,
- California, with the losing party responsible for costs, including
- without limitation, court costs and reasonable attorneys' fees and
- expenses. The application of the United Nations Convention on
- Contracts for the International Sale of Goods is expressly excluded.
- Any law or regulation which provides that the language of a contract
- shall be construed against the drafter shall not apply to this
- License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
- As between Initial Developer and the Contributors, each party is
- responsible for claims and damages arising, directly or indirectly,
- out of its utilization of rights under this License and You agree to
- work with Initial Developer and Contributors to distribute such
- responsibility on an equitable basis. Nothing herein is intended or
- shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
- Initial Developer may designate portions of the Covered Code as
- "Multiple-Licensed". "Multiple-Licensed" means that the Initial
- Developer permits you to utilize portions of the Covered Code under
- Your choice of the NPL or the alternative licenses, if any, specified
- by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License.
-
- ``The contents of this file are subject to the Mozilla Public License
- Version 1.1 (the "License"); you may not use this file except in
- compliance with the License. You may obtain a copy of the License at
- http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS IS"
- basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
- License for the specific language governing rights and limitations
- under the License.
-
- The Original Code is RabbitMQ.
-
- The Initial Developer of the Original Code is GoPivotal, Inc.
- Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.''
-
- [NOTE: The text of this Exhibit A may differ slightly from the text of
- the notices in the Source Code files of the Original Code. You should
- use the text of this Exhibit A rather than the text found in the
- Original Code Source Code for Your Modifications.]
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 56d4b3c0..00000000
--- a/Makefile
+++ /dev/null
@@ -1,378 +0,0 @@
-TMPDIR ?= /tmp
-
-RABBITMQ_NODENAME ?= rabbit
-RABBITMQ_SERVER_START_ARGS ?=
-RABBITMQ_MNESIA_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-mnesia
-RABBITMQ_PLUGINS_EXPAND_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-plugins-scratch
-RABBITMQ_LOG_BASE ?= $(TMPDIR)
-
-DEPS_FILE=deps.mk
-SOURCE_DIR=src
-EBIN_DIR=ebin
-INCLUDE_DIR=include
-DOCS_DIR=docs
-INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl) $(INCLUDE_DIR)/rabbit_framing.hrl
-SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl $(USAGES_ERL)
-BEAM_TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES))
-TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) plugins
-WEB_URL=http://www.rabbitmq.com/
-MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml))
-WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml $(DOCS_DIR)/rabbitmq-echopid.xml)
-USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-plugins.1.xml
-USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML)))
-QC_MODULES := rabbit_backing_queue_qc
-QC_TRIALS ?= 100
-
-ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python
-else
-ifeq ($(shell python2.6 -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python2.6
-else
-ifeq ($(shell python2.5 -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python2.5
-else
-# Hmm. Missing simplejson?
-PYTHON=python
-endif
-endif
-endif
-
-BASIC_PLT=basic.plt
-RABBIT_PLT=rabbit.plt
-
-ifndef USE_SPECS
-# our type specs rely on callback specs, which are available in R15B
-# upwards.
-USE_SPECS:=$(shell erl -noshell -eval 'io:format([list_to_integer(X) || X <- string:tokens(erlang:system_info(version), ".")] >= [5,9]), halt().')
-endif
-
-ifndef USE_PROPER_QC
-# PropEr needs to be installed for property checking
-# http://proper.softlab.ntua.gr/
-USE_PROPER_QC:=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
-endif
-
-#other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests
-ERLC_OPTS=-I $(INCLUDE_DIR) -o $(EBIN_DIR) -Wall -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
-
-include version.mk
-
-PLUGINS_SRC_DIR?=$(shell [ -d "plugins-src" ] && echo "plugins-src" || echo )
-PLUGINS_DIR=plugins
-TARBALL_NAME=rabbitmq-server-$(VERSION)
-TARGET_SRC_DIR=dist/$(TARBALL_NAME)
-
-SIBLING_CODEGEN_DIR=../rabbitmq-codegen/
-AMQP_CODEGEN_DIR=$(shell [ -d $(SIBLING_CODEGEN_DIR) ] && echo $(SIBLING_CODEGEN_DIR) || echo codegen)
-AMQP_SPEC_JSON_FILES_0_9_1=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json $(AMQP_CODEGEN_DIR)/credit_extension.json
-AMQP_SPEC_JSON_FILES_0_8=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.8.json
-
-ERL_CALL=erl_call -sname $(RABBITMQ_NODENAME) -e
-
-ERL_EBIN=erl -noinput -pa $(EBIN_DIR)
-
-define usage_xml_to_erl
- $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, $(SOURCE_DIR)/rabbit_%_usage.erl, $(subst -,_,$(1))))
-endef
-
-define usage_dep
- $(call usage_xml_to_erl, $(1)): $(1) $(DOCS_DIR)/usage.xsl
-endef
-
-define boolean_macro
-$(if $(filter true,$(1)),-D$(2))
-endef
-
-ifneq "$(SBIN_DIR)" ""
-ifneq "$(TARGET_DIR)" ""
-SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR))
-endif
-endif
-
-# Versions prior to this are not supported
-NEED_MAKE := 3.80
-ifneq "$(NEED_MAKE)" "$(firstword $(sort $(NEED_MAKE) $(MAKE_VERSION)))"
-$(error Versions of make prior to $(NEED_MAKE) are not supported)
-endif
-
-# .DEFAULT_GOAL introduced in 3.81
-DEFAULT_GOAL_MAKE := 3.81
-ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort $(DEFAULT_GOAL_MAKE) $(MAKE_VERSION)))"
-.DEFAULT_GOAL=all
-endif
-
-all: $(TARGETS)
-
-.PHONY: plugins check-xref
-ifneq "$(PLUGINS_SRC_DIR)" ""
-plugins:
- [ -d "$(PLUGINS_SRC_DIR)/rabbitmq-server" ] || ln -s "$(CURDIR)" "$(PLUGINS_SRC_DIR)/rabbitmq-server"
- mkdir -p $(PLUGINS_DIR)
- PLUGINS_SRC_DIR="" $(MAKE) -C "$(PLUGINS_SRC_DIR)" plugins-dist PLUGINS_DIST_DIR="$(CURDIR)/$(PLUGINS_DIR)" VERSION=$(VERSION)
- echo "Put your EZs here and use rabbitmq-plugins to enable them." > $(PLUGINS_DIR)/README
- rm -f $(PLUGINS_DIR)/rabbit_common*.ez
-
-# add -q to remove printout of warnings....
-check-xref: $(BEAM_TARGETS) $(PLUGINS_DIR)
- rm -rf lib
- ./check_xref $(PLUGINS_DIR) -q
-
-else
-plugins:
-# Not building plugins
-
-check-xref:
- $(info xref checks are disabled)
-
-endif
-
-$(DEPS_FILE): $(SOURCES) $(INCLUDES)
- rm -f $@
- echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR)
-
-$(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app
- escript generate_app $< $@ $(SOURCE_DIR)
-
-$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE)
- erlc $(ERLC_OPTS) -pa $(EBIN_DIR) $<
-
-$(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
- $(PYTHON) codegen.py --ignore-conflicts header $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@
-
-$(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1)
- $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_9_1) $@
-
-$(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_8)
- $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_8) $@
-
-dialyze: $(BEAM_TARGETS) $(BASIC_PLT)
- dialyzer --plt $(BASIC_PLT) --no_native --fullpath \
- $(BEAM_TARGETS)
-
-# rabbit.plt is used by rabbitmq-erlang-client's dialyze make target
-create-plt: $(RABBIT_PLT)
-
-$(RABBIT_PLT): $(BEAM_TARGETS) $(BASIC_PLT)
- dialyzer --plt $(BASIC_PLT) --output_plt $@ --no_native \
- --add_to_plt $(BEAM_TARGETS)
-
-$(BASIC_PLT): $(BEAM_TARGETS)
- if [ -f $@ ]; then \
- touch $@; \
- else \
- dialyzer --output_plt $@ --build_plt \
- --apps erts kernel stdlib compiler sasl os_mon mnesia tools \
- public_key crypto ssl xmerl; \
- fi
-
-clean:
- rm -f $(EBIN_DIR)/*.beam
- rm -f $(EBIN_DIR)/rabbit.app $(EBIN_DIR)/rabbit.boot $(EBIN_DIR)/rabbit.script $(EBIN_DIR)/rabbit.rel
- rm -f $(PLUGINS_DIR)/*.ez
- [ -d "$(PLUGINS_SRC_DIR)" ] && PLUGINS_SRC_DIR="" PRESERVE_CLONE_DIR=1 make -C $(PLUGINS_SRC_DIR) clean || true
- rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing_amqp_*.erl codegen.pyc
- rm -f $(DOCS_DIR)/*.[0-9].gz $(DOCS_DIR)/*.man.xml $(DOCS_DIR)/*.erl $(USAGES_ERL)
- rm -f $(RABBIT_PLT)
- rm -f $(DEPS_FILE)
-
-cleandb:
- rm -rf $(RABBITMQ_MNESIA_DIR)/*
-
-############ various tasks to interact with RabbitMQ ###################
-
-BASIC_SCRIPT_ENVIRONMENT_SETTINGS=\
- RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
- RABBITMQ_NODE_PORT="$(RABBITMQ_NODE_PORT)" \
- RABBITMQ_LOG_BASE="$(RABBITMQ_LOG_BASE)" \
- RABBITMQ_MNESIA_DIR="$(RABBITMQ_MNESIA_DIR)" \
- RABBITMQ_PLUGINS_EXPAND_DIR="$(RABBITMQ_PLUGINS_EXPAND_DIR)"
-
-run: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_ALLOW_INPUT=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server
-
-run-node: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_NODE_ONLY=true \
- RABBITMQ_ALLOW_INPUT=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server
-
-run-background-node: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_NODE_ONLY=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server
-
-run-tests: all
- OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \
- echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null
-
-run-qc: all
- $(foreach MOD,$(QC_MODULES),./quickcheck $(RABBITMQ_NODENAME) $(MOD) $(QC_TRIALS))
-
-start-background-node: all
- -rm -f $(RABBITMQ_MNESIA_DIR).pid
- mkdir -p $(RABBITMQ_MNESIA_DIR)
- nohup sh -c "$(MAKE) run-background-node > $(RABBITMQ_MNESIA_DIR)/startup_log 2> $(RABBITMQ_MNESIA_DIR)/startup_err" > /dev/null &
- ./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid kernel
-
-start-rabbit-on-node: all
- echo "rabbit:start()." | $(ERL_CALL)
- ./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid
-
-stop-rabbit-on-node: all
- echo "rabbit:stop()." | $(ERL_CALL)
-
-set-resource-alarm: all
- echo "rabbit_alarm:set_alarm({{resource_limit, $(SOURCE), node()}, []})." | \
- $(ERL_CALL)
-
-clear-resource-alarm: all
- echo "rabbit_alarm:clear_alarm({resource_limit, $(SOURCE), node()})." | \
- $(ERL_CALL)
-
-stop-node:
- -$(ERL_CALL) -q
-
-# code coverage will be created for subdirectory "ebin" of COVER_DIR
-COVER_DIR=.
-
-start-cover: all
- echo "rabbit_misc:start_cover([\"rabbit\", \"hare\"])." | $(ERL_CALL)
- echo "rabbit_misc:enable_cover([\"$(COVER_DIR)\"])." | $(ERL_CALL)
-
-start-secondary-cover: all
- echo "rabbit_misc:start_cover([\"hare\"])." | $(ERL_CALL)
-
-stop-cover: all
- echo "rabbit_misc:report_cover(), cover:stop()." | $(ERL_CALL)
- cat cover/summary.txt
-
-########################################################################
-
-srcdist: distclean
- mkdir -p $(TARGET_SRC_DIR)/codegen
- cp -r ebin src include LICENSE LICENSE-MPL-RabbitMQ INSTALL README $(TARGET_SRC_DIR)
- sed 's/%%VSN%%/$(VERSION)/' $(TARGET_SRC_DIR)/ebin/rabbit_app.in > $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp && \
- mv $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp $(TARGET_SRC_DIR)/ebin/rabbit_app.in
-
- cp -r $(AMQP_CODEGEN_DIR)/* $(TARGET_SRC_DIR)/codegen/
- cp codegen.py Makefile generate_app generate_deps calculate-relative $(TARGET_SRC_DIR)
-
- echo "VERSION?=${VERSION}" > $(TARGET_SRC_DIR)/version.mk
-
- cp -r scripts $(TARGET_SRC_DIR)
- cp -r $(DOCS_DIR) $(TARGET_SRC_DIR)
- chmod 0755 $(TARGET_SRC_DIR)/scripts/*
-
-ifneq "$(PLUGINS_SRC_DIR)" ""
- cp -r $(PLUGINS_SRC_DIR) $(TARGET_SRC_DIR)/plugins-src
- rm $(TARGET_SRC_DIR)/LICENSE
- cat packaging/common/LICENSE.head >> $(TARGET_SRC_DIR)/LICENSE
- cat $(AMQP_CODEGEN_DIR)/license_info >> $(TARGET_SRC_DIR)/LICENSE
- find $(PLUGINS_SRC_DIR)/licensing -name "license_info_*" -exec cat '{}' >> $(TARGET_SRC_DIR)/LICENSE \;
- cat packaging/common/LICENSE.tail >> $(TARGET_SRC_DIR)/LICENSE
- find $(PLUGINS_SRC_DIR)/licensing -name "LICENSE-*" -exec cp '{}' $(TARGET_SRC_DIR) \;
- rm -rf $(TARGET_SRC_DIR)/licensing
-else
- @echo No plugins source distribution found
-endif
-
- (cd dist; tar -zchf $(TARBALL_NAME).tar.gz $(TARBALL_NAME))
- (cd dist; zip -q -r $(TARBALL_NAME).zip $(TARBALL_NAME))
- rm -rf $(TARGET_SRC_DIR)
-
-distclean: clean
- $(MAKE) -C $(AMQP_CODEGEN_DIR) distclean
- rm -rf dist
- find . -regex '.*\(~\|#\|\.swp\|\.dump\)' -exec rm {} \;
-
-# xmlto can not read from standard input, so we mess with a tmp file.
-%.gz: %.xml $(DOCS_DIR)/examples-to-end.xsl
- xmlto --version | grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || opt='--stringparam man.indent.verbatims=0' ; \
- xsltproc --novalid $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \
- xmlto -o $(DOCS_DIR) $$opt man $<.tmp && \
- gzip -f $(DOCS_DIR)/`basename $< .xml`
- rm -f $<.tmp
-
-# Use tmp files rather than a pipeline so that we get meaningful errors
-# Do not fold the cp into previous line, it's there to stop the file being
-# generated but empty if we fail
-$(SOURCE_DIR)/%_usage.erl:
- xsltproc --novalid --stringparam modulename "`basename $@ .erl`" \
- $(DOCS_DIR)/usage.xsl $< > $@.tmp
- sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $@.tmp > $@.tmp2
- fold -s $@.tmp2 > $@.tmp3
- mv $@.tmp3 $@
- rm $@.tmp $@.tmp2
-
-# We rename the file before xmlto sees it since xmlto will use the name of
-# the file to make internal links.
-%.man.xml: %.xml $(DOCS_DIR)/html-to-website-xml.xsl
- cp $< `basename $< .xml`.xml && \
- xmlto xhtml-nochunks `basename $< .xml`.xml ; rm `basename $< .xml`.xml
- cat `basename $< .xml`.html | \
- xsltproc --novalid $(DOCS_DIR)/remove-namespaces.xsl - | \
- xsltproc --novalid --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \
- xmllint --format - > $@
- rm `basename $< .xml`.html
-
-docs_all: $(MANPAGES) $(WEB_MANPAGES)
-
-install: install_bin install_docs
-
-install_bin: all install_dirs
- cp -r ebin include LICENSE* INSTALL $(TARGET_DIR)
-
- chmod 0755 scripts/*
- for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-plugins rabbitmq-defaults; do \
- cp scripts/$$script $(TARGET_DIR)/sbin; \
- [ -e $(SBIN_DIR)/$$script ] || ln -s $(SCRIPTS_REL_PATH)/$$script $(SBIN_DIR)/$$script; \
- done
-
- mkdir -p $(TARGET_DIR)/$(PLUGINS_DIR)
- [ -d "$(PLUGINS_DIR)" ] && cp $(PLUGINS_DIR)/*.ez $(PLUGINS_DIR)/README $(TARGET_DIR)/$(PLUGINS_DIR) || true
-
-install_docs: docs_all install_dirs
- for section in 1 5; do \
- mkdir -p $(MAN_DIR)/man$$section; \
- for manpage in $(DOCS_DIR)/*.$$section.gz; do \
- cp $$manpage $(MAN_DIR)/man$$section; \
- done; \
- done
-
-install_dirs:
- @ OK=true && \
- { [ -n "$(TARGET_DIR)" ] || { echo "Please set TARGET_DIR."; OK=false; }; } && \
- { [ -n "$(SBIN_DIR)" ] || { echo "Please set SBIN_DIR."; OK=false; }; } && \
- { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && $$OK
-
- mkdir -p $(TARGET_DIR)/sbin
- mkdir -p $(SBIN_DIR)
- mkdir -p $(MAN_DIR)
-
-$(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML))))
-
-# Note that all targets which depend on clean must have clean in their
-# name. Also any target that doesn't depend on clean should not have
-# clean in its name, unless you know that you don't need any of the
-# automatic dependency generation for that target (e.g. cleandb).
-
-# We want to load the dep file if *any* target *doesn't* contain
-# "clean" - i.e. if removing all clean-like targets leaves something.
-
-ifeq "$(MAKECMDGOALS)" ""
-TESTABLEGOALS:=$(.DEFAULT_GOAL)
-else
-TESTABLEGOALS:=$(MAKECMDGOALS)
-endif
-
-ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" ""
-include $(DEPS_FILE)
-endif
-
-.PHONY: run-qc
diff --git a/README b/README
deleted file mode 100644
index 67e3a66a..00000000
--- a/README
+++ /dev/null
@@ -1 +0,0 @@
-Please see http://www.rabbitmq.com/build-server.html for build instructions.
diff --git a/calculate-relative b/calculate-relative
deleted file mode 100755
index 3af18e8f..00000000
--- a/calculate-relative
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-#
-# relpath.py
-# R.Barran 30/08/2004
-# Retrieved from http://code.activestate.com/recipes/302594/
-
-import os
-import sys
-
-def relpath(target, base=os.curdir):
- """
- Return a relative path to the target from either the current dir or an optional base dir.
- Base can be a directory specified either as absolute or relative to current dir.
- """
-
- if not os.path.exists(target):
- raise OSError, 'Target does not exist: '+target
-
- if not os.path.isdir(base):
- raise OSError, 'Base is not a directory or does not exist: '+base
-
- base_list = (os.path.abspath(base)).split(os.sep)
- target_list = (os.path.abspath(target)).split(os.sep)
-
- # On the windows platform the target may be on a completely different drive from the base.
- if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
- raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
-
- # Starting from the filepath root, work out how much of the filepath is
- # shared by base and target.
- for i in range(min(len(base_list), len(target_list))):
- if base_list[i] <> target_list[i]: break
- else:
- # If we broke out of the loop, i is pointing to the first differing path elements.
- # If we didn't break out of the loop, i is pointing to identical path elements.
- # Increment i so that in all cases it points to the first differing path elements.
- i+=1
-
- rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
- if (len(rel_list) == 0):
- return "."
- return os.path.join(*rel_list)
-
-if __name__ == "__main__":
- print(relpath(sys.argv[1], sys.argv[2]))
diff --git a/check_xref b/check_xref
deleted file mode 100755
index 24307fdb..00000000
--- a/check_xref
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
--mode(compile).
-
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-main(["-h"]) ->
- io:format("usage: check_xref PluginDirectory (options)~n"
- "options:~n"
- " -q - quiet mode (only prints errors)~n"
- " -X - disables all filters~n");
-main([PluginsDir|Argv]) ->
- put({?MODULE, quiet}, lists:member("-q", Argv)),
- put({?MODULE, no_filters}, lists:member("-X", Argv)),
-
- {ok, Cwd} = file:get_cwd(),
- code:add_pathz(filename:join(Cwd, "ebin")),
- LibDir = filename:join(Cwd, "lib"),
- case filelib:is_dir(LibDir) of
- false -> ok;
- true -> os:cmd("rm -rf " ++ LibDir)
- end,
- Rc = try
- check(Cwd, PluginsDir, LibDir, checks())
- catch
- _:Err ->
- io:format(user, "failed: ~p~n", [Err]),
- 1
- end,
- shutdown(Rc, LibDir).
-
-shutdown(Rc, LibDir) ->
- os:cmd("rm -rf " ++ LibDir),
- erlang:halt(Rc).
-
-check(Cwd, PluginsDir, LibDir, Checks) ->
- {ok, Plugins} = file:list_dir(PluginsDir),
- ok = file:make_dir(LibDir),
- put({?MODULE, third_party}, []),
- [begin
- Source = filename:join(PluginsDir, Plugin),
- Target = filename:join(LibDir, Plugin),
- IsExternal = external_dependency(Plugin),
- AppN = case IsExternal of
- true -> filename:join(LibDir, unmangle_name(Plugin));
- false -> filename:join(
- LibDir, filename:basename(Plugin, ".ez"))
- end,
-
- report(info, "mkdir -p ~s~n", [Target]),
- filelib:ensure_dir(Target),
-
- report(info, "cp ~s ~s~n", [Source, Target]),
- {ok, _} = file:copy(Source, Target),
-
- report(info, "unzip -d ~s ~s~n", [LibDir, Target]),
- {ok, _} = zip:unzip(Target, [{cwd, LibDir}]),
-
- UnpackDir = filename:join(LibDir, filename:basename(Target, ".ez")),
- report(info, "mv ~s ~s~n", [UnpackDir, AppN]),
- ok = file:rename(UnpackDir, AppN),
-
- code:add_patha(filename:join(AppN, "ebin")),
- case IsExternal of
- true -> App = list_to_atom(hd(string:tokens(filename:basename(AppN),
- "-"))),
- report(info, "loading ~p~n", [App]),
- application:load(App),
- store_third_party(App);
- _ -> ok
- end
- end || Plugin <- Plugins,
- lists:suffix(".ez", Plugin)],
-
- RabbitAppEbin = filename:join([LibDir, "rabbit", "ebin"]),
- filelib:ensure_dir(filename:join(RabbitAppEbin, "foo")),
- {ok, Beams} = file:list_dir("ebin"),
- [{ok, _} = file:copy(filename:join("ebin", Beam),
- filename:join(RabbitAppEbin, Beam)) || Beam <- Beams],
- xref:start(?MODULE),
- xref:set_default(?MODULE, [{verbose, false}, {warnings, false}]),
- xref:set_library_path(?MODULE, code:get_path()),
- xref:add_release(?MODULE, Cwd, {name, rabbit}),
- store_unresolved_calls(),
- Results = lists:flatten([perform_analysis(Q) || Q <- Checks]),
- report(Results).
-
-%%
-%% Analysis
-%%
-
-perform_analysis({Query, Description, Severity}) ->
- perform_analysis({Query, Description, Severity, fun(_) -> false end});
-perform_analysis({Query, Description, Severity, Filter}) ->
- report_progress("Checking whether any code ~s "
- "(~s)~n", [Description, Query]),
- case analyse(Query) of
- {ok, Analysis} ->
- [filter(Result, Filter) ||
- Result <- process_analysis(Query, Description,
- Severity, Analysis)];
- {error, Module, Reason} ->
- {analysis_error, {Module, Reason}}
- end.
-
-partition(Results) ->
- lists:partition(fun({{_, L}, _}) -> L =:= error end, Results).
-
-analyse(Query) when is_atom(Query) ->
- xref:analyse(?MODULE, Query, [{verbose, false}]);
-analyse(Query) when is_list(Query) ->
- xref:q(?MODULE, Query).
-
-process_analysis(Query, Tag, Severity, Analysis) when is_atom(Query) ->
- [{{Tag, Severity}, MFA} || MFA <- Analysis];
-process_analysis(Query, Tag, Severity, Analysis) when is_list(Query) ->
- [{{Tag, Severity}, Result} || Result <- Analysis].
-
-checks() ->
- [{"(XXL)(Lin) ((XC - UC) || (XU - X - B))",
- "has call to undefined function(s)",
- error, filters()},
- {"(Lin) (L - LU)", "has unused local function(s)",
- error, filters()},
- {"(Lin) (LU * (X - XU))",
- "has exported function(s) only used locally",
- warning, filters()},
- {"(Lin) (DF * (XU + LU))", "used deprecated function(s)",
- warning, filters()}].
-% {"(Lin) (X - XU)", "possibly unused export",
-% warning, fun filter_unused/1}].
-
-%%
-%% noise filters (can be disabled with -X) - strip uninteresting analyses
-%%
-
-filter(Result, Filter) ->
- case Filter(Result) of
- false -> Result;
- true -> [] %% NB: this gets flattened out later on....
- end.
-
-filters() ->
- case get({?MODULE, no_filters}) of
- true -> fun(_) -> false end;
- _ -> filter_chain([fun is_unresolved_call/1, fun is_callback/1,
- fun is_unused/1, fun is_irrelevant/1])
- end.
-
-filter_chain(FnChain) ->
- fun(AnalysisResult) ->
- Result = cleanup(AnalysisResult),
- lists:foldl(fun(F, false) -> F(Result);
- (_F, true) -> true
- end, false, FnChain)
- end.
-
-cleanup({{_, _},{{{{_,_,_}=MFA1,_},{{_,_,_}=MFA2,_}},_}}) -> {MFA1, MFA2};
-cleanup({{_, _},{{{_,_,_}=MFA1,_},{{_,_,_}=MFA2,_}}}) -> {MFA1, MFA2};
-cleanup({{_, _},{{_,_,_}=MFA1,{_,_,_}=MFA2},_}) -> {MFA1, MFA2};
-cleanup({{_, _},{{_,_,_}=MFA1,{_,_,_}=MFA2}}) -> {MFA1, MFA2};
-cleanup({{_, _}, {_,_,_}=MFA}) -> MFA;
-cleanup({{_, _}, {{_,_,_}=MFA,_}}) -> MFA;
-cleanup({{_,_,_}=MFA, {_,_,_}}) -> MFA;
-cleanup({{_,_,_}=MFA, {_,_,_},_}) -> MFA;
-cleanup(Other) -> Other.
-
-is_irrelevant({{M,_,_}, {_,_,_}}) ->
- is_irrelevant(M);
-is_irrelevant({M,_,_}) ->
- is_irrelevant(M);
-is_irrelevant(Mod) when is_atom(Mod) ->
- lists:member(Mod, get({?MODULE, third_party})).
-
-is_unused({{_,_,_}=MFA, {_,_,_}}) ->
- is_unused(MFA);
-is_unused({M,_F,_A}) ->
- lists:suffix("_tests", atom_to_list(M));
-is_unused(_) ->
- false.
-
-is_unresolved_call({_, F, A}) ->
- UC = get({?MODULE, unresolved_calls}),
- sets:is_element({'$M_EXPR', F, A}, UC);
-is_unresolved_call(_) ->
- false.
-
-%% TODO: cache this....
-is_callback({M,_,_}=MFA) ->
- Attributes = M:module_info(attributes),
- Behaviours = proplists:append_values(behaviour, Attributes),
- {_, Callbacks} = lists:foldl(fun acc_behaviours/2, {M, []}, Behaviours),
- lists:member(MFA, Callbacks);
-is_callback(_) ->
- false.
-
-acc_behaviours(B, {M, CB}=Acc) ->
- case catch(B:behaviour_info(callbacks)) of
- [{_,_} | _] = Callbacks ->
- {M, CB ++ [{M, F, A} || {F,A} <- Callbacks]};
- _ ->
- Acc
- end.
-
-%%
-%% reporting/output
-%%
-
-report(Results) ->
- [report_failures(F) || F <- Results],
- {Errors, Warnings} = partition(Results),
- report(info, "Completed: ~p errors, ~p warnings~n",
- [length(Errors), length(Warnings)]),
- case length(Errors) > 0 of
- true -> 1;
- false -> 0
- end.
-
-report_failures({analysis_error, {Mod, Reason}}) ->
- report(error, "~s:0 Analysis Error: ~p~n", [source_file(Mod), Reason]);
-report_failures({{Tag, Level}, {{{{M,_,_},L},{{M2,F2,A2},_}},_}}) ->
- report(Level, "~s:~w ~s ~p:~p/~p~n",
- [source_file(M), L, Tag, M2, F2, A2]);
-report_failures({{Tag, Level}, {{M,F,A},L}}) ->
- report(Level, "~s:~w ~s ~p:~p/~p~n", [source_file(M), L, Tag, M, F, A]);
-report_failures({{Tag, Level}, {M,F,A}}) ->
- report(Level, "~s:unknown ~s ~p:~p/~p~n", [source_file(M), Tag, M, F, A]);
-report_failures(Term) ->
- report(error, "Ignoring ~p~n", [Term]),
- ok.
-
-report_progress(Fmt, Args) ->
- report(info, Fmt, Args).
-
-report(Level, Fmt, Args) ->
- case {get({?MODULE, quiet}), Level} of
- {true, error} -> do_report(lookup_prefix(Level), Fmt, Args);
- {false, _} -> do_report(lookup_prefix(Level), Fmt, Args);
- _ -> ok
- end.
-
-do_report(Prefix, Fmt, Args) ->
- io:format(Prefix ++ Fmt, Args).
-
-lookup_prefix(error) -> "ERROR: ";
-lookup_prefix(warning) -> "WARNING: ";
-lookup_prefix(info) -> "INFO: ".
-
-source_file(M) ->
- proplists:get_value(source, M:module_info(compile)).
-
-%%
-%% setup/code-path/file-system ops
-%%
-
-store_third_party(App) ->
- {ok, AppConfig} = application:get_all_key(App),
- AppModules = proplists:get_value(modules, AppConfig),
- put({?MODULE, third_party}, AppModules ++ get({?MODULE, third_party})).
-
-%% TODO: this ought not to be maintained in such a fashion
-external_dependency(Path) ->
- lists:any(fun(P) -> lists:prefix(P, Path) end,
- ["mochiweb", "webmachine", "rfc4627", "eldap"]).
-
-unmangle_name(Path) ->
- [Name, Vsn | _] = re:split(Path, "-", [{return, list}]),
- string:join([Name, Vsn], "-").
-
-store_unresolved_calls() ->
- {ok, UCFull} = analyse("UC"),
- UC = [MFA || {_, {_,_,_} = MFA} <- UCFull],
- put({?MODULE, unresolved_calls}, sets:from_list(UC)).
diff --git a/codegen.py b/codegen.py
deleted file mode 100644
index 842549cf..00000000
--- a/codegen.py
+++ /dev/null
@@ -1,591 +0,0 @@
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-from __future__ import nested_scopes
-
-import sys
-sys.path.append("../rabbitmq-codegen") # in case we're next to an experimental revision
-sys.path.append("codegen") # in case we're building from a distribution package
-
-from amqp_codegen import *
-import string
-import re
-
-# Coming up with a proper encoding of AMQP tables in JSON is too much
-# hassle at this stage. Given that the only default value we are
-# interested in is for the empty table, we only support that.
-def convertTable(d):
- if len(d) == 0:
- return "[]"
- else:
- raise Exception('Non-empty table defaults not supported ' + d)
-
-erlangDefaultValueTypeConvMap = {
- bool : lambda x: str(x).lower(),
- str : lambda x: "<<\"" + x + "\">>",
- int : lambda x: str(x),
- float : lambda x: str(x),
- dict: convertTable,
- unicode: lambda x: "<<\"" + x.encode("utf-8") + "\">>"
-}
-
-def erlangize(s):
- s = s.replace('-', '_')
- s = s.replace(' ', '_')
- return s
-
-AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' + erlangize(m.name) + "'"
-
-AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'"
-
-def erlangConstantName(s):
- return '_'.join(re.split('[- ]', s.upper()))
-
-class PackedMethodBitField:
- def __init__(self, index):
- self.index = index
- self.domain = 'bit'
- self.contents = []
-
- def extend(self, f):
- self.contents.append(f)
-
- def count(self):
- return len(self.contents)
-
- def full(self):
- return self.count() == 8
-
-def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4):
- r = [prologue]
- i = 0
- for t in things:
- if i != 0:
- if i % thingsPerLine == 0:
- r += [lineSeparator]
- else:
- r += [separator]
- r += [t]
- i += 1
- r += [epilogue]
- return "".join(r)
-
-def prettyType(typeName, subTypes, typesPerLine = 4):
- """Pretty print a type signature made up of many alternative subtypes"""
- sTs = multiLineFormat(subTypes,
- "( ", " | ", "\n | ", " )",
- thingsPerLine = typesPerLine)
- return "-type(%s ::\n %s)." % (typeName, sTs)
-
-def printFileHeader():
- print """%% Autogenerated code. Do not edit.
-%%
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%"""
-
-def genErl(spec):
- def erlType(domain):
- return erlangize(spec.resolveDomain(domain))
-
- def fieldTypeList(fields):
- return '[' + ', '.join([erlType(f.domain) for f in fields]) + ']'
-
- def fieldNameList(fields):
- return '[' + ', '.join([erlangize(f.name) for f in fields]) + ']'
-
- def fieldTempList(fields):
- return '[' + ', '.join(['F' + str(f.index) for f in fields]) + ']'
-
- def fieldMapList(fields):
- return ', '.join([erlangize(f.name) + " = F" + str(f.index) for f in fields])
-
- def genLookupMethodName(m):
- print "lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName())
-
- def genLookupClassName(c):
- print "lookup_class_name(%d) -> %s;" % (c.index, c.erlangName())
-
- def genMethodId(m):
- print "method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index)
-
- def genMethodHasContent(m):
- print "method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower())
-
- def genMethodIsSynchronous(m):
- hasNoWait = "nowait" in fieldNameList(m.arguments)
- if m.isSynchronous and hasNoWait:
- print "is_method_synchronous(#%s{nowait = NoWait}) -> not(NoWait);" % (m.erlangName())
- else:
- print "is_method_synchronous(#%s{}) -> %s;" % (m.erlangName(), str(m.isSynchronous).lower())
-
- def genMethodFieldTypes(m):
- """Not currently used - may be useful in future?"""
- print "method_fieldtypes(%s) -> %s;" % (m.erlangName(), fieldTypeList(m.arguments))
-
- def genMethodFieldNames(m):
- print "method_fieldnames(%s) -> %s;" % (m.erlangName(), fieldNameList(m.arguments))
-
- def packMethodFields(fields):
- packed = []
- bitfield = None
- for f in fields:
- if erlType(f.domain) == 'bit':
- if not(bitfield) or bitfield.full():
- bitfield = PackedMethodBitField(f.index)
- packed.append(bitfield)
- bitfield.extend(f)
- else:
- bitfield = None
- packed.append(f)
- return packed
-
- def methodFieldFragment(f):
- type = erlType(f.domain)
- p = 'F' + str(f.index)
- if type == 'shortstr':
- return p+'Len:8/unsigned, '+p+':'+p+'Len/binary'
- elif type == 'longstr':
- return p+'Len:32/unsigned, '+p+':'+p+'Len/binary'
- elif type == 'octet':
- return p+':8/unsigned'
- elif type == 'short':
- return p+':16/unsigned'
- elif type == 'long':
- return p+':32/unsigned'
- elif type == 'longlong':
- return p+':64/unsigned'
- elif type == 'timestamp':
- return p+':64/unsigned'
- elif type == 'bit':
- return p+'Bits:8'
- elif type == 'table':
- return p+'Len:32/unsigned, '+p+'Tab:'+p+'Len/binary'
-
- def genFieldPostprocessing(packed):
- for f in packed:
- type = erlType(f.domain)
- if type == 'bit':
- for index in range(f.count()):
- print " F%d = ((F%dBits band %d) /= 0)," % \
- (f.index + index,
- f.index,
- 1 << index)
- elif type == 'table':
- print " F%d = rabbit_binary_parser:parse_table(F%dTab)," % \
- (f.index, f.index)
- else:
- pass
-
- def genMethodRecord(m):
- print "method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName())
-
- def genDecodeMethodFields(m):
- packedFields = packMethodFields(m.arguments)
- binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields])
- if binaryPattern:
- restSeparator = ', '
- else:
- restSeparator = ''
- recordConstructorExpr = '#%s{%s}' % (m.erlangName(), fieldMapList(m.arguments))
- print "decode_method_fields(%s, <<%s>>) ->" % (m.erlangName(), binaryPattern)
- genFieldPostprocessing(packedFields)
- print " %s;" % (recordConstructorExpr,)
-
- def genDecodeProperties(c):
- def presentBin(fields):
- ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
- return '<<' + ps + ', _:%d, R0/binary>>' % (16 - len(fields),)
- def writePropFieldLine(field):
- i = str(field.index)
- if field.domain == 'bit':
- print " {F%s, R%s} = {P%s =/= 0, R%s}," % \
- (i, str(field.index + 1), i, i)
- else:
- print " {F%s, R%s} = if P%s =:= 0 -> {undefined, R%s}; true -> ?%s_VAL(R%s, L%s, V%s, X%s) end," % \
- (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i, i)
-
- if len(c.fields) == 0:
- print "decode_properties(%d, <<>>) ->" % (c.index,)
- else:
- print ("decode_properties(%d, %s) ->" %
- (c.index, presentBin(c.fields)))
- for field in c.fields:
- writePropFieldLine(field)
- print " <<>> = %s," % ('R' + str(len(c.fields)))
- print " #'P_%s'{%s};" % (erlangize(c.name), fieldMapList(c.fields))
-
- def genFieldPreprocessing(packed):
- for f in packed:
- type = erlType(f.domain)
- if type == 'bit':
- print " F%dBits = (%s)," % \
- (f.index,
- ' bor '.join(['(bitvalue(F%d) bsl %d)' % (x.index, x.index - f.index)
- for x in f.contents]))
- elif type == 'table':
- print " F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index)
- print " F%dLen = size(F%dTab)," % (f.index, f.index)
- elif type == 'shortstr':
- print " F%dLen = shortstr_size(F%d)," % (f.index, f.index)
- elif type == 'longstr':
- print " F%dLen = size(F%d)," % (f.index, f.index)
- else:
- pass
-
- def genEncodeMethodFields(m):
- packedFields = packMethodFields(m.arguments)
- print "encode_method_fields(#%s{%s}) ->" % (m.erlangName(), fieldMapList(m.arguments))
- genFieldPreprocessing(packedFields)
- print " <<%s>>;" % (', '.join([methodFieldFragment(f) for f in packedFields]))
-
- def genEncodeProperties(c):
- def presentBin(fields):
- ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
- return '<<' + ps + ', 0:%d>>' % (16 - len(fields),)
- def writePropFieldLine(field):
- i = str(field.index)
- if field.domain == 'bit':
- print " {P%s, R%s} = {F%s =:= 1, R%s}," % \
- (i, str(field.index + 1), i, i)
- else:
- print " {P%s, R%s} = if F%s =:= undefined -> {0, R%s}; true -> {1, [?%s_PROP(F%s, L%s) | R%s]} end," % \
- (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i)
-
- print "encode_properties(#'P_%s'{%s}) ->" % (erlangize(c.name), fieldMapList(c.fields))
- if len(c.fields) == 0:
- print " <<>>;"
- else:
- print " R0 = [<<>>],"
- for field in c.fields:
- writePropFieldLine(field)
- print " list_to_binary([%s | lists:reverse(R%s)]);" % \
- (presentBin(c.fields), str(len(c.fields)))
-
- def messageConstantClass(cls):
- # We do this because 0.8 uses "soft error" and 8.1 uses "soft-error".
- return erlangConstantName(cls)
-
- def genLookupException(c,v,cls):
- mCls = messageConstantClass(cls)
- if mCls == 'SOFT_ERROR': genLookupException1(c,'false')
- elif mCls == 'HARD_ERROR': genLookupException1(c, 'true')
- elif mCls == '': pass
- else: raise Exception('Unknown constant class' + cls)
-
- def genLookupException1(c,hardErrorBoolStr):
- n = erlangConstantName(c)
- print 'lookup_amqp_exception(%s) -> {%s, ?%s, <<"%s">>};' % \
- (n.lower(), hardErrorBoolStr, n, n)
-
- def genAmqpException(c,v,cls):
- n = erlangConstantName(c)
- print 'amqp_exception(?%s) -> %s;' % \
- (n, n.lower())
-
- methods = spec.allMethods()
-
- printFileHeader()
- module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor)
- if spec.revision != 0:
- module = "%s_%d" % (module, spec.revision)
- if module == "rabbit_framing_amqp_8_0":
- module = "rabbit_framing_amqp_0_8"
- print "-module(%s)." % module
- print """-include("rabbit_framing.hrl").
-
--export([version/0]).
--export([lookup_method_name/1]).
--export([lookup_class_name/1]).
-
--export([method_id/1]).
--export([method_has_content/1]).
--export([is_method_synchronous/1]).
--export([method_record/1]).
--export([method_fieldnames/1]).
--export([decode_method_fields/2]).
--export([decode_properties/2]).
--export([encode_method_fields/1]).
--export([encode_properties/1]).
--export([lookup_amqp_exception/1]).
--export([amqp_exception/1]).
-
-"""
- print "%% Various types"
- print "-ifdef(use_specs)."
-
- print """-export_type([amqp_field_type/0, amqp_property_type/0,
- amqp_table/0, amqp_array/0, amqp_value/0,
- amqp_method_name/0, amqp_method/0, amqp_method_record/0,
- amqp_method_field_name/0, amqp_property_record/0,
- amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
-
--type(amqp_field_type() ::
- 'longstr' | 'signedint' | 'decimal' | 'timestamp' |
- 'table' | 'byte' | 'double' | 'float' | 'long' |
- 'short' | 'bool' | 'binary' | 'void' | 'array').
--type(amqp_property_type() ::
- 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
- 'longlong' | 'timestamp' | 'bit' | 'table').
-
--type(amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]).
--type(amqp_array() :: [{amqp_field_type(), amqp_value()}]).
--type(amqp_value() :: binary() | % longstr
- integer() | % signedint
- {non_neg_integer(), non_neg_integer()} | % decimal
- amqp_table() |
- amqp_array() |
- byte() | % byte
- float() | % double
- integer() | % long
- integer() | % short
- boolean() | % bool
- binary() | % binary
- 'undefined' | % void
- non_neg_integer() % timestamp
- ).
-"""
-
- print prettyType("amqp_method_name()",
- [m.erlangName() for m in methods])
- print prettyType("amqp_method()",
- ["{%s, %s}" % (m.klass.index, m.index) for m in methods],
- 6)
- print prettyType("amqp_method_record()",
- ["#%s{}" % (m.erlangName()) for m in methods])
- fieldNames = set()
- for m in methods:
- fieldNames.update(m.arguments)
- fieldNames = [erlangize(f.name) for f in fieldNames]
- print prettyType("amqp_method_field_name()",
- fieldNames)
- print prettyType("amqp_property_record()",
- ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()])
- print prettyType("amqp_exception()",
- ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants])
- print prettyType("amqp_exception_code()",
- ["%i" % v for (c, v, cls) in spec.constants])
- classIds = set()
- for m in spec.allMethods():
- classIds.add(m.klass.index)
- print prettyType("amqp_class_id()",
- ["%i" % ci for ci in classIds])
- print prettyType("amqp_class_name()",
- ["%s" % c.erlangName() for c in spec.allClasses()])
- print "-endif. % use_specs"
-
- print """
-%% Method signatures
--ifdef(use_specs).
--spec(version/0 :: () -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
--spec(lookup_method_name/1 :: (amqp_method()) -> amqp_method_name()).
--spec(lookup_class_name/1 :: (amqp_class_id()) -> amqp_class_name()).
--spec(method_id/1 :: (amqp_method_name()) -> amqp_method()).
--spec(method_has_content/1 :: (amqp_method_name()) -> boolean()).
--spec(is_method_synchronous/1 :: (amqp_method_record()) -> boolean()).
--spec(method_record/1 :: (amqp_method_name()) -> amqp_method_record()).
--spec(method_fieldnames/1 :: (amqp_method_name()) -> [amqp_method_field_name()]).
--spec(decode_method_fields/2 ::
- (amqp_method_name(), binary()) -> amqp_method_record() | rabbit_types:connection_exit()).
--spec(decode_properties/2 :: (non_neg_integer(), binary()) -> amqp_property_record()).
--spec(encode_method_fields/1 :: (amqp_method_record()) -> binary()).
--spec(encode_properties/1 :: (amqp_property_record()) -> binary()).
--spec(lookup_amqp_exception/1 :: (amqp_exception()) -> {boolean(), amqp_exception_code(), binary()}).
--spec(amqp_exception/1 :: (amqp_exception_code()) -> amqp_exception()).
--endif. % use_specs
-
-bitvalue(true) -> 1;
-bitvalue(false) -> 0;
-bitvalue(undefined) -> 0.
-
-shortstr_size(S) ->
- case size(S) of
- Len when Len =< 255 -> Len;
- _ -> exit(method_field_shortstr_overflow)
- end.
-
--define(SHORTSTR_VAL(R, L, V, X),
- begin
- <<L:8/unsigned, V:L/binary, X/binary>> = R,
- {V, X}
- end).
-
--define(LONGSTR_VAL(R, L, V, X),
- begin
- <<L:32/unsigned, V:L/binary, X/binary>> = R,
- {V, X}
- end).
-
--define(SHORT_VAL(R, L, V, X),
- begin
- <<V:8/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(LONG_VAL(R, L, V, X),
- begin
- <<V:32/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(LONGLONG_VAL(R, L, V, X),
- begin
- <<V:64/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(OCTET_VAL(R, L, V, X),
- begin
- <<V:8/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(TABLE_VAL(R, L, V, X),
- begin
- <<L:32/unsigned, V:L/binary, X/binary>> = R,
- {rabbit_binary_parser:parse_table(V), X}
- end).
-
--define(TIMESTAMP_VAL(R, L, V, X),
- begin
- <<V:64/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(SHORTSTR_PROP(X, L),
- begin
- L = size(X),
- if L < 256 -> <<L:8, X:L/binary>>;
- true -> exit(content_properties_shortstr_overflow)
- end
- end).
-
--define(LONGSTR_PROP(X, L),
- begin
- L = size(X),
- <<L:32, X:L/binary>>
- end).
-
--define(OCTET_PROP(X, L), <<X:8/unsigned>>).
--define(SHORT_PROP(X, L), <<X:16/unsigned>>).
--define(LONG_PROP(X, L), <<X:32/unsigned>>).
--define(LONGLONG_PROP(X, L), <<X:64/unsigned>>).
--define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
-
--define(TABLE_PROP(X, T),
- begin
- T = rabbit_binary_generator:generate_table(X),
- <<(size(T)):32, T/binary>>
- end).
-"""
- version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision)
- if version == '{8, 0, 0}': version = '{0, 8, 0}'
- print "version() -> %s." % (version)
-
- for m in methods: genLookupMethodName(m)
- print "lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id})."
-
- for c in spec.allClasses(): genLookupClassName(c)
- print "lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId})."
-
- for m in methods: genMethodId(m)
- print "method_id(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodHasContent(m)
- print "method_has_content(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodIsSynchronous(m)
- print "is_method_synchronous(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodRecord(m)
- print "method_record(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodFieldNames(m)
- print "method_fieldnames(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genDecodeMethodFields(m)
- print "decode_method_fields(Name, BinaryFields) ->"
- print " rabbit_misc:frame_error(Name, BinaryFields)."
-
- for c in spec.allClasses(): genDecodeProperties(c)
- print "decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId})."
-
- for m in methods: genEncodeMethodFields(m)
- print "encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)})."
-
- for c in spec.allClasses(): genEncodeProperties(c)
- print "encode_properties(Record) -> exit({unknown_properties_record, Record})."
-
- for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
- print "lookup_amqp_exception(Code) ->"
- print " rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),"
- print " {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}."
-
- for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)
- print "amqp_exception(_Code) -> undefined."
-
-def genHrl(spec):
- def fieldNameList(fields):
- return ', '.join([erlangize(f.name) for f in fields])
-
- def fieldNameListDefaults(fields):
- def fillField(field):
- result = erlangize(f.name)
- if field.defaultvalue != None:
- conv_fn = erlangDefaultValueTypeConvMap[type(field.defaultvalue)]
- result += ' = ' + conv_fn(field.defaultvalue)
- return result
- return ', '.join([fillField(f) for f in fields])
-
- methods = spec.allMethods()
-
- printFileHeader()
- print "-define(PROTOCOL_PORT, %d)." % (spec.port)
-
- for (c,v,cls) in spec.constants:
- print "-define(%s, %s)." % (erlangConstantName(c), v)
-
- print "%% Method field records."
- for m in methods:
- print "-record(%s, {%s})." % (m.erlangName(), fieldNameListDefaults(m.arguments))
-
- print "%% Class property records."
- for c in spec.allClasses():
- print "-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields))
-
-
-def generateErl(specPath):
- genErl(AmqpSpec(specPath))
-
-def generateHrl(specPath):
- genHrl(AmqpSpec(specPath))
-
-if __name__ == "__main__":
- do_main_dict({"header": generateHrl,
- "body": generateErl})
-
diff --git a/docs/examples-to-end.xsl b/docs/examples-to-end.xsl
deleted file mode 100644
index 4db1d5c4..00000000
--- a/docs/examples-to-end.xsl
+++ /dev/null
@@ -1,93 +0,0 @@
-<?xml version='1.0'?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version='1.0'>
-
-<xsl:output doctype-public="-//OASIS//DTD DocBook XML V4.5//EN"
- doctype-system="http://www.docbook.org/xml/4.5/docbookx.dtd"
- indent="yes"
-/>
-
-<!-- Don't copy examples through in place -->
-<xsl:template match="*[@role='example-prefix']"/>
-<xsl:template match="*[@role='example']"/>
-
-<!-- Copy everything through (with lower priority) -->
-<xsl:template match="@*|node()">
- <xsl:copy><xsl:apply-templates select="@*|node()"/></xsl:copy>
-</xsl:template>
-
-<!-- Copy the root node, and add examples at the end-->
-<xsl:template match="/refentry">
-<refentry lang="en">
-<xsl:for-each select="*">
- <xsl:copy><xsl:apply-templates select="@*|node()"/></xsl:copy>
-</xsl:for-each>
- <refsect1>
- <title>Examples</title>
-<xsl:if test="//screen[@role='example']">
- <variablelist>
-<xsl:for-each select="//screen[@role='example']">
- <varlistentry>
- <term><command><xsl:copy-of select="text()"/></command></term>
- <listitem>
- <xsl:copy-of select="following-sibling::para[@role='example' and preceding-sibling::screen[1] = current()]"/>
- </listitem>
- </varlistentry>
-</xsl:for-each>
- </variablelist>
-</xsl:if>
-<!--
-We need to handle multiline examples separately, since not using a
-variablelist leads to slightly less nice formatting (the explanation doesn't get
-indented)
--->
-<xsl:for-each select="//screen[@role='example-multiline']">
-<screen><emphasis role="bold"><xsl:copy-of select="text()"/></emphasis></screen>
-<xsl:copy-of select="following-sibling::para[@role='example']"/>
-</xsl:for-each>
- </refsect1>
-</refentry>
-</xsl:template>
-
-<!--
- We show all the subcommands using XML that looks like this:
-
- <term>
- <cmdsynopsis>
- <command>list_connections</command>
- <arg choice="opt">
- <replaceable>connectioninfoitem</replaceable>
- ...
- </arg>
- </cmdsynopsis>
- </term>
-
- However, while DocBook renders this sensibly for HTML, for some reason it
- doen't show anything inside <cmdsynopsis> at all for man pages. I think what
- we're doing is semantically correct so this is a bug in DocBook. The following
- rules essentially do what DocBook does when <cmdsynopsis> is not inside a
- <term>.
--->
-
-<xsl:template match="term/cmdsynopsis">
- <xsl:apply-templates mode="docbook-bug"/>
-</xsl:template>
-
-<xsl:template match="command" mode="docbook-bug">
- <emphasis role="bold"><xsl:apply-templates mode="docbook-bug"/></emphasis>
-</xsl:template>
-
-<xsl:template match="arg[@choice='opt']" mode="docbook-bug">
- [<xsl:apply-templates mode="docbook-bug"/>]
-</xsl:template>
-
-<xsl:template match="arg[@choice='req']" mode="docbook-bug">
- {<xsl:apply-templates mode="docbook-bug"/>}
-</xsl:template>
-
-<xsl:template match="replaceable" mode="docbook-bug">
- <emphasis><xsl:apply-templates mode="docbook-bug"/></emphasis>
-</xsl:template>
-
-</xsl:stylesheet>
-
diff --git a/docs/html-to-website-xml.xsl b/docs/html-to-website-xml.xsl
deleted file mode 100644
index d83d5073..00000000
--- a/docs/html-to-website-xml.xsl
+++ /dev/null
@@ -1,90 +0,0 @@
-<?xml version='1.0'?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc"
- xmlns="http://www.w3.org/1999/xhtml"
- version='1.0'>
-
-<xsl:param name="original"/>
-
-<xsl:output method="xml" />
-
-<!-- Copy every element through -->
-<xsl:template match="*">
- <xsl:element name="{name()}" namespace="http://www.w3.org/1999/xhtml">
- <xsl:apply-templates select="@*|node()"/>
- </xsl:element>
-</xsl:template>
-
-<xsl:template match="@*">
- <xsl:copy/>
-</xsl:template>
-
-<!-- Copy the root node, and munge the outer part of the page -->
-<xsl:template match="/html">
-<xsl:processing-instruction name="xml-stylesheet">type="text/xml" href="page.xsl"</xsl:processing-instruction>
-<html xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc" xmlns="http://www.w3.org/1999/xhtml">
- <head>
- <title><xsl:value-of select="document($original)/refentry/refnamediv/refname"/><xsl:if test="document($original)/refentry/refmeta/manvolnum">(<xsl:value-of select="document($original)/refentry/refmeta/manvolnum"/>)</xsl:if> manual page</title>
- </head>
- <body show-in-this-page="true">
- <xsl:choose>
- <xsl:when test="document($original)/refentry/refmeta/manvolnum">
- <p>
- This is the manual page for
- <code><xsl:value-of select="document($original)/refentry/refnamediv/refname"/>(<xsl:value-of select="document($original)/refentry/refmeta/manvolnum"/>)</code>.
- </p>
- <p>
- <a href="../manpages.html">See a list of all manual pages</a>.
- </p>
- </xsl:when>
- <xsl:otherwise>
- <p>
- This is the documentation for
- <code><xsl:value-of select="document($original)/refentry/refnamediv/refname"/></code>.
- </p>
- </xsl:otherwise>
- </xsl:choose>
- <p>
- For more general documentation, please see the
- <a href="../admin-guide.html">administrator's guide</a>.
- </p>
-
- <xsl:apply-templates select="body/div[@class='refentry']"/>
- </body>
-</html>
-</xsl:template>
-
-<!-- Specific instructions to revert the DocBook HTML to be more like our ad-hoc XML schema -->
-
-<xsl:template match="div[@class='refsect1'] | div[@class='refnamediv'] | div[@class='refsynopsisdiv']">
- <doc:section name="{h2}">
- <xsl:apply-templates select="node()"/>
- </doc:section>
-</xsl:template>
-
-<xsl:template match="div[@class='refsect2']">
- <doc:subsection name="{h3}">
- <xsl:apply-templates select="node()"/>
- </doc:subsection>
-</xsl:template>
-
-<xsl:template match="h2 | h3">
- <doc:heading>
- <xsl:apply-templates select="node()"/>
- </doc:heading>
-</xsl:template>
-
-<xsl:template match="pre[@class='screen']">
- <pre class="sourcecode">
- <xsl:apply-templates select="node()"/>
- </pre>
-</xsl:template>
-
-<xsl:template match="div[@class='cmdsynopsis']">
- <div class="cmdsynopsis" id="{p/code[@class='command']}">
- <xsl:apply-templates select="node()"/>
- </div>
-</xsl:template>
-
-</xsl:stylesheet>
-
diff --git a/docs/rabbitmq-echopid.xml b/docs/rabbitmq-echopid.xml
deleted file mode 100644
index d3dcea52..00000000
--- a/docs/rabbitmq-echopid.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmq-echopid.bat</refentrytitle>
- <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmq-echopid.bat</refname>
- <refpurpose>return the process id of the Erlang runtime hosting RabbitMQ</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>rabbitmq-echopid.bat</command>
- <arg choice="req">sname</arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
- <para>
- RabbitMQ is an implementation of AMQP, the emerging
- standard for high performance enterprise messaging. The
- RabbitMQ server is a robust and scalable implementation of
- an AMQP broker.
- </para>
- <para>
- Running <command>rabbitmq-echopid</command> will attempt to
- discover and echo the process id (PID) of the Erlang runtime
- process (erl.exe) that is hosting RabbitMQ. To allow erl.exe
- time to start up and load RabbitMQ, the script will wait for
- ten seconds before timing out if a suitable PID cannot be
- found.
- </para>
- <para>
- If a PID is discovered, the script will echo it to stdout
- before exiting with a ERRORLEVEL of 0. If no PID is
- discovered before the timeout, nothing is written to stdout
- and the script exits setting ERRORLEVEL to 1.
- </para>
- <para>
- Note that this script only exists on Windows due to the need
- to wait for erl.exe and possibly time-out. To obtain the PID
- on Unix set RABBITMQ_PID_FILE before starting
- rabbitmq-server and do not use "-detached".
- </para>
- </refsect1>
-
- <refsect1>
- <title>Options</title>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><arg choice="req">sname</arg></cmdsynopsis></term>
- <listitem>
- <para role="usage">
-The short-name form of the RabbitMQ node name.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/docs/rabbitmq-env.conf.5.xml b/docs/rabbitmq-env.conf.5.xml
deleted file mode 100644
index c887596c..00000000
--- a/docs/rabbitmq-env.conf.5.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmq-env.conf</refentrytitle>
- <manvolnum>5</manvolnum>
- <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmq-env.conf</refname>
- <refpurpose>default settings for RabbitMQ AMQP server</refpurpose>
- </refnamediv>
-
- <refsect1>
- <title>Description</title>
- <para>
-<filename>/etc/rabbitmq/rabbitmq-env.conf</filename> contains variable settings that override the
-defaults built in to the RabbitMQ startup scripts.
- </para>
- <para>
-The file is interpreted by the system shell, and so should consist of
-a sequence of shell environment variable definitions. Normal shell
-syntax is permitted (since the file is sourced using the shell "."
-operator), including line comments starting with "#".
- </para>
- <para>
-In order of preference, the startup scripts get their values from the
-environment, from <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> and finally from the
-built-in default values. For example, for the <envar>RABBITMQ_NODENAME</envar>
-setting,
- </para>
- <para>
- <envar>RABBITMQ_NODENAME</envar>
- </para>
- <para>
-from the environment is checked first. If it is absent or equal to the
-empty string, then
- </para>
- <para>
- <envar>NODENAME</envar>
- </para>
- <para>
-from <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> is checked. If it is also absent
-or set equal to the empty string then the default value from the
-startup script is used.
- </para>
- <para>
-The variable names in /etc/rabbitmq/rabbitmq-env.conf are always equal to the
-environment variable names, with the <envar>RABBITMQ_</envar> prefix removed:
-<envar>RABBITMQ_NODE_PORT</envar> from the environment becomes <envar>NODE_PORT</envar> in the
-<filename>/etc/rabbitmq/rabbitmq-env.conf</filename> file, etc.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example-multiline">
-# I am a complete /etc/rabbitmq/rabbitmq-env.conf file.
-# Comment lines start with a hash character.
-# This is a /bin/sh script file - use ordinary envt var syntax
-NODENAME=hare
- </screen>
- <para role="example">
- This is an example of a complete
- <filename>/etc/rabbitmq/rabbitmq-env.conf</filename> file that overrides the default Erlang
- node name from "rabbit" to "hare".
- </para>
-
- </refsect1>
-
- <refsect1>
- <title>See also</title>
- <para>
- <citerefentry><refentrytitle>rabbitmq-server</refentrytitle><manvolnum>1</manvolnum></citerefentry>
- <citerefentry><refentrytitle>rabbitmqctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>
- </para>
- </refsect1>
-</refentry>
diff --git a/docs/rabbitmq-plugins.1.xml b/docs/rabbitmq-plugins.1.xml
deleted file mode 100644
index 8ecb4fc8..00000000
--- a/docs/rabbitmq-plugins.1.xml
+++ /dev/null
@@ -1,182 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<!--
- There is some extra magic in this document besides the usual DocBook semantics
- to allow us to derive manpages, HTML and usage messages from the same source
- document.
-
- Examples need to be moved to the end for man pages. To this end, <para>s and
- <screen>s with role="example" will be moved, and with role="example-prefix"
- will be removed.
-
- The usage messages are more involved. We have some magic in usage.xsl to pull
- out the command synopsis, global option and subcommand synopses. We also pull
- out <para>s with role="usage".
-
- Finally we construct lists of possible values for subcommand options, if the
- subcommand's <varlistentry> has role="usage-has-option-list". The option which
- takes the values should be marked with role="usage-option-list".
--->
-
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmq-plugins</refentrytitle>
- <manvolnum>1</manvolnum>
- <refmiscinfo class="manual">RabbitMQ Service</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmq-plugins</refname>
- <refpurpose>command line tool for managing RabbitMQ broker plugins</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>rabbitmq-plugins</command>
- <arg choice="req"><replaceable>command</replaceable></arg>
- <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
- <para>
- <command>rabbitmq-plugins</command> is a command line tool for managing
- RabbitMQ broker plugins. It allows one to enable, disable and browse
- plugins. It must be run by a user with write permissions to the RabbitMQ
- configuration directory.
- </para>
- <para>
- Some plugins depend on others to work
- correctly. <command>rabbitmq-plugins</command> traverses these
- dependencies and enables all required plugins. Plugins listed on
- the <command>rabbitmq-plugins</command> command line are marked as
- explicitly enabled; dependent plugins are marked as implicitly
- enabled. Implicitly enabled plugins are automatically disabled again
- when they are no longer required.
- </para>
- </refsect1>
-
- <refsect1>
- <title>Commands</title>
-
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>list</command> <arg choice="opt">-v</arg> <arg choice="opt">-m</arg> <arg choice="opt">-E</arg> <arg choice="opt">-e</arg> <arg choice="opt"><replaceable>pattern</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>-v</term>
- <listitem><para>Show all plugin details (verbose).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>-m</term>
- <listitem><para>Show only plugin names (minimal).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>-E</term>
- <listitem><para>Show only explicitly enabled
- plugins.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>-e</term>
- <listitem><para>Show only explicitly or implicitly
- enabled plugins.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>pattern</term>
- <listitem><para>Pattern to filter the plugin names by.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Lists all plugins, their versions, dependencies and
- descriptions. Each plugin is prefixed with a status
- indicator - [ ] to indicate that the plugin is not
- enabled, [E] to indicate that it is explicitly enabled,
- [e] to indicate that it is implicitly enabled, and [!] to
- indicate that it is enabled but missing and thus not
- operational.
- </para>
- <para>
- If the optional pattern is given, only plugins whose
- name matches <command>pattern</command> are shown.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmq-plugins list</screen>
- <para role="example">
- This command lists all plugins, on one line each.
- </para>
- <screen role="example">rabbitmq-plugins list -v </screen>
- <para role="example">
- This command lists all plugins.
- </para>
- <screen role="example">rabbitmq-plugins list -v management</screen>
- <para role="example">
- This command lists all plugins whose name contains "management".
- </para>
- <screen role="example">rabbitmq-plugins list -e rabbit</screen>
- <para role="example">
- This command lists all implicitly or explicitly enabled
- RabbitMQ plugins.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>enable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>plugin</term>
- <listitem><para>One or more plugins to enable.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Enables the specified plugins and all their
- dependencies.
- </para>
-
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmq-plugins enable rabbitmq_shovel rabbitmq_management</screen>
- <para role="example">
- This command enables the <command>shovel</command> and
- <command>management</command> plugins and all their
- dependencies.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>disable</command> <arg choice="req"><replaceable>plugin</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>plugin</term>
- <listitem><para>One or more plugins to disable.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Disables the specified plugins and all plugins that
- depend on them.
- </para>
-
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmq-plugins disable amqp_client</screen>
- <para role="example">
- This command disables <command>amqp_client</command> and
- all plugins that depend on it.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
-
- </refsect1>
-
-</refentry>
diff --git a/docs/rabbitmq-server.1.xml b/docs/rabbitmq-server.1.xml
deleted file mode 100644
index 32ae842c..00000000
--- a/docs/rabbitmq-server.1.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmq-server</refentrytitle>
- <manvolnum>1</manvolnum>
- <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmq-server</refname>
- <refpurpose>start RabbitMQ AMQP server</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>rabbitmq-server</command>
- <arg choice="opt">-detached</arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
- <para>
- RabbitMQ is an implementation of AMQP, the emerging standard for high
-performance enterprise messaging. The RabbitMQ server is a robust and
-scalable implementation of an AMQP broker.
- </para>
- <para>
-Running rabbitmq-server in the foreground displays a banner message,
-and reports on progress in the startup sequence, concluding with the
-message "broker running", indicating that the RabbitMQ broker has been
-started successfully. To shut down the server, just terminate the
-process or use rabbitmqctl(1).
- </para>
- </refsect1>
-
- <refsect1>
- <title>Environment</title>
- <variablelist>
-
- <varlistentry>
- <term>RABBITMQ_MNESIA_BASE</term>
- <listitem>
- <para>
-Defaults to <filename>/var/lib/rabbitmq/mnesia</filename>. Set this to the directory where
-Mnesia database files should be placed.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_LOG_BASE</term>
- <listitem>
- <para>
-Defaults to <filename>/var/log/rabbitmq</filename>. Log files generated by the server will
-be placed in this directory.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODENAME</term>
- <listitem>
- <para>
-Defaults to rabbit. This can be useful if you want to run more than
-one node per machine - <envar>RABBITMQ_NODENAME</envar> should be unique per
-erlang-node-and-machine combination. See the
-<ulink url="http://www.rabbitmq.com/clustering.html#single-machine">clustering on a single
-machine guide</ulink> for details.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODE_IP_ADDRESS</term>
- <listitem>
- <para>
-By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
-available. Set this if you only want to bind to one network interface
-or address family.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODE_PORT</term>
- <listitem>
- <para>
-Defaults to 5672.
- </para>
- </listitem>
- </varlistentry>
-
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Options</title>
- <variablelist>
- <varlistentry>
- <term>-detached</term>
- <listitem>
- <para>
- Start the server process in the background. Note that this will
- cause the pid not to be written to the pid file.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmq-server -detached</screen>
- <para role="example">
- Runs RabbitMQ AMQP server in the background.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>See also</title>
- <para>
- <citerefentry><refentrytitle>rabbitmq-env.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>
- <citerefentry><refentrytitle>rabbitmqctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>
- </para>
- </refsect1>
-</refentry>
diff --git a/docs/rabbitmq-service.xml b/docs/rabbitmq-service.xml
deleted file mode 100644
index a4bd1580..00000000
--- a/docs/rabbitmq-service.xml
+++ /dev/null
@@ -1,218 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmq-service.bat</refentrytitle>
- <refmiscinfo class="manual">RabbitMQ Server</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmq-service.bat</refname>
- <refpurpose>manage RabbitMQ AMQP service</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>rabbitmq-service.bat</command>
- <arg choice="opt">command</arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
- <para>
- RabbitMQ is an implementation of AMQP, the emerging standard for high
-performance enterprise messaging. The RabbitMQ server is a robust and
-scalable implementation of an AMQP broker.
- </para>
- <para>
-Running <command>rabbitmq-service</command> allows the RabbitMQ broker to be run as a
-service on NT/2000/2003/XP/Vista® environments. The RabbitMQ broker
-service can be started and stopped using the Windows® services
-applet.
- </para>
- <para>
-By default the service will run in the authentication context of the
-local system account. It is therefore necessary to synchronise Erlang
-cookies between the local system account (typically
-<filename>C:\WINDOWS\.erlang.cookie</filename>) and the account that will be used to
-run <command>rabbitmqctl</command>.
- </para>
- </refsect1>
-
- <refsect1>
- <title>Commands</title>
- <variablelist>
-
- <varlistentry>
- <term>help</term>
- <listitem>
- <para>
-Display usage information.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>install</term>
- <listitem>
- <para>
-Install the service. The service will not be started.
-Subsequent invocations will update the service parameters if
-relevant environment variables were modified or if the active
-plugins were changed.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>remove</term>
- <listitem>
- <para>
-Remove the service. If the service is running then it will
-automatically be stopped before being removed. No files will be
-deleted as a consequence and <command>rabbitmq-server</command> will remain operable.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>start</term>
- <listitem>
- <para>
-Start the service. The service must have been correctly installed
-beforehand.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>stop</term>
- <listitem>
- <para>
-Stop the service. The service must be running for this command to
-have any effect.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>disable</term>
- <listitem>
- <para>
-Disable the service. This is the equivalent of setting the startup
-type to <code>Disabled</code> using the service control panel.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>enable</term>
- <listitem>
- <para>
-Enable the service. This is the equivalent of setting the startup
-type to <code>Automatic</code> using the service control panel.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Environment</title>
- <variablelist>
-
- <varlistentry>
- <term>RABBITMQ_SERVICENAME</term>
- <listitem>
- <para>
-Defaults to RabbitMQ.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_BASE</term>
- <listitem>
- <para>
-Defaults to the application data directory of the current user.
-This is the location of log and database directories.
-
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODENAME</term>
- <listitem>
- <para>
-Defaults to rabbit. This can be useful if you want to run more than
-one node per machine - <envar>RABBITMQ_NODENAME</envar> should be unique per
-erlang-node-and-machine combination. See the
-<ulink url="http://www.rabbitmq.com/clustering.html#single-machine">clustering on a single
-machine guide</ulink> for details.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODE_IP_ADDRESS</term>
- <listitem>
- <para>
-By default RabbitMQ will bind to all interfaces, on IPv4 and IPv6 if
-available. Set this if you only want to bind to one network interface
-or address family.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_NODE_PORT</term>
- <listitem>
- <para>
-Defaults to 5672.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ERLANG_SERVICE_MANAGER_PATH</term>
- <listitem>
- <para>
-Defaults to <filename>C:\Program Files\erl5.5.5\erts-5.5.5\bin</filename>
-(or <filename>C:\Program Files (x86)\erl5.5.5\erts-5.5.5\bin</filename> for 64-bit
-environments). This is the installation location of the Erlang service
-manager.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>RABBITMQ_CONSOLE_LOG</term>
- <listitem>
- <para>
-Set this variable to <code>new</code> or <code>reuse</code> to have the console
-output from the server redirected to a file named <code>SERVICENAME</code>.debug
-in the application data directory of the user that installed the service.
-Under Vista this will be <filename>C:\Users\username\AppData\SERVICENAME</filename>.
-Under previous versions of Windows this will be
-<filename>C:\Documents and Settings\username\Application Data\SERVICENAME</filename>.
-If <code>RABBITMQ_CONSOLE_LOG</code> is set to <code>new</code> then a new file will be
-created each time the service starts. If <code>RABBITMQ_CONSOLE_LOG</code> is
-set to <code>reuse</code> then the file will be overwritten each time the
-service starts. The default behaviour when <code>RABBITMQ_CONSOLE_LOG</code> is
-not set or set to a value other than <code>new</code> or <code>reuse</code> is to discard
-the server output.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml
deleted file mode 100644
index 1d641144..00000000
--- a/docs/rabbitmqctl.1.xml
+++ /dev/null
@@ -1,1777 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.docbook.org/xml/4.5/docbookx.dtd">
-<!--
- There is some extra magic in this document besides the usual DocBook semantics
- to allow us to derive manpages, HTML and usage messages from the same source
- document.
-
- Examples need to be moved to the end for man pages. To this end, <para>s and
- <screen>s with role="example" will be moved, and with role="example-prefix"
- will be removed.
-
- The usage messages are more involved. We have some magic in usage.xsl to pull
- out the command synopsis, global option and subcommand synopses. We also pull
- out <para>s with role="usage".
-
- Finally we construct lists of possible values for subcommand options, if the
- subcommand's <varlistentry> has role="usage-has-option-list". The option which
- takes the values should be marked with role="usage-option-list".
--->
-
-<refentry lang="en">
- <refentryinfo>
- <productname>RabbitMQ Server</productname>
- <authorgroup>
- <corpauthor>The RabbitMQ Team &lt;<ulink url="mailto:info@rabbitmq.com"><email>info@rabbitmq.com</email></ulink>&gt;</corpauthor>
- </authorgroup>
- </refentryinfo>
-
- <refmeta>
- <refentrytitle>rabbitmqctl</refentrytitle>
- <manvolnum>1</manvolnum>
- <refmiscinfo class="manual">RabbitMQ Service</refmiscinfo>
- </refmeta>
-
- <refnamediv>
- <refname>rabbitmqctl</refname>
- <refpurpose>command line tool for managing a RabbitMQ broker</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>rabbitmqctl</command>
- <arg choice="opt">-n <replaceable>node</replaceable></arg>
- <arg choice="opt">-q</arg>
- <arg choice="req"><replaceable>command</replaceable></arg>
- <arg choice="opt" rep="repeat"><replaceable>command options</replaceable></arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
- <para>
- RabbitMQ is an implementation of AMQP, the emerging standard for high
- performance enterprise messaging. The RabbitMQ server is a robust and
- scalable implementation of an AMQP broker.
- </para>
- <para>
- <command>rabbitmqctl</command> is a command line tool for managing a
- RabbitMQ broker. It performs all actions by connecting to one of the
- broker's nodes.
- </para>
- <para>
- Diagnostic information is displayed if the broker was not
- running, could not be reached, or rejected the connection due to
- mismatching Erlang cookies.
- </para>
- </refsect1>
-
- <refsect1>
- <title>Options</title>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><arg choice="opt">-n <replaceable>node</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para role="usage">
- Default node is "rabbit@server", where server is the local host. On
- a host named "server.example.com", the node name of the RabbitMQ
- Erlang node will usually be rabbit@server (unless RABBITMQ_NODENAME
- has been set to some non-default value at broker startup time). The
- output of <command>hostname -s</command> is usually the correct suffix to use after the
- "@" sign. See rabbitmq-server(1) for details of configuring the
- RabbitMQ broker.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><arg choice="opt">-q</arg></cmdsynopsis></term>
- <listitem>
- <para role="usage">
- Quiet output mode is selected with the "-q" flag. Informational
- messages are suppressed when quiet mode is in effect.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Commands</title>
-
- <refsect2>
- <title>Application and Cluster Management</title>
-
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>stop</command> <arg choice="opt"><replaceable>pid_file</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Stops the Erlang node on which RabbitMQ is running. To
- restart the node follow the instructions for <citetitle>Running
- the Server</citetitle> in the <ulink url="http://www.rabbitmq.com/install.html">installation
- guide</ulink>.
- </para>
- <para>
- If a <option>pid_file</option> is specified, also waits
- for the process specified there to terminate. See the
- description of the <option>wait</option> command below
- for details on this file.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl stop</screen>
- <para role="example">
- This command instructs the RabbitMQ node to terminate.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry id="stop_app">
- <term><cmdsynopsis><command>stop_app</command></cmdsynopsis></term>
- <listitem>
- <para>
- Stops the RabbitMQ application, leaving the Erlang node
- running.
- </para>
- <para>
- This command is typically run prior to performing other
- management actions that require the RabbitMQ application
- to be stopped, e.g. <link
- linkend="reset"><command>reset</command></link>.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl stop_app</screen>
- <para role="example">
- This command instructs the RabbitMQ node to stop the
- RabbitMQ application.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>start_app</command></cmdsynopsis></term>
- <listitem>
- <para>
- Starts the RabbitMQ application.
- </para>
- <para>
- This command is typically run after performing other
- management actions that required the RabbitMQ application
- to be stopped, e.g. <link
- linkend="reset"><command>reset</command></link>.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl start_app</screen>
- <para role="example">
- This command instructs the RabbitMQ node to start the
- RabbitMQ application.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>wait</command> <arg choice="req"><replaceable>pid_file</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Wait for the RabbitMQ application to start.
- </para>
- <para>
- This command will wait for the RabbitMQ application to
- start at the node. It will wait for the pid file to
- be created, then for a process with a pid specified in the
- pid file to start, and then for the RabbitMQ application
- to start in that process. It will fail if the process
- terminates without starting the RabbitMQ application.
- </para>
- <para>
- A suitable pid file is created by
- the <command>rabbitmq-server</command> script. By
- default this is located in the Mnesia directory. Modify
- the <command>RABBITMQ_PID_FILE</command> environment
- variable to change the location.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl wait /var/run/rabbitmq/pid</screen>
- <para role="example">
- This command will return when the RabbitMQ node has
- started up.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry id="reset">
- <term><cmdsynopsis><command>reset</command></cmdsynopsis></term>
- <listitem>
- <para>
- Return a RabbitMQ node to its virgin state.
- </para>
- <para>
- Removes the node from any cluster it belongs to, removes
- all data from the management database, such as configured
- users and vhosts, and deletes all persistent
- messages.
- </para>
- <para>
- For <command>reset</command> and <command>force_reset</command> to
- succeed the RabbitMQ application must have been stopped,
- e.g. with <link linkend="stop_app"><command>stop_app</command></link>.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl reset</screen>
- <para role="example">
- This command resets the RabbitMQ node.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>force_reset</command></cmdsynopsis></term>
- <listitem>
- <para>
- Forcefully return a RabbitMQ node to its virgin state.
- </para>
- <para>
- The <command>force_reset</command> command differs from
- <command>reset</command> in that it resets the node
- unconditionally, regardless of the current management
- database state and cluster configuration. It should only
- be used as a last resort if the database or cluster
- configuration has been corrupted.
- </para>
- <para>
- For <command>reset</command> and <command>force_reset</command> to
- succeed the RabbitMQ application must have been stopped,
- e.g. with <link linkend="stop_app"><command>stop_app</command></link>.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl force_reset</screen>
- <para role="example">
- This command resets the RabbitMQ node.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>rotate_logs</command> <arg choice="req"><replaceable>suffix</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Instruct the RabbitMQ node to rotate the log files.
- </para>
- <para>
- The RabbitMQ broker appends the contents of its log
- files to files with names composed of the original name
- and the suffix, and then resumes logging to freshly
- created files at the original location. I.e. effectively
- the current log contents are moved to the end of the
- suffixed files.
- </para>
- <para>
- When the target files do not exist they are created.
- When no <option>suffix</option> is specified, the empty
- log files are simply created at the original location;
- no rotation takes place.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl rotate_logs .1</screen>
- <para role="example">
- This command instructs the RabbitMQ node to append the contents
- of the log files to files with names consisting of the original logs'
- names and ".1" suffix, e.g. rabbit@mymachine.log.1 and
- rabbit@mymachine-sasl.log.1. Finally, logging resumes to
- fresh files at the old locations.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Cluster management</title>
-
- <variablelist>
- <varlistentry id="join_cluster">
- <term><cmdsynopsis><command>join_cluster</command> <arg choice="req"><replaceable>clusternode</replaceable></arg> <arg choice="opt">--ram</arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>clusternode</term>
- <listitem><para>Node to cluster with.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><arg choice="opt">--ram</arg></cmdsynopsis></term>
- <listitem>
- <para>
- If provided, the node will join the cluster as a RAM node.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- Instruct the node to become a member of the cluster that the
- specified node is in. Before clustering, the node is reset, so be
- careful when using this command. For this command to succeed the
- RabbitMQ application must have been stopped, e.g. with <link
- linkend="stop_app"><command>stop_app</command></link>.
- </para>
- <para>
- Cluster nodes can be of two types: disc or RAM. Disc nodes
- replicate data in RAM and on disc, thus providing redundancy in
- the event of node failure and recovery from global events such
- as power failure across all nodes. RAM nodes replicate data in
- RAM only (with the exception of queue contents, which can reside
- on disc if the queue is persistent or too big to fit in memory)
- and are mainly used for scalability. RAM nodes are more
- performant only when managing resources (e.g. adding/removing
- queues, exchanges, or bindings). A cluster must always have at
- least one disc node, and usually should have more than one.
- </para>
- <para>
- The node will be a disc node by default. If you wish to
- create a RAM node, provide the <command>--ram</command> flag.
- </para>
- <para>
- After executing the <command>cluster</command> command, whenever
- the RabbitMQ application is started on the current node it will
- attempt to connect to the nodes that were in the cluster when the
- node went down.
- </para>
- <para>
- To leave a cluster, <command>reset</command> the node. You can
- also remove nodes remotely with the
- <command>forget_cluster_node</command> command.
- </para>
- <para>
- For more details see the <ulink
- url="http://www.rabbitmq.com/clustering.html">clustering
- guide</ulink>.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl join_cluster hare@elena --ram</screen>
- <para role="example">
- This command instructs the RabbitMQ node to join the cluster that
- <command>hare@elena</command> is part of, as a ram node.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>cluster_status</command></cmdsynopsis></term>
- <listitem>
- <para>
- Displays all the nodes in the cluster grouped by node type,
- together with the currently running nodes.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl cluster_status</screen>
- <para role="example">
- This command displays the nodes in the cluster.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>change_cluster_node_type</command> <arg choice="req">disc | ram</arg></cmdsynopsis>
- </term>
- <listitem>
- <para>
- Changes the type of the cluster node. The node must be stopped for
- this operation to succeed, and when turning a node into a RAM node
- the node must not be the only disc node in the cluster.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl change_cluster_node_type disc</screen>
- <para role="example">
- This command will turn a RAM node into a disc node.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>forget_cluster_node</command> <arg choice="opt">--offline</arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><arg choice="opt">--offline</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Enables node removal from an offline node. This is only
- useful in the situation where all the nodes are offline and
- the last node to go down cannot be brought online, thus
- preventing the whole cluster from starting. It should not be
- used in any other circumstances since it can lead to
- inconsistencies.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- Removes a cluster node remotely. The node that is being removed
- must be offline, while the node we are removing from must be
- online, except when using the <command>--offline</command> flag.
- </para>
- <para>
- When using the <command>--offline</command> flag the node you
- connect to will become the canonical source for cluster metadata
- (e.g. which queues exist), even if it was not before. Therefore
- you should use this command on the latest node to shut down if
- at all possible.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl -n hare@mcnulty forget_cluster_node rabbit@stringer</screen>
- <para role="example">
- This command will remove the node
- <command>rabbit@stringer</command> from the node
- <command>hare@mcnulty</command>.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>update_cluster_nodes</command> <arg choice="req">clusternode</arg></cmdsynopsis>
- </term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>clusternode</term>
- <listitem>
- <para>
- The node to consult for up to date information.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- Instructs an already clustered node to contact
- <command>clusternode</command> to cluster when waking up. This is
- different from <command>join_cluster</command> since it does not
- join any cluster - it checks that the node is already in a cluster
- with <command>clusternode</command>.
- </para>
- <para>
- The need for this command is motivated by the fact that clusters
- can change while a node is offline. Consider the situation in
- which node A and B are clustered. A goes down, C clusters with B,
- and then B leaves the cluster. When A wakes up, it'll try to
- contact B, but this will fail since B is not in the cluster
- anymore. <command>update_cluster_nodes -n A C</command> will solve
- this situation.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>sync_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
- </term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>queue</term>
- <listitem>
- <para>
- The name of the queue to synchronise.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- Instructs a mirrored queue with unsynchronised slaves to
- synchronise itself. The queue will block while
- synchronisation takes place (all publishers to and
- consumers from the queue will block). The queue must be
- mirrored for this command to succeed.
- </para>
- <para>
- Note that unsynchronised queues from which messages are
- being drained will become synchronised eventually. This
- command is primarily useful for queues which are not
- being drained.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>cancel_sync_queue</command> <arg choice="req">queue</arg></cmdsynopsis>
- </term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>queue</term>
- <listitem>
- <para>
- The name of the queue to cancel synchronisation for.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>
- Instructs a synchronising mirrored queue to stop
- synchronising itself.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>User management</title>
- <para>
- Note that <command>rabbitmqctl</command> manages the RabbitMQ
- internal user database. Users from any alternative
- authentication backend will not be visible
- to <command>rabbitmqctl</command>.
- </para>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>add_user</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>password</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user to create.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>password</term>
- <listitem><para>The password the created user will use to log in to the broker.</para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl add_user tonyg changeit</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to create a
- (non-administrative) user named <command>tonyg</command> with
- (initial) password
- <command>changeit</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>delete_user</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user to delete.</para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl delete_user tonyg</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to delete the
- user named <command>tonyg</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>change_password</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>newpassword</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user whose password is to be changed.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>newpassword</term>
- <listitem><para>The new password for the user.</para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl change_password tonyg newpass</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to change the
- password for the user named <command>tonyg</command> to
- <command>newpass</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>clear_password</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user whose password is to be cleared.</para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl clear_password tonyg</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to clear the
- password for the user named
- <command>tonyg</command>. This user now cannot log in with a password (but may be able to through e.g. SASL EXTERNAL if configured).
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>set_user_tags</command> <arg choice="req"><replaceable>username</replaceable></arg> <arg choice="req"><replaceable>tag</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user whose tags are to
- be set.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>tag</term>
- <listitem><para>Zero, one or more tags to set. Any
- existing tags will be removed.</para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl set_user_tags tonyg administrator</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to ensure the user
- named <command>tonyg</command> is an administrator. This has no
- effect when the user logs in via AMQP, but can be used to permit
- the user to manage users, virtual hosts and permissions when the
- user logs in via some other means (for example with the
- management plugin).
- </para>
- <screen role="example">rabbitmqctl set_user_tags tonyg</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to remove any
- tags from the user named <command>tonyg</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>list_users</command></cmdsynopsis></term>
- <listitem>
- <para>
- Lists users. Each result row will contain the user name
- followed by a list of the tags set for that user.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_users</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to list all
- users.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Access control</title>
- <para>
- Note that <command>rabbitmqctl</command> manages the RabbitMQ
- internal user database. Permissions for users from any
- alternative authorisation backend will not be visible
- to <command>rabbitmqctl</command>.
- </para>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>add_vhost</command> <arg choice="req"><replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhostpath</term>
- <listitem><para>The name of the virtual host entry to create.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Creates a virtual host.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl add_vhost test</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to create a new
- virtual host called <command>test</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>delete_vhost</command> <arg choice="req"><replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhostpath</term>
- <listitem><para>The name of the virtual host entry to delete.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Deletes a virtual host.
- </para>
- <para>
- Deleting a virtual host deletes all its exchanges,
- queues, bindings, user permissions, parameters and policies.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl delete_vhost test</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to delete the
- virtual host called <command>test</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry role="usage-has-option-list">
- <term><cmdsynopsis><command>list_vhosts</command> <arg choice="opt" role="usage-option-list"><replaceable>vhostinfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Lists virtual hosts.
- </para>
- <para>
- The <command>vhostinfoitem</command> parameter is used to indicate which
- virtual host information items to include in the results. The column order in the
- results will match the order of the parameters.
- <command>vhostinfoitem</command> can take any value from
- the list that follows:
- </para>
- <variablelist>
- <varlistentry>
- <term>name</term>
- <listitem><para>The name of the virtual host with non-ASCII characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>tracing</term>
- <listitem><para>Whether tracing is enabled for this virtual host.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>vhostinfoitem</command>s are specified
- then the vhost name is displayed.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_vhosts name tracing</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to list all
- virtual hosts.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>set_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>user</replaceable></arg> <arg choice="req"><replaceable>conf</replaceable></arg> <arg choice="req"><replaceable>write</replaceable></arg> <arg choice="req"><replaceable>read</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhostpath</term>
- <listitem><para>The name of the virtual host to which to grant the user access, defaulting to <command>/</command>.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>user</term>
- <listitem><para>The name of the user to grant access to the specified virtual host.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>conf</term>
- <listitem><para>A regular expression matching resource names for which the user is granted configure permissions.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>write</term>
- <listitem><para>A regular expression matching resource names for which the user is granted write permissions.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>read</term>
- <listitem><para>A regular expression matching resource names for which the user is granted read permissions.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Sets user permissions.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl set_permissions -p /myvhost tonyg "^tonyg-.*" ".*" ".*"</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to grant the
- user named <command>tonyg</command> access to the virtual host
- called <command>/myvhost</command>, with configure permissions
- on all resources whose names starts with "tonyg-", and
- write and read permissions on all resources.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>clear_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhostpath</term>
- <listitem><para>The name of the virtual host to which to deny the user access, defaulting to <command>/</command>.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user to deny access to the specified virtual host.</para></listitem>
- </varlistentry>
- </variablelist>
-                <para>
-                  Clears user permissions.
-                </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl clear_permissions -p /myvhost tonyg</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to deny the
- user named <command>tonyg</command> access to the virtual host
- called <command>/myvhost</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>list_permissions</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhostpath</term>
- <listitem><para>The name of the virtual host for which to list the users that have been granted access to it, and their permissions. Defaults to <command>/</command>.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Lists permissions in a virtual host.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_permissions -p /myvhost</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to list all
- the users which have been granted access to the virtual
- host called <command>/myvhost</command>, and the
- permissions they have for operations on resources in
- that virtual host. Note that an empty string means no
- permissions granted.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>list_user_permissions</command> <arg choice="req"><replaceable>username</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>username</term>
- <listitem><para>The name of the user for which to list the permissions.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Lists user permissions.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_user_permissions tonyg</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to list all the
- virtual hosts to which the user named <command>tonyg</command>
- has been granted access, and the permissions the user has
- for operations on resources in these virtual hosts.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Parameter Management</title>
- <para>
- Certain features of RabbitMQ (such as the federation plugin)
- are controlled by dynamic,
- cluster-wide <emphasis>parameters</emphasis>. Each parameter
- consists of a component name, a name and a value, and is
- associated with a virtual host. The component name and name are
- strings, and the value is an Erlang term. Parameters can be
- set, cleared and listed. In general you should refer to the
- documentation for the feature in question to see how to set
- parameters.
- </para>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>set_parameter</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>component_name</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg> <arg choice="req"><replaceable>value</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Sets a parameter.
- </para>
- <variablelist>
- <varlistentry>
- <term>component_name</term>
- <listitem><para>
- The name of the component for which the
- parameter is being set.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>name</term>
- <listitem><para>
- The name of the parameter being set.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>value</term>
- <listitem><para>
- The value for the parameter, as a
- JSON term. In most shells you are very likely to
- need to quote this.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl set_parameter federation local_username '"guest"'</screen>
- <para role="example">
- This command sets the parameter <command>local_username</command> for the <command>federation</command> component in the default virtual host to the JSON term <command>"guest"</command>.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>clear_parameter</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>component_name</replaceable></arg> <arg choice="req"><replaceable>key</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Clears a parameter.
- </para>
- <variablelist>
- <varlistentry>
- <term>component_name</term>
- <listitem><para>
- The name of the component for which the
- parameter is being cleared.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>name</term>
- <listitem><para>
- The name of the parameter being cleared.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl clear_parameter federation local_username</screen>
- <para role="example">
- This command clears the parameter <command>local_username</command> for the <command>federation</command> component in the default virtual host.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>list_parameters</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Lists all parameters for a virtual host.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_parameters</screen>
- <para role="example">
- This command lists all parameters in the default virtual host.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Policy Management</title>
- <para>
- Policies are used to control and modify the behaviour of queues
- and exchanges on a cluster-wide basis. Policies apply within a
- given vhost, and consist of a name, pattern, definition and an
- optional priority. Policies can be set, cleared and listed.
- </para>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>set_policy</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg> <arg choice="req"><replaceable>pattern</replaceable></arg> <arg choice="req"><replaceable>definition</replaceable></arg> <arg choice="opt"><replaceable>priority</replaceable></arg> </cmdsynopsis></term>
- <listitem>
- <para>
- Sets a policy.
- </para>
- <variablelist>
- <varlistentry>
- <term>name</term>
- <listitem><para>
- The name of the policy.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>pattern</term>
- <listitem><para>
-                  The regular expression which, when it matches a given resource, causes the policy to apply.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>definition</term>
- <listitem><para>
- The definition of the policy, as a
- JSON term. In most shells you are very likely to
- need to quote this.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>priority</term>
- <listitem><para>
- The priority of the policy as an integer, defaulting to 0. Higher numbers indicate greater precedence.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl set_policy federate-me "^amq." '{"federation-upstream-set":"all"}'</screen>
- <para role="example">
- This command sets the policy <command>federate-me</command> in the default virtual host so that built-in exchanges are federated.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>clear_policy</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="req"><replaceable>name</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Clears a policy.
- </para>
- <variablelist>
- <varlistentry>
- <term>name</term>
- <listitem><para>
- The name of the policy being cleared.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl clear_policy federate-me</screen>
- <para role="example">
- This command clears the <command>federate-me</command> policy in the default virtual host.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>list_policies</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Lists all policies for a virtual host.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl list_policies</screen>
- <para role="example">
- This command lists all policies in the default virtual host.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Server Status</title>
- <para>
- The server status queries interrogate the server and return a list of
- results with tab-delimited columns. Some queries (<command>list_queues</command>,
- <command>list_exchanges</command>, <command>list_bindings</command>, and
- <command>list_consumers</command>) accept an
- optional <command>vhost</command> parameter. This parameter, if present, must be
- specified immediately after the query.
- </para>
- <para role="usage">
- The list_queues, list_exchanges and list_bindings commands accept an
- optional virtual host parameter for which to display results. The
- default value is "/".
- </para>
-
- <variablelist>
- <varlistentry role="usage-has-option-list">
- <term><cmdsynopsis><command>list_queues</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>queueinfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Returns queue details. Queue details of the <command>/</command> virtual host
- are returned if the "-p" flag is absent. The "-p" flag can be used to
- override this default.
- </para>
- <para>
- The <command>queueinfoitem</command> parameter is used to indicate which queue
- information items to include in the results. The column order in the
- results will match the order of the parameters.
- <command>queueinfoitem</command> can take any value from the list
- that follows:
- </para>
- <variablelist>
- <varlistentry>
- <term>name</term>
- <listitem><para>The name of the queue with non-ASCII characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>durable</term>
- <listitem><para>Whether or not the queue survives server restarts.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>auto_delete</term>
- <listitem><para>Whether the queue will be deleted automatically when no longer used.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>arguments</term>
- <listitem><para>Queue arguments.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>policy</term>
- <listitem><para>Policy name applying to the queue.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>pid</term>
- <listitem><para>Id of the Erlang process associated with the queue.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>owner_pid</term>
- <listitem><para>Id of the Erlang process representing the connection
- which is the exclusive owner of the queue. Empty if the
- queue is non-exclusive.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>exclusive_consumer_pid</term>
- <listitem><para>Id of the Erlang process representing the channel of the
- exclusive consumer subscribed to this queue. Empty if
- there is no exclusive consumer.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>exclusive_consumer_tag</term>
- <listitem><para>Consumer tag of the exclusive consumer subscribed to
- this queue. Empty if there is no exclusive consumer.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages_ready</term>
- <listitem><para>Number of messages ready to be delivered to clients.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages_unacknowledged</term>
- <listitem><para>Number of messages delivered to clients but not yet acknowledged.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages</term>
- <listitem><para>Sum of ready and unacknowledged messages
- (queue depth).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>consumers</term>
- <listitem><para>Number of consumers.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>active_consumers</term>
- <listitem>
- <para>
- Number of active consumers. An active consumer is
- one which could immediately receive any messages
- sent to the queue - i.e. it is not limited by its
- prefetch count, TCP congestion, flow control, or
- because it has issued channel.flow. At least one
- of messages_ready and active_consumers must always
- be zero.
- </para>
- <para>
- Note that this value is an instantaneous snapshot
- - when consumers are restricted by their prefetch
- count they may only appear to be active for small
- fractions of a second until more messages are sent
- out.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>memory</term>
- <listitem><para>Bytes of memory consumed by the Erlang process associated with the
- queue, including stack, heap and internal structures.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>slave_pids</term>
- <listitem><para>If the queue is mirrored, this gives the IDs of the current slaves.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>synchronised_slave_pids</term>
- <listitem><para>If the queue is mirrored, this gives the IDs of
- the current slaves which are synchronised with the master -
- i.e. those which could take over from the master without
- message loss.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>status</term>
- <listitem><para>The status of the queue. Normally
- 'running', but may be "{syncing, MsgCount}" if the queue is
- synchronising.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>queueinfoitem</command>s are specified then queue name and depth are
- displayed.
- </para>
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl list_queues -p /myvhost messages consumers</screen>
- <para role="example">
- This command displays the depth and number of consumers for each
- queue of the virtual host named <command>/myvhost</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry role="usage-has-option-list">
- <term><cmdsynopsis><command>list_exchanges</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>exchangeinfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Returns exchange details. Exchange details of the <command>/</command> virtual host
- are returned if the "-p" flag is absent. The "-p" flag can be used to
- override this default.
- </para>
- <para>
- The <command>exchangeinfoitem</command> parameter is used to indicate which
- exchange information items to include in the results. The column order in the
- results will match the order of the parameters.
- <command>exchangeinfoitem</command> can take any value from the list
- that follows:
- </para>
- <variablelist>
- <varlistentry>
- <term>name</term>
- <listitem><para>The name of the exchange with non-ASCII characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>type</term>
- <listitem><para>The exchange type (such as
- [<command>direct</command>,
- <command>topic</command>, <command>headers</command>,
- <command>fanout</command>]).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>durable</term>
- <listitem><para>Whether or not the exchange survives server restarts.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>auto_delete</term>
- <listitem><para>Whether the exchange will be deleted automatically when no longer used.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>internal</term>
- <listitem><para>Whether the exchange is internal, i.e. cannot be directly published to by a client.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>arguments</term>
- <listitem><para>Exchange arguments.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>policy</term>
-            <listitem><para>Policy name applying to the exchange.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>exchangeinfoitem</command>s are specified then
- exchange name and type are displayed.
- </para>
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl list_exchanges -p /myvhost name type</screen>
- <para role="example">
- This command displays the name and type for each
- exchange of the virtual host named <command>/myvhost</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry role="usage-has-option-list">
- <term><cmdsynopsis><command>list_bindings</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg> <arg choice="opt" role="usage-option-list"><replaceable>bindinginfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Returns binding details. By default the bindings for
- the <command>/</command> virtual host are returned. The
- "-p" flag can be used to override this default.
- </para>
- <para>
- The <command>bindinginfoitem</command> parameter is used
- to indicate which binding information items to include
- in the results. The column order in the results will
- match the order of the parameters.
- <command>bindinginfoitem</command> can take any value
- from the list that follows:
- </para>
- <variablelist>
- <varlistentry>
- <term>source_name</term>
- <listitem><para>The name of the source of messages to
- which the binding is attached. With non-ASCII
- characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>source_kind</term>
- <listitem><para>The kind of the source of messages to
- which the binding is attached. Currently always
- exchange. With non-ASCII characters escaped as in
- C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>destination_name</term>
- <listitem><para>The name of the destination of
- messages to which the binding is attached. With
- non-ASCII characters escaped as in
- C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>destination_kind</term>
- <listitem><para>The kind of the destination of
- messages to which the binding is attached. With
- non-ASCII characters escaped as in
- C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>routing_key</term>
- <listitem><para>The binding's routing key, with
- non-ASCII characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>arguments</term>
- <listitem><para>The binding's arguments.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>bindinginfoitem</command>s are specified then
- all above items are displayed.
- </para>
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl list_bindings -p /myvhost exchange_name queue_name</screen>
- <para role="example">
- This command displays the exchange name and queue name
- of the bindings in the virtual host
- named <command>/myvhost</command>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry id="list_connections" role="usage-has-option-list">
- <term><cmdsynopsis><command>list_connections</command> <arg choice="opt" role="usage-option-list"><replaceable>connectioninfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Returns TCP/IP connection statistics.
- </para>
- <para>
- The <command>connectioninfoitem</command> parameter is used to indicate
- which connection information items to include in the results. The
- column order in the results will match the order of the parameters.
- <command>connectioninfoitem</command> can take any value from the list
- that follows:
- </para>
-
- <variablelist>
- <varlistentry>
- <term>pid</term>
- <listitem><para>Id of the Erlang process associated with the connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>name</term>
- <listitem><para>Readable name for the connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>port</term>
- <listitem><para>Server port.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>host</term>
- <listitem><para>Server hostname obtained via reverse
- DNS, or its IP address if reverse DNS failed or was
- not enabled.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>peer_port</term>
- <listitem><para>Peer port.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>peer_host</term>
- <listitem><para>Peer hostname obtained via reverse
- DNS, or its IP address if reverse DNS failed or was
- not enabled.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>ssl</term>
- <listitem><para>Boolean indicating whether the
- connection is secured with SSL.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>ssl_protocol</term>
- <listitem><para>SSL protocol
- (e.g. tlsv1)</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>ssl_key_exchange</term>
- <listitem><para>SSL key exchange algorithm
- (e.g. rsa)</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>ssl_cipher</term>
- <listitem><para>SSL cipher algorithm
- (e.g. aes_256_cbc)</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>ssl_hash</term>
- <listitem><para>SSL hash function
- (e.g. sha)</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>peer_cert_subject</term>
- <listitem><para>The subject of the peer's SSL
- certificate, in RFC4514 form.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>peer_cert_issuer</term>
- <listitem><para>The issuer of the peer's SSL
- certificate, in RFC4514 form.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>peer_cert_validity</term>
- <listitem><para>The period for which the peer's SSL
- certificate is valid.</para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>last_blocked_by</term>
- <listitem><para>The reason for which this connection
- was last blocked. One of 'resource' - due to a memory
- or disk alarm, 'flow' - due to internal flow control, or
- 'none' if the connection was never
- blocked.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>last_blocked_age</term>
- <listitem><para>Time, in seconds, since this
- connection was last blocked, or
- 'infinity'.</para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>state</term>
- <listitem><para>Connection state (one of [<command>starting</command>, <command>tuning</command>,
- <command>opening</command>, <command>running</command>, <command>blocking</command>, <command>blocked</command>, <command>closing</command>, <command>closed</command>]).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>channels</term>
- <listitem><para>Number of channels using the connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>protocol</term>
- <listitem><para>Version of the AMQP protocol in use (currently one of <command>{0,9,1}</command> or <command>{0,8,0}</command>). Note that if a client requests an AMQP 0-9 connection, we treat it as AMQP 0-9-1.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>auth_mechanism</term>
- <listitem><para>SASL authentication mechanism used, such as <command>PLAIN</command>.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>user</term>
- <listitem><para>Username associated with the connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>vhost</term>
- <listitem><para>Virtual host name with non-ASCII characters escaped as in C.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>timeout</term>
- <listitem><para>Connection timeout / negotiated heartbeat interval, in seconds.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>frame_max</term>
- <listitem><para>Maximum frame size (bytes).</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>client_properties</term>
- <listitem><para>Informational properties transmitted by the client
- during connection establishment.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>recv_oct</term>
- <listitem><para>Octets received.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>recv_cnt</term>
- <listitem><para>Packets received.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>send_oct</term>
-            <listitem><para>Octets sent.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>send_cnt</term>
- <listitem><para>Packets sent.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>send_pend</term>
- <listitem><para>Send queue size.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>connectioninfoitem</command>s are
- specified then user, peer host, peer port, time since
- flow control and memory block state are displayed.
- </para>
-
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl list_connections send_pend port</screen>
- <para role="example">
- This command displays the send queue size and server port for each
- connection.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry role="usage-has-option-list">
- <term><cmdsynopsis><command>list_channels</command> <arg choice="opt" role="usage-option-list"><replaceable>channelinfoitem</replaceable> ...</arg></cmdsynopsis></term>
- <listitem>
- <para>
- Returns information on all current channels, the logical
- containers executing most AMQP commands. This includes
- channels that are part of ordinary AMQP connections, and
- channels created by various plug-ins and other extensions.
- </para>
- <para>
- The <command>channelinfoitem</command> parameter is used to
- indicate which channel information items to include in the
- results. The column order in the results will match the
- order of the parameters.
- <command>channelinfoitem</command> can take any value from the list
- that follows:
- </para>
-
- <variablelist>
- <varlistentry>
- <term>pid</term>
- <listitem><para>Id of the Erlang process associated with the connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>connection</term>
- <listitem><para>Id of the Erlang process associated with the connection
- to which the channel belongs.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>name</term>
- <listitem><para>Readable name for the channel.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>number</term>
- <listitem><para>The number of the channel, which uniquely identifies it within
- a connection.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>user</term>
- <listitem><para>Username associated with the channel.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>vhost</term>
- <listitem><para>Virtual host in which the channel operates.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>transactional</term>
- <listitem><para>True if the channel is in transactional mode, false otherwise.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>confirm</term>
- <listitem><para>True if the channel is in confirm mode, false otherwise.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>consumer_count</term>
- <listitem><para>Number of logical AMQP consumers retrieving messages via
- the channel.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages_unacknowledged</term>
- <listitem><para>Number of messages delivered via this channel but not
- yet acknowledged.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages_uncommitted</term>
- <listitem><para>Number of messages received in an as yet
- uncommitted transaction.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>acks_uncommitted</term>
- <listitem><para>Number of acknowledgements received in an as yet
- uncommitted transaction.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>messages_unconfirmed</term>
- <listitem><para>Number of published messages not yet
- confirmed. On channels not in confirm mode, this
- remains 0.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>prefetch_count</term>
- <listitem><para>QoS prefetch count limit in force, 0 if unlimited.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>client_flow_blocked</term>
- <listitem><para>True if the client issued a
- <command>channel.flow{active=false}</command>
- command, blocking the server from delivering
- messages to the channel's consumers.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- If no <command>channelinfoitem</command>s are specified then pid,
- user, consumer_count, and messages_unacknowledged are assumed.
- </para>
-
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl list_channels connection messages_unacknowledged</screen>
- <para role="example">
- This command displays the connection process and count
- of unacknowledged messages for each channel.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>list_consumers</command> <arg choice="opt">-p <replaceable>vhostpath</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- List consumers, i.e. subscriptions to a queue's message
- stream. Each line printed shows, separated by tab
- characters, the name of the queue subscribed to, the id of
- the channel process via which the subscription was created
- and is managed, the consumer tag which uniquely identifies
- the subscription within a channel, and a boolean
- indicating whether acknowledgements are expected for
- messages delivered to this consumer.
- </para>
- <para>
- The output is a list of rows containing, in order, the queue name,
- channel process id, consumer tag, and a boolean indicating whether
- acknowledgements are expected from the consumer.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>status</command></cmdsynopsis></term>
- <listitem>
- <para>
- Displays broker status information such as the running
- applications on the current Erlang node, RabbitMQ and
- Erlang versions, OS name, memory and file descriptor
- statistics. (See the <command>cluster_status</command>
- command to find out which nodes are clustered and
- running.)
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl status</screen>
- <para role="example">
- This command displays information about the RabbitMQ
- broker.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>environment</command></cmdsynopsis></term>
- <listitem>
- <para>
- Display the name and value of each variable in the
- application environment.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>report</command></cmdsynopsis></term>
- <listitem>
- <para>
- Generate a server status report containing a
- concatenation of all server status information for
- support purposes. The output should be redirected to a
- file when accompanying a support request.
- </para>
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl report > server_report.txt</screen>
- <para role="example">
- This command creates a server report which may be
- attached to a support request email.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>eval</command> <arg choice="req"><replaceable>expr</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <para>
- Evaluate an arbitrary Erlang expression.
- </para>
- <para role="example-prefix">
- For example:
- </para>
- <screen role="example">rabbitmqctl eval 'node().'</screen>
- <para role="example">
- This command returns the name of the node to which rabbitmqctl has connected.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
-
- <refsect2>
- <title>Miscellaneous</title>
- <variablelist>
- <varlistentry>
- <term><cmdsynopsis><command>close_connection</command> <arg choice="req"><replaceable>connectionpid</replaceable></arg> <arg choice="req"><replaceable>explanation</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>connectionpid</term>
- <listitem><para>Id of the Erlang process associated with the connection to close.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>explanation</term>
- <listitem><para>Explanation string.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Instruct the broker to close the connection associated
- with the Erlang process id <option>connectionpid</option> (see also the
- <link linkend="list_connections"><command>list_connections</command></link>
- command), passing the <option>explanation</option> string to the
- connected client as part of the AMQP connection shutdown
- protocol.
- </para>
- <para role="example-prefix">For example:</para>
- <screen role="example">rabbitmqctl close_connection "&lt;rabbit@tanto.4262.0&gt;" "go away"</screen>
- <para role="example">
- This command instructs the RabbitMQ broker to close the
- connection associated with the Erlang process
- id <command>&lt;rabbit@tanto.4262.0&gt;</command>, passing the
- explanation <command>go away</command> to the connected client.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>trace_on</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhost</term>
- <listitem><para>The name of the virtual host for which to start tracing.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Starts tracing.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><cmdsynopsis><command>trace_off</command> <arg choice="opt">-p <replaceable>vhost</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>vhost</term>
- <listitem><para>The name of the virtual host for which to stop tracing.</para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- Stops tracing.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><cmdsynopsis><command>set_vm_memory_high_watermark</command> <arg choice="req"><replaceable>fraction</replaceable></arg></cmdsynopsis></term>
- <listitem>
- <variablelist>
- <varlistentry>
- <term>fraction</term>
- <listitem><para>
- The new memory threshold fraction at which flow
- control is triggered, as a floating point number
- greater than or equal to 0.
- </para></listitem>
- </varlistentry>
- </variablelist>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect2>
- </refsect1>
-
-</refentry>
diff --git a/docs/remove-namespaces.xsl b/docs/remove-namespaces.xsl
deleted file mode 100644
index 7f7f3c12..00000000
--- a/docs/remove-namespaces.xsl
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version='1.0'?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- xmlns:doc="http://www.rabbitmq.com/namespaces/ad-hoc/doc"
- xmlns="http://www.w3.org/1999/xhtml"
- version='1.0'>
-
-<xsl:output method="xml" />
-
- <!-- Copy every element through with local name only -->
- <xsl:template match="*">
- <xsl:element name="{local-name()}" namespace="">
- <xsl:apply-templates select="@*|node()"/>
- </xsl:element>
- </xsl:template>
-
- <!-- Copy every attribute through -->
- <xsl:template match="@*"><xsl:copy/></xsl:template>
-</xsl:stylesheet>
diff --git a/docs/usage.xsl b/docs/usage.xsl
deleted file mode 100644
index 586f8303..00000000
--- a/docs/usage.xsl
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version='1.0'?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version='1.0'>
-
-<xsl:param name="modulename"/>
-
-<xsl:output method="text"
- encoding="UTF-8"
- indent="no"/>
-<xsl:strip-space elements="*"/>
-<xsl:preserve-space elements="cmdsynopsis arg" />
-
-<xsl:template match="/">
-<!-- Pull out cmdsynopsis to show the command usage line. -->%% Generated, do not edit!
--module(<xsl:value-of select="$modulename" />).
--export([usage/0]).
-usage() -> %QUOTE%Usage:
-<xsl:value-of select="refentry/refsynopsisdiv/cmdsynopsis/command"/>
-<xsl:text> </xsl:text>
-<xsl:for-each select="refentry/refsynopsisdiv/cmdsynopsis/arg">
- <xsl:apply-templates select="." />
- <xsl:text> </xsl:text>
-</xsl:for-each>
-
-<xsl:text>&#10;</xsl:text>
-
-<!-- List options (any variable list in a section called "Options"). -->
-<xsl:for-each select=".//*[title='Options']/variablelist">
- <xsl:if test="position() = 1">&#10;Options:&#10;</xsl:if>
- <xsl:for-each select="varlistentry">
- <xsl:text> </xsl:text>
- <xsl:for-each select=".//term">
- <xsl:value-of select="."/>
- <xsl:if test="not(position() = last())">, </xsl:if>
- </xsl:for-each><xsl:text>&#10;</xsl:text>
- </xsl:for-each>
-</xsl:for-each>
-
-<!-- Any paragraphs which have been marked as role="usage" (principally for global flags). -->
-<xsl:text>&#10;</xsl:text>
-<xsl:for-each select=".//*[title='Options']//para[@role='usage']">
-<xsl:value-of select="normalize-space(.)"/><xsl:text>&#10;&#10;</xsl:text>
-</xsl:for-each>
-
-<!-- List commands (any first-level variable list in a section called "Commands"). -->
-<xsl:for-each select=".//*[title='Commands']/variablelist | .//*[title='Commands']/refsect2/variablelist">
- <xsl:if test="position() = 1">Commands:&#10;</xsl:if>
- <xsl:for-each select="varlistentry">
- <xsl:text> </xsl:text>
- <xsl:apply-templates select="term"/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
- <xsl:text>&#10;</xsl:text>
-</xsl:for-each>
-
-<xsl:apply-templates select=".//*[title='Commands']/refsect2" mode="command-usage" />
-%QUOTE%.
-</xsl:template>
-
-<!-- Option lists in command usage -->
-<xsl:template match="varlistentry[@role='usage-has-option-list']" mode="command-usage">&lt;<xsl:value-of select="term/cmdsynopsis/arg[@role='usage-option-list']/replaceable"/>&gt; must be a member of the list [<xsl:for-each select="listitem/variablelist/varlistentry"><xsl:apply-templates select="term"/><xsl:if test="not(position() = last())">, </xsl:if></xsl:for-each>].<xsl:text>&#10;&#10;</xsl:text></xsl:template>
-
-<!-- Usage paras in command usage -->
-<xsl:template match="para[@role='usage']" mode="command-usage">
-<xsl:value-of select="normalize-space(.)"/><xsl:text>&#10;&#10;</xsl:text>
-</xsl:template>
-
-<!-- Don't show anything else in command usage -->
-<xsl:template match="text()" mode="command-usage"/>
-
-<xsl:template match="arg[@choice='opt']">[<xsl:apply-templates/>]</xsl:template>
-<xsl:template match="replaceable">&lt;<xsl:value-of select="."/>&gt;</xsl:template>
-
-</xsl:stylesheet>
diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in
deleted file mode 100644
index a4582e2d..00000000
--- a/ebin/rabbit_app.in
+++ /dev/null
@@ -1,73 +0,0 @@
-{application, rabbit, %% -*- erlang -*-
- [{description, "RabbitMQ"},
- {id, "RabbitMQ"},
- {vsn, "%%VSN%%"},
- {modules, []},
- {registered, [rabbit_amqqueue_sup,
- rabbit_log,
- rabbit_node_monitor,
- rabbit_router,
- rabbit_sup,
- rabbit_tcp_client_sup,
- rabbit_direct_client_sup]},
- {applications, [kernel, stdlib, sasl, mnesia, os_mon, xmerl]},
-%% we also depend on crypto, public_key and ssl but they shouldn't be
-%% in here as we don't actually want to start it
- {mod, {rabbit, []}},
- {env, [{tcp_listeners, [5672]},
- {ssl_listeners, []},
- {ssl_options, []},
- {vm_memory_high_watermark, 0.4},
- {disk_free_limit, 1000000000}, %% 1GB
- {msg_store_index_module, rabbit_msg_store_ets_index},
- {backing_queue_module, rabbit_variable_queue},
- %% 0 ("no limit") would make a better default, but that
- %% breaks the QPid Java client
- {frame_max, 131072},
- {heartbeat, 600},
- {msg_store_file_size_limit, 16777216},
- {queue_index_max_journal_entries, 65536},
- {default_user, <<"guest">>},
- {default_pass, <<"guest">>},
- {default_user_tags, [administrator]},
- {default_vhost, <<"/">>},
- {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
- {cluster_nodes, {[], disc}},
- {server_properties, []},
- {collect_statistics, none},
- {collect_statistics_interval, 5000},
- {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
- {auth_backends, [rabbit_auth_backend_internal]},
- {delegate_count, 16},
- {trace_vhosts, []},
- {log_levels, [{connection, info}]},
- {ssl_cert_login_from, distinguished_name},
- {reverse_dns_lookups, false},
- {cluster_partition_handling, ignore},
- {tcp_listen_options, [binary,
- {packet, raw},
- {reuseaddr, true},
- {backlog, 128},
- {nodelay, true},
- {linger, {true, 0}},
- {exit_on_close, false}]},
- {hipe_compile, false},
- %% see bug 24513 for how this list was created
- {hipe_modules,
- [rabbit_reader, rabbit_channel, gen_server2, rabbit_exchange,
- rabbit_command_assembler, rabbit_framing_amqp_0_9_1, rabbit_basic,
- rabbit_event, lists, queue, priority_queue, rabbit_router,
- rabbit_trace, rabbit_misc, rabbit_binary_parser,
- rabbit_exchange_type_direct, rabbit_guid, rabbit_net,
- rabbit_amqqueue_process, rabbit_variable_queue,
- rabbit_binary_generator, rabbit_writer, delegate, gb_sets, lqueue,
- sets, orddict, rabbit_amqqueue, rabbit_limiter, gb_trees,
- rabbit_queue_index, rabbit_exchange_decorator, gen, dict, ordsets,
- file_handle_cache, rabbit_msg_store, array,
- rabbit_msg_store_ets_index, rabbit_msg_file,
- rabbit_exchange_type_fanout, rabbit_exchange_type_topic, mnesia,
- mnesia_lib, rpc, mnesia_tm, qlc, sofs, proplists, credit_flow,
- pmon, ssl_connection, tls_connection, ssl_record, tls_record,
- gen_fsm, ssl]},
- {ssl_apps, [asn1, crypto, public_key, ssl]}
- ]}]}.
diff --git a/generate_app b/generate_app
deleted file mode 100644
index fb0eb1ea..00000000
--- a/generate_app
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-main([InFile, OutFile | SrcDirs]) ->
- Modules = [list_to_atom(filename:basename(F, ".erl")) ||
- SrcDir <- SrcDirs,
- F <- filelib:wildcard("*.erl", SrcDir)],
- {ok, [{application, Application, Properties}]} = file:consult(InFile),
- NewProperties =
- case proplists:get_value(modules, Properties) of
- [] -> lists:keyreplace(modules, 1, Properties, {modules, Modules});
- _ -> Properties
- end,
- file:write_file(
- OutFile,
- io_lib:format("~p.~n", [{application, Application, NewProperties}])).
diff --git a/generate_deps b/generate_deps
deleted file mode 100644
index ddfca816..00000000
--- a/generate_deps
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
--mode(compile).
-
-%% We expect the list of Erlang source and header files to arrive on
-%% stdin, with the entries colon-separated.
-main([TargetFile, EbinDir]) ->
- ErlsAndHrls = [ string:strip(S,left) ||
- S <- string:tokens(io:get_line(""), ":\n")],
- ErlFiles = [F || F <- ErlsAndHrls, lists:suffix(".erl", F)],
- Modules = sets:from_list(
- [list_to_atom(filename:basename(FileName, ".erl")) ||
- FileName <- ErlFiles]),
- HrlFiles = [F || F <- ErlsAndHrls, lists:suffix(".hrl", F)],
- IncludeDirs = lists:usort([filename:dirname(Path) || Path <- HrlFiles]),
- Headers = sets:from_list(HrlFiles),
- Deps = lists:foldl(
- fun (Path, Deps1) ->
- dict:store(Path, detect_deps(IncludeDirs, EbinDir,
- Modules, Headers, Path),
- Deps1)
- end, dict:new(), ErlFiles),
- {ok, Hdl} = file:open(TargetFile, [write, delayed_write]),
- dict:fold(
- fun (_Path, [], ok) ->
- ok;
- (Path, Dep, ok) ->
- Module = filename:basename(Path, ".erl"),
- ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ",
- Path]),
- ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end,
- ok, Dep),
- file:write(Hdl, ["\n"])
- end, ok, Deps),
- ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]),
- ok = file:sync(Hdl),
- ok = file:close(Hdl).
-
-detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path) ->
- {ok, Forms} = epp:parse_file(Path, IncludeDirs, [{use_specs, true}]),
- lists:foldl(
- fun ({attribute, _LineNumber, Attribute, Behaviour}, Deps)
- when Attribute =:= behaviour orelse Attribute =:= behavior ->
- case sets:is_element(Behaviour, Modules) of
- true -> sets:add_element(
- [EbinDir, "/", atom_to_list(Behaviour), ".beam"],
- Deps);
- false -> Deps
- end;
- ({attribute, _LineNumber, file, {FileName, _LineNumber1}}, Deps) ->
- case sets:is_element(FileName, Headers) of
- true -> sets:add_element(FileName, Deps);
- false -> Deps
- end;
- (_Form, Deps) ->
- Deps
- end, sets:new(), Forms).
diff --git a/include/gm_specs.hrl b/include/gm_specs.hrl
deleted file mode 100644
index dc51f50e..00000000
--- a/include/gm_specs.hrl
+++ /dev/null
@@ -1,29 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--ifdef(use_specs).
-
--type(callback_result() :: 'ok' | {'stop', any()} | {'become', atom(), args()}).
--type(args() :: any()).
--type(members() :: [pid()]).
-
--spec(joined/2 :: (args(), members()) -> callback_result()).
--spec(members_changed/4 :: (args(), members(),
- members(), members()) -> callback_result()).
--spec(handle_msg/3 :: (args(), pid(), any()) -> callback_result()).
--spec(terminate/2 :: (args(), term()) -> any()).
-
--endif.
diff --git a/include/rabbit.hrl b/include/rabbit.hrl
deleted file mode 100644
index 6df44bea..00000000
--- a/include/rabbit.hrl
+++ /dev/null
@@ -1,110 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--record(user, {username,
- tags,
- auth_backend, %% Module this user came from
- impl %% Scratch space for that module
- }).
-
--record(internal_user, {username, password_hash, tags}).
--record(permission, {configure, write, read}).
--record(user_vhost, {username, virtual_host}).
--record(user_permission, {user_vhost, permission}).
-
--record(vhost, {virtual_host, dummy}).
-
--record(content,
- {class_id,
- properties, %% either 'none', or a decoded record/tuple
- properties_bin, %% either 'none', or an encoded properties binary
- %% Note: at most one of properties and properties_bin can be
- %% 'none' at once.
- protocol, %% The protocol under which properties_bin was encoded
- payload_fragments_rev %% list of binaries, in reverse order (!)
- }).
-
--record(resource, {virtual_host, kind, name}).
-
--record(exchange, {name, type, durable, auto_delete, internal, arguments,
- scratches, policy, decorators}).
--record(exchange_serial, {name, next}).
-
--record(amqqueue, {name, durable, auto_delete, exclusive_owner = none,
- arguments, pid, slave_pids, sync_slave_pids, policy,
- gm_pids}).
-
-%% mnesia doesn't like unary records, so we add a dummy 'value' field
--record(route, {binding, value = const}).
--record(reverse_route, {reverse_binding, value = const}).
-
--record(binding, {source, key, destination, args = []}).
--record(reverse_binding, {destination, key, source, args = []}).
-
--record(topic_trie_node, {trie_node, edge_count, binding_count}).
--record(topic_trie_edge, {trie_edge, node_id}).
--record(topic_trie_binding, {trie_binding, value = const}).
-
--record(trie_node, {exchange_name, node_id}).
--record(trie_edge, {exchange_name, node_id, word}).
--record(trie_binding, {exchange_name, node_id, destination}).
-
--record(listener, {node, protocol, host, ip_address, port}).
-
--record(runtime_parameters, {key, value}).
-
--record(basic_message, {exchange_name, routing_keys = [], content, id,
- is_persistent}).
-
--record(ssl_socket, {tcp, ssl}).
--record(delivery, {mandatory, sender, message, msg_seq_no}).
--record(amqp_error, {name, explanation = "", method = none}).
-
--record(event, {type, props, timestamp}).
-
--record(message_properties, {expiry, needs_confirming = false}).
-
--record(plugin, {name, %% atom()
- version, %% string()
- description, %% string()
- type, %% 'ez' or 'dir'
- dependencies, %% [{atom(), string()}]
- location}). %% string()
-
-%%----------------------------------------------------------------------------
-
--define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2013 GoPivotal, Inc.").
--define(INFORMATION_MESSAGE, "Licensed under the MPL. See http://www.rabbitmq.com/").
--define(ERTS_MINIMUM, "5.6.3").
-
-%% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1
-%% - 1 byte of frame type
-%% - 2 bytes of channel number
-%% - 4 bytes of frame payload length
-%% - 1 byte of payload trailer FRAME_END byte
-%% See rabbit_binary_generator:check_empty_frame_size/0, an assertion
-%% called at startup.
--define(EMPTY_FRAME_SIZE, 8).
-
--define(MAX_WAIT, 16#ffffffff).
-
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
--define(CREDIT_DISC_BOUND, {2000, 500}).
-
--define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
--define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
--define(DELETED_HEADER, <<"BCC">>).
diff --git a/include/rabbit_msg_store.hrl b/include/rabbit_msg_store.hrl
deleted file mode 100644
index da4fd839..00000000
--- a/include/rabbit_msg_store.hrl
+++ /dev/null
@@ -1,25 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--include("rabbit.hrl").
-
--ifdef(use_specs).
-
--type(msg() :: any()).
-
--endif.
-
--record(msg_location, {msg_id, ref_count, file, offset, total_size}).
diff --git a/packaging/RPMS/Fedora/Makefile b/packaging/RPMS/Fedora/Makefile
deleted file mode 100644
index 4f5f1327..00000000
--- a/packaging/RPMS/Fedora/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-TARBALL_DIR=../../../dist
-TARBALL=$(notdir $(wildcard $(TARBALL_DIR)/rabbitmq-server-[0-9.]*.tar.gz))
-COMMON_DIR=../../common
-VERSION=$(shell echo $(TARBALL) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
-
-TOP_DIR=$(shell pwd)
-#Under debian we do not want to check build dependencies, since that
-#only checks build-dependencies using rpms, not debs
-DEFINES=--define '_topdir $(TOP_DIR)' --define '_tmppath $(TOP_DIR)/tmp' --define '_sysconfdir /etc' --define '_localstatedir /var'
-
-ifndef RPM_OS
-RPM_OS=fedora
-endif
-
-ifeq "$(RPM_OS)" "suse"
-FUNCTION_LIBRARY=
-REQUIRES=/sbin/chkconfig /sbin/service
-OS_DEFINES=--define '_initrddir /etc/init.d' --define 'dist .suse'
-SPEC_DEFINES=--define 'group_tag Productivity/Networking/Other'
-START_PROG=startproc
-else
-FUNCTION_LIBRARY=\# Source function library.\n. /etc/init.d/functions
-REQUIRES=chkconfig initscripts
-OS_DEFINES=--define '_initrddir /etc/rc.d/init.d'
-SPEC_DEFINES=--define 'group_tag Development/Libraries'
-START_PROG=daemon
-endif
-
-rpms: clean server
-
-prepare:
- mkdir -p BUILD SOURCES SPECS SRPMS RPMS tmp
- cp $(TARBALL_DIR)/$(TARBALL) SOURCES
- cp rabbitmq-server.spec SPECS
- sed -i 's|%%VERSION%%|$(VERSION)|;s|%%REQUIRES%%|$(REQUIRES)|' \
- SPECS/rabbitmq-server.spec
-
- cp ${COMMON_DIR}/* SOURCES/
- cp rabbitmq-server.init SOURCES/rabbitmq-server.init
- sed -i \
- -e 's|^START_PROG=.*$$|START_PROG="$(START_PROG)"|' \
- -e 's|^@FUNCTION_LIBRARY@|$(FUNCTION_LIBRARY)|' \
- SOURCES/rabbitmq-server.init
-ifeq "$(RPM_OS)" "fedora"
-# Fedora says that only vital services should have Default-Start
- sed -i -e '/^# Default-Start:/d;/^# Default-Stop:/d' \
- SOURCES/rabbitmq-server.init
-endif
- sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
- -e 's|@STDOUT_STDERR_REDIRECTION@||' \
- SOURCES/rabbitmq-script-wrapper
- cp rabbitmq-server.logrotate SOURCES/rabbitmq-server.logrotate
-
-server: prepare
- rpmbuild -ba --nodeps SPECS/rabbitmq-server.spec $(DEFINES) $(OS_DEFINES) $(SPEC_DEFINES)
-
-clean:
- rm -rf SOURCES SPECS RPMS SRPMS BUILD tmp
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.init b/packaging/RPMS/Fedora/rabbitmq-server.init
deleted file mode 100644
index 3e48147b..00000000
--- a/packaging/RPMS/Fedora/rabbitmq-server.init
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/bin/sh
-#
-# rabbitmq-server RabbitMQ broker
-#
-# chkconfig: - 80 05
-# description: Enable AMQP service provided by RabbitMQ
-#
-
-### BEGIN INIT INFO
-# Provides: rabbitmq-server
-# Required-Start: $remote_fs $network
-# Required-Stop: $remote_fs $network
-# Default-Start: 3 5
-# Default-Stop: 0 1 2 6
-# Description: RabbitMQ broker
-# Short-Description: Enable AMQP service provided by RabbitMQ broker
-### END INIT INFO
-
-@FUNCTION_LIBRARY@
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-NAME=rabbitmq-server
-DAEMON=/usr/sbin/${NAME}
-CONTROL=/usr/sbin/rabbitmqctl
-DESC=rabbitmq-server
-USER=rabbitmq
-ROTATE_SUFFIX=
-INIT_LOG_DIR=/var/log/rabbitmq
-PID_FILE=/var/run/rabbitmq/pid
-
-START_PROG= # Set when building package
-LOCK_FILE=/var/lock/subsys/$NAME
-
-test -x $DAEMON || exit 0
-test -x $CONTROL || exit 0
-
-RETVAL=0
-set -e
-
-[ -f /etc/default/${NAME} ] && . /etc/default/${NAME}
-
-ensure_pid_dir () {
- PID_DIR=`dirname ${PID_FILE}`
- if [ ! -d ${PID_DIR} ] ; then
- mkdir -p ${PID_DIR}
- chown -R ${USER}:${USER} ${PID_DIR}
- chmod 755 ${PID_DIR}
- fi
-}
-
-remove_pid () {
- rm -f ${PID_FILE}
- rmdir `dirname ${PID_FILE}` || :
-}
-
-start_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL = 0 ] ; then
- echo RabbitMQ is currently running
- else
- RETVAL=0
- ensure_pid_dir
- set +e
- RABBITMQ_PID_FILE=$PID_FILE $START_PROG $DAEMON \
- > "${INIT_LOG_DIR}/startup_log" \
- 2> "${INIT_LOG_DIR}/startup_err" \
- 0<&- &
- $CONTROL wait $PID_FILE >/dev/null 2>&1
- RETVAL=$?
- set -e
- case "$RETVAL" in
- 0)
- echo SUCCESS
- if [ -n "$LOCK_FILE" ] ; then
- touch $LOCK_FILE
- fi
- ;;
- *)
- remove_pid
- echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\}
- RETVAL=1
- ;;
- esac
- fi
-}
-
-stop_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL = 0 ] ; then
- set +e
- $CONTROL stop ${PID_FILE} > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err
- RETVAL=$?
- set -e
- if [ $RETVAL = 0 ] ; then
- remove_pid
- if [ -n "$LOCK_FILE" ] ; then
- rm -f $LOCK_FILE
- fi
- else
- echo FAILED - check ${INIT_LOG_DIR}/shutdown_log, _err
- fi
- else
- echo RabbitMQ is not running
- RETVAL=0
- fi
-}
-
-status_rabbitmq() {
- set +e
- if [ "$1" != "quiet" ] ; then
- $CONTROL status 2>&1
- else
- $CONTROL status > /dev/null 2>&1
- fi
- if [ $? != 0 ] ; then
- RETVAL=3
- fi
- set -e
-}
-
-rotate_logs_rabbitmq() {
- set +e
- $CONTROL rotate_logs ${ROTATE_SUFFIX}
- if [ $? != 0 ] ; then
- RETVAL=1
- fi
- set -e
-}
-
-restart_running_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL = 0 ] ; then
- restart_rabbitmq
- else
- echo RabbitMQ is not runnning
- RETVAL=0
- fi
-}
-
-restart_rabbitmq() {
- stop_rabbitmq
- start_rabbitmq
-}
-
-case "$1" in
- start)
- echo -n "Starting $DESC: "
- start_rabbitmq
- echo "$NAME."
- ;;
- stop)
- echo -n "Stopping $DESC: "
- stop_rabbitmq
- echo "$NAME."
- ;;
- status)
- status_rabbitmq
- ;;
- rotate-logs)
- echo -n "Rotating log files for $DESC: "
- rotate_logs_rabbitmq
- ;;
- force-reload|reload|restart)
- echo -n "Restarting $DESC: "
- restart_rabbitmq
- echo "$NAME."
- ;;
- try-restart)
- echo -n "Restarting $DESC: "
- restart_running_rabbitmq
- echo "$NAME."
- ;;
- *)
- echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2
- RETVAL=1
- ;;
-esac
-
-exit $RETVAL
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.logrotate b/packaging/RPMS/Fedora/rabbitmq-server.logrotate
deleted file mode 100644
index 6b657614..00000000
--- a/packaging/RPMS/Fedora/rabbitmq-server.logrotate
+++ /dev/null
@@ -1,12 +0,0 @@
-/var/log/rabbitmq/*.log {
- weekly
- missingok
- rotate 20
- compress
- delaycompress
- notifempty
- sharedscripts
- postrotate
- /sbin/service rabbitmq-server rotate-logs > /dev/null
- endscript
-}
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec
deleted file mode 100644
index f2195d84..00000000
--- a/packaging/RPMS/Fedora/rabbitmq-server.spec
+++ /dev/null
@@ -1,244 +0,0 @@
-%define debug_package %{nil}
-
-Name: rabbitmq-server
-Version: %%VERSION%%
-Release: 1%{?dist}
-License: MPLv1.1 and MIT and ASL 2.0 and BSD
-Group: %{group_tag}
-Source: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{version}.tar.gz
-Source1: rabbitmq-server.init
-Source2: rabbitmq-script-wrapper
-Source3: rabbitmq-server.logrotate
-Source4: rabbitmq-server.ocf
-URL: http://www.rabbitmq.com/
-BuildArch: noarch
-BuildRequires: erlang >= R12B-3, python-simplejson, xmlto, libxslt
-Requires: erlang >= R12B-3, logrotate
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-%{_arch}-root
-Summary: The RabbitMQ server
-Requires(post): %%REQUIRES%%
-Requires(pre): %%REQUIRES%%
-
-%description
-RabbitMQ is an implementation of AMQP, the emerging standard for high
-performance enterprise messaging. The RabbitMQ server is a robust and
-scalable implementation of an AMQP broker.
-
-# We want to install into /usr/lib, even on 64-bit platforms
-%define _rabbit_libdir %{_exec_prefix}/lib/rabbitmq
-%define _rabbit_erllibdir %{_rabbit_libdir}/lib/rabbitmq_server-%{version}
-%define _rabbit_wrapper %{_builddir}/`basename %{S:2}`
-%define _rabbit_server_ocf %{_builddir}/`basename %{S:4}`
-%define _plugins_state_dir %{_localstatedir}/lib/rabbitmq/plugins
-
-
-%define _maindir %{buildroot}%{_rabbit_erllibdir}
-
-
-%prep
-%setup -q
-
-%build
-cp %{S:2} %{_rabbit_wrapper}
-cp %{S:4} %{_rabbit_server_ocf}
-make %{?_smp_mflags}
-
-%install
-rm -rf %{buildroot}
-
-make install TARGET_DIR=%{_maindir} \
- SBIN_DIR=%{buildroot}%{_rabbit_libdir}/bin \
- MAN_DIR=%{buildroot}%{_mandir}
-
-mkdir -p %{buildroot}%{_localstatedir}/lib/rabbitmq/mnesia
-mkdir -p %{buildroot}%{_localstatedir}/log/rabbitmq
-
-#Copy all necessary lib files etc.
-install -p -D -m 0755 %{S:1} %{buildroot}%{_initrddir}/rabbitmq-server
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmqctl
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-server
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-plugins
-install -p -D -m 0755 %{_rabbit_server_ocf} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server
-
-install -p -D -m 0644 %{S:3} %{buildroot}%{_sysconfdir}/logrotate.d/rabbitmq-server
-
-mkdir -p %{buildroot}%{_sysconfdir}/rabbitmq
-
-rm %{_maindir}/LICENSE %{_maindir}/LICENSE-MPL-RabbitMQ %{_maindir}/INSTALL
-
-#Build the list of files
-echo '%defattr(-,root,root, -)' >%{_builddir}/%{name}.files
-find %{buildroot} -path %{buildroot}%{_sysconfdir} -prune -o '!' -type d -printf "/%%P\n" >>%{_builddir}/%{name}.files
-
-%pre
-
-if [ $1 -gt 1 ]; then
- # Upgrade - stop previous instance of rabbitmq-server init.d script
- /sbin/service rabbitmq-server stop
-fi
-
-# create rabbitmq group
-if ! getent group rabbitmq >/dev/null; then
- groupadd -r rabbitmq
-fi
-
-# create rabbitmq user
-if ! getent passwd rabbitmq >/dev/null; then
- useradd -r -g rabbitmq -d %{_localstatedir}/lib/rabbitmq rabbitmq \
- -c "RabbitMQ messaging server"
-fi
-
-%post
-/sbin/chkconfig --add %{name}
-if [ -f %{_sysconfdir}/rabbitmq/rabbitmq.conf ] && [ ! -f %{_sysconfdir}/rabbitmq/rabbitmq-env.conf ]; then
- mv %{_sysconfdir}/rabbitmq/rabbitmq.conf %{_sysconfdir}/rabbitmq/rabbitmq-env.conf
-fi
-
-%preun
-if [ $1 = 0 ]; then
- #Complete uninstall
- /sbin/service rabbitmq-server stop
- /sbin/chkconfig --del rabbitmq-server
-
- # We do not remove /var/log and /var/lib directories
- # Leave rabbitmq user and group
-fi
-
-# Clean out plugin activation state, both on uninstall and upgrade
-rm -rf %{_plugins_state_dir}
-for ext in rel script boot ; do
- rm -f %{_rabbit_erllibdir}/ebin/rabbit.$ext
-done
-
-%files -f ../%{name}.files
-%defattr(-,root,root,-)
-%attr(0755, rabbitmq, rabbitmq) %dir %{_localstatedir}/lib/rabbitmq
-%attr(0755, rabbitmq, rabbitmq) %dir %{_localstatedir}/log/rabbitmq
-%dir %{_sysconfdir}/rabbitmq
-%{_initrddir}/rabbitmq-server
-%config(noreplace) %{_sysconfdir}/logrotate.d/rabbitmq-server
-%doc LICENSE*
-
-%clean
-rm -rf %{buildroot}
-
-%changelog
-* Thu Aug 15 2013 simon@rabbitmq.com 3.1.5-1
-- New Upstream Release
-
-* Tue Jun 25 2013 tim@rabbitmq.com 3.1.3-1
-- New Upstream Release
-
-* Mon Jun 24 2013 tim@rabbitmq.com 3.1.2-1
-- New Upstream Release
-
-* Mon May 20 2013 tim@rabbitmq.com 3.1.1-1
-- Test release
-
-* Wed May 1 2013 simon@rabbitmq.com 3.1.0-1
-- New Upstream Release
-
-* Tue Dec 11 2012 simon@rabbitmq.com 3.0.1-1
-- New Upstream Release
-
-* Fri Nov 16 2012 simon@rabbitmq.com 3.0.0-1
-- New Upstream Release
-
-* Fri Dec 16 2011 steve@rabbitmq.com 2.7.1-1
-- New Upstream Release
-
-* Tue Nov 8 2011 steve@rabbitmq.com 2.7.0-1
-- New Upstream Release
-
-* Fri Sep 9 2011 tim@rabbitmq.com 2.6.1-1
-- New Upstream Release
-
-* Fri Aug 26 2011 tim@rabbitmq.com 2.6.0-1
-- New Upstream Release
-
-* Mon Jun 27 2011 simon@rabbitmq.com 2.5.1-1
-- New Upstream Release
-
-* Thu Jun 9 2011 jerryk@vmware.com 2.5.0-1
-- New Upstream Release
-
-* Thu Apr 7 2011 Alexandru Scvortov <alexandru@rabbitmq.com> 2.4.1-1
-- New Upstream Release
-
-* Tue Mar 22 2011 Alexandru Scvortov <alexandru@rabbitmq.com> 2.4.0-1
-- New Upstream Release
-
-* Thu Feb 3 2011 simon@rabbitmq.com 2.3.1-1
-- New Upstream Release
-
-* Tue Feb 1 2011 simon@rabbitmq.com 2.3.0-1
-- New Upstream Release
-
-* Mon Nov 29 2010 rob@rabbitmq.com 2.2.0-1
-- New Upstream Release
-
-* Tue Oct 19 2010 vlad@rabbitmq.com 2.1.1-1
-- New Upstream Release
-
-* Tue Sep 14 2010 marek@rabbitmq.com 2.1.0-1
-- New Upstream Release
-
-* Mon Aug 23 2010 mikeb@rabbitmq.com 2.0.0-1
-- New Upstream Release
-
-* Wed Jul 14 2010 Emile Joubert <emile@rabbitmq.com> 1.8.1-1
-- New Upstream Release
-
-* Tue Jun 15 2010 Matthew Sackman <matthew@rabbitmq.com> 1.8.0-1
-- New Upstream Release
-
-* Mon Feb 15 2010 Matthew Sackman <matthew@lshift.net> 1.7.2-1
-- New Upstream Release
-
-* Fri Jan 22 2010 Matthew Sackman <matthew@lshift.net> 1.7.1-1
-- New Upstream Release
-
-* Mon Oct 5 2009 David Wragg <dpw@lshift.net> 1.7.0-1
-- New upstream release
-
-* Wed Jun 17 2009 Matthias Radestock <matthias@lshift.net> 1.6.0-1
-- New upstream release
-
-* Tue May 19 2009 Matthias Radestock <matthias@lshift.net> 1.5.5-1
-- Maintenance release for the 1.5.x series
-
-* Mon Apr 6 2009 Matthias Radestock <matthias@lshift.net> 1.5.4-1
-- Maintenance release for the 1.5.x series
-
-* Tue Feb 24 2009 Tony Garnock-Jones <tonyg@lshift.net> 1.5.3-1
-- Maintenance release for the 1.5.x series
-
-* Mon Feb 23 2009 Tony Garnock-Jones <tonyg@lshift.net> 1.5.2-1
-- Maintenance release for the 1.5.x series
-
-* Mon Jan 19 2009 Ben Hood <0x6e6562@gmail.com> 1.5.1-1
-- Maintenance release for the 1.5.x series
-
-* Wed Dec 17 2008 Matthias Radestock <matthias@lshift.net> 1.5.0-1
-- New upstream release
-
-* Thu Jul 24 2008 Tony Garnock-Jones <tonyg@lshift.net> 1.4.0-1
-- New upstream release
-
-* Mon Mar 3 2008 Adrien Pierard <adrian@lshift.net> 1.3.0-1
-- New upstream release
-
-* Wed Sep 26 2007 Simon MacMullen <simon@lshift.net> 1.2.0-1
-- New upstream release
-
-* Wed Aug 29 2007 Simon MacMullen <simon@lshift.net> 1.1.1-1
-- New upstream release
-
-* Mon Jul 30 2007 Simon MacMullen <simon@lshift.net> 1.1.0-1.alpha
-- New upstream release
-
-* Tue Jun 12 2007 Hubert Plociniczak <hubert@lshift.net> 1.0.0-1.20070607
-- Building from source tarball, added starting script, stopping
-
-* Mon May 21 2007 Hubert Plociniczak <hubert@lshift.net> 1.0.0-1.alpha
-- Initial build of server library of RabbitMQ package
diff --git a/packaging/common/LICENSE.head b/packaging/common/LICENSE.head
deleted file mode 100644
index 2b5a17ee..00000000
--- a/packaging/common/LICENSE.head
+++ /dev/null
@@ -1,5 +0,0 @@
-This package, the RabbitMQ server is licensed under the MPL.
-
-If you have any questions regarding licensing, please contact us at
-info@rabbitmq.com.
-
diff --git a/packaging/common/LICENSE.tail b/packaging/common/LICENSE.tail
deleted file mode 100644
index 2dbaca0a..00000000
--- a/packaging/common/LICENSE.tail
+++ /dev/null
@@ -1,516 +0,0 @@
-
-The MIT license is as follows:
-
- "Permission is hereby granted, free of charge, to any person
- obtaining a copy of this file (the Software), to deal in the
- Software without restriction, including without limitation the
- rights to use, copy, modify, merge, publish, distribute,
- sublicense, and/or sell copies of the Software, and to permit
- persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE."
-
-
-The BSD 2-Clause license is as follows:
-
- "Redistribution and use in source and binary forms, with or
- without modification, are permitted provided that the
- following conditions are met:
-
- 1. Redistributions of source code must retain the above
- copyright notice, this list of conditions and the following
- disclaimer.
-
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials
- provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
-
-
-The rest of this package is licensed under the Mozilla Public License 1.1
-Authors and Copyright are as described below:
-
- The Initial Developer of the Original Code is GoPivotal, Inc.
- Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-
-
- MOZILLA PUBLIC LICENSE
- Version 1.1
-
- ---------------
-
-1. Definitions.
-
- 1.0.1. "Commercial Use" means distribution or otherwise making the
- Covered Code available to a third party.
-
- 1.1. "Contributor" means each entity that creates or contributes to
- the creation of Modifications.
-
- 1.2. "Contributor Version" means the combination of the Original
- Code, prior Modifications used by a Contributor, and the Modifications
- made by that particular Contributor.
-
- 1.3. "Covered Code" means the Original Code or Modifications or the
- combination of the Original Code and Modifications, in each case
- including portions thereof.
-
- 1.4. "Electronic Distribution Mechanism" means a mechanism generally
- accepted in the software development community for the electronic
- transfer of data.
-
- 1.5. "Executable" means Covered Code in any form other than Source
- Code.
-
- 1.6. "Initial Developer" means the individual or entity identified
- as the Initial Developer in the Source Code notice required by Exhibit
- A.
-
- 1.7. "Larger Work" means a work which combines Covered Code or
- portions thereof with code not governed by the terms of this License.
-
- 1.8. "License" means this document.
-
- 1.8.1. "Licensable" means having the right to grant, to the maximum
- extent possible, whether at the time of the initial grant or
- subsequently acquired, any and all of the rights conveyed herein.
-
- 1.9. "Modifications" means any addition to or deletion from the
- substance or structure of either the Original Code or any previous
- Modifications. When Covered Code is released as a series of files, a
- Modification is:
- A. Any addition to or deletion from the contents of a file
- containing Original Code or previous Modifications.
-
- B. Any new file that contains any part of the Original Code or
- previous Modifications.
-
- 1.10. "Original Code" means Source Code of computer software code
- which is described in the Source Code notice required by Exhibit A as
- Original Code, and which, at the time of its release under this
- License is not already Covered Code governed by this License.
-
- 1.10.1. "Patent Claims" means any patent claim(s), now owned or
- hereafter acquired, including without limitation, method, process,
- and apparatus claims, in any patent Licensable by grantor.
-
- 1.11. "Source Code" means the preferred form of the Covered Code for
- making modifications to it, including all modules it contains, plus
- any associated interface definition files, scripts used to control
- compilation and installation of an Executable, or source code
- differential comparisons against either the Original Code or another
- well known, available Covered Code of the Contributor's choice. The
- Source Code can be in a compressed or archival form, provided the
- appropriate decompression or de-archiving software is widely available
- for no charge.
-
- 1.12. "You" (or "Your") means an individual or a legal entity
- exercising rights under, and complying with all of the terms of, this
- License or a future version of this License issued under Section 6.1.
- For legal entities, "You" includes any entity which controls, is
- controlled by, or is under common control with You. For purposes of
- this definition, "control" means (a) the power, direct or indirect,
- to cause the direction or management of such entity, whether by
- contract or otherwise, or (b) ownership of more than fifty percent
- (50%) of the outstanding shares or beneficial ownership of such
- entity.
-
-2. Source Code License.
-
- 2.1. The Initial Developer Grant.
- The Initial Developer hereby grants You a world-wide, royalty-free,
- non-exclusive license, subject to third party intellectual property
- claims:
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Initial Developer to use, reproduce,
- modify, display, perform, sublicense and distribute the Original
- Code (or portions thereof) with or without Modifications, and/or
- as part of a Larger Work; and
-
- (b) under Patents Claims infringed by the making, using or
- selling of Original Code, to make, have made, use, practice,
- sell, and offer for sale, and/or otherwise dispose of the
- Original Code (or portions thereof).
-
- (c) the licenses granted in this Section 2.1(a) and (b) are
- effective on the date Initial Developer first distributes
- Original Code under the terms of this License.
-
- (d) Notwithstanding Section 2.1(b) above, no patent license is
- granted: 1) for code that You delete from the Original Code; 2)
- separate from the Original Code; or 3) for infringements caused
- by: i) the modification of the Original Code or ii) the
- combination of the Original Code with other software or devices.
-
- 2.2. Contributor Grant.
- Subject to third party intellectual property claims, each Contributor
- hereby grants You a world-wide, royalty-free, non-exclusive license
-
- (a) under intellectual property rights (other than patent or
- trademark) Licensable by Contributor, to use, reproduce, modify,
- display, perform, sublicense and distribute the Modifications
- created by such Contributor (or portions thereof) either on an
- unmodified basis, with other Modifications, as Covered Code
- and/or as part of a Larger Work; and
-
- (b) under Patent Claims infringed by the making, using, or
- selling of Modifications made by that Contributor either alone
- and/or in combination with its Contributor Version (or portions
- of such combination), to make, use, sell, offer for sale, have
- made, and/or otherwise dispose of: 1) Modifications made by that
- Contributor (or portions thereof); and 2) the combination of
- Modifications made by that Contributor with its Contributor
- Version (or portions of such combination).
-
- (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
- effective on the date Contributor first makes Commercial Use of
- the Covered Code.
-
- (d) Notwithstanding Section 2.2(b) above, no patent license is
- granted: 1) for any code that Contributor has deleted from the
- Contributor Version; 2) separate from the Contributor Version;
- 3) for infringements caused by: i) third party modifications of
- Contributor Version or ii) the combination of Modifications made
- by that Contributor with other software (except as part of the
- Contributor Version) or other devices; or 4) under Patent Claims
- infringed by Covered Code in the absence of Modifications made by
- that Contributor.
-
-3. Distribution Obligations.
-
- 3.1. Application of License.
- The Modifications which You create or to which You contribute are
- governed by the terms of this License, including without limitation
- Section 2.2. The Source Code version of Covered Code may be
- distributed only under the terms of this License or a future version
- of this License released under Section 6.1, and You must include a
- copy of this License with every copy of the Source Code You
- distribute. You may not offer or impose any terms on any Source Code
- version that alters or restricts the applicable version of this
- License or the recipients' rights hereunder. However, You may include
- an additional document offering the additional rights described in
- Section 3.5.
-
- 3.2. Availability of Source Code.
- Any Modification which You create or to which You contribute must be
- made available in Source Code form under the terms of this License
- either on the same media as an Executable version or via an accepted
- Electronic Distribution Mechanism to anyone to whom you made an
- Executable version available; and if made available via Electronic
- Distribution Mechanism, must remain available for at least twelve (12)
- months after the date it initially became available, or at least six
- (6) months after a subsequent version of that particular Modification
- has been made available to such recipients. You are responsible for
- ensuring that the Source Code version remains available even if the
- Electronic Distribution Mechanism is maintained by a third party.
-
- 3.3. Description of Modifications.
- You must cause all Covered Code to which You contribute to contain a
- file documenting the changes You made to create that Covered Code and
- the date of any change. You must include a prominent statement that
- the Modification is derived, directly or indirectly, from Original
- Code provided by the Initial Developer and including the name of the
- Initial Developer in (a) the Source Code, and (b) in any notice in an
- Executable version or related documentation in which You describe the
- origin or ownership of the Covered Code.
-
- 3.4. Intellectual Property Matters
- (a) Third Party Claims.
- If Contributor has knowledge that a license under a third party's
- intellectual property rights is required to exercise the rights
- granted by such Contributor under Sections 2.1 or 2.2,
- Contributor must include a text file with the Source Code
- distribution titled "LEGAL" which describes the claim and the
- party making the claim in sufficient detail that a recipient will
- know whom to contact. If Contributor obtains such knowledge after
- the Modification is made available as described in Section 3.2,
- Contributor shall promptly modify the LEGAL file in all copies
- Contributor makes available thereafter and shall take other steps
- (such as notifying appropriate mailing lists or newsgroups)
- reasonably calculated to inform those who received the Covered
- Code that new knowledge has been obtained.
-
- (b) Contributor APIs.
- If Contributor's Modifications include an application programming
- interface and Contributor has knowledge of patent licenses which
- are reasonably necessary to implement that API, Contributor must
- also include this information in the LEGAL file.
-
- (c) Representations.
- Contributor represents that, except as disclosed pursuant to
- Section 3.4(a) above, Contributor believes that Contributor's
- Modifications are Contributor's original creation(s) and/or
- Contributor has sufficient rights to grant the rights conveyed by
- this License.
-
- 3.5. Required Notices.
- You must duplicate the notice in Exhibit A in each file of the Source
- Code. If it is not possible to put such notice in a particular Source
- Code file due to its structure, then You must include such notice in a
- location (such as a relevant directory) where a user would be likely
- to look for such a notice. If You created one or more Modification(s)
- You may add your name as a Contributor to the notice described in
- Exhibit A. You must also duplicate this License in any documentation
- for the Source Code where You describe recipients' rights or ownership
- rights relating to Covered Code. You may choose to offer, and to
- charge a fee for, warranty, support, indemnity or liability
- obligations to one or more recipients of Covered Code. However, You
- may do so only on Your own behalf, and not on behalf of the Initial
- Developer or any Contributor. You must make it absolutely clear than
- any such warranty, support, indemnity or liability obligation is
- offered by You alone, and You hereby agree to indemnify the Initial
- Developer and every Contributor for any liability incurred by the
- Initial Developer or such Contributor as a result of warranty,
- support, indemnity or liability terms You offer.
-
- 3.6. Distribution of Executable Versions.
- You may distribute Covered Code in Executable form only if the
- requirements of Section 3.1-3.5 have been met for that Covered Code,
- and if You include a notice stating that the Source Code version of
- the Covered Code is available under the terms of this License,
- including a description of how and where You have fulfilled the
- obligations of Section 3.2. The notice must be conspicuously included
- in any notice in an Executable version, related documentation or
- collateral in which You describe recipients' rights relating to the
- Covered Code. You may distribute the Executable version of Covered
- Code or ownership rights under a license of Your choice, which may
- contain terms different from this License, provided that You are in
- compliance with the terms of this License and that the license for the
- Executable version does not attempt to limit or alter the recipient's
- rights in the Source Code version from the rights set forth in this
- License. If You distribute the Executable version under a different
- license You must make it absolutely clear that any terms which differ
- from this License are offered by You alone, not by the Initial
- Developer or any Contributor. You hereby agree to indemnify the
- Initial Developer and every Contributor for any liability incurred by
- the Initial Developer or such Contributor as a result of any such
- terms You offer.
-
- 3.7. Larger Works.
- You may create a Larger Work by combining Covered Code with other code
- not governed by the terms of this License and distribute the Larger
- Work as a single product. In such a case, You must make sure the
- requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
- If it is impossible for You to comply with any of the terms of this
- License with respect to some or all of the Covered Code due to
- statute, judicial order, or regulation then You must: (a) comply with
- the terms of this License to the maximum extent possible; and (b)
- describe the limitations and the code they affect. Such description
- must be included in the LEGAL file described in Section 3.4 and must
- be included with all distributions of the Source Code. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Application of this License.
-
- This License applies to code to which the Initial Developer has
- attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
- 6.1. New Versions.
- Netscape Communications Corporation ("Netscape") may publish revised
- and/or new versions of the License from time to time. Each version
- will be given a distinguishing version number.
-
- 6.2. Effect of New Versions.
- Once Covered Code has been published under a particular version of the
- License, You may always continue to use it under the terms of that
- version. You may also choose to use such Covered Code under the terms
- of any subsequent version of the License published by Netscape. No one
- other than Netscape has the right to modify the terms applicable to
- Covered Code created under this License.
-
- 6.3. Derivative Works.
- If You create or use a modified version of this License (which you may
- only do in order to apply it to code which is not already Covered Code
- governed by this License), You must (a) rename Your license so that
- the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
- "MPL", "NPL" or any confusingly similar phrase do not appear in your
- license (except to note that your license differs from this License)
- and (b) otherwise make it clear that Your version of the license
- contains terms which differ from the Mozilla Public License and
- Netscape Public License. (Filling in the name of the Initial
- Developer, Original Code or Contributor in the notice described in
- Exhibit A shall not of themselves be deemed to be modifications of
- this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
- COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
- WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
- WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
- DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
- THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
- IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
- YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
- COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
- OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
- ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
- 8.1. This License and the rights granted hereunder will terminate
- automatically if You fail to comply with terms herein and fail to cure
- such breach within 30 days of becoming aware of the breach. All
- sublicenses to the Covered Code which are properly granted shall
- survive any termination of this License. Provisions which, by their
- nature, must remain in effect beyond the termination of this License
- shall survive.
-
- 8.2. If You initiate litigation by asserting a patent infringement
- claim (excluding declatory judgment actions) against Initial Developer
- or a Contributor (the Initial Developer or Contributor against whom
- You file such action is referred to as "Participant") alleging that:
-
- (a) such Participant's Contributor Version directly or indirectly
- infringes any patent, then any and all rights granted by such
- Participant to You under Sections 2.1 and/or 2.2 of this License
- shall, upon 60 days notice from Participant terminate prospectively,
- unless if within 60 days after receipt of notice You either: (i)
- agree in writing to pay Participant a mutually agreeable reasonable
- royalty for Your past and future use of Modifications made by such
- Participant, or (ii) withdraw Your litigation claim with respect to
- the Contributor Version against such Participant. If within 60 days
- of notice, a reasonable royalty and payment arrangement are not
- mutually agreed upon in writing by the parties or the litigation claim
- is not withdrawn, the rights granted by Participant to You under
- Sections 2.1 and/or 2.2 automatically terminate at the expiration of
- the 60 day notice period specified above.
-
- (b) any software, hardware, or device, other than such Participant's
- Contributor Version, directly or indirectly infringes any patent, then
- any rights granted to You by such Participant under Sections 2.1(b)
- and 2.2(b) are revoked effective as of the date You first made, used,
- sold, distributed, or had made, Modifications made by that
- Participant.
-
- 8.3. If You assert a patent infringement claim against Participant
- alleging that such Participant's Contributor Version directly or
- indirectly infringes any patent where such claim is resolved (such as
- by license or settlement) prior to the initiation of patent
- infringement litigation, then the reasonable value of the licenses
- granted by such Participant under Sections 2.1 or 2.2 shall be taken
- into account in determining the amount or value of any payment or
- license.
-
- 8.4. In the event of termination under Sections 8.1 or 8.2 above,
- all end user license agreements (excluding distributors and resellers)
- which have been validly granted by You or any distributor hereunder
- prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
- UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
- (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
- DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
- OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
- ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
- CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
- WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
- COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
- INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
- LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
- RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
- PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
- EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
- THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
- The Covered Code is a "commercial item," as that term is defined in
- 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
- software" and "commercial computer software documentation," as such
- terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
- C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
- all U.S. Government End Users acquire Covered Code with only those
- rights set forth herein.
-
-11. MISCELLANEOUS.
-
- This License represents the complete agreement concerning subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. This License shall be governed by
- California law provisions (except to the extent applicable law, if
- any, provides otherwise), excluding its conflict-of-law provisions.
- With respect to disputes in which at least one party is a citizen of,
- or an entity chartered or registered to do business in the United
- States of America, any litigation relating to this License shall be
- subject to the jurisdiction of the Federal Courts of the Northern
- District of California, with venue lying in Santa Clara County,
- California, with the losing party responsible for costs, including
- without limitation, court costs and reasonable attorneys' fees and
- expenses. The application of the United Nations Convention on
- Contracts for the International Sale of Goods is expressly excluded.
- Any law or regulation which provides that the language of a contract
- shall be construed against the drafter shall not apply to this
- License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
- As between Initial Developer and the Contributors, each party is
- responsible for claims and damages arising, directly or indirectly,
- out of its utilization of rights under this License and You agree to
- work with Initial Developer and Contributors to distribute such
- responsibility on an equitable basis. Nothing herein is intended or
- shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
- Initial Developer may designate portions of the Covered Code as
- "Multiple-Licensed". "Multiple-Licensed" means that the Initial
- Developer permits you to utilize portions of the Covered Code under
- Your choice of the NPL or the alternative licenses, if any, specified
- by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A -Mozilla Public License.
-
- ``The contents of this file are subject to the Mozilla Public License
- Version 1.1 (the "License"); you may not use this file except in
- compliance with the License. You may obtain a copy of the License at
- http://www.mozilla.org/MPL/
-
- Software distributed under the License is distributed on an "AS IS"
- basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
- License for the specific language governing rights and limitations
- under the License.
-
- The Original Code is RabbitMQ.
-
- The Initial Developer of the Original Code is GoPivotal, Inc.
- Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.''
-
- [NOTE: The text of this Exhibit A may differ slightly from the text of
- the notices in the Source Code files of the Original Code. You should
- use the text of this Exhibit A rather than the text found in the
- Original Code Source Code for Your Modifications.]
diff --git a/packaging/common/rabbitmq-script-wrapper b/packaging/common/rabbitmq-script-wrapper
deleted file mode 100644
index 7e5f7749..00000000
--- a/packaging/common/rabbitmq-script-wrapper
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-# Escape spaces and quotes, because shell is revolting.
-for arg in "$@" ; do
- # Escape quotes in parameters, so that they're passed through cleanly.
- arg=$(sed -e 's/"/\\"/g' <<-END
- $arg
- END
- )
- CMDLINE="${CMDLINE} \"${arg}\""
-done
-
-cd /var/lib/rabbitmq
-
-SCRIPT=`basename $0`
-
-if [ `id -u` = `id -u rabbitmq` -a "$SCRIPT" = "rabbitmq-server" ] ; then
- /usr/lib/rabbitmq/bin/rabbitmq-server "$@" @STDOUT_STDERR_REDIRECTION@
-elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then
- /usr/lib/rabbitmq/bin/${SCRIPT} "$@"
-elif [ `id -u` = 0 ] ; then
- @SU_RABBITMQ_SH_C@ "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}"
-else
- /usr/lib/rabbitmq/bin/${SCRIPT}
- echo
- echo "Only root or rabbitmq should run ${SCRIPT}"
- echo
- exit 1
-fi
diff --git a/packaging/common/rabbitmq-server.ocf b/packaging/common/rabbitmq-server.ocf
deleted file mode 100755
index 6b3abf3e..00000000
--- a/packaging/common/rabbitmq-server.ocf
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-##
-## OCF Resource Agent compliant rabbitmq-server resource script.
-##
-
-## OCF instance parameters
-## OCF_RESKEY_server
-## OCF_RESKEY_ctl
-## OCF_RESKEY_nodename
-## OCF_RESKEY_ip
-## OCF_RESKEY_port
-## OCF_RESKEY_config_file
-## OCF_RESKEY_log_base
-## OCF_RESKEY_mnesia_base
-## OCF_RESKEY_server_start_args
-## OCF_RESKEY_pid_file
-
-#######################################################################
-# Initialization:
-
-: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/resource.d/heartbeat}
-. ${OCF_FUNCTIONS_DIR}/.ocf-shellfuncs
-
-#######################################################################
-
-OCF_RESKEY_server_default="/usr/sbin/rabbitmq-server"
-OCF_RESKEY_ctl_default="/usr/sbin/rabbitmqctl"
-OCF_RESKEY_nodename_default="rabbit@localhost"
-OCF_RESKEY_log_base_default="/var/log/rabbitmq"
-OCF_RESKEY_pid_file_default="/var/run/rabbitmq/pid"
-: ${OCF_RESKEY_server=${OCF_RESKEY_server_default}}
-: ${OCF_RESKEY_ctl=${OCF_RESKEY_ctl_default}}
-: ${OCF_RESKEY_nodename=${OCF_RESKEY_nodename_default}}
-: ${OCF_RESKEY_log_base=${OCF_RESKEY_log_base_default}}
-: ${OCF_RESKEY_pid_file=${OCF_RESKEY_pid_file_default}}
-
-meta_data() {
- cat <<END
-<?xml version="1.0"?>
-<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="rabbitmq-server">
-<version>1.0</version>
-
-<longdesc lang="en">
-Resource agent for RabbitMQ-server
-</longdesc>
-
-<shortdesc lang="en">Resource agent for RabbitMQ-server</shortdesc>
-
-<parameters>
-<parameter name="server" unique="0" required="0">
-<longdesc lang="en">
-The path to the rabbitmq-server script
-</longdesc>
-<shortdesc lang="en">Path to rabbitmq-server</shortdesc>
-<content type="string" default="${OCF_RESKEY_server_default}" />
-</parameter>
-
-<parameter name="ctl" unique="0" required="0">
-<longdesc lang="en">
-The path to the rabbitmqctl script
-</longdesc>
-<shortdesc lang="en">Path to rabbitmqctl</shortdesc>
-<content type="string" default="${OCF_RESKEY_ctl_default}" />
-</parameter>
-
-<parameter name="nodename" unique="0" required="0">
-<longdesc lang="en">
-The node name for rabbitmq-server
-</longdesc>
-<shortdesc lang="en">Node name</shortdesc>
-<content type="string" default="${OCF_RESKEY_nodename_default}" />
-</parameter>
-
-<parameter name="ip" unique="0" required="0">
-<longdesc lang="en">
-The IP address for rabbitmq-server to listen on
-</longdesc>
-<shortdesc lang="en">IP Address</shortdesc>
-<content type="string" default="" />
-</parameter>
-
-<parameter name="port" unique="0" required="0">
-<longdesc lang="en">
-The IP Port for rabbitmq-server to listen on
-</longdesc>
-<shortdesc lang="en">IP Port</shortdesc>
-<content type="integer" default="" />
-</parameter>
-
-<parameter name="config_file" unique="0" required="0">
-<longdesc lang="en">
-Location of the config file (without the .config suffix)
-</longdesc>
-<shortdesc lang="en">Config file path (without the .config suffix)</shortdesc>
-<content type="string" default="" />
-</parameter>
-
-<parameter name="log_base" unique="0" required="0">
-<longdesc lang="en">
-Location of the directory under which logs will be created
-</longdesc>
-<shortdesc lang="en">Log base path</shortdesc>
-<content type="string" default="${OCF_RESKEY_log_base_default}" />
-</parameter>
-
-<parameter name="mnesia_base" unique="0" required="0">
-<longdesc lang="en">
-Location of the directory under which mnesia will store data
-</longdesc>
-<shortdesc lang="en">Mnesia base path</shortdesc>
-<content type="string" default="" />
-</parameter>
-
-<parameter name="server_start_args" unique="0" required="0">
-<longdesc lang="en">
-Additional arguments provided to the server on startup
-</longdesc>
-<shortdesc lang="en">Server start arguments</shortdesc>
-<content type="string" default="" />
-</parameter>
-
-<parameter name="pid_file" unique="0" required="0">
-<longdesc lang="en">
-Location of the file in which the pid will be stored
-</longdesc>
-<shortdesc lang="en">Pid file path</shortdesc>
-<content type="string" default="${OCF_RESKEY_pid_file_default}" />
-</parameter>
-
-</parameters>
-
-<actions>
-<action name="start" timeout="600" />
-<action name="stop" timeout="120" />
-<action name="status" timeout="20" interval="10" />
-<action name="monitor" timeout="20" interval="10" />
-<action name="validate-all" timeout="30" />
-<action name="meta-data" timeout="5" />
-</actions>
-</resource-agent>
-END
-}
-
-rabbit_usage() {
- cat <<END
-usage: $0 {start|stop|status|monitor|validate-all|meta-data}
-
-Expects to have a fully populated OCF RA-compliant environment set.
-END
-}
-
-RABBITMQ_SERVER=$OCF_RESKEY_server
-RABBITMQ_CTL=$OCF_RESKEY_ctl
-RABBITMQ_NODENAME=$OCF_RESKEY_nodename
-RABBITMQ_NODE_IP_ADDRESS=$OCF_RESKEY_ip
-RABBITMQ_NODE_PORT=$OCF_RESKEY_port
-RABBITMQ_CONFIG_FILE=$OCF_RESKEY_config_file
-RABBITMQ_LOG_BASE=$OCF_RESKEY_log_base
-RABBITMQ_MNESIA_BASE=$OCF_RESKEY_mnesia_base
-RABBITMQ_SERVER_START_ARGS=$OCF_RESKEY_server_start_args
-RABBITMQ_PID_FILE=$OCF_RESKEY_pid_file
-[ ! -z $RABBITMQ_NODENAME ] && NODENAME_ARG="-n $RABBITMQ_NODENAME"
-[ ! -z $RABBITMQ_NODENAME ] && export RABBITMQ_NODENAME
-
-ensure_pid_dir () {
- PID_DIR=`dirname ${RABBITMQ_PID_FILE}`
- if [ ! -d ${PID_DIR} ] ; then
- mkdir -p ${PID_DIR}
- chown -R rabbitmq:rabbitmq ${PID_DIR}
- chmod 755 ${PID_DIR}
- fi
- return $OCF_SUCCESS
-}
-
-remove_pid () {
- rm -f ${RABBITMQ_PID_FILE}
- rmdir `dirname ${RABBITMQ_PID_FILE}` || :
-}
-
-export_vars() {
- [ ! -z $RABBITMQ_NODE_IP_ADDRESS ] && export RABBITMQ_NODE_IP_ADDRESS
- [ ! -z $RABBITMQ_NODE_PORT ] && export RABBITMQ_NODE_PORT
- [ ! -z $RABBITMQ_CONFIG_FILE ] && export RABBITMQ_CONFIG_FILE
- [ ! -z $RABBITMQ_LOG_BASE ] && export RABBITMQ_LOG_BASE
- [ ! -z $RABBITMQ_MNESIA_BASE ] && export RABBITMQ_MNESIA_BASE
- [ ! -z $RABBITMQ_SERVER_START_ARGS ] && export RABBITMQ_SERVER_START_ARGS
- [ ! -z $RABBITMQ_PID_FILE ] && ensure_pid_dir && export RABBITMQ_PID_FILE
-}
-
-rabbit_validate_partial() {
- if [ ! -x $RABBITMQ_SERVER ]; then
- ocf_log err "rabbitmq-server server $RABBITMQ_SERVER does not exist or is not executable";
- exit $OCF_ERR_INSTALLED;
- fi
-
- if [ ! -x $RABBITMQ_CTL ]; then
- ocf_log err "rabbitmq-server ctl $RABBITMQ_CTL does not exist or is not executable";
- exit $OCF_ERR_INSTALLED;
- fi
-}
-
-rabbit_validate_full() {
- if [ ! -z $RABBITMQ_CONFIG_FILE ] && [ ! -e "${RABBITMQ_CONFIG_FILE}.config" ]; then
- ocf_log err "rabbitmq-server config_file ${RABBITMQ_CONFIG_FILE}.config does not exist or is not a file";
- exit $OCF_ERR_INSTALLED;
- fi
-
- if [ ! -z $RABBITMQ_LOG_BASE ] && [ ! -d $RABBITMQ_LOG_BASE ]; then
- ocf_log err "rabbitmq-server log_base $RABBITMQ_LOG_BASE does not exist or is not a directory";
- exit $OCF_ERR_INSTALLED;
- fi
-
- if [ ! -z $RABBITMQ_MNESIA_BASE ] && [ ! -d $RABBITMQ_MNESIA_BASE ]; then
- ocf_log err "rabbitmq-server mnesia_base $RABBITMQ_MNESIA_BASE does not exist or is not a directory";
- exit $OCF_ERR_INSTALLED;
- fi
-
- rabbit_validate_partial
-
- return $OCF_SUCCESS
-}
-
-rabbit_status() {
- rabbitmqctl_action "status"
-}
-
-rabbit_wait() {
- rabbitmqctl_action "wait" $1
-}
-
-rabbitmqctl_action() {
- local rc
- local action
- action=$@
- $RABBITMQ_CTL $NODENAME_ARG $action > /dev/null 2> /dev/null
- rc=$?
- case "$rc" in
- 0)
- ocf_log debug "RabbitMQ server is running normally"
- return $OCF_SUCCESS
- ;;
- 2)
- ocf_log debug "RabbitMQ server is not running"
- return $OCF_NOT_RUNNING
- ;;
- *)
- ocf_log err "Unexpected return from rabbitmqctl $NODENAME_ARG $action: $rc"
- exit $OCF_ERR_GENERIC
- esac
-}
-
-rabbit_start() {
- local rc
-
- if rabbit_status; then
- ocf_log info "Resource already running."
- return $OCF_SUCCESS
- fi
-
- export_vars
-
- setsid sh -c "$RABBITMQ_SERVER > ${RABBITMQ_LOG_BASE}/startup_log 2> ${RABBITMQ_LOG_BASE}/startup_err" &
-
- # Wait for the server to come up.
- # Let the CRM/LRM time us out if required
- rabbit_wait $RABBITMQ_PID_FILE
- rc=$?
- if [ "$rc" != $OCF_SUCCESS ]; then
- remove_pid
- ocf_log info "rabbitmq-server start failed: $rc"
- exit $OCF_ERR_GENERIC
- fi
-
- return $OCF_SUCCESS
-}
-
-rabbit_stop() {
- local rc
-
- if ! rabbit_status; then
- ocf_log info "Resource not running."
- return $OCF_SUCCESS
- fi
-
- $RABBITMQ_CTL stop
- rc=$?
-
- if [ "$rc" != 0 ]; then
- ocf_log err "rabbitmq-server stop command failed: $RABBITMQ_CTL stop, $rc"
- return $rc
- fi
-
- # Spin waiting for the server to shut down.
- # Let the CRM/LRM time us out if required
- stop_wait=1
- while [ $stop_wait = 1 ]; do
- rabbit_status
- rc=$?
- if [ "$rc" = $OCF_NOT_RUNNING ]; then
- remove_pid
- stop_wait=0
- break
- elif [ "$rc" != $OCF_SUCCESS ]; then
- ocf_log info "rabbitmq-server stop failed: $rc"
- exit $OCF_ERR_GENERIC
- fi
- sleep 1
- done
-
- return $OCF_SUCCESS
-}
-
-rabbit_monitor() {
- rabbit_status
- return $?
-}
-
-case $__OCF_ACTION in
- meta-data)
- meta_data
- exit $OCF_SUCCESS
- ;;
- usage|help)
- rabbit_usage
- exit $OCF_SUCCESS
- ;;
-esac
-
-if ocf_is_probe; then
- rabbit_validate_partial
-else
- rabbit_validate_full
-fi
-
-case $__OCF_ACTION in
- start)
- rabbit_start
- ;;
- stop)
- rabbit_stop
- ;;
- status|monitor)
- rabbit_monitor
- ;;
- validate-all)
- exit $OCF_SUCCESS
- ;;
- *)
- rabbit_usage
- exit $OCF_ERR_UNIMPLEMENTED
- ;;
-esac
-
-exit $?
diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile
deleted file mode 100644
index 6d844364..00000000
--- a/packaging/debs/Debian/Makefile
+++ /dev/null
@@ -1,42 +0,0 @@
-TARBALL_DIR=../../../dist
-TARBALL=$(notdir $(wildcard $(TARBALL_DIR)/rabbitmq-server-[0-9.]*.tar.gz))
-COMMON_DIR=../../common
-VERSION=$(shell echo $(TARBALL) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
-
-DEBIAN_ORIG_TARBALL=$(shell echo $(TARBALL) | sed -e 's:\(.*\)-\(.*\)\(\.tar\.gz\):\1_\2\.orig\3:g')
-UNPACKED_DIR=rabbitmq-server-$(VERSION)
-PACKAGENAME=rabbitmq-server
-SIGNING_KEY_ID=056E8E56
-
-ifneq "$(UNOFFICIAL_RELEASE)" ""
- SIGNING=-us -uc
-else
- SIGNING=-k$(SIGNING_KEY_ID)
-endif
-
-all:
- @echo 'Please choose a target from the Makefile.'
-
-package: clean
- cp $(TARBALL_DIR)/$(TARBALL) $(DEBIAN_ORIG_TARBALL)
- tar -zxf $(DEBIAN_ORIG_TARBALL)
- cp -r debian $(UNPACKED_DIR)
- cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/
- sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
- -e 's|@STDOUT_STDERR_REDIRECTION@| > "/var/log/rabbitmq/startup_log" 2> "/var/log/rabbitmq/startup_err"|' \
- $(UNPACKED_DIR)/debian/rabbitmq-script-wrapper
- chmod a+x $(UNPACKED_DIR)/debian/rules
- echo "This package was debianized by Tony Garnock-Jones <tonyg@rabbitmq.com> on\nWed, 3 Jan 2007 15:43:44 +0000.\n\nIt was downloaded from http://www.rabbitmq.com/\n\n" > $(UNPACKED_DIR)/debian/copyright
- cat $(UNPACKED_DIR)/LICENSE >> $(UNPACKED_DIR)/debian/copyright
- echo "\n\nThe Debian packaging is (C) 2007-2013, GoPivotal, Inc. and is licensed\nunder the MPL 1.1, see above.\n" >> $(UNPACKED_DIR)/debian/copyright
- UNOFFICIAL_RELEASE=$(UNOFFICIAL_RELEASE) VERSION=$(VERSION) ./check-changelog.sh rabbitmq-server $(UNPACKED_DIR)
- cd $(UNPACKED_DIR); GNUPGHOME=$(GNUPG_PATH)/.gnupg dpkg-buildpackage -rfakeroot $(SIGNING)
- rm -rf $(UNPACKED_DIR)
-
-clean:
- rm -rf $(UNPACKED_DIR)
- rm -f $(PACKAGENAME)_*.tar.gz
- rm -f $(PACKAGENAME)_*.diff.gz
- rm -f $(PACKAGENAME)_*.dsc
- rm -f $(PACKAGENAME)_*_*.changes
- rm -f $(PACKAGENAME)_*_*.deb
diff --git a/packaging/debs/Debian/check-changelog.sh b/packaging/debs/Debian/check-changelog.sh
deleted file mode 100755
index ff25e648..00000000
--- a/packaging/debs/Debian/check-changelog.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-PACKAGE_NAME=$1
-cd $2
-
-CHANGELOG_VERSION=$(dpkg-parsechangelog | sed -n 's/^Version: \(.*\)-[^-]*$/\1/p')
-
-if [ "${CHANGELOG_VERSION}" != "${VERSION}" ]; then
- if [ -n "${UNOFFICIAL_RELEASE}" ]; then
- echo "${PACKAGE_NAME} (${VERSION}-1) unstable; urgency=low" > debian/changelog.tmp
- echo >> debian/changelog.tmp
- echo " * Unofficial release" >> debian/changelog.tmp
- echo >> debian/changelog.tmp
- echo " -- Nobody <nobody@example.com> $(date -R)" >> debian/changelog.tmp
- echo >> debian/changelog.tmp
- cat debian/changelog >> debian/changelog.tmp
- mv -f debian/changelog.tmp debian/changelog
-
- exit 0
- else
- echo
- echo There is no entry in debian/changelog for version ${VERSION}!
- echo Please create a changelog entry, or set the variable
- echo UNOFFICIAL_RELEASE to automatically create one.
- echo
-
- exit 1
- fi
-fi
diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog
deleted file mode 100644
index 3212514e..00000000
--- a/packaging/debs/Debian/debian/changelog
+++ /dev/null
@@ -1,246 +0,0 @@
-rabbitmq-server (3.1.5-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Thu, 15 Aug 2013 11:03:13 +0100
-
-rabbitmq-server (3.1.3-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Tim Watson <tim@rabbitmq.com> Tue, 25 Jun 2013 15:01:12 +0100
-
-rabbitmq-server (3.1.2-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Tim Watson <tim@rabbitmq.com> Mon, 24 Jun 2013 11:16:41 +0100
-
-rabbitmq-server (3.1.1-1) unstable; urgency=low
-
- * Test release
-
- -- Tim Watson <tim@rabbitmq.com> Mon, 20 May 2013 16:21:20 +0100
-
-rabbitmq-server (3.1.0-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Wed, 01 May 2013 11:57:58 +0100
-
-rabbitmq-server (3.0.1-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Tue, 11 Dec 2012 11:29:55 +0000
-
-rabbitmq-server (3.0.0-1) unstable; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Fri, 16 Nov 2012 14:15:29 +0000
-
-rabbitmq-server (2.7.1-1) natty; urgency=low
-
- * New Upstream Release
-
- -- Steve Powell <steve@rabbitmq.com> Fri, 16 Dec 2011 12:12:36 +0000
-
-rabbitmq-server (2.7.0-1) natty; urgency=low
-
- * New Upstream Release
-
- -- Steve Powell <steve@rabbitmq.com> Tue, 08 Nov 2011 16:47:50 +0000
-
-rabbitmq-server (2.6.1-1) natty; urgency=low
-
- * New Upstream Release
-
- -- Tim <tim@rabbitmq.com> Fri, 09 Sep 2011 14:38:45 +0100
-
-rabbitmq-server (2.6.0-1) natty; urgency=low
-
- * New Upstream Release
-
- -- Tim <tim@rabbitmq.com> Fri, 26 Aug 2011 16:29:40 +0100
-
-rabbitmq-server (2.5.1-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Mon, 27 Jun 2011 11:21:49 +0100
-
-rabbitmq-server (2.5.0-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- <jerryk@vmware.com> Thu, 09 Jun 2011 07:20:29 -0700
-
-rabbitmq-server (2.4.1-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Alexandru Scvortov <alexandru@rabbitmq.com> Thu, 07 Apr 2011 16:49:22 +0100
-
-rabbitmq-server (2.4.0-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Alexandru Scvortov <alexandru@rabbitmq.com> Tue, 22 Mar 2011 17:34:31 +0000
-
-rabbitmq-server (2.3.1-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Thu, 03 Feb 2011 12:43:56 +0000
-
-rabbitmq-server (2.3.0-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@rabbitmq.com> Tue, 01 Feb 2011 12:52:16 +0000
-
-rabbitmq-server (2.2.0-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Rob Harrop <rob@rabbitmq.com> Mon, 29 Nov 2010 12:24:48 +0000
-
-rabbitmq-server (2.1.1-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Vlad Alexandru Ionescu <vlad@rabbitmq.com> Tue, 19 Oct 2010 17:20:10 +0100
-
-rabbitmq-server (2.1.0-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Marek Majkowski <marek@rabbitmq.com> Tue, 14 Sep 2010 14:20:17 +0100
-
-rabbitmq-server (2.0.0-1) karmic; urgency=low
-
- * New Upstream Release
-
- -- Michael Bridgen <mikeb@rabbitmq.com> Mon, 23 Aug 2010 14:55:39 +0100
-
-rabbitmq-server (1.8.1-1) lucid; urgency=low
-
- * New Upstream Release
-
- -- Emile Joubert <emile@rabbitmq.com> Wed, 14 Jul 2010 15:05:24 +0100
-
-rabbitmq-server (1.8.0-1) intrepid; urgency=low
-
- * New Upstream Release
-
- -- Matthew Sackman <matthew@rabbitmq.com> Tue, 15 Jun 2010 12:48:48 +0100
-
-rabbitmq-server (1.7.2-1) intrepid; urgency=low
-
- * New Upstream Release
-
- -- Matthew Sackman <matthew@lshift.net> Mon, 15 Feb 2010 15:54:47 +0000
-
-rabbitmq-server (1.7.1-1) intrepid; urgency=low
-
- * New Upstream Release
-
- -- Matthew Sackman <matthew@lshift.net> Fri, 22 Jan 2010 14:14:29 +0000
-
-rabbitmq-server (1.7.0-1) intrepid; urgency=low
-
- * New Upstream Release
-
- -- David Wragg <dpw@lshift.net> Mon, 05 Oct 2009 13:44:41 +0100
-
-rabbitmq-server (1.6.0-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Matthias Radestock <matthias@lshift.net> Tue, 16 Jun 2009 15:02:58 +0100
-
-rabbitmq-server (1.5.5-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Matthias Radestock <matthias@lshift.net> Tue, 19 May 2009 09:57:54 +0100
-
-rabbitmq-server (1.5.4-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Matthias Radestock <matthias@lshift.net> Mon, 06 Apr 2009 09:19:32 +0100
-
-rabbitmq-server (1.5.3-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Tony Garnock-Jones <tonyg@lshift.net> Tue, 24 Feb 2009 18:23:33 +0000
-
-rabbitmq-server (1.5.2-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Tony Garnock-Jones <tonyg@lshift.net> Mon, 23 Feb 2009 16:03:38 +0000
-
-rabbitmq-server (1.5.1-1) hardy; urgency=low
-
- * New Upstream Release
-
- -- Simon MacMullen <simon@lshift.net> Mon, 19 Jan 2009 15:46:13 +0000
-
-rabbitmq-server (1.5.0-1) testing; urgency=low
-
- * New Upstream Release
-
- -- Matthias Radestock <matthias@lshift.net> Wed, 17 Dec 2008 18:23:47 +0000
-
-rabbitmq-server (1.4.0-1) testing; urgency=low
-
- * New Upstream Release
-
- -- Tony Garnock-Jones <tonyg@lshift.net> Thu, 24 Jul 2008 13:21:48 +0100
-
-rabbitmq-server (1.3.0-1) testing; urgency=low
-
- * New Upstream Release
-
- -- Adrien Pierard <adrien@lshift.net> Mon, 03 Mar 2008 15:34:38 +0000
-
-rabbitmq-server (1.2.0-2) testing; urgency=low
-
- * Fixed rabbitmqctl wrapper script
-
- -- Simon MacMullen <simon@lshift.net> Fri, 05 Oct 2007 11:55:00 +0100
-
-rabbitmq-server (1.2.0-1) testing; urgency=low
-
- * New upstream release
-
- -- Simon MacMullen <simon@lshift.net> Wed, 26 Sep 2007 11:49:26 +0100
-
-rabbitmq-server (1.1.1-1) testing; urgency=low
-
- * New upstream release
-
- -- Simon MacMullen <simon@lshift.net> Wed, 29 Aug 2007 12:03:15 +0100
-
-rabbitmq-server (1.1.0-alpha-2) testing; urgency=low
-
- * Fixed erlang-nox dependency
-
- -- Simon MacMullen <simon@lshift.net> Thu, 02 Aug 2007 11:27:13 +0100
-
-rabbitmq-server (1.1.0-alpha-1) testing; urgency=low
-
- * New upstream release
-
- -- Simon MacMullen <simon@lshift.net> Fri, 20 Jul 2007 18:17:33 +0100
-
-rabbitmq-server (1.0.0-alpha-1) unstable; urgency=low
-
- * Initial release
-
- -- Tony Garnock-Jones <tonyg@shortstop.lshift.net> Wed, 31 Jan 2007 19:06:33 +0000
-
diff --git a/packaging/debs/Debian/debian/compat b/packaging/debs/Debian/debian/compat
deleted file mode 100644
index 7ed6ff82..00000000
--- a/packaging/debs/Debian/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-5
diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control
deleted file mode 100644
index 3a15c4b6..00000000
--- a/packaging/debs/Debian/debian/control
+++ /dev/null
@@ -1,17 +0,0 @@
-Source: rabbitmq-server
-Section: net
-Priority: extra
-Maintainer: RabbitMQ Team <packaging@rabbitmq.com>
-Uploaders: Emile Joubert <emile@rabbitmq.com>
-DM-Upload-Allowed: yes
-Build-Depends: cdbs, debhelper (>= 5), erlang-dev, python-simplejson, xmlto, xsltproc, erlang-nox (>= 1:12.b.3), erlang-src (>= 1:12.b.3), unzip, zip
-Standards-Version: 3.9.2
-
-Package: rabbitmq-server
-Architecture: all
-Depends: erlang-nox (>= 1:12.b.3) | esl-erlang, adduser, logrotate, ${misc:Depends}
-Description: AMQP server written in Erlang
- RabbitMQ is an implementation of AMQP, the emerging standard for high
- performance enterprise messaging. The RabbitMQ server is a robust and
- scalable implementation of an AMQP broker.
-Homepage: http://www.rabbitmq.com/
diff --git a/packaging/debs/Debian/debian/dirs b/packaging/debs/Debian/debian/dirs
deleted file mode 100644
index 625b7d41..00000000
--- a/packaging/debs/Debian/debian/dirs
+++ /dev/null
@@ -1,9 +0,0 @@
-usr/lib/rabbitmq/bin
-usr/lib/erlang/lib
-usr/sbin
-usr/share/man
-var/lib/rabbitmq/mnesia
-var/log/rabbitmq
-etc/logrotate.d
-etc/rabbitmq
-
diff --git a/packaging/debs/Debian/debian/postinst b/packaging/debs/Debian/debian/postinst
deleted file mode 100644
index b11340ef..00000000
--- a/packaging/debs/Debian/debian/postinst
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/sh
-# postinst script for rabbitmq
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * <postinst> `configure' <most-recently-configured-version>
-# * <old-postinst> `abort-upgrade' <new version>
-# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
-# <new-version>
-# * <postinst> `abort-remove'
-# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
-# <failed-install-package> <version> `removing'
-# <conflicting-package> <version>
-# for details, see http://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-
-# create rabbitmq group
-if ! getent group rabbitmq >/dev/null; then
- addgroup --system rabbitmq
-fi
-
-# create rabbitmq user
-if ! getent passwd rabbitmq >/dev/null; then
- adduser --system --ingroup rabbitmq --home /var/lib/rabbitmq \
- --no-create-home --gecos "RabbitMQ messaging server" \
- --disabled-login rabbitmq
-fi
-
-chown -R rabbitmq:rabbitmq /var/lib/rabbitmq
-chown -R rabbitmq:rabbitmq /var/log/rabbitmq
-
-case "$1" in
- configure)
- if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
- [ ! -f /etc/rabbitmq/rabbitmq-env.conf ]; then
- mv /etc/rabbitmq/rabbitmq.conf /etc/rabbitmq/rabbitmq-env.conf
- fi
- ;;
-
- abort-upgrade|abort-remove|abort-deconfigure)
- ;;
-
- *)
- echo "postinst called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
-
-
diff --git a/packaging/debs/Debian/debian/postrm.in b/packaging/debs/Debian/debian/postrm.in
deleted file mode 100644
index c2e9bbfe..00000000
--- a/packaging/debs/Debian/debian/postrm.in
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/sh
-# postrm script for rabbitmq
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-# * <postrm> `remove'
-# * <postrm> `purge'
-# * <old-postrm> `upgrade' <new-version>
-# * <new-postrm> `failed-upgrade' <old-version>
-# * <new-postrm> `abort-install'
-# * <new-postrm> `abort-install' <old-version>
-# * <new-postrm> `abort-upgrade' <old-version>
-# * <disappearer's-postrm> `disappear' <overwriter>
-# <overwriter-version>
-# for details, see http://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-remove_plugin_traces() {
- # Remove traces of plugins
- rm -rf /var/lib/rabbitmq/plugins-scratch
-}
-
-case "$1" in
- purge)
- rm -f /etc/default/rabbitmq
- if [ -d /var/lib/rabbitmq ]; then
- rm -r /var/lib/rabbitmq
- fi
- if [ -d /var/log/rabbitmq ]; then
- rm -r /var/log/rabbitmq
- fi
- if [ -d /etc/rabbitmq ]; then
- rm -r /etc/rabbitmq
- fi
- remove_plugin_traces
- if getent passwd rabbitmq >/dev/null; then
- # Stop epmd if run by the rabbitmq user
- pkill -u rabbitmq epmd || :
- fi
- ;;
-
- remove|upgrade)
- remove_plugin_traces
- ;;
-
- failed-upgrade|abort-install|abort-upgrade|disappear)
- ;;
-
- *)
- echo "postrm called with unknown argument \`$1'" >&2
- exit 1
- ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
-
-
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.default b/packaging/debs/Debian/debian/rabbitmq-server.default
deleted file mode 100644
index bde5e308..00000000
--- a/packaging/debs/Debian/debian/rabbitmq-server.default
+++ /dev/null
@@ -1,9 +0,0 @@
-# This file is sourced by /etc/init.d/rabbitmq-server. Its primary
-# reason for existing is to allow adjustment of system limits for the
-# rabbitmq-server process.
-#
-# Maximum number of open file handles. This will need to be increased
-# to handle many simultaneous connections. Refer to the system
-# documentation for ulimit (in man bash) for more information.
-#
-#ulimit -n 1024
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.init b/packaging/debs/Debian/debian/rabbitmq-server.init
deleted file mode 100644
index b2d3f86a..00000000
--- a/packaging/debs/Debian/debian/rabbitmq-server.init
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/bin/sh
-#
-# rabbitmq-server RabbitMQ broker
-#
-# chkconfig: - 80 05
-# description: Enable AMQP service provided by RabbitMQ
-#
-
-### BEGIN INIT INFO
-# Provides: rabbitmq-server
-# Required-Start: $remote_fs $network
-# Required-Stop: $remote_fs $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Description: RabbitMQ broker
-# Short-Description: Enable AMQP service provided by RabbitMQ broker
-### END INIT INFO
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-NAME=rabbitmq-server
-DAEMON=/usr/sbin/${NAME}
-CONTROL=/usr/sbin/rabbitmqctl
-DESC="message broker"
-USER=rabbitmq
-ROTATE_SUFFIX=
-INIT_LOG_DIR=/var/log/rabbitmq
-PID_FILE=/var/run/rabbitmq/pid
-
-
-test -x $DAEMON || exit 0
-test -x $CONTROL || exit 0
-
-RETVAL=0
-set -e
-
-[ -f /etc/default/${NAME} ] && . /etc/default/${NAME}
-
-. /lib/lsb/init-functions
-. /lib/init/vars.sh
-
-ensure_pid_dir () {
- PID_DIR=`dirname ${PID_FILE}`
- if [ ! -d ${PID_DIR} ] ; then
- mkdir -p ${PID_DIR}
- chown -R ${USER}:${USER} ${PID_DIR}
- chmod 755 ${PID_DIR}
- fi
-}
-
-remove_pid () {
- rm -f ${PID_FILE}
- rmdir `dirname ${PID_FILE}` || :
-}
-
-start_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL != 0 ] ; then
- RETVAL=0
- ensure_pid_dir
- set +e
- RABBITMQ_PID_FILE=$PID_FILE start-stop-daemon --quiet \
- --chuid rabbitmq --start --exec $DAEMON \
- --pidfile "$RABBITMQ_PID_FILE" --background
- $CONTROL wait $PID_FILE >/dev/null 2>&1
- RETVAL=$?
- set -e
- if [ $RETVAL != 0 ] ; then
- remove_pid
- fi
- else
- RETVAL=3
- fi
-}
-
-stop_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL = 0 ] ; then
- set +e
- $CONTROL stop ${PID_FILE} > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err
- RETVAL=$?
- set -e
- if [ $RETVAL = 0 ] ; then
- remove_pid
- fi
- else
- RETVAL=3
- fi
-}
-
-status_rabbitmq() {
- set +e
- if [ "$1" != "quiet" ] ; then
- $CONTROL status 2>&1
- else
- $CONTROL status > /dev/null 2>&1
- fi
- if [ $? != 0 ] ; then
- RETVAL=3
- fi
- set -e
-}
-
-rotate_logs_rabbitmq() {
- set +e
- $CONTROL -q rotate_logs ${ROTATE_SUFFIX}
- if [ $? != 0 ] ; then
- RETVAL=1
- fi
- set -e
-}
-
-restart_running_rabbitmq () {
- status_rabbitmq quiet
- if [ $RETVAL = 0 ] ; then
- restart_rabbitmq
- else
- log_warning_msg "${DESC} not running"
- fi
-}
-
-restart_rabbitmq() {
- stop_rabbitmq
- start_rabbitmq
-}
-
-restart_end() {
- if [ $RETVAL = 0 ] ; then
- log_end_msg 0
- else
- log_end_msg 1
- fi
-}
-
-start_stop_end() {
- case "$RETVAL" in
- 0)
- [ -x /sbin/initctl ] && /sbin/initctl emit --no-wait "${NAME}-${1}"
- log_end_msg 0
- ;;
- 3)
- log_warning_msg "${DESC} already ${1}"
- log_end_msg 0
- RETVAL=0
- ;;
- *)
- log_warning_msg "FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\}"
- log_end_msg 1
- ;;
- esac
-}
-
-case "$1" in
- start)
- log_daemon_msg "Starting ${DESC}" $NAME
- start_rabbitmq
- start_stop_end "running"
- ;;
- stop)
- log_daemon_msg "Stopping ${DESC}" $NAME
- stop_rabbitmq
- start_stop_end "stopped"
- ;;
- status)
- status_rabbitmq
- ;;
- rotate-logs)
- log_action_begin_msg "Rotating log files for ${DESC}: ${NAME}"
- rotate_logs_rabbitmq
- log_action_end_msg $RETVAL
- ;;
- force-reload|reload|restart)
- log_daemon_msg "Restarting ${DESC}" $NAME
- restart_rabbitmq
- restart_end
- ;;
- try-restart)
- log_daemon_msg "Restarting ${DESC}" $NAME
- restart_running_rabbitmq
- restart_end
- ;;
- *)
- echo "Usage: $0 {start|stop|status|rotate-logs|restart|condrestart|try-restart|reload|force-reload}" >&2
- RETVAL=1
- ;;
-esac
-
-exit $RETVAL
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.logrotate b/packaging/debs/Debian/debian/rabbitmq-server.logrotate
deleted file mode 100644
index c786df77..00000000
--- a/packaging/debs/Debian/debian/rabbitmq-server.logrotate
+++ /dev/null
@@ -1,12 +0,0 @@
-/var/log/rabbitmq/*.log {
- weekly
- missingok
- rotate 20
- compress
- delaycompress
- notifempty
- sharedscripts
- postrotate
- /etc/init.d/rabbitmq-server rotate-logs > /dev/null
- endscript
-}
diff --git a/packaging/debs/Debian/debian/rules b/packaging/debs/Debian/debian/rules
deleted file mode 100644
index ecb778df..00000000
--- a/packaging/debs/Debian/debian/rules
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/make -f
-
-include /usr/share/cdbs/1/rules/debhelper.mk
-include /usr/share/cdbs/1/class/makefile.mk
-
-RABBIT_LIB=$(DEB_DESTDIR)usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)/
-RABBIT_BIN=$(DEB_DESTDIR)usr/lib/rabbitmq/bin/
-
-DEB_MAKE_INSTALL_TARGET := install TARGET_DIR=$(RABBIT_LIB) SBIN_DIR=$(RABBIT_BIN) MAN_DIR=$(DEB_DESTDIR)usr/share/man/
-DEB_MAKE_CLEAN_TARGET:= distclean
-
-DOCDIR=$(DEB_DESTDIR)usr/share/doc/rabbitmq-server/
-
-install/rabbitmq-server::
- mkdir -p $(DOCDIR)
- rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL*
- for script in rabbitmqctl rabbitmq-server rabbitmq-plugins; do \
- install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \
- done
- sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' <debian/postrm.in >debian/postrm
- install -p -D -m 0755 debian/rabbitmq-server.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server
- install -p -D -m 0644 debian/rabbitmq-server.default $(DEB_DESTDIR)etc/default/rabbitmq-server
diff --git a/packaging/debs/Debian/debian/watch b/packaging/debs/Debian/debian/watch
deleted file mode 100644
index b41aff9a..00000000
--- a/packaging/debs/Debian/debian/watch
+++ /dev/null
@@ -1,4 +0,0 @@
-version=3
-
-http://www.rabbitmq.com/releases/rabbitmq-server/v(.*)/rabbitmq-server-(\d.*)\.tar\.gz \
- debian uupdate
diff --git a/packaging/debs/apt-repository/Makefile b/packaging/debs/apt-repository/Makefile
deleted file mode 100644
index ce4347bc..00000000
--- a/packaging/debs/apt-repository/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-SIGNING_USER_EMAIL=info@rabbitmq.com
-
-ifeq "$(UNOFFICIAL_RELEASE)" ""
-HOME_ARG=HOME=$(GNUPG_PATH)
-endif
-
-all: debian_apt_repository
-
-clean:
- rm -rf debian
-
-CAN_HAS_REPREPRO=$(shell [ -f /usr/bin/reprepro ] && echo true)
-ifeq ($(CAN_HAS_REPREPRO), true)
-debian_apt_repository: clean
- mkdir -p debian/conf
- cp -a distributions debian/conf
-ifeq "$(UNOFFICIAL_RELEASE)" ""
- echo SignWith: $(SIGNING_USER_EMAIL) >> debian/conf/distributions
-endif
- for FILE in ../Debian/*.changes ; do \
- $(HOME_ARG) reprepro --ignore=wrongdistribution \
- -Vb debian include kitten $${FILE} ; \
- done
- reprepro -Vb debian createsymlinks
-else
-debian_apt_repository:
- @echo Not building APT repository as reprepro could not be found
-endif
diff --git a/packaging/debs/apt-repository/README b/packaging/debs/apt-repository/README
deleted file mode 100644
index 514a37f3..00000000
--- a/packaging/debs/apt-repository/README
+++ /dev/null
@@ -1,17 +0,0 @@
-APT repository for RabbitMQ
-
-Previously we've attempted to run a repository in the same way that
-Debian would: have repository management software installed on the
-server, and upload new packages to the repository as and when they're
-ready.
-
-This turned out to be both fiddly and annoying to do (and more
-particularly to automate) so since our repository is always going to be
-small it's easier just to create the entire repository as part of the
-build process, just like a package. It can then be moved into place as a
-single unit. The make target "debian_apt_repository" (invoked by "dist")
-will create it, and it can get moved onto the server with the rest of
-the packages.
-
-Read "README-real-repository" for information on how we used to do
-this.
diff --git a/packaging/debs/apt-repository/README-real-repository b/packaging/debs/apt-repository/README-real-repository
deleted file mode 100644
index 189852eb..00000000
--- a/packaging/debs/apt-repository/README-real-repository
+++ /dev/null
@@ -1,130 +0,0 @@
-APT Repository for RabbitMQ in Debian
-=====================================
-
-First, a note on what we're trying to do. We want a single "testing"
-repository. When RabbitMQ is more stable we will also want a
-"stable" repository. It is very important to understand that these refer
-to the state of the rabbit code, *NOT* which Debian distribution they go
-with. At the moment our dependencies are very simple so our packages can
-be used with any current Debian version (etch, lenny, sid) as well as
-with Ubuntu. So although we have a "testing" distribution, this is not
-codenamed "lenny". Instead it's currently codenamed "kitten" since
-that's a baby rabbit.
-
-Secondly, a note on software. We need a tool to manage the repository,
-and a tool to perform uploads to the repository. Debian being Debian
-there are quite a few of each. We will use "reprepro" to manage the
-repository since it's modern, maintained, and fairly simple. We will use
-"dupload" to perform the uploads since it gives us the ability to run
-arbitrary commands after the upload, which means we don't need to run a
-cron job on the web server to process uploads.
-
-Creating a repository
-=====================
-
-Much of this was cribbed from:
-http://www.debian-administration.org/articles/286
-
-The repository is fundamentally just some files in a folder, served over
-HTTP (or FTP etc). So let's make it "debian" in the root of
-www.rabbitmq.com.
-
-This means the repository will be at http://www.rabbitmq.com/debian/ and
-can be added to a sources.list as:
-
-deb http://www.rabbitmq.com/debian/ testing main
-deb-src http://www.rabbitmq.com/debian/ testing main
-
-Inside this folder we need a "conf" folder, and in
-that we need a "distributions" configuration file - see the file in this
-folder. Note that:
-
-* We list all architectures so that people can install rabbitmq-server
- on to anything.
-* We don't list the "all" architecture even though we use it; it's
- implied.
-* We only have a "main" component, we could have non-free and contrib
- here if it was relevant.
-* We list the email address associated with the key we want to use to
- sign the repository. Yes, even after signing packages we still want to
- sign the repository.
-
-We're now ready to go. Assuming the path to our repository is /path,
-(and hence configuration is in /path/conf) we can upload a file to the
-repository (creating it in the process) by doing something like this on
-the repository host:
-
-$ reprepro --ignore=wrongdistribution -Vb /path include kitten \
- rabbitmq-server_1.0.0-alpha-1_i386.changes
-
-Note that we upload to the distribution "kitten" rather than "testing".
-We also pass --ignore=wrongdistribution since the current packages are
-built to go in "unstable" (this will be changed obviously).
-
-Note also that the .changes file claims to be for i386 even though the
-package is for architecture "all". This is a bug in debhelper.
-
-Finally, if you've just created a repository, you want to run:
-
-$ reprepro -Vb /path createsymlinks
-
-since this will create "kitten" -> "testing" symlinks. You only need to
-do this once.
-
-Removing packages
-=================
-
-Fairly simple:
-
-$ reprepro --ignore=wrongdistribution -Vb /path remove kitten \
- rabbitmq-server
-
-Subsequent updates and "dupload"
-================================
-
-You can run the "reprepro" command above again to update the versions of
-software in the repository. Since we probably don't want to have to log
-into the machine in question to do this, we can use "dupload". This is a
-tool which uploads Debian packages. The supplied file "dupload.conf" can
-be renamed to ~/.dupload.conf. If you then run:
-
-$ dupload -to rabbit --nomail .
-
-in the folder with the .changes file, dupload will:
-
-* create an incoming folder in your home directory on the repository
-machine
-* upload everything there
-* run reprepro to move the packages into the repository
-* "rm -rf" the uploads folder
-
-This is a bit cheesy but should be enough for our purposes. The
-dupload.conf uses scp and ssh so you need a public-key login (or type
-your password lots).
-
-There's still an open question as to whether dupload is really needed
-for our case.
-
-Keys and signing
-================
-
-We currently sign the package as we build it; but we also need to sign
-the repository. The key is currently on my machine (mrforgetful) and has
-ID 056E8E56. We should put it on CDs though.
-
-reprepro will automatically sign the repository if we have the right
-SignWith line in the configuration, AND the secret key is installed on
-the repository server. This is obviously not ideal; not sure what the
-solution is right now.
-
-You can export the public key with:
-
-$ gpg --export --armor 056E8E56 > rabbit.pub
-
-(Open question: do we want to get our key on subkeys.pgp.net?)
-
-We can then add this key to the website and tell our users to import the
-key into apt with:
-
-# apt-key add rabbit.pub
-
diff --git a/packaging/debs/apt-repository/distributions b/packaging/debs/apt-repository/distributions
deleted file mode 100644
index 75b9fe46..00000000
--- a/packaging/debs/apt-repository/distributions
+++ /dev/null
@@ -1,7 +0,0 @@
-Origin: RabbitMQ
-Label: RabbitMQ Repository for Debian / Ubuntu etc
-Suite: testing
-Codename: kitten
-Architectures: AVR32 alpha amd64 arm armel armhf hppa hurd-i386 i386 ia64 kfreebsd-amd64 kfreebsd-i386 m32 m68k mips mipsel netbsd-alpha netbsd-i386 powerpc s390 s390x sh sparc source
-Components: main
-Description: RabbitMQ Repository for Debian / Ubuntu etc
diff --git a/packaging/debs/apt-repository/dupload.conf b/packaging/debs/apt-repository/dupload.conf
deleted file mode 100644
index 9ceed760..00000000
--- a/packaging/debs/apt-repository/dupload.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-package config;
-
-$rabbit_user = "simon";
-$rabbit_host = "mrforgetful.lshift.net";
-$rabbit_repo_path = "/srv/debian";
-$rabbit_reprepro_extra_args = "--ignore=wrongdistribution";
-
-$cfg{'rabbit'} = {
- fqdn => "$rabbit_host",
- login => "$rabbit_user",
- method => "scp",
- incoming => "incoming",
-};
-
-$preupload{'deb'} = "ssh ${rabbit_host} mkdir incoming";
-$postupload{'deb'} = "ssh ${rabbit_host} \"cd incoming && reprepro ${$rabbit_reprepro_extra_args} -Vb ${rabbit_repo_path} include kitten *.changes && cd .. && rm -r incoming\"";
diff --git a/packaging/generic-unix/Makefile b/packaging/generic-unix/Makefile
deleted file mode 100644
index b6ef9532..00000000
--- a/packaging/generic-unix/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-VERSION=0.0.0
-SOURCE_DIR=rabbitmq-server-$(VERSION)
-TARGET_DIR=rabbitmq_server-$(VERSION)
-TARGET_TARBALL=rabbitmq-server-generic-unix-$(VERSION)
-
-dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
-
- $(MAKE) -C $(SOURCE_DIR) \
- TARGET_DIR=`pwd`/$(TARGET_DIR) \
- SBIN_DIR=`pwd`/$(TARGET_DIR)/sbin \
- MAN_DIR=`pwd`/$(TARGET_DIR)/share/man \
- install
-
- sed -e 's:^SYS_PREFIX=$$:SYS_PREFIX=\$${RABBITMQ_HOME}:' \
- $(TARGET_DIR)/sbin/rabbitmq-defaults >$(TARGET_DIR)/sbin/rabbitmq-defaults.tmp \
- && mv $(TARGET_DIR)/sbin/rabbitmq-defaults.tmp $(TARGET_DIR)/sbin/rabbitmq-defaults
- chmod 0755 $(TARGET_DIR)/sbin/rabbitmq-defaults
-
- mkdir -p $(TARGET_DIR)/etc/rabbitmq
-
- tar -zcf $(TARGET_TARBALL).tar.gz $(TARGET_DIR)
- rm -rf $(SOURCE_DIR) $(TARGET_DIR)
-
-clean: clean_partial
- rm -f rabbitmq-server-generic-unix-*.tar.gz
-
-clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
diff --git a/packaging/macports/Makefile b/packaging/macports/Makefile
deleted file mode 100644
index 897fc183..00000000
--- a/packaging/macports/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-TARBALL_SRC_DIR=../../dist
-TARBALL_BIN_DIR=../../packaging/generic-unix/
-TARBALL_SRC=$(wildcard $(TARBALL_SRC_DIR)/rabbitmq-server-[0-9.]*.tar.gz)
-TARBALL_BIN=$(wildcard $(TARBALL_BIN_DIR)/rabbitmq-server-generic-unix-[0-9.]*.tar.gz)
-COMMON_DIR=../common
-VERSION=$(shell echo $(TARBALL_SRC) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
-
-# The URL at which things really get deployed
-REAL_WEB_URL=http://www.rabbitmq.com/
-
-# The user@host for an OSX machine with macports installed, which is
-# used to generate the macports index files. That step will be
-# skipped if this variable is not set. If you do set it, you might
-# also want to set SSH_OPTS, which allows adding ssh options, e.g. to
-# specify a key that will get into the OSX machine without a
-# passphrase.
-MACPORTS_USERHOST=
-
-MACPORTS_DIR=macports
-DEST=$(MACPORTS_DIR)/net/rabbitmq-server
-
-all: macports
-
-dirs:
- mkdir -p $(DEST)/files
-
-$(DEST)/Portfile: Portfile.in
- ./make-checksums.sh $(TARBALL_SRC) $(TARBALL_BIN) > checksums.sed
- sed -e "s|@VERSION@|$(VERSION)|g;s|@BASE_URL@|$(REAL_WEB_URL)|g" \
- -f checksums.sed <$^ >$@
- rm checksums.sed
-
-# The purpose of the intricate substitution below is to set up similar
-# environment vars to the ones that su will on Linux. On OS X, we
-# have to use the -m option to su in order to be able to set the shell
-# (which for the rabbitmq user would otherwise be /dev/null). But the
-# -m option means that *all* environment vars get preserved. Erlang
-# needs vars such as HOME to be set. So we have to set them
-# explicitly.
-macports: dirs $(DEST)/Portfile
- sed -e 's|@SU_RABBITMQ_SH_C@|SHELL=/bin/sh HOME=/var/lib/rabbitmq USER=rabbitmq LOGNAME=rabbitmq PATH="$$(eval `PATH=@MACPORTS_PREFIX@/bin /usr/libexec/path_helper -s`; echo $$PATH)" su -m rabbitmq -c|' \
- $(COMMON_DIR)/rabbitmq-script-wrapper >$(DEST)/files/rabbitmq-script-wrapper
- cp patch-org.macports.rabbitmq-server.plist.diff $(DEST)/files
- if [ -n "$(MACPORTS_USERHOST)" ] ; then \
- tar cf - -C $(MACPORTS_DIR) . | ssh $(SSH_OPTS) $(MACPORTS_USERHOST) ' \
- d="/tmp/mkportindex.$$$$" ; \
- mkdir $$d \
- && cd $$d \
- && tar xf - \
- && /opt/local/bin/portindex -a -o . >/dev/null \
- && tar cf - . \
- && cd \
- && rm -rf $$d' \
- | tar xf - -C $(MACPORTS_DIR) ; \
- fi
-
-clean:
- rm -rf $(MACPORTS_DIR) checksums.sed
diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in
deleted file mode 100644
index 82c1fb0c..00000000
--- a/packaging/macports/Portfile.in
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8; mode: tcl; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:filetype=tcl:et:sw=4:ts=4:sts=4
-# $Id$
-
-PortSystem 1.0
-name rabbitmq-server
-version @VERSION@
-categories net
-maintainers paperplanes.de:meyer openmaintainer
-platforms darwin
-supported_archs noarch
-
-description The RabbitMQ AMQP Server
-long_description \
- RabbitMQ is an implementation of AMQP, the emerging standard for \
- high performance enterprise messaging. The RabbitMQ server is a \
- robust and scalable implementation of an AMQP broker.
-
-
-homepage @BASE_URL@
-master_sites @BASE_URL@releases/rabbitmq-server/v${version}/
-
-distfiles ${name}-${version}${extract.suffix} \
- ${name}-generic-unix-${version}${extract.suffix}
-
-checksums \
- ${name}-${version}${extract.suffix} \
- sha1 @sha1-src@ \
- rmd160 @rmd160-src@ \
- ${name}-generic-unix-${version}${extract.suffix} \
- sha1 @sha1-bin@ \
- rmd160 @rmd160-bin@
-
-depends_lib port:erlang
-depends_build port:libxslt
-
-platform darwin 8 {
- depends_build-append port:py26-simplejson
- build.args PYTHON=${prefix}/bin/python2.6
-}
-platform darwin 9 {
- depends_build-append port:py26-simplejson
- build.args PYTHON=${prefix}/bin/python2.6
-}
-# no need for simplejson on Snow Leopard or higher
-
-
-set serveruser rabbitmq
-set servergroup rabbitmq
-set serverhome ${prefix}/var/lib/rabbitmq
-set logdir ${prefix}/var/log/rabbitmq
-set confdir ${prefix}/etc/rabbitmq
-set mnesiadbdir ${prefix}/var/lib/rabbitmq/mnesia
-set plistloc ${prefix}/etc/LaunchDaemons/org.macports.rabbitmq-server
-set sbindir ${destroot}${prefix}/lib/rabbitmq/bin
-set wrappersbin ${destroot}${prefix}/sbin
-set realsbin ${destroot}${prefix}/lib/rabbitmq/lib/rabbitmq_server-${version}/sbin
-set mansrc ${workpath}/rabbitmq_server-${version}/share/man
-set mandest ${destroot}${prefix}/share/man
-
-use_configure no
-
-use_parallel_build no
-
-build.env-append HOME=${workpath}
-
-build.env-append VERSION=${version}
-
-destroot.env-append VERSION=${version}
-
-destroot.target install_bin
-
-destroot.destdir \
- TARGET_DIR=${destroot}${prefix}/lib/rabbitmq/lib/rabbitmq_server-${version} \
- SBIN_DIR=${sbindir} \
- MAN_DIR=${destroot}${prefix}/share/man
-
-destroot.keepdirs \
- ${destroot}${confdir} \
- ${destroot}${logdir} \
- ${destroot}${mnesiadbdir}
-
-pre-destroot {
- addgroup ${servergroup}
- adduser ${serveruser} gid=[existsgroup ${servergroup}] realname=RabbitMQ\ Server home=${serverhome}
-}
-
-post-destroot {
- xinstall -d -m 775 ${destroot}${confdir}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${logdir}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${serverhome}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${mnesiadbdir}
-
- reinplace -E "s:^SYS_PREFIX=\${RABBITMQ_HOME}$:SYS_PREFIX=${prefix}:" \
- ${realsbin}/rabbitmq-defaults
- reinplace -E "s:^SYS_PREFIX=$:SYS_PREFIX=${prefix}:" \
- ${realsbin}/rabbitmq-defaults
-
- xinstall -m 555 ${filespath}/rabbitmq-script-wrapper \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:@MACPORTS_PREFIX@:${prefix}:g" \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:/usr/lib/rabbitmq/bin/:${prefix}/lib/rabbitmq/bin/:g" \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:/var/lib/rabbitmq:${prefix}/var/lib/rabbitmq:g" \
- ${wrappersbin}/rabbitmq-server
-
- file copy ${wrappersbin}/rabbitmq-server ${wrappersbin}/rabbitmqctl
- file copy ${wrappersbin}/rabbitmq-server ${wrappersbin}/rabbitmq-plugins
-
- xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz rabbitmq-plugins.1.gz \
- ${mandest}/man1/
- xinstall -m 644 -W ${mansrc}/man5 rabbitmq-env.conf.5.gz ${mandest}/man5/
-}
-
-pre-install {
- system "cd ${destroot}${plistloc}; patch <${filespath}/patch-org.macports.rabbitmq-server.plist.diff"
-}
-
-startupitem.create yes
-startupitem.init "PATH=${prefix}/bin:${prefix}/sbin:\$PATH; export PATH"
-startupitem.start "rabbitmq-server 2>&1"
-startupitem.stop "rabbitmqctl stop 2>&1"
-startupitem.logfile ${prefix}/var/log/rabbitmq/startupitem.log
diff --git a/packaging/macports/make-checksums.sh b/packaging/macports/make-checksums.sh
deleted file mode 100755
index 891de6ba..00000000
--- a/packaging/macports/make-checksums.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# NB: this script requires bash
-tarball_src=$1
-tarball_bin=$2
-for type in src bin
-do
- tarball_var=tarball_${type}
- tarball=${!tarball_var}
- for algo in sha1 rmd160
- do
- checksum=$(openssl $algo ${tarball} | awk '{print $NF}')
- echo "s|@$algo-$type@|$checksum|g"
- done
-done
diff --git a/packaging/macports/make-port-diff.sh b/packaging/macports/make-port-diff.sh
deleted file mode 100755
index ac3afa4e..00000000
--- a/packaging/macports/make-port-diff.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-# This script grabs the latest rabbitmq-server bits from the main
-# macports subversion repo, and from the rabbitmq.com macports repo,
-# and produces a diff from the former to the latter for submission
-# through the macports trac.
-
-set -e
-
-dir=/tmp/$(basename $0).$$
-mkdir -p $dir/macports $dir/rabbitmq
-
-# Get the files from the macports subversion repo
-cd $dir/macports
-svn checkout http://svn.macports.org/repository/macports/trunk/dports/net/rabbitmq-server/ 2>&1 >/dev/null
-
-# Clear out the svn $id tag from the Portfile (and avoid using -i)
-portfile=rabbitmq-server/Portfile
-sed -e 's|^# \$.*$|# $Id$|' ${portfile} > ${portfile}.new
-mv ${portfile}.new ${portfile}
-
-# Get the files from the rabbitmq.com macports repo
-cd ../rabbitmq
-curl -s http://www.rabbitmq.com/releases/macports/net/rabbitmq-server.tgz | tar xzf -
-
-cd ..
-diff -Naur --exclude=.svn macports rabbitmq
-cd /
-rm -rf $dir
diff --git a/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff b/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff
deleted file mode 100644
index 45b49496..00000000
--- a/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff
+++ /dev/null
@@ -1,10 +0,0 @@
---- org.macports.rabbitmq-server.plist.old 2009-02-26 08:00:31.000000000 -0800
-+++ org.macports.rabbitmq-server.plist 2009-02-26 08:01:27.000000000 -0800
-@@ -22,6 +22,7 @@
- <string>;</string>
- <string>--pid=none</string>
- </array>
-+<key>UserName</key><string>rabbitmq</string>
- <key>Debug</key><false/>
- <key>Disabled</key><true/>
- <key>OnDemand</key><false/>
diff --git a/packaging/standalone/Makefile b/packaging/standalone/Makefile
deleted file mode 100644
index 89ccde93..00000000
--- a/packaging/standalone/Makefile
+++ /dev/null
@@ -1,82 +0,0 @@
-VERSION=0.0.0
-SOURCE_DIR=rabbitmq-server-$(VERSION)
-TARGET_DIR=rabbitmq_server-$(VERSION)
-TARGET_TARBALL=rabbitmq-server-$(OS)-standalone-$(VERSION)
-RLS_DIR=$(TARGET_DIR)/release/$(TARGET_DIR)
-
-ERTS_VSN=$(shell erl -noshell -eval 'io:format("~s", [erlang:system_info(version)]), halt().')
-ERTS_ROOT_DIR=$(shell erl -noshell -eval 'io:format("~s", [code:root_dir()]), halt().')
-
-# used to generate the erlang release
-RABBITMQ_HOME=$(TARGET_DIR)
-RABBITMQ_EBIN_ROOT=$(RABBITMQ_HOME)/ebin
-RABBITMQ_PLUGINS_DIR=$(RABBITMQ_HOME)/plugins
-RABBITMQ_PLUGINS_EXPAND_DIR=$(RABBITMQ_PLUGINS_DIR)/expand
-
-RABBITMQ_DEFAULTS=$(TARGET_DIR)/sbin/rabbitmq-defaults
-fix_defaults = sed -e $(1) $(RABBITMQ_DEFAULTS) > $(RABBITMQ_DEFAULTS).tmp \
- && mv $(RABBITMQ_DEFAULTS).tmp $(RABBITMQ_DEFAULTS)
-
-dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
-
- $(MAKE) -C $(SOURCE_DIR) \
- TARGET_DIR=`pwd`/$(TARGET_DIR) \
- SBIN_DIR=`pwd`/$(TARGET_DIR)/sbin \
- MAN_DIR=`pwd`/$(TARGET_DIR)/share/man \
- install
-
-## Here we set the RABBITMQ_HOME variable,
-## then we make ERL_DIR point to our released erl
-## and we add the paths to our released start_clean and start_sasl boot scripts
- $(call fix_defaults,'s:^SYS_PREFIX=$$:SYS_PREFIX=\$${RABBITMQ_HOME}:')
- $(call fix_defaults,'s:^ERL_DIR=$$:ERL_DIR=\$${RABBITMQ_HOME}/erts-$(ERTS_VSN)/bin/:')
- $(call fix_defaults,'s:start_clean$$:"\$${SYS_PREFIX}/releases/$(VERSION)/start_clean":')
- $(call fix_defaults,'s:start_sasl:"\$${SYS_PREFIX}/releases/$(VERSION)/start_sasl":')
-
- chmod 0755 $(RABBITMQ_DEFAULTS)
-
- mkdir -p $(TARGET_DIR)/etc/rabbitmq
-
- $(MAKE) generate_release
-
- mkdir -p $(RLS_DIR)
- tar -C $(RLS_DIR) -xzf $(RABBITMQ_HOME)/rabbit.tar.gz
-
-# add minimal boot file
- cp $(ERTS_ROOT_DIR)/bin/start_clean.boot $(RLS_DIR)/releases/$(VERSION)
- cp $(ERTS_ROOT_DIR)/bin/start_sasl.boot $(RLS_DIR)/releases/$(VERSION)
-
-# move rabbitmq files to top level folder
- mv $(RLS_DIR)/lib/rabbit-$(VERSION)/* $(RLS_DIR)
-
-# remove empty lib/rabbit-$(VERSION) folder
- rm -rf $(RLS_DIR)/lib/rabbit-$(VERSION)
-
-# fix Erlang ROOTDIR
- patch -o $(RLS_DIR)/erts-$(ERTS_VSN)/bin/erl $(RLS_DIR)/erts-$(ERTS_VSN)/bin/erl.src < erl.diff
-
- tar -zcf $(TARGET_TARBALL).tar.gz -C $(TARGET_DIR)/release $(TARGET_DIR)
- rm -rf $(SOURCE_DIR) $(TARGET_DIR)
-
-clean: clean_partial
- rm -f rabbitmq-server-$(OS)-standalone-*.tar.gz
-
-clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
-
-.PHONY : generate_release
-generate_release:
- erlc \
- -I $(TARGET_DIR)/include/ -o src -Wall \
- -v +debug_info -Duse_specs -Duse_proper_qc \
- -pa $(TARGET_DIR)/ebin/ src/rabbit_release.erl
- erl \
- -pa "$(RABBITMQ_EBIN_ROOT)" \
- -pa src \
- -noinput \
- -hidden \
- -s rabbit_release \
- -extra "$(RABBITMQ_PLUGINS_DIR)" "$(RABBITMQ_PLUGINS_EXPAND_DIR)" "$(RABBITMQ_HOME)"
- rm src/rabbit_release.beam
diff --git a/packaging/standalone/erl.diff b/packaging/standalone/erl.diff
deleted file mode 100644
index c51bfe22..00000000
--- a/packaging/standalone/erl.diff
+++ /dev/null
@@ -1,5 +0,0 @@
-20c20,21
-< ROOTDIR="%FINAL_ROOTDIR%"
----
-> realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}" ; }
-> ROOTDIR="$(dirname `realpath $0`)/../.."
diff --git a/packaging/standalone/src/rabbit_release.erl b/packaging/standalone/src/rabbit_release.erl
deleted file mode 100644
index f5e1ecf8..00000000
--- a/packaging/standalone/src/rabbit_release.erl
+++ /dev/null
@@ -1,154 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2012 GoPivotal, Inc. All rights reserved.
-%%
--module(rabbit_release).
-
--export([start/0]).
-
--include("rabbit.hrl").
-
--define(BaseApps, [rabbit]).
--define(ERROR_CODE, 1).
-
-%% We need to calculate all the ERTS apps we need to ship with a
-%% standalone rabbit. To acomplish that we need to unpack and load the plugins
-%% apps that are shiped with rabbit.
-%% Once we get that we generate an erlang release inside a tarball.
-%% Our make file will work with that release to generate our final rabbitmq
-%% package.
-start() ->
- %% Determine our various directories
- [PluginsDistDir, UnpackedPluginDir, RabbitHome] =
- init:get_plain_arguments(),
- RootName = UnpackedPluginDir ++ "/rabbit",
-
- %% extract the plugins so we can load their apps later
- prepare_plugins(PluginsDistDir, UnpackedPluginDir),
-
- %% add the plugin ebin folder to the code path.
- add_plugins_to_path(UnpackedPluginDir),
-
- PluginAppNames = [P#plugin.name ||
- P <- rabbit_plugins:list(PluginsDistDir)],
-
- %% Build the entire set of dependencies - this will load the
- %% applications along the way
- AllApps = case catch sets:to_list(expand_dependencies(PluginAppNames)) of
- {failed_to_load_app, App, Err} ->
- terminate("failed to load application ~s:~n~p",
- [App, Err]);
- AppList ->
- AppList
- end,
-
- %% we need a list of ERTS apps we need to ship with rabbit
- {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
-
- BaseApps = SslAppsConfig ++ AllApps -- PluginAppNames,
-
- AppVersions = [determine_version(App) || App <- BaseApps],
- RabbitVersion = proplists:get_value(rabbit, AppVersions),
-
- %% Build the overall release descriptor
- RDesc = {release,
- {"rabbit", RabbitVersion},
- {erts, erlang:system_info(version)},
- AppVersions},
-
- %% Write it out to $RABBITMQ_PLUGINS_EXPAND_DIR/rabbit.rel
- rabbit_file:write_file(RootName ++ ".rel", io_lib:format("~p.~n", [RDesc])),
-
- %% Compile the script
- systools:make_script(RootName),
- systools:script2boot(RootName),
- %% Make release tarfile
- make_tar(RootName, RabbitHome),
- rabbit_misc:quit(0).
-
-make_tar(Release, RabbitHome) ->
- systools:make_tar(Release,
- [
- {dirs, [docs, etc, include, plugins, sbin, share]},
- {erts, code:root_dir()},
- {outdir, RabbitHome}
- ]).
-
-determine_version(App) ->
- application:load(App),
- {ok, Vsn} = application:get_key(App, vsn),
- {App, Vsn}.
-
-delete_recursively(Fn) ->
- case rabbit_file:recursive_delete([Fn]) of
- ok -> ok;
- {error, {Path, E}} -> {error, {cannot_delete, Path, E}};
- Error -> Error
- end.
-
-prepare_plugins(PluginsDistDir, DestDir) ->
- %% Eliminate the contents of the destination directory
- case delete_recursively(DestDir) of
- ok -> ok;
- {error, E} -> terminate("Could not delete dir ~s (~p)", [DestDir, E])
- end,
- case filelib:ensure_dir(DestDir ++ "/") of
- ok -> ok;
- {error, E2} -> terminate("Could not create dir ~s (~p)", [DestDir, E2])
- end,
-
- [prepare_plugin(Plugin, DestDir) ||
- Plugin <- rabbit_plugins:list(PluginsDistDir)].
-
-prepare_plugin(#plugin{type = ez, location = Location}, PluginDestDir) ->
- zip:unzip(Location, [{cwd, PluginDestDir}]);
-prepare_plugin(#plugin{type = dir, name = Name, location = Location},
- PluginsDestDir) ->
- rabbit_file:recursive_copy(Location,
- filename:join([PluginsDestDir, Name])).
-
-expand_dependencies(Pending) ->
- expand_dependencies(sets:new(), Pending).
-expand_dependencies(Current, []) ->
- Current;
-expand_dependencies(Current, [Next|Rest]) ->
- case sets:is_element(Next, Current) of
- true ->
- expand_dependencies(Current, Rest);
- false ->
- case application:load(Next) of
- ok ->
- ok;
- {error, {already_loaded, _}} ->
- ok;
- {error, Reason} ->
- throw({failed_to_load_app, Next, Reason})
- end,
- {ok, Required} = application:get_key(Next, applications),
- Unique = [A || A <- Required, not(sets:is_element(A, Current))],
- expand_dependencies(sets:add_element(Next, Current), Rest ++ Unique)
- end.
-
-add_plugins_to_path(PluginDir) ->
- [add_plugin_to_path(PluginName) ||
- PluginName <- filelib:wildcard(PluginDir ++ "/*/ebin/*.app")].
-
-add_plugin_to_path(PluginAppDescFn) ->
- %% Add the plugin ebin directory to the load path
- PluginEBinDirN = filename:dirname(PluginAppDescFn),
- code:add_path(PluginEBinDirN).
-
-terminate(Fmt, Args) ->
- io:format("ERROR: " ++ Fmt ++ "~n", Args),
- rabbit_misc:quit(?ERROR_CODE).
diff --git a/packaging/windows-exe/Makefile b/packaging/windows-exe/Makefile
deleted file mode 100644
index ab50e30b..00000000
--- a/packaging/windows-exe/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-VERSION=0.0.0
-ZIP=../windows/rabbitmq-server-windows-$(VERSION)
-
-dist: rabbitmq-$(VERSION).nsi rabbitmq_server-$(VERSION)
- makensis -V2 rabbitmq-$(VERSION).nsi
-
-rabbitmq-$(VERSION).nsi: rabbitmq_nsi.in
- sed \
- -e 's|%%VERSION%%|$(VERSION)|' \
- $< > $@
-
-rabbitmq_server-$(VERSION):
- unzip -q $(ZIP)
-
-clean:
- rm -rf rabbitmq-*.nsi rabbitmq_server-* rabbitmq-server-*.exe
diff --git a/packaging/windows-exe/rabbitmq.ico b/packaging/windows-exe/rabbitmq.ico
deleted file mode 100644
index 5e169a79..00000000
--- a/packaging/windows-exe/rabbitmq.ico
+++ /dev/null
Binary files differ
diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in
deleted file mode 100644
index 2ab8eee7..00000000
--- a/packaging/windows-exe/rabbitmq_nsi.in
+++ /dev/null
@@ -1,239 +0,0 @@
-; Use the "Modern" UI
-!include MUI2.nsh
-!include LogicLib.nsh
-!include WinMessages.nsh
-!include FileFunc.nsh
-!include WordFunc.nsh
-
-!define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
-!define uninstall "Software\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ"
-
-;--------------------------------
-
-; The name of the installer
-Name "RabbitMQ Server %%VERSION%%"
-
-; The file to write
-OutFile "rabbitmq-server-%%VERSION%%.exe"
-
-; Icons
-!define MUI_ICON "rabbitmq.ico"
-
-; The default installation directory
-InstallDir "$PROGRAMFILES\RabbitMQ Server"
-
-; Registry key to check for directory (so if you install again, it will
-; overwrite the old one automatically)
-InstallDirRegKey HKLM "Software\VMware, Inc.\RabbitMQ Server" "Install_Dir"
-
-; Request application privileges for Windows Vista
-RequestExecutionLevel admin
-
-SetCompressor /solid lzma
-
-VIProductVersion "%%VERSION%%.0"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "%%VERSION%%"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductName" "RabbitMQ Server"
-;VIAddVersionKey /LANG=${LANG_ENGLISH} "Comments" ""
-VIAddVersionKey /LANG=${LANG_ENGLISH} "CompanyName" "GoPivotal, Inc"
-;VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalTrademarks" "" ; TODO ?
-VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalCopyright" "Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved."
-VIAddVersionKey /LANG=${LANG_ENGLISH} "FileDescription" "RabbitMQ Server"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "%%VERSION%%"
-
-;--------------------------------
-
-; Pages
-
-
-; !insertmacro MUI_PAGE_LICENSE "..\..\LICENSE-MPL-RabbitMQ"
- !insertmacro MUI_PAGE_COMPONENTS
- !insertmacro MUI_PAGE_DIRECTORY
- !insertmacro MUI_PAGE_INSTFILES
- !insertmacro MUI_PAGE_FINISH
-
- !insertmacro MUI_UNPAGE_CONFIRM
- !insertmacro MUI_UNPAGE_INSTFILES
- !define MUI_FINISHPAGE_TEXT "RabbitMQ Server %%VERSION%% has been uninstalled from your computer.$\n$\nPlease note that the log and database directories located at $APPDATA\RabbitMQ have not been removed. You can remove them manually if desired."
- !insertmacro MUI_UNPAGE_FINISH
-
-;--------------------------------
-;Languages
-
- !insertmacro MUI_LANGUAGE "English"
-
-;--------------------------------
-
-; The stuff to install
-Section "RabbitMQ Server (required)" Rabbit
-
- SectionIn RO
-
- ; Set output path to the installation directory.
- SetOutPath $INSTDIR
-
- ; Put files there
- File /r "rabbitmq_server-%%VERSION%%"
- File "rabbitmq.ico"
-
- ; Write the installation path into the registry
- WriteRegStr HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server" "Install_Dir" "$INSTDIR"
-
- ; Write the uninstall keys for Windows
- WriteRegStr HKLM ${uninstall} "DisplayName" "RabbitMQ Server"
- WriteRegStr HKLM ${uninstall} "UninstallString" "$INSTDIR\uninstall.exe"
- WriteRegStr HKLM ${uninstall} "DisplayIcon" "$INSTDIR\uninstall.exe,0"
- WriteRegStr HKLM ${uninstall} "Publisher" "GoPivotal, Inc."
- WriteRegStr HKLM ${uninstall} "DisplayVersion" "%%VERSION%%"
- WriteRegDWORD HKLM ${uninstall} "NoModify" 1
- WriteRegDWORD HKLM ${uninstall} "NoRepair" 1
-
- ${GetSize} "$INSTDIR" "/S=0K" $0 $1 $2
- IntFmt $0 "0x%08X" $0
- WriteRegDWORD HKLM "${uninstall}" "EstimatedSize" "$0"
-
- WriteUninstaller "uninstall.exe"
-SectionEnd
-
-;--------------------------------
-
-Section "RabbitMQ Service" RabbitService
- ExpandEnvStrings $0 %COMSPEC%
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" install'
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" start'
- ReadEnvStr $1 "HOMEDRIVE"
- ReadEnvStr $2 "HOMEPATH"
- CopyFiles "$WINDIR\.erlang.cookie" "$1$2\.erlang.cookie"
-SectionEnd
-
-;--------------------------------
-
-Section "Start Menu" RabbitStartMenu
- ; In case the service is not installed, or the service installation fails,
- ; make sure these exist or Explorer will get confused.
- CreateDirectory "$APPDATA\RabbitMQ\log"
- CreateDirectory "$APPDATA\RabbitMQ\db"
-
- CreateDirectory "$SMPROGRAMS\RabbitMQ Server"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\Uninstall RabbitMQ.lnk" "$INSTDIR\uninstall.exe" "" "$INSTDIR\uninstall.exe" 0
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Plugins.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\plugins"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Logs.lnk" "$APPDATA\RabbitMQ\log"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Database Directory.lnk" "$APPDATA\RabbitMQ\db"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - (re)install.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "install" "$INSTDIR\rabbitmq.ico"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - remove.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "remove" "$INSTDIR\rabbitmq.ico"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - start.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "start" "$INSTDIR\rabbitmq.ico"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Service - stop.lnk" "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" "stop" "$INSTDIR\rabbitmq.ico"
-
- SetOutPath "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin"
- CreateShortCut "$SMPROGRAMS\RabbitMQ Server\RabbitMQ Command Prompt (sbin dir).lnk" "$WINDIR\system32\cmd.exe" "" "$WINDIR\system32\cmd.exe"
- SetOutPath $INSTDIR
-SectionEnd
-
-;--------------------------------
-
-; Section descriptions
-
-LangString DESC_Rabbit ${LANG_ENGLISH} "The RabbitMQ Server."
-LangString DESC_RabbitService ${LANG_ENGLISH} "Set up RabbitMQ as a Windows Service."
-LangString DESC_RabbitStartMenu ${LANG_ENGLISH} "Add some useful links to the start menu."
-
-!insertmacro MUI_FUNCTION_DESCRIPTION_BEGIN
- !insertmacro MUI_DESCRIPTION_TEXT ${Rabbit} $(DESC_Rabbit)
- !insertmacro MUI_DESCRIPTION_TEXT ${RabbitService} $(DESC_RabbitService)
- !insertmacro MUI_DESCRIPTION_TEXT ${RabbitStartMenu} $(DESC_RabbitStartMenu)
-!insertmacro MUI_FUNCTION_DESCRIPTION_END
-
-;--------------------------------
-
-; Uninstaller
-
-Section "Uninstall"
-
- ; Remove registry keys
- DeleteRegKey HKLM ${uninstall}
- DeleteRegKey HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server"
-
- ; TODO these will fail if the service is not installed - do we care?
- ExpandEnvStrings $0 %COMSPEC%
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" stop'
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" remove'
-
- ; Remove files and uninstaller
- RMDir /r "$INSTDIR\rabbitmq_server-%%VERSION%%"
- Delete "$INSTDIR\rabbitmq.ico"
- Delete "$INSTDIR\uninstall.exe"
-
- ; Remove start menu items
- RMDir /r "$SMPROGRAMS\RabbitMQ Server"
-
- DeleteRegValue ${env_hklm} ERLANG_HOME
- SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
-
-SectionEnd
-
-;--------------------------------
-
-; Functions
-
-Function .onInit
- Call findErlang
-
- ReadRegStr $0 HKLM ${uninstall} "UninstallString"
- ${If} $0 != ""
- MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION "RabbitMQ is already installed. $\n$\nClick 'OK' to remove the previous version or 'Cancel' to cancel this installation." IDCANCEL norun
-
- ;Run the uninstaller
- ClearErrors
- ExecWait $INSTDIR\uninstall.exe
-
- norun:
- Abort
- ${EndIf}
-FunctionEnd
-
-Function findErlang
-
- StrCpy $0 0
- StrCpy $2 "not-found"
- ${Do}
- EnumRegKey $1 HKLM Software\Ericsson\Erlang $0
- ${If} $1 = ""
- ${Break}
- ${EndIf}
- ${If} $1 <> "ErlSrv"
- StrCpy $2 $1
- ${EndIf}
-
- IntOp $0 $0 + 1
- ${Loop}
-
- ${If} $2 = "not-found"
- MessageBox MB_YESNO|MB_ICONEXCLAMATION "Erlang could not be detected.$\nYou must install Erlang before installing RabbitMQ. Would you like the installer to open a browser window to the Erlang download site?" IDNO abort
- ExecShell "open" "http://www.erlang.org/download.html"
- abort:
- Abort
- ${Else}
- ${VersionCompare} $2 "5.6.3" $0
- ${VersionCompare} $2 "5.8.1" $1
-
- ${If} $0 = 2
- MessageBox MB_OK|MB_ICONEXCLAMATION "Your installed version of Erlang ($2) is too old. Please install a more recent version."
- Abort
- ${ElseIf} $1 = 2
- MessageBox MB_YESNO|MB_ICONEXCLAMATION "Your installed version of Erlang ($2) is comparatively old.$\nFor best results, please install a newer version.$\nDo you wish to continue?" IDYES no_abort
- Abort
- no_abort:
- ${EndIf}
-
- ReadRegStr $0 HKLM "Software\Ericsson\Erlang\$2" ""
-
- ; See http://nsis.sourceforge.net/Setting_Environment_Variables
- WriteRegExpandStr ${env_hklm} ERLANG_HOME $0
- SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
-
- ; On Windows XP changing the permanent environment does not change *our*
- ; environment, so do that as well.
- System::Call 'Kernel32::SetEnvironmentVariableA(t, t) i("ERLANG_HOME", "$0").r0'
- ${EndIf}
-
-FunctionEnd
diff --git a/packaging/windows/Makefile b/packaging/windows/Makefile
deleted file mode 100644
index 1c222162..00000000
--- a/packaging/windows/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-VERSION=0.0.0
-SOURCE_DIR=rabbitmq-server-$(VERSION)
-TARGET_DIR=rabbitmq_server-$(VERSION)
-TARGET_ZIP=rabbitmq-server-windows-$(VERSION)
-
-dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
- $(MAKE) -C $(SOURCE_DIR)
-
- mkdir $(SOURCE_DIR)/sbin
- mv $(SOURCE_DIR)/scripts/*.bat $(SOURCE_DIR)/sbin
- rm -rf $(SOURCE_DIR)/scripts
- rm -rf $(SOURCE_DIR)/codegen* $(SOURCE_DIR)/Makefile $(SOURCE_DIR)/*mk
- rm -f $(SOURCE_DIR)/README
- rm -rf $(SOURCE_DIR)/docs
- rm -rf $(SOURCE_DIR)/src
- rm -rf $(SOURCE_DIR)/dist
-
- mv $(SOURCE_DIR) $(TARGET_DIR)
- mkdir -p $(TARGET_DIR)
- mv $(TARGET_DIR)/plugins/README $(TARGET_DIR)/plugins/README.txt
- xmlto -o . xhtml-nochunks ../../docs/rabbitmq-service.xml
- elinks -dump -no-references -no-numbering rabbitmq-service.html \
- > $(TARGET_DIR)/readme-service.txt
- todos $(TARGET_DIR)/readme-service.txt
- todos $(TARGET_DIR)/INSTALL
- todos $(TARGET_DIR)/LICENSE*
- todos $(TARGET_DIR)/plugins/README.txt
- rm -rf $(TARGET_DIR)/plugins-src
- zip -q -r $(TARGET_ZIP).zip $(TARGET_DIR)
- rm -rf $(TARGET_DIR) rabbitmq-service.html
-
-clean: clean_partial
- rm -f rabbitmq-server-windows-*.zip
-
-clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
diff --git a/quickcheck b/quickcheck
deleted file mode 100755
index b5382d75..00000000
--- a/quickcheck
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-%%! -sname quickcheck
--mode(compile).
-
-%% A helper to test quickcheck properties on a running broker
-%% NodeStr is a local broker node name
-%% ModStr is the module containing quickcheck properties
-%% TrialsStr is the number of trials
-main([NodeStr, ModStr, TrialsStr]) ->
- {ok, Hostname} = inet:gethostname(),
- Node = list_to_atom(NodeStr ++ "@" ++ Hostname),
- Mod = list_to_atom(ModStr),
- Trials = erlang:list_to_integer(TrialsStr),
- case rpc:call(Node, code, ensure_loaded, [proper]) of
- {module, proper} ->
- case rpc:call(Node, proper, module,
- [Mod] ++ [[{numtests, Trials}, {constraint_tries, 200}]]) of
- [] -> ok;
- _ -> quit(1)
- end;
- {badrpc, Reason} ->
- io:format("Could not contact node ~p: ~p.~n", [Node, Reason]),
- quit(2);
- {error,nofile} ->
- io:format("Module PropEr was not found on node ~p~n", [Node]),
- quit(2)
- end;
-main([]) ->
- io:format("This script requires a node name and a module.~n").
-
-quit(Status) ->
- case os:type() of
- {unix, _} -> halt(Status);
- {win32, _} -> init:stop(Status)
- end.
-
diff --git a/scripts/rabbitmq-defaults b/scripts/rabbitmq-defaults
deleted file mode 100644
index f4b131cd..00000000
--- a/scripts/rabbitmq-defaults
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2012-2013 GoPivotal, Inc. All rights reserved.
-##
-
-### next line potentially updated in package install steps
-SYS_PREFIX=
-
-### next line will be updated when generating a standalone release
-ERL_DIR=
-
-CLEAN_BOOT_FILE=start_clean
-SASL_BOOT_FILE=start_sasl
-
-## Set default values
-
-CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
-LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
-MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
-ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
-
-PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
-
-CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
diff --git a/scripts/rabbitmq-echopid.bat b/scripts/rabbitmq-echopid.bat
deleted file mode 100644
index 5c652c30..00000000
--- a/scripts/rabbitmq-echopid.bat
+++ /dev/null
@@ -1,49 +0,0 @@
-@echo off
-
-REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
-REM
-REM <rabbitmq_nodename> sname of the erlang node to connect to (required)
-
-setlocal
-
-if "%1"=="" goto fail
-
-:: set timeout vars ::
-set TIMEOUT=10
-set TIMER=1
-
-:: check that wmic exists ::
-set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
-if not exist "%WMIC_PATH%" (
- goto fail
-)
-
-:getpid
-for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%-sname %1%%'" get processid 2^>nul`) do (
- set PID=%%P
- goto echopid
-)
-
-:echopid
-:: check for pid not found ::
-if "%PID%" == "" (
- PING 127.0.0.1 -n 2 > nul
- set /a TIMER+=1
- if %TIMEOUT%==%TIMER% goto fail
- goto getpid
-)
-
-:: show pid ::
-echo %PID%
-
-:: all done ::
-:ok
-endlocal
-EXIT /B 0
-
-:: something went wrong ::
-:fail
-endlocal
-EXIT /B 1
-
-
diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env
deleted file mode 100755
index c76e7e4b..00000000
--- a/scripts/rabbitmq-env
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-# Determine where this script is really located (if this script is
-# invoked from another script, this is the location of the caller)
-SCRIPT_PATH="$0"
-while [ -h "$SCRIPT_PATH" ] ; do
- FULL_PATH=`readlink -f $SCRIPT_PATH 2>/dev/null`
- if [ "$?" != "0" ]; then
- REL_PATH=`readlink $SCRIPT_PATH`
- if expr "$REL_PATH" : '/.*' > /dev/null; then
- SCRIPT_PATH="$REL_PATH"
- else
- SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
- fi
- else
- SCRIPT_PATH=$FULL_PATH
- fi
-done
-
-SCRIPT_DIR=`dirname $SCRIPT_PATH`
-RABBITMQ_HOME="${SCRIPT_DIR}/.."
-[ "x" = "x$HOSTNAME" ] && HOSTNAME=`env hostname`
-NODENAME=rabbit@${HOSTNAME%%.*}
-
-## Set defaults
-. ${SCRIPT_DIR}/rabbitmq-defaults
-
-## Common defaults
-SERVER_ERL_ARGS="+K true +A30 +P 1048576 \
- -kernel inet_default_connect_options [{nodelay,true}]"
-
-# warn about old rabbitmq.conf file, if no new one
-if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
- [ ! -f ${CONF_ENV_FILE} ] ; then
- echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
- echo "location has moved to ${CONF_ENV_FILE}"
-fi
-
-## Get configuration variables from the configure environment file
-[ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE}
diff --git a/scripts/rabbitmq-plugins b/scripts/rabbitmq-plugins
deleted file mode 100755
index 90eb5a5d..00000000
--- a/scripts/rabbitmq-plugins
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-# Get default settings with user overrides for (RABBITMQ_)<var_name>
-# Non-empty defaults should be set in rabbitmq-env
-. `dirname $0`/rabbitmq-env
-
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
-[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
-
-##--- End of overridden <var_name> variables
-
-exec ${ERL_DIR}erl \
- -pa "${RABBITMQ_HOME}/ebin" \
- -noinput \
- -hidden \
- -sname rabbitmq-plugins$$ \
- -boot "${CLEAN_BOOT_FILE}" \
- -s rabbit_plugins_main \
- -enabled_plugins_file "$RABBITMQ_ENABLED_PLUGINS_FILE" \
- -plugins_dist_dir "$RABBITMQ_PLUGINS_DIR" \
- -extra "$@"
diff --git a/scripts/rabbitmq-plugins.bat b/scripts/rabbitmq-plugins.bat
deleted file mode 100755
index 0d1f128e..00000000
--- a/scripts/rabbitmq-plugins.bat
+++ /dev/null
@@ -1,57 +0,0 @@
-@echo off
-REM The contents of this file are subject to the Mozilla Public License
-REM Version 1.1 (the "License"); you may not use this file except in
-REM compliance with the License. You may obtain a copy of the License
-REM at http://www.mozilla.org/MPL/
-REM
-REM Software distributed under the License is distributed on an "AS IS"
-REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM the License for the specific language governing rights and
-REM limitations under the License.
-REM
-REM The Original Code is RabbitMQ.
-REM
-REM The Initial Developer of the Original Code is GoPivotal, Inc.
-REM Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!RABBITMQ_SERVICENAME!"=="" (
- set RABBITMQ_SERVICENAME=RabbitMQ
-)
-
-if "!RABBITMQ_BASE!"=="" (
- set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
- echo.
- echo ******************************
- echo ERLANG_HOME not set correctly.
- echo ******************************
- echo.
- echo Please either set ERLANG_HOME to point to your Erlang installation or place the
- echo RabbitMQ server distribution in the Erlang lib folder.
- echo.
- exit /B
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
- set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
- set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden -sname rabbitmq-plugins!RANDOM!!TIME:~9! -s rabbit_plugins_main -enabled_plugins_file "!RABBITMQ_ENABLED_PLUGINS_FILE!" -plugins_dist_dir "!RABBITMQ_PLUGINS_DIR:\=/!" -extra !STAR!
-
-endlocal
-endlocal
diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server
deleted file mode 100755
index b430eec3..00000000
--- a/scripts/rabbitmq-server
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-# Get default settings with user overrides for (RABBITMQ_)<var_name>
-# Non-empty defaults should be set in rabbitmq-env
-. `dirname $0`/rabbitmq-env
-
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-DEFAULT_NODE_IP_ADDRESS=auto
-DEFAULT_NODE_PORT=5672
-[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
-[ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
-
-[ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
-[ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
-
-[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
-[ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
-[ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
-[ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
-[ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
-[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
-
-[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
-[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
-
-[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
-[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
-
-[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
-[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
-
-[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
-
-[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
-
-## Log rotation
-[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
-[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
-[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
-[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
-
-##--- End of overridden <var_name> variables
-
-RABBITMQ_START_RABBIT=
-[ "x" = "x$RABBITMQ_ALLOW_INPUT" ] && RABBITMQ_START_RABBIT=" -noinput"
-[ "x" = "x$RABBITMQ_NODE_ONLY" ] && RABBITMQ_START_RABBIT="$RABBITMQ_START_RABBIT -s rabbit boot "
-
-case "$(uname -s)" in
- CYGWIN*) # we make no attempt to record the cygwin pid; rabbitmqctl wait
- # will not be able to make sense of it anyway
- ;;
- *) # When -detached is passed, we don't write the pid, since it'd be the
- # wrong one
- detached=""
- for opt in "$@"; do
- if [ "$opt" = "-detached" ]; then
- detached="true"
- fi
- done
- if [ $detached ]; then
- echo "Warning: PID file not written; -detached was passed." 1>&2
- else
- mkdir -p $(dirname ${RABBITMQ_PID_FILE});
- echo $$ > ${RABBITMQ_PID_FILE}
- fi
-esac
-
-RABBITMQ_EBIN_ROOT="${RABBITMQ_HOME}/ebin"
-if ! ${ERL_DIR}erl -pa "$RABBITMQ_EBIN_ROOT" \
- -boot "${CLEAN_BOOT_FILE}" \
- -noinput \
- -hidden \
- -s rabbit_prelaunch \
- -sname rabbitmqprelaunch$$ \
- -extra "${RABBITMQ_NODENAME}";
- then
- exit 1;
-fi
-
-RABBITMQ_CONFIG_ARG=
-[ -f "${RABBITMQ_CONFIG_FILE}.config" ] && RABBITMQ_CONFIG_ARG="-config ${RABBITMQ_CONFIG_FILE}"
-
-RABBITMQ_LISTEN_ARG=
-[ "x" != "x$RABBITMQ_NODE_PORT" ] && [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_LISTEN_ARG="-rabbit tcp_listeners [{\""${RABBITMQ_NODE_IP_ADDRESS}"\","${RABBITMQ_NODE_PORT}"}]"
-
-# we need to turn off path expansion because some of the vars, notably
-# RABBITMQ_SERVER_ERL_ARGS, contain terms that look like globs and
-# there is no other way of preventing their expansion.
-set -f
-
-exec ${ERL_DIR}erl \
- -pa ${RABBITMQ_EBIN_ROOT} \
- ${RABBITMQ_START_RABBIT} \
- -sname ${RABBITMQ_NODENAME} \
- -boot "${SASL_BOOT_FILE}" \
- ${RABBITMQ_CONFIG_ARG} \
- +W w \
- ${RABBITMQ_SERVER_ERL_ARGS} \
- ${RABBITMQ_LISTEN_ARG} \
- -sasl errlog_type error \
- -sasl sasl_error_logger false \
- -rabbit error_logger '{file,"'${RABBITMQ_LOGS}'"}' \
- -rabbit sasl_error_logger '{file,"'${RABBITMQ_SASL_LOGS}'"}' \
- -rabbit enabled_plugins_file "\"$RABBITMQ_ENABLED_PLUGINS_FILE\"" \
- -rabbit plugins_dir "\"$RABBITMQ_PLUGINS_DIR\"" \
- -rabbit plugins_expand_dir "\"$RABBITMQ_PLUGINS_EXPAND_DIR\"" \
- -os_mon start_cpu_sup false \
- -os_mon start_disksup false \
- -os_mon start_memsup false \
- -mnesia dir "\"${RABBITMQ_MNESIA_DIR}\"" \
- ${RABBITMQ_SERVER_START_ARGS} \
- "$@"
diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat
deleted file mode 100755
index b00821ed..00000000
--- a/scripts/rabbitmq-server.bat
+++ /dev/null
@@ -1,153 +0,0 @@
-@echo off
-REM The contents of this file are subject to the Mozilla Public License
-REM Version 1.1 (the "License"); you may not use this file except in
-REM compliance with the License. You may obtain a copy of the License
-REM at http://www.mozilla.org/MPL/
-REM
-REM Software distributed under the License is distributed on an "AS IS"
-REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM the License for the specific language governing rights and
-REM limitations under the License.
-REM
-REM The Original Code is RabbitMQ.
-REM
-REM The Initial Developer of the Original Code is GoPivotal, Inc.
-REM Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!RABBITMQ_BASE!"=="" (
- set RABBITMQ_BASE=!APPDATA!\RabbitMQ
-)
-
-if "!COMPUTERNAME!"=="" (
- set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
- set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_IP_ADDRESS=auto
- )
-) else (
- if "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_PORT=5672
- )
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
- echo.
- echo ******************************
- echo ERLANG_HOME not set correctly.
- echo ******************************
- echo.
- echo Please either set ERLANG_HOME to point to your Erlang installation or place the
- echo RabbitMQ server distribution in the Erlang lib folder.
- echo.
- exit /B
-)
-
-if "!RABBITMQ_MNESIA_BASE!"=="" (
- set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
-)
-if "!RABBITMQ_LOG_BASE!"=="" (
- set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
-)
-
-
-rem We save the previous logs in their respective backup
-rem Log management (rotation, filtering based of size...) is left as an exercice for the user.
-
-set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
-set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
-
-rem End of log management
-
-
-if "!RABBITMQ_MNESIA_DIR!"=="" (
- set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
-)
-
-if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
- set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
- set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
- set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
-
-"!ERLANG_HOME!\bin\erl.exe" ^
- -pa "!RABBITMQ_EBIN_ROOT!" ^
- -noinput -hidden ^
- -s rabbit_prelaunch ^
- -sname rabbitmqprelaunch!RANDOM!!TIME:~9! ^
- -extra "!RABBITMQ_NODENAME!"
-
-if ERRORLEVEL 1 (
- exit /B 1
-)
-
-set RABBITMQ_EBIN_PATH="-pa !RABBITMQ_EBIN_ROOT!"
-
-if "!RABBITMQ_CONFIG_FILE!"=="" (
- set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
-)
-
-if exist "!RABBITMQ_CONFIG_FILE!.config" (
- set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
-) else (
- set RABBITMQ_CONFIG_ARG=
-)
-
-set RABBITMQ_LISTEN_ARG=
-if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners [{\""!RABBITMQ_NODE_IP_ADDRESS!"\","!RABBITMQ_NODE_PORT!"}]
- )
-)
-
-"!ERLANG_HOME!\bin\erl.exe" ^
--pa "!RABBITMQ_EBIN_ROOT!" ^
--noinput ^
--boot start_sasl ^
--s rabbit boot ^
-!RABBITMQ_CONFIG_ARG! ^
--sname !RABBITMQ_NODENAME! ^
-+W w ^
-+A30 ^
-+P 1048576 ^
--kernel inet_default_connect_options "[{nodelay, true}]" ^
-!RABBITMQ_LISTEN_ARG! ^
-!RABBITMQ_SERVER_ERL_ARGS! ^
--sasl errlog_type error ^
--sasl sasl_error_logger false ^
--rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
--rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
--rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
--rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
--rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
--os_mon start_cpu_sup false ^
--os_mon start_disksup false ^
--os_mon start_memsup false ^
--mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
-!RABBITMQ_SERVER_START_ARGS! ^
-!STAR!
-
-endlocal
-endlocal
diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat
deleted file mode 100755
index d36b130c..00000000
--- a/scripts/rabbitmq-service.bat
+++ /dev/null
@@ -1,230 +0,0 @@
-@echo off
-REM The contents of this file are subject to the Mozilla Public License
-REM Version 1.1 (the "License"); you may not use this file except in
-REM compliance with the License. You may obtain a copy of the License
-REM at http://www.mozilla.org/MPL/
-REM
-REM Software distributed under the License is distributed on an "AS IS"
-REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM the License for the specific language governing rights and
-REM limitations under the License.
-REM
-REM The Original Code is RabbitMQ.
-REM
-REM The Initial Developer of the Original Code is GoPivotal, Inc.
-REM Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TN0=%~n0
-set TDP0=%~dp0
-set P1=%1
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!RABBITMQ_SERVICENAME!"=="" (
- set RABBITMQ_SERVICENAME=RabbitMQ
-)
-
-if "!RABBITMQ_BASE!"=="" (
- set RABBITMQ_BASE=!APPDATA!\!RABBITMQ_SERVICENAME!
-)
-
-if "!COMPUTERNAME!"=="" (
- set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
- set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_IP_ADDRESS=auto
- )
-) else (
- if "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_PORT=5672
- )
-)
-
-if "!ERLANG_SERVICE_MANAGER_PATH!"=="" (
- if not exist "!ERLANG_HOME!\bin\erl.exe" (
- echo.
- echo ******************************
- echo ERLANG_HOME not set correctly.
- echo ******************************
- echo.
- echo Please either set ERLANG_HOME to point to your Erlang installation or place the
- echo RabbitMQ server distribution in the Erlang lib folder.
- echo.
- exit /B
- )
- for /f "delims=" %%i in ('dir /ad/b "!ERLANG_HOME!"') do if exist "!ERLANG_HOME!\%%i\bin\erlsrv.exe" (
- set ERLANG_SERVICE_MANAGER_PATH=!ERLANG_HOME!\%%i\bin
- )
-)
-
-set CONSOLE_FLAG=
-set CONSOLE_LOG_VALID=
-for %%i in (new reuse) do if "%%i" == "!RABBITMQ_CONSOLE_LOG!" set CONSOLE_LOG_VALID=TRUE
-if "!CONSOLE_LOG_VALID!" == "TRUE" (
- set CONSOLE_FLAG=-debugtype !RABBITMQ_CONSOLE_LOG!
-)
-
-rem *** End of configuration ***
-
-if not exist "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" (
- echo.
- echo **********************************************
- echo ERLANG_SERVICE_MANAGER_PATH not set correctly.
- echo **********************************************
- echo.
- echo "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv.exe" not found
- echo Please set ERLANG_SERVICE_MANAGER_PATH to the folder containing "erlsrv.exe".
- echo.
- exit /B 1
-)
-
-if "!RABBITMQ_MNESIA_BASE!"=="" (
- set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!/db
-)
-if "!RABBITMQ_LOG_BASE!"=="" (
- set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!/log
-)
-
-
-rem We save the previous logs in their respective backup
-rem Log management (rotation, filtering based on size...) is left as an exercise for the user.
-
-set LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
-set SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
-
-rem End of log management
-
-
-if "!RABBITMQ_MNESIA_DIR!"=="" (
- set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
-)
-
-if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
- set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
-)
-
-if "!P1!" == "install" goto INSTALL_SERVICE
-for %%i in (start stop disable enable list remove) do if "%%i" == "!P1!" goto MODIFY_SERVICE
-
-echo.
-echo *********************
-echo Service control usage
-echo *********************
-echo.
-echo !TN0! help - Display this help
-echo !TN0! install - Install the !RABBITMQ_SERVICENAME! service
-echo !TN0! remove - Remove the !RABBITMQ_SERVICENAME! service
-echo.
-echo The following actions can also be accomplished by using
-echo Windows Services Management Console (services.msc):
-echo.
-echo !TN0! start - Start the !RABBITMQ_SERVICENAME! service
-echo !TN0! stop - Stop the !RABBITMQ_SERVICENAME! service
-echo !TN0! disable - Disable the !RABBITMQ_SERVICENAME! service
-echo !TN0! enable - Enable the !RABBITMQ_SERVICENAME! service
-echo.
-exit /B
-
-
-:INSTALL_SERVICE
-
-if not exist "!RABBITMQ_BASE!" (
- echo Creating base directory !RABBITMQ_BASE! & md "!RABBITMQ_BASE!"
-)
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" list !RABBITMQ_SERVICENAME! 2>NUL 1>NUL
-if errorlevel 1 (
- "!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" add !RABBITMQ_SERVICENAME! -internalservicename !RABBITMQ_SERVICENAME!
-) else (
- echo !RABBITMQ_SERVICENAME! service is already present - only updating service parameters
-)
-
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
- set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-)
-
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
- set RABBITMQ_PLUGINS_DIR=!TDP0!..\plugins
-)
-
-set RABBITMQ_EBIN_ROOT=!TDP0!..\ebin
-
-if "!RABBITMQ_CONFIG_FILE!"=="" (
- set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
-)
-
-if exist "!RABBITMQ_CONFIG_FILE!.config" (
- set RABBITMQ_CONFIG_ARG=-config "!RABBITMQ_CONFIG_FILE!"
-) else (
- set RABBITMQ_CONFIG_ARG=
-)
-
-set RABBITMQ_LISTEN_ARG=
-if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_LISTEN_ARG=-rabbit tcp_listeners "[{\"!RABBITMQ_NODE_IP_ADDRESS!\", !RABBITMQ_NODE_PORT!}]"
- )
-)
-
-set ERLANG_SERVICE_ARGUMENTS= ^
--pa "!RABBITMQ_EBIN_ROOT!" ^
--boot start_sasl ^
--s rabbit boot ^
-!RABBITMQ_CONFIG_ARG! ^
-+W w ^
-+A30 ^
-+P 1048576 ^
--kernel inet_default_connect_options "[{nodelay,true}]" ^
-!RABBITMQ_LISTEN_ARG! ^
-!RABBITMQ_SERVER_ERL_ARGS! ^
--sasl errlog_type error ^
--sasl sasl_error_logger false ^
--rabbit error_logger {file,\""!LOGS:\=/!"\"} ^
--rabbit sasl_error_logger {file,\""!SASL_LOGS:\=/!"\"} ^
--rabbit enabled_plugins_file \""!RABBITMQ_ENABLED_PLUGINS_FILE:\=/!"\" ^
--rabbit plugins_dir \""!RABBITMQ_PLUGINS_DIR:\=/!"\" ^
--rabbit plugins_expand_dir \""!RABBITMQ_PLUGINS_EXPAND_DIR:\=/!"\" ^
--os_mon start_cpu_sup false ^
--os_mon start_disksup false ^
--os_mon start_memsup false ^
--mnesia dir \""!RABBITMQ_MNESIA_DIR:\=/!"\" ^
-!RABBITMQ_SERVER_START_ARGS! ^
-!STAR!
-
-set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:\=\\!
-set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
--machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
--env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^
--workdir "!RABBITMQ_BASE!" ^
--stopaction "rabbit:stop_and_halt()." ^
--sname !RABBITMQ_NODENAME! ^
-!CONSOLE_FLAG! ^
--comment "A robust and scalable messaging broker" ^
--args "!ERLANG_SERVICE_ARGUMENTS!" > NUL
-
-goto END
-
-
-:MODIFY_SERVICE
-
-"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" !P1! !RABBITMQ_SERVICENAME!
-goto END
-
-
-:END
-
-endlocal
-endlocal
diff --git a/scripts/rabbitmqctl b/scripts/rabbitmqctl
deleted file mode 100755
index d0f22ce6..00000000
--- a/scripts/rabbitmqctl
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is GoPivotal, Inc.
-## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-##
-
-# Get default settings with user overrides for (RABBITMQ_)<var_name>
-# Non-empty defaults should be set in rabbitmq-env
-. `dirname $0`/rabbitmq-env
-
-##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-
-[ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
-[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
-
-##--- End of overridden <var_name> variables
-
-exec ${ERL_DIR}erl \
- -pa "${RABBITMQ_HOME}/ebin" \
- -noinput \
- -hidden \
- ${RABBITMQ_CTL_ERL_ARGS} \
- -sname rabbitmqctl$$ \
- -boot "${CLEAN_BOOT_FILE}" \
- -s rabbit_control_main \
- -nodename $RABBITMQ_NODENAME \
- -extra "$@"
diff --git a/scripts/rabbitmqctl.bat b/scripts/rabbitmqctl.bat
deleted file mode 100755
index d7cbbb10..00000000
--- a/scripts/rabbitmqctl.bat
+++ /dev/null
@@ -1,49 +0,0 @@
-@echo off
-REM The contents of this file are subject to the Mozilla Public License
-REM Version 1.1 (the "License"); you may not use this file except in
-REM compliance with the License. You may obtain a copy of the License
-REM at http://www.mozilla.org/MPL/
-REM
-REM Software distributed under the License is distributed on an "AS IS"
-REM basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-REM the License for the specific language governing rights and
-REM limitations under the License.
-REM
-REM The Original Code is RabbitMQ.
-REM
-REM The Initial Developer of the Original Code is GoPivotal, Inc.
-REM Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-REM
-
-setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-set STAR=%*
-setlocal enabledelayedexpansion
-
-if "!COMPUTERNAME!"=="" (
- set COMPUTERNAME=localhost
-)
-
-if "!RABBITMQ_NODENAME!"=="" (
- set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
-)
-
-if not exist "!ERLANG_HOME!\bin\erl.exe" (
- echo.
- echo ******************************
- echo ERLANG_HOME not set correctly.
- echo ******************************
- echo.
- echo Please either set ERLANG_HOME to point to your Erlang installation or place the
- echo RabbitMQ server distribution in the Erlang lib folder.
- echo.
- exit /B
-)
-
-"!ERLANG_HOME!\bin\erl.exe" -pa "!TDP0!..\ebin" -noinput -hidden !RABBITMQ_CTL_ERL_ARGS! -sname rabbitmqctl!RANDOM!!TIME:~9! -s rabbit_control_main -nodename !RABBITMQ_NODENAME! -extra !STAR!
-
-endlocal
-endlocal
diff --git a/src/app_utils.erl b/src/app_utils.erl
deleted file mode 100644
index 5ae2d295..00000000
--- a/src/app_utils.erl
+++ /dev/null
@@ -1,138 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
--module(app_utils).
-
--export([load_applications/1, start_applications/1, start_applications/2,
- stop_applications/1, stop_applications/2, app_dependency_order/2,
- wait_for_applications/1]).
-
--ifdef(use_specs).
-
--type error_handler() :: fun((atom(), any()) -> 'ok').
-
--spec load_applications([atom()]) -> 'ok'.
--spec start_applications([atom()]) -> 'ok'.
--spec stop_applications([atom()]) -> 'ok'.
--spec start_applications([atom()], error_handler()) -> 'ok'.
--spec stop_applications([atom()], error_handler()) -> 'ok'.
--spec wait_for_applications([atom()]) -> 'ok'.
--spec app_dependency_order([atom()], boolean()) -> [digraph:vertex()].
-
--endif.
-
-%%---------------------------------------------------------------------------
-%% Public API
-
-load_applications(Apps) ->
- load_applications(queue:from_list(Apps), sets:new()),
- ok.
-
-start_applications(Apps) ->
- start_applications(
- Apps, fun (App, Reason) ->
- throw({error, {cannot_start_application, App, Reason}})
- end).
-
-stop_applications(Apps) ->
- stop_applications(
- Apps, fun (App, Reason) ->
- throw({error, {cannot_stop_application, App, Reason}})
- end).
-
-start_applications(Apps, ErrorHandler) ->
- manage_applications(fun lists:foldl/3,
- fun application:start/1,
- fun application:stop/1,
- already_started,
- ErrorHandler,
- Apps).
-
-stop_applications(Apps, ErrorHandler) ->
- manage_applications(fun lists:foldr/3,
- fun application:stop/1,
- fun application:start/1,
- not_started,
- ErrorHandler,
- Apps).
-
-
-wait_for_applications(Apps) ->
- [wait_for_application(App) || App <- Apps], ok.
-
-app_dependency_order(RootApps, StripUnreachable) ->
- {ok, G} = rabbit_misc:build_acyclic_graph(
- fun (App, _Deps) -> [{App, App}] end,
- fun (App, Deps) -> [{Dep, App} || Dep <- Deps] end,
- [{App, app_dependencies(App)} ||
- {App, _Desc, _Vsn} <- application:loaded_applications()]),
- try
- case StripUnreachable of
- true -> digraph:del_vertices(G, digraph:vertices(G) --
- digraph_utils:reachable(RootApps, G));
- false -> ok
- end,
- digraph_utils:topsort(G)
- after
- true = digraph:delete(G)
- end.
-
-%%---------------------------------------------------------------------------
-%% Private API
-
-wait_for_application(Application) ->
- case lists:keymember(Application, 1, rabbit_misc:which_applications()) of
- true -> ok;
- false -> timer:sleep(1000),
- wait_for_application(Application)
- end.
-
-load_applications(Worklist, Loaded) ->
- case queue:out(Worklist) of
- {empty, _WorkList} ->
- ok;
- {{value, App}, Worklist1} ->
- case sets:is_element(App, Loaded) of
- true -> load_applications(Worklist1, Loaded);
- false -> case application:load(App) of
- ok -> ok;
- {error, {already_loaded, App}} -> ok;
- Error -> throw(Error)
- end,
- load_applications(
- queue:join(Worklist1,
- queue:from_list(app_dependencies(App))),
- sets:add_element(App, Loaded))
- end
- end.
-
-app_dependencies(App) ->
- case application:get_key(App, applications) of
- undefined -> [];
- {ok, Lst} -> Lst
- end.
-
-manage_applications(Iterate, Do, Undo, SkipError, ErrorHandler, Apps) ->
- Iterate(fun (App, Acc) ->
- case Do(App) of
- ok -> [App | Acc];
- {error, {SkipError, _}} -> Acc;
- {error, Reason} ->
- lists:foreach(Undo, Acc),
- ErrorHandler(App, Reason)
- end
- end, [], Apps),
- ok.
-
diff --git a/src/background_gc.erl b/src/background_gc.erl
deleted file mode 100644
index fbd7ce23..00000000
--- a/src/background_gc.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(background_gc).
-
--behaviour(gen_server2).
-
--export([start_link/0, run/0]).
--export([gc/0]). %% For run_interval only
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--define(MAX_RATIO, 0.01).
--define(IDEAL_INTERVAL, 60000).
-
--record(state, {last_interval}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(run/0 :: () -> 'ok').
--spec(gc/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() -> gen_server2:start_link({local, ?MODULE}, ?MODULE, [],
- [{timeout, infinity}]).
-
-run() -> gen_server2:cast(?MODULE, run).
-
-%%----------------------------------------------------------------------------
-
-init([]) -> {ok, interval_gc(#state{last_interval = ?IDEAL_INTERVAL})}.
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, {unexpected_call, Msg}, State}.
-
-handle_cast(run, State) -> gc(), {noreply, State};
-
-handle_cast(Msg, State) -> {stop, {unexpected_cast, Msg}, State}.
-
-handle_info(run, State) -> {noreply, interval_gc(State)};
-
-handle_info(Msg, State) -> {stop, {unexpected_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) -> {ok, State}.
-
-terminate(_Reason, State) -> State.
-
-%%----------------------------------------------------------------------------
-
-interval_gc(State = #state{last_interval = LastInterval}) ->
- {ok, Interval} = rabbit_misc:interval_operation(
- {?MODULE, gc, []},
- ?MAX_RATIO, ?IDEAL_INTERVAL, LastInterval),
- erlang:send_after(Interval, self(), run),
- State#state{last_interval = Interval}.
-
-gc() ->
- [garbage_collect(P) || P <- processes(),
- {status, waiting} == process_info(P, status)],
- garbage_collect(), %% since we will never be waiting...
- ok.
diff --git a/src/credit_flow.erl b/src/credit_flow.erl
deleted file mode 100644
index d48d649e..00000000
--- a/src/credit_flow.erl
+++ /dev/null
@@ -1,141 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(credit_flow).
-
-%% Credit flow is controlled by a credit specification - a
-%% {InitialCredit, MoreCreditAfter} tuple. For the message sender,
-%% credit starts at InitialCredit and is decremented with every
-%% message sent. The message receiver grants more credit to the sender
-%% by sending it a {bump_credit, ...} control message after receiving
-%% MoreCreditAfter messages. The sender should pass this message in to
-%% handle_bump_msg/1. The sender should block when it goes below 0
-%% (check by invoking blocked/0). If a process is both a sender and a
-%% receiver it will not grant any more credit to its senders when it
-%% is itself blocked - thus the only processes that need to check
-%% blocked/0 are ones that read from network sockets.
-
--define(DEFAULT_CREDIT, {200, 50}).
-
--export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0]).
--export([peer_down/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--opaque(bump_msg() :: {pid(), non_neg_integer()}).
--type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
-
--spec(send/1 :: (pid()) -> 'ok').
--spec(send/2 :: (pid(), credit_spec()) -> 'ok').
--spec(ack/1 :: (pid()) -> 'ok').
--spec(ack/2 :: (pid(), credit_spec()) -> 'ok').
--spec(handle_bump_msg/1 :: (bump_msg()) -> 'ok').
--spec(blocked/0 :: () -> boolean()).
--spec(peer_down/1 :: (pid()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% process dict update macro - eliminates the performance-hurting
-%% closure creation a HOF would introduce
--define(UPDATE(Key, Default, Var, Expr),
- begin
- %% We deliberately allow Var to escape from the case here
- %% to be used in Expr. Any temporary var we introduced
- %% would also escape, and might conflict.
- case get(Key) of
- undefined -> Var = Default;
- Var -> ok
- end,
- put(Key, Expr)
- end).
-
-%%----------------------------------------------------------------------------
-
-%% There are two "flows" here; of messages and of credit, going in
-%% opposite directions. The variable names "From" and "To" refer to
-%% the flow of credit, but the function names refer to the flow of
-%% messages. This is the clearest I can make it (since the function
-%% names form the API and want to make sense externally, while the
-%% variable names are used in credit bookkeeping and want to make
-%% sense internally).
-
-%% For any given pair of processes, ack/2 and send/2 must always be
-%% called with the same credit_spec().
-
-send(From) -> send(From, ?DEFAULT_CREDIT).
-
-send(From, {InitialCredit, _MoreCreditAfter}) ->
- ?UPDATE({credit_from, From}, InitialCredit, C,
- if C == 1 -> block(From),
- 0;
- true -> C - 1
- end).
-
-ack(To) -> ack(To, ?DEFAULT_CREDIT).
-
-ack(To, {_InitialCredit, MoreCreditAfter}) ->
- ?UPDATE({credit_to, To}, MoreCreditAfter, C,
- if C == 1 -> grant(To, MoreCreditAfter),
- MoreCreditAfter;
- true -> C - 1
- end).
-
-handle_bump_msg({From, MoreCredit}) ->
- ?UPDATE({credit_from, From}, 0, C,
- if C =< 0 andalso C + MoreCredit > 0 -> unblock(From),
- C + MoreCredit;
- true -> C + MoreCredit
- end).
-
-blocked() -> case get(credit_blocked) of
- undefined -> false;
- [] -> false;
- _ -> true
- end.
-
-peer_down(Peer) ->
- %% In theory we could also remove it from credit_deferred here, but it
- %% doesn't really matter; at some point later we will drain
- %% credit_deferred and thus send messages into the void...
- unblock(Peer),
- erase({credit_from, Peer}),
- erase({credit_to, Peer}),
- ok.
-
-%% --------------------------------------------------------------------------
-
-grant(To, Quantity) ->
- Msg = {bump_credit, {self(), Quantity}},
- case blocked() of
- false -> To ! Msg;
- true -> ?UPDATE(credit_deferred, [], Deferred, [{To, Msg} | Deferred])
- end.
-
-block(From) -> ?UPDATE(credit_blocked, [], Blocks, [From | Blocks]).
-
-unblock(From) ->
- ?UPDATE(credit_blocked, [], Blocks, Blocks -- [From]),
- case blocked() of
- false -> case erase(credit_deferred) of
- undefined -> ok;
- Credits -> [To ! Msg || {To, Msg} <- Credits]
- end;
- true -> ok
- end.
diff --git a/src/delegate.erl b/src/delegate.erl
deleted file mode 100644
index 4e1dcd2e..00000000
--- a/src/delegate.erl
+++ /dev/null
@@ -1,176 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(delegate).
-
--behaviour(gen_server2).
-
--export([start_link/1, invoke_no_result/2, invoke/2, call/2, cast/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 ::
- (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}).
--spec(invoke/2 ::
- ( pid(), fun ((pid()) -> A)) -> A;
- ([pid()], fun ((pid()) -> A)) -> {[{pid(), A}],
- [{pid(), term()}]}).
--spec(invoke_no_result/2 ::
- (pid() | [pid()], fun ((pid()) -> any())) -> 'ok').
--spec(call/2 ::
- ( pid(), any()) -> any();
- ([pid()], any()) -> {[{pid(), any()}], [{pid(), term()}]}).
--spec(cast/2 :: (pid() | [pid()], any()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
-
-%%----------------------------------------------------------------------------
-
-start_link(Num) ->
- gen_server2:start_link({local, delegate_name(Num)}, ?MODULE, [], []).
-
-invoke(Pid, Fun) when is_pid(Pid) andalso node(Pid) =:= node() ->
- Fun(Pid);
-invoke(Pid, Fun) when is_pid(Pid) ->
- case invoke([Pid], Fun) of
- {[{Pid, Result}], []} ->
- Result;
- {[], [{Pid, {Class, Reason, StackTrace}}]} ->
- erlang:raise(Class, Reason, StackTrace)
- end;
-
-invoke([], _Fun) -> %% optimisation
- {[], []};
-invoke([Pid], Fun) when node(Pid) =:= node() -> %% optimisation
- case safe_invoke(Pid, Fun) of
- {ok, _, Result} -> {[{Pid, Result}], []};
- {error, _, Error} -> {[], [{Pid, Error}]}
- end;
-invoke(Pids, Fun) when is_list(Pids) ->
- {LocalPids, Grouped} = group_pids_by_node(Pids),
- %% The use of multi_call is only safe because the timeout is
- %% infinity, and thus there is no process spawned in order to do
- %% the sending. Thus calls can't overtake preceding calls/casts.
- {Replies, BadNodes} =
- case orddict:fetch_keys(Grouped) of
- [] -> {[], []};
- RemoteNodes -> gen_server2:multi_call(
- RemoteNodes, delegate(RemoteNodes),
- {invoke, Fun, Grouped}, infinity)
- end,
- BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} ||
- BadNode <- BadNodes,
- Pid <- orddict:fetch(BadNode, Grouped)],
- ResultsNoNode = lists:append([safe_invoke(LocalPids, Fun) |
- [Results || {_Node, Results} <- Replies]]),
- lists:foldl(
- fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad};
- ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]}
- end, {[], BadPids}, ResultsNoNode).
-
-invoke_no_result(Pid, Fun) when is_pid(Pid) andalso node(Pid) =:= node() ->
- safe_invoke(Pid, Fun), %% we don't care about any error
- ok;
-invoke_no_result(Pid, Fun) when is_pid(Pid) ->
- invoke_no_result([Pid], Fun);
-
-invoke_no_result([], _Fun) -> %% optimisation
- ok;
-invoke_no_result([Pid], Fun) when node(Pid) =:= node() -> %% optimisation
- safe_invoke(Pid, Fun), %% must not die
- ok;
-invoke_no_result(Pids, Fun) when is_list(Pids) ->
- {LocalPids, Grouped} = group_pids_by_node(Pids),
- case orddict:fetch_keys(Grouped) of
- [] -> ok;
- RemoteNodes -> gen_server2:abcast(RemoteNodes, delegate(RemoteNodes),
- {invoke, Fun, Grouped})
- end,
- safe_invoke(LocalPids, Fun), %% must not die
- ok.
-
-call(PidOrPids, Msg) ->
- invoke(PidOrPids, fun (P) -> gen_server2:call(P, Msg, infinity) end).
-
-cast(PidOrPids, Msg) ->
- invoke_no_result(PidOrPids, fun (P) -> gen_server2:cast(P, Msg) end).
-
-%%----------------------------------------------------------------------------
-
-group_pids_by_node(Pids) ->
- LocalNode = node(),
- lists:foldl(
- fun (Pid, {Local, Remote}) when node(Pid) =:= LocalNode ->
- {[Pid | Local], Remote};
- (Pid, {Local, Remote}) ->
- {Local,
- orddict:update(
- node(Pid), fun (List) -> [Pid | List] end, [Pid], Remote)}
- end, {[], orddict:new()}, Pids).
-
-delegate_name(Hash) ->
- list_to_atom("delegate_" ++ integer_to_list(Hash)).
-
-delegate(RemoteNodes) ->
- case get(delegate) of
- undefined -> Name = delegate_name(
- erlang:phash2(self(),
- delegate_sup:count(RemoteNodes))),
- put(delegate, Name),
- Name;
- Name -> Name
- end.
-
-safe_invoke(Pids, Fun) when is_list(Pids) ->
- [safe_invoke(Pid, Fun) || Pid <- Pids];
-safe_invoke(Pid, Fun) when is_pid(Pid) ->
- try
- {ok, Pid, Fun(Pid)}
- catch Class:Reason ->
- {error, Pid, {Class, Reason, erlang:get_stacktrace()}}
- end.
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, node(), hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-handle_call({invoke, Fun, Grouped}, _From, Node) ->
- {reply, safe_invoke(orddict:fetch(Node, Grouped), Fun), Node, hibernate}.
-
-handle_cast({invoke, Fun, Grouped}, Node) ->
- safe_invoke(orddict:fetch(Node, Grouped), Fun),
- {noreply, Node, hibernate}.
-
-handle_info(_Info, Node) ->
- {noreply, Node, hibernate}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, Node, _Extra) ->
- {ok, Node}.
diff --git a/src/delegate_sup.erl b/src/delegate_sup.erl
deleted file mode 100644
index e31d6d38..00000000
--- a/src/delegate_sup.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(delegate_sup).
-
--behaviour(supervisor).
-
--export([start_link/1, count/1]).
-
--export([init/1]).
-
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (integer()) -> rabbit_types:ok_pid_or_error()).
--spec(count/1 :: ([node()]) -> integer()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Count) ->
- supervisor:start_link({local, ?SERVER}, ?MODULE, [Count]).
-
-count([]) ->
- 1;
-count([Node | Nodes]) ->
- try
- length(supervisor:which_children({?SERVER, Node}))
- catch exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
- count(Nodes);
- exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown;
- R =:= nodedown ->
- count(Nodes)
- end.
-
-%%----------------------------------------------------------------------------
-
-init([Count]) ->
- {ok, {{one_for_one, 10, 10},
- [{Num, {delegate, start_link, [Num]},
- transient, 16#ffffffff, worker, [delegate]} ||
- Num <- lists:seq(0, Count - 1)]}}.
diff --git a/src/dtree.erl b/src/dtree.erl
deleted file mode 100644
index 5ff36bd9..00000000
--- a/src/dtree.erl
+++ /dev/null
@@ -1,163 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% A dual-index tree.
-%%
-%% Entries have the following shape:
-%%
-%% +----+--------------------+---+
-%% | PK | SK1, SK2, ..., SKN | V |
-%% +----+--------------------+---+
-%%
-%% i.e. a primary key, set of secondary keys, and a value.
-%%
-%% There can be only one entry per primary key, but secondary keys may
-%% appear in multiple entries.
-%%
-%% The set of secondary keys must be non-empty. Or, to put it another
-%% way, entries only exist while their secondary key set is non-empty.
-
--module(dtree).
-
--export([empty/0, insert/4, take/3, take/2, take_all/2,
- is_defined/2, is_empty/1, smallest/1, size/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([?MODULE/0]).
-
--opaque(?MODULE() :: {gb_tree(), gb_tree()}).
-
--type(pk() :: any()).
--type(sk() :: any()).
--type(val() :: any()).
--type(kv() :: {pk(), val()}).
-
--spec(empty/0 :: () -> ?MODULE()).
--spec(insert/4 :: (pk(), [sk()], val(), ?MODULE()) -> ?MODULE()).
--spec(take/3 :: ([pk()], sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(take/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(take_all/2 :: (sk(), ?MODULE()) -> {[kv()], ?MODULE()}).
--spec(is_defined/2 :: (sk(), ?MODULE()) -> boolean()).
--spec(is_empty/1 :: (?MODULE()) -> boolean()).
--spec(smallest/1 :: (?MODULE()) -> kv()).
--spec(size/1 :: (?MODULE()) -> non_neg_integer()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-empty() -> {gb_trees:empty(), gb_trees:empty()}.
-
-%% Insert an entry. Fails if there already is an entry with the given
-%% primary key.
-insert(PK, [], V, {P, S}) ->
- %% dummy insert to force error if PK exists
- gb_trees:insert(PK, {gb_sets:empty(), V}, P),
- {P, S};
-insert(PK, SKs, V, {P, S}) ->
- {gb_trees:insert(PK, {gb_sets:from_list(SKs), V}, P),
- lists:foldl(fun (SK, S0) ->
- case gb_trees:lookup(SK, S0) of
- {value, PKS} -> PKS1 = gb_sets:insert(PK, PKS),
- gb_trees:update(SK, PKS1, S0);
- none -> PKS = gb_sets:singleton(PK),
- gb_trees:insert(SK, PKS, S0)
- end
- end, S, SKs)}.
-
-%% Remove the given secondary key from the entries of the given
-%% primary keys, returning the primary-key/value pairs of any entries
-%% that were dropped as the result (i.e. due to their secondary key
-%% set becoming empty). It is ok for the given primary keys and/or
-%% secondary key to not exist.
-take(PKs, SK, {P, S}) ->
- case gb_trees:lookup(SK, S) of
- none -> {[], {P, S}};
- {value, PKS} -> TakenPKS = gb_sets:from_list(PKs),
- PKSInter = gb_sets:intersection(PKS, TakenPKS),
- PKSDiff = gb_sets_difference (PKS, PKSInter),
- {KVs, P1} = take2(PKSInter, SK, P),
- {KVs, {P1, case gb_sets:is_empty(PKSDiff) of
- true -> gb_trees:delete(SK, S);
- false -> gb_trees:update(SK, PKSDiff, S)
- end}}
- end.
-
-%% Remove the given secondary key from all entries, returning the
-%% primary-key/value pairs of any entries that were dropped as the
-%% result (i.e. due to their secondary key set becoming empty). It is
-%% ok for the given secondary key to not exist.
-take(SK, {P, S}) ->
- case gb_trees:lookup(SK, S) of
- none -> {[], {P, S}};
- {value, PKS} -> {KVs, P1} = take2(PKS, SK, P),
- {KVs, {P1, gb_trees:delete(SK, S)}}
- end.
-
-%% Drop all entries which contain the given secondary key, returning
-%% the primary-key/value pairs of these entries. It is ok for the
-%% given secondary key to not exist.
-take_all(SK, {P, S}) ->
- case gb_trees:lookup(SK, S) of
- none -> {[], {P, S}};
- {value, PKS} -> {KVs, SKS, P1} = take_all2(PKS, P),
- {KVs, {P1, prune(SKS, PKS, S)}}
- end.
-
-is_defined(SK, {_P, S}) -> gb_trees:is_defined(SK, S).
-
-is_empty({P, _S}) -> gb_trees:is_empty(P).
-
-smallest({P, _S}) -> {K, {_SKS, V}} = gb_trees:smallest(P),
- {K, V}.
-
-size({P, _S}) -> gb_trees:size(P).
-
-%%----------------------------------------------------------------------------
-
-take2(PKS, SK, P) ->
- gb_sets:fold(fun (PK, {KVs, P0}) ->
- {SKS, V} = gb_trees:get(PK, P0),
- SKS1 = gb_sets:delete(SK, SKS),
- case gb_sets:is_empty(SKS1) of
- true -> KVs1 = [{PK, V} | KVs],
- {KVs1, gb_trees:delete(PK, P0)};
- false -> {KVs, gb_trees:update(PK, {SKS1, V}, P0)}
- end
- end, {[], P}, PKS).
-
-take_all2(PKS, P) ->
- gb_sets:fold(fun (PK, {KVs, SKS0, P0}) ->
- {SKS, V} = gb_trees:get(PK, P0),
- {[{PK, V} | KVs], gb_sets:union(SKS, SKS0),
- gb_trees:delete(PK, P0)}
- end, {[], gb_sets:empty(), P}, PKS).
-
-prune(SKS, PKS, S) ->
- gb_sets:fold(fun (SK0, S0) ->
- PKS1 = gb_trees:get(SK0, S0),
- PKS2 = gb_sets_difference(PKS1, PKS),
- case gb_sets:is_empty(PKS2) of
- true -> gb_trees:delete(SK0, S0);
- false -> gb_trees:update(SK0, PKS2, S0)
- end
- end, S, SKS).
-
-gb_sets_difference(S1, S2) ->
- gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl
deleted file mode 100644
index bac7c2c1..00000000
--- a/src/file_handle_cache.erl
+++ /dev/null
@@ -1,1227 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(file_handle_cache).
-
-%% A File Handle Cache
-%%
-%% This extends a subset of the functionality of the Erlang file
-%% module. In the below, we use "file handle" to specifically refer to
-%% file handles, and "file descriptor" to refer to descriptors which
-%% are not file handles, e.g. sockets.
-%%
-%% Some constraints
-%% 1) This supports one writer, multiple readers per file. Nothing
-%% else.
-%% 2) Do not open the same file from different processes. Bad things
-%% may happen, especially for writes.
-%% 3) Writes are all appends. You cannot write to the middle of a
-%% file, although you can truncate and then append if you want.
-%% 4) Although there is a write buffer, there is no read buffer. Feel
-%% free to use the read_ahead mode, but beware of the interaction
-%% between that buffer and the write buffer.
-%%
-%% Some benefits
-%% 1) You do not have to remember to call sync before close
-%% 2) Buffering is much more flexible than with the plain file module,
-%% and you can control when the buffer gets flushed out. This means
-%% that you can rely on reads-after-writes working, without having to
-%% call the expensive sync.
-%% 3) Unnecessary calls to position and sync get optimised out.
-%% 4) You can find out what your 'real' offset is, and what your
-%% 'virtual' offset is (i.e. where the hdl really is, and where it
-%% would be after the write buffer is written out).
-%%
-%% There is also a server component which serves to limit the number
-%% of open file descriptors. This is a hard limit: the server
-%% component will ensure that clients do not have more file
-%% descriptors open than it's configured to allow.
-%%
-%% On open, the client requests permission from the server to open the
-%% required number of file handles. The server may ask the client to
-%% close other file handles that it has open, or it may queue the
-%% request and ask other clients to close file handles they have open
-%% in order to satisfy the request. Requests are always satisfied in
-%% the order they arrive, even if a latter request (for a small number
-%% of file handles) can be satisfied before an earlier request (for a
-%% larger number of file handles). On close, the client sends a
-%% message to the server. These messages allow the server to keep
-%% track of the number of open handles. The client also keeps a
-%% gb_tree which is updated on every use of a file handle, mapping the
-%% time at which the file handle was last used (timestamp) to the
-%% handle. Thus the smallest key in this tree maps to the file handle
-%% that has not been used for the longest amount of time. This
-%% smallest key is included in the messages to the server. As such,
-%% the server keeps track of when the least recently used file handle
-%% was used *at the point of the most recent open or close* by each
-%% client.
-%%
-%% Note that this data can go very out of date, by the client using
-%% the least recently used handle.
-%%
-%% When the limit is exceeded (i.e. the number of open file handles is
-%% at the limit and there are pending 'open' requests), the server
-%% calculates the average age of the last reported least recently used
-%% file handle of all the clients. It then tells all the clients to
-%% close any handles not used for longer than this average, by
-%% invoking the callback the client registered. The client should
-%% receive this message and pass it into
-%% set_maximum_since_use/1. However, it is highly possible this age
-%% will be greater than the ages of all the handles the client knows
-%% of because the client has used its file handles in the mean
-%% time. Thus at this point the client reports to the server the
-%% current timestamp at which its least recently used file handle was
-%% last used. The server will check two seconds later that either it
-%% is back under the limit, in which case all is well again, or if
-%% not, it will calculate a new average age. Its data will be much
-%% more recent now, and so it is very likely that when this is
-%% communicated to the clients, the clients will close file handles.
-%% (In extreme cases, where it's very likely that all clients have
-%% used their open handles since they last sent in an update, which
-%% would mean that the average will never cause any file handles to
-%% be closed, the server can send out an average age of 0, resulting
-%% in all available clients closing all their file handles.)
-%%
-%% Care is taken to ensure that (a) processes which are blocked
-%% waiting for file descriptors to become available are not sent
-%% requests to close file handles; and (b) given it is known how many
-%% file handles a process has open, when the average age is forced to
-%% 0, close messages are only sent to enough processes to release the
-%% correct number of file handles and the list of processes is
-%% randomly shuffled. This ensures we don't cause processes to
-%% needlessly close file handles, and ensures that we don't always
-%% make such requests of the same processes.
-%%
-%% The advantage of this scheme is that there is only communication
-%% from the client to the server on open, close, and when in the
-%% process of trying to reduce file handle usage. There is no
-%% communication from the client to the server on normal file handle
-%% operations. This scheme forms a feed-back loop - the server does
-%% not care which file handles are closed, just that some are, and it
-%% checks this repeatedly when over the limit.
-%%
-%% Handles which are closed as a result of the server are put into a
-%% "soft-closed" state in which the handle is closed (data flushed out
-%% and sync'd first) but the state is maintained. The handle will be
-%% fully reopened again as soon as needed, thus users of this library
-%% do not need to worry about their handles being closed by the server
-%% - reopening them when necessary is handled transparently.
-%%
-%% The server also supports obtain, release and transfer. obtain/{0,1}
-%% blocks until a file descriptor is available, at which point the
-%% requesting process is considered to 'own' more descriptor(s).
-%% release/{0,1} is the inverse operation and releases previously obtained
-%% descriptor(s). transfer/{1,2} transfers ownership of file descriptor(s)
-%% between processes. It is non-blocking. Obtain has a
-%% lower limit, set by the ?OBTAIN_LIMIT/1 macro. File handles can use
-%% the entire limit, but will be evicted by obtain calls up to the
-%% point at which no more obtain calls can be satisfied by the obtains
-%% limit. Thus there will always be some capacity available for file
-%% handles. Processes that use obtain are never asked to return them,
-%% and they are not managed in any way by the server. It is simply a
-%% mechanism to ensure that processes that need file descriptors such
-%% as sockets can do so in such a way that the overall number of open
-%% file descriptors is managed.
-%%
-%% The callers of register_callback/3, obtain, and the argument of
-%% transfer are monitored, reducing the count of handles in use
-%% appropriately when the processes terminate.
-
--behaviour(gen_server2).
-
--export([register_callback/3]).
--export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2,
- truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1,
- copy/3, set_maximum_since_use/1, delete/1, clear/1]).
--export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
- set_limit/1, get_limit/0, info_keys/0,
- info/0, info/1]).
--export([ulimit/0]).
-
--export([start_link/0, start_link/2, init/1, handle_call/3, handle_cast/2,
- handle_info/2, terminate/2, code_change/3, prioritise_cast/3]).
-
--define(SERVER, ?MODULE).
--define(RESERVED_FOR_OTHERS, 100).
-
--define(FILE_HANDLES_LIMIT_OTHER, 1024).
--define(FILE_HANDLES_CHECK_INTERVAL, 2000).
-
--define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)).
--define(CLIENT_ETS_TABLE, file_handle_cache_client).
--define(ELDERS_ETS_TABLE, file_handle_cache_elders).
-
-%%----------------------------------------------------------------------------
-
--record(file,
- { reader_count,
- has_writer
- }).
-
--record(handle,
- { hdl,
- offset,
- is_dirty,
- write_buffer_size,
- write_buffer_size_limit,
- write_buffer,
- at_eof,
- path,
- mode,
- options,
- is_write,
- is_read,
- last_used_at
- }).
-
--record(fhc_state,
- { elders,
- limit,
- open_count,
- open_pending,
- obtain_limit,
- obtain_count,
- obtain_pending,
- clients,
- timer_ref,
- alarm_set,
- alarm_clear
- }).
-
--record(cstate,
- { pid,
- callback,
- opened,
- obtained,
- blocked,
- pending_closes
- }).
-
--record(pending,
- { kind,
- pid,
- requested,
- from
- }).
-
-%%----------------------------------------------------------------------------
-%% Specs
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(ref() :: any()).
--type(ok_or_error() :: 'ok' | {'error', any()}).
--type(val_or_error(T) :: {'ok', T} | {'error', any()}).
--type(position() :: ('bof' | 'eof' | non_neg_integer() |
- {('bof' |'eof'), non_neg_integer()} |
- {'cur', integer()})).
--type(offset() :: non_neg_integer()).
-
--spec(register_callback/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(open/3 ::
- (file:filename(), [any()],
- [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')}])
- -> val_or_error(ref())).
--spec(close/1 :: (ref()) -> ok_or_error()).
--spec(read/2 :: (ref(), non_neg_integer()) ->
- val_or_error([char()] | binary()) | 'eof').
--spec(append/2 :: (ref(), iodata()) -> ok_or_error()).
--spec(sync/1 :: (ref()) -> ok_or_error()).
--spec(position/2 :: (ref(), position()) -> val_or_error(offset())).
--spec(truncate/1 :: (ref()) -> ok_or_error()).
--spec(current_virtual_offset/1 :: (ref()) -> val_or_error(offset())).
--spec(current_raw_offset/1 :: (ref()) -> val_or_error(offset())).
--spec(flush/1 :: (ref()) -> ok_or_error()).
--spec(copy/3 :: (ref(), ref(), non_neg_integer()) ->
- val_or_error(non_neg_integer())).
--spec(delete/1 :: (ref()) -> ok_or_error()).
--spec(clear/1 :: (ref()) -> ok_or_error()).
--spec(set_maximum_since_use/1 :: (non_neg_integer()) -> 'ok').
--spec(obtain/0 :: () -> 'ok').
--spec(obtain/1 :: (non_neg_integer()) -> 'ok').
--spec(release/0 :: () -> 'ok').
--spec(release/1 :: (non_neg_integer()) -> 'ok').
--spec(transfer/1 :: (pid()) -> 'ok').
--spec(transfer/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(set_limit/1 :: (non_neg_integer()) -> 'ok').
--spec(get_limit/0 :: () -> non_neg_integer()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/0 :: () -> rabbit_types:infos()).
--spec(info/1 :: ([atom()]) -> rabbit_types:infos()).
--spec(ulimit/0 :: () -> 'unknown' | non_neg_integer()).
-
--endif.
-
-%%----------------------------------------------------------------------------
--define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]).
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-start_link() ->
- start_link(fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
-
-start_link(AlarmSet, AlarmClear) ->
- gen_server2:start_link({local, ?SERVER}, ?MODULE, [AlarmSet, AlarmClear],
- [{timeout, infinity}]).
-
-register_callback(M, F, A)
- when is_atom(M) andalso is_atom(F) andalso is_list(A) ->
- gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
-
-open(Path, Mode, Options) ->
- Path1 = filename:absname(Path),
- File1 = #file { reader_count = RCount, has_writer = HasWriter } =
- case get({Path1, fhc_file}) of
- File = #file {} -> File;
- undefined -> #file { reader_count = 0,
- has_writer = false }
- end,
- Mode1 = append_to_write(Mode),
- IsWriter = is_writer(Mode1),
- case IsWriter andalso HasWriter of
- true -> {error, writer_exists};
- false -> {ok, Ref} = new_closed_handle(Path1, Mode1, Options),
- case get_or_reopen([{Ref, new}]) of
- {ok, [_Handle1]} ->
- RCount1 = case is_reader(Mode1) of
- true -> RCount + 1;
- false -> RCount
- end,
- HasWriter1 = HasWriter orelse IsWriter,
- put({Path1, fhc_file},
- File1 #file { reader_count = RCount1,
- has_writer = HasWriter1 }),
- {ok, Ref};
- Error ->
- erase({Ref, fhc_handle}),
- Error
- end
- end.
-
-close(Ref) ->
- case erase({Ref, fhc_handle}) of
- undefined -> ok;
- Handle -> case hard_close(Handle) of
- ok -> ok;
- {Error, Handle1} -> put_handle(Ref, Handle1),
- Error
- end
- end.
-
-read(Ref, Count) ->
- with_flushed_handles(
- [Ref],
- fun ([#handle { is_read = false }]) ->
- {error, not_open_for_reading};
- ([Handle = #handle { hdl = Hdl, offset = Offset }]) ->
- case prim_file:read(Hdl, Count) of
- {ok, Data} = Obj -> Offset1 = Offset + iolist_size(Data),
- {Obj,
- [Handle #handle { offset = Offset1 }]};
- eof -> {eof, [Handle #handle { at_eof = true }]};
- Error -> {Error, [Handle]}
- end
- end).
-
-append(Ref, Data) ->
- with_handles(
- [Ref],
- fun ([#handle { is_write = false }]) ->
- {error, not_open_for_writing};
- ([Handle]) ->
- case maybe_seek(eof, Handle) of
- {{ok, _Offset}, #handle { hdl = Hdl, offset = Offset,
- write_buffer_size_limit = 0,
- at_eof = true } = Handle1} ->
- Offset1 = Offset + iolist_size(Data),
- {prim_file:write(Hdl, Data),
- [Handle1 #handle { is_dirty = true, offset = Offset1 }]};
- {{ok, _Offset}, #handle { write_buffer = WriteBuffer,
- write_buffer_size = Size,
- write_buffer_size_limit = Limit,
- at_eof = true } = Handle1} ->
- WriteBuffer1 = [Data | WriteBuffer],
- Size1 = Size + iolist_size(Data),
- Handle2 = Handle1 #handle { write_buffer = WriteBuffer1,
- write_buffer_size = Size1 },
- case Limit =/= infinity andalso Size1 > Limit of
- true -> {Result, Handle3} = write_buffer(Handle2),
- {Result, [Handle3]};
- false -> {ok, [Handle2]}
- end;
- {{error, _} = Error, Handle1} ->
- {Error, [Handle1]}
- end
- end).
-
-sync(Ref) ->
- with_flushed_handles(
- [Ref],
- fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
- ok;
- ([Handle = #handle { hdl = Hdl,
- is_dirty = true, write_buffer = [] }]) ->
- case prim_file:sync(Hdl) of
- ok -> {ok, [Handle #handle { is_dirty = false }]};
- Error -> {Error, [Handle]}
- end
- end).
-
-needs_sync(Ref) ->
- %% This must *not* use with_handles/2; see bug 25052
- case get({Ref, fhc_handle}) of
- #handle { is_dirty = false, write_buffer = [] } -> false;
- #handle {} -> true
- end.
-
-position(Ref, NewOffset) ->
- with_flushed_handles(
- [Ref],
- fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle),
- {Result, [Handle1]}
- end).
-
-truncate(Ref) ->
- with_flushed_handles(
- [Ref],
- fun ([Handle1 = #handle { hdl = Hdl }]) ->
- case prim_file:truncate(Hdl) of
- ok -> {ok, [Handle1 #handle { at_eof = true }]};
- Error -> {Error, [Handle1]}
- end
- end).
-
-current_virtual_offset(Ref) ->
- with_handles([Ref], fun ([#handle { at_eof = true, is_write = true,
- offset = Offset,
- write_buffer_size = Size }]) ->
- {ok, Offset + Size};
- ([#handle { offset = Offset }]) ->
- {ok, Offset}
- end).
-
-current_raw_offset(Ref) ->
- with_handles([Ref], fun ([Handle]) -> {ok, Handle #handle.offset} end).
-
-flush(Ref) ->
- with_flushed_handles([Ref], fun ([Handle]) -> {ok, [Handle]} end).
-
-copy(Src, Dest, Count) ->
- with_flushed_handles(
- [Src, Dest],
- fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset },
- DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }]
- ) ->
- case prim_file:copy(SHdl, DHdl, Count) of
- {ok, Count1} = Result1 ->
- {Result1,
- [SHandle #handle { offset = SOffset + Count1 },
- DHandle #handle { offset = DOffset + Count1,
- is_dirty = true }]};
- Error ->
- {Error, [SHandle, DHandle]}
- end;
- (_Handles) ->
- {error, incorrect_handle_modes}
- end).
-
-delete(Ref) ->
- case erase({Ref, fhc_handle}) of
- undefined ->
- ok;
- Handle = #handle { path = Path } ->
- case hard_close(Handle #handle { is_dirty = false,
- write_buffer = [] }) of
- ok -> prim_file:delete(Path);
- {Error, Handle1} -> put_handle(Ref, Handle1),
- Error
- end
- end.
-
-clear(Ref) ->
- with_handles(
- [Ref],
- fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) ->
- ok;
- ([Handle]) ->
- case maybe_seek(bof, Handle #handle { write_buffer = [],
- write_buffer_size = 0 }) of
- {{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
- case prim_file:truncate(Hdl) of
- ok -> {ok, [Handle1 #handle { at_eof = true }]};
- Error -> {Error, [Handle1]}
- end;
- {{error, _} = Error, Handle1} ->
- {Error, [Handle1]}
- end
- end).
-
-set_maximum_since_use(MaximumAge) ->
- Now = now(),
- case lists:foldl(
- fun ({{Ref, fhc_handle},
- Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) ->
- case Hdl =/= closed andalso
- timer:now_diff(Now, Then) >= MaximumAge of
- true -> soft_close(Ref, Handle) orelse Rep;
- false -> Rep
- end;
- (_KeyValuePair, Rep) ->
- Rep
- end, false, get()) of
- false -> age_tree_change(), ok;
- true -> ok
- end.
-
-obtain() -> obtain(1).
-release() -> release(1).
-transfer(Pid) -> transfer(Pid, 1).
-
-obtain(Count) when Count > 0 ->
- %% If the FHC isn't running, obtains succeed immediately.
- case whereis(?SERVER) of
- undefined -> ok;
- _ -> gen_server2:call(?SERVER, {obtain, Count, self()}, infinity)
- end.
-
-release(Count) when Count > 0 ->
- gen_server2:cast(?SERVER, {release, Count, self()}).
-
-transfer(Pid, Count) when Count > 0 ->
- gen_server2:cast(?SERVER, {transfer, Count, self(), Pid}).
-
-set_limit(Limit) ->
- gen_server2:call(?SERVER, {set_limit, Limit}, infinity).
-
-get_limit() ->
- gen_server2:call(?SERVER, get_limit, infinity).
-
-info_keys() -> ?INFO_KEYS.
-
-info() -> info(?INFO_KEYS).
-info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
-
-%%----------------------------------------------------------------------------
-%% Internal functions
-%%----------------------------------------------------------------------------
-
-is_reader(Mode) -> lists:member(read, Mode).
-
-is_writer(Mode) -> lists:member(write, Mode).
-
-append_to_write(Mode) ->
- case lists:member(append, Mode) of
- true -> [write | Mode -- [append, write]];
- false -> Mode
- end.
-
-with_handles(Refs, Fun) ->
- case get_or_reopen([{Ref, reopen} || Ref <- Refs]) of
- {ok, Handles} ->
- case Fun(Handles) of
- {Result, Handles1} when is_list(Handles1) ->
- lists:zipwith(fun put_handle/2, Refs, Handles1),
- Result;
- Result ->
- Result
- end;
- Error ->
- Error
- end.
-
-with_flushed_handles(Refs, Fun) ->
- with_handles(
- Refs,
- fun (Handles) ->
- case lists:foldl(
- fun (Handle, {ok, HandlesAcc}) ->
- {Res, Handle1} = write_buffer(Handle),
- {Res, [Handle1 | HandlesAcc]};
- (Handle, {Error, HandlesAcc}) ->
- {Error, [Handle | HandlesAcc]}
- end, {ok, []}, Handles) of
- {ok, Handles1} ->
- Fun(lists:reverse(Handles1));
- {Error, Handles1} ->
- {Error, lists:reverse(Handles1)}
- end
- end).
-
-get_or_reopen(RefNewOrReopens) ->
- case partition_handles(RefNewOrReopens) of
- {OpenHdls, []} ->
- {ok, [Handle || {_Ref, Handle} <- OpenHdls]};
- {OpenHdls, ClosedHdls} ->
- Oldest = oldest(get_age_tree(), fun () -> now() end),
- case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
- Oldest}, infinity) of
- ok ->
- case reopen(ClosedHdls) of
- {ok, RefHdls} -> sort_handles(RefNewOrReopens,
- OpenHdls, RefHdls, []);
- Error -> Error
- end;
- close ->
- [soft_close(Ref, Handle) ||
- {{Ref, fhc_handle}, Handle = #handle { hdl = Hdl }} <-
- get(),
- Hdl =/= closed],
- get_or_reopen(RefNewOrReopens)
- end
- end.
-
-reopen(ClosedHdls) -> reopen(ClosedHdls, get_age_tree(), []).
-
-reopen([], Tree, RefHdls) ->
- put_age_tree(Tree),
- {ok, lists:reverse(RefHdls)};
-reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed,
- path = Path,
- mode = Mode,
- offset = Offset,
- last_used_at = undefined }} |
- RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
- case prim_file:open(Path, case NewOrReopen of
- new -> Mode;
- reopen -> [read | Mode]
- end) of
- {ok, Hdl} ->
- Now = now(),
- {{ok, _Offset}, Handle1} =
- maybe_seek(Offset, Handle #handle { hdl = Hdl,
- offset = 0,
- last_used_at = Now }),
- put({Ref, fhc_handle}, Handle1),
- reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree),
- [{Ref, Handle1} | RefHdls]);
- Error ->
- %% NB: none of the handles in ToOpen are in the age tree
- Oldest = oldest(Tree, fun () -> undefined end),
- [gen_server2:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
- put_age_tree(Tree),
- Error
- end.
-
-partition_handles(RefNewOrReopens) ->
- lists:foldr(
- fun ({Ref, NewOrReopen}, {Open, Closed}) ->
- case get({Ref, fhc_handle}) of
- #handle { hdl = closed } = Handle ->
- {Open, [{Ref, NewOrReopen, Handle} | Closed]};
- #handle {} = Handle ->
- {[{Ref, Handle} | Open], Closed}
- end
- end, {[], []}, RefNewOrReopens).
-
-sort_handles([], [], [], Acc) ->
- {ok, lists:reverse(Acc)};
-sort_handles([{Ref, _} | RefHdls], [{Ref, Handle} | RefHdlsA], RefHdlsB, Acc) ->
- sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]);
-sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) ->
- sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]).
-
-put_handle(Ref, Handle = #handle { last_used_at = Then }) ->
- Now = now(),
- age_tree_update(Then, Now, Ref),
- put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }).
-
-with_age_tree(Fun) -> put_age_tree(Fun(get_age_tree())).
-
-get_age_tree() ->
- case get(fhc_age_tree) of
- undefined -> gb_trees:empty();
- AgeTree -> AgeTree
- end.
-
-put_age_tree(Tree) -> put(fhc_age_tree, Tree).
-
-age_tree_update(Then, Now, Ref) ->
- with_age_tree(
- fun (Tree) ->
- gb_trees:insert(Now, Ref, gb_trees:delete_any(Then, Tree))
- end).
-
-age_tree_delete(Then) ->
- with_age_tree(
- fun (Tree) ->
- Tree1 = gb_trees:delete_any(Then, Tree),
- Oldest = oldest(Tree1, fun () -> undefined end),
- gen_server2:cast(?SERVER, {close, self(), Oldest}),
- Tree1
- end).
-
-age_tree_change() ->
- with_age_tree(
- fun (Tree) ->
- case gb_trees:is_empty(Tree) of
- true -> Tree;
- false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
- gen_server2:cast(?SERVER, {update, self(), Oldest})
- end,
- Tree
- end).
-
-oldest(Tree, DefaultFun) ->
- case gb_trees:is_empty(Tree) of
- true -> DefaultFun();
- false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
- Oldest
- end.
-
-new_closed_handle(Path, Mode, Options) ->
- WriteBufferSize =
- case proplists:get_value(write_buffer, Options, unbuffered) of
- unbuffered -> 0;
- infinity -> infinity;
- N when is_integer(N) -> N
- end,
- Ref = make_ref(),
- put({Ref, fhc_handle}, #handle { hdl = closed,
- offset = 0,
- is_dirty = false,
- write_buffer_size = 0,
- write_buffer_size_limit = WriteBufferSize,
- write_buffer = [],
- at_eof = false,
- path = Path,
- mode = Mode,
- options = Options,
- is_write = is_writer(Mode),
- is_read = is_reader(Mode),
- last_used_at = undefined }),
- {ok, Ref}.
-
-soft_close(Ref, Handle) ->
- {Res, Handle1} = soft_close(Handle),
- case Res of
- ok -> put({Ref, fhc_handle}, Handle1),
- true;
- _ -> put_handle(Ref, Handle1),
- false
- end.
-
-soft_close(Handle = #handle { hdl = closed }) ->
- {ok, Handle};
-soft_close(Handle) ->
- case write_buffer(Handle) of
- {ok, #handle { hdl = Hdl,
- is_dirty = IsDirty,
- last_used_at = Then } = Handle1 } ->
- ok = case IsDirty of
- true -> prim_file:sync(Hdl);
- false -> ok
- end,
- ok = prim_file:close(Hdl),
- age_tree_delete(Then),
- {ok, Handle1 #handle { hdl = closed,
- is_dirty = false,
- last_used_at = undefined }};
- {_Error, _Handle} = Result ->
- Result
- end.
-
-hard_close(Handle) ->
- case soft_close(Handle) of
- {ok, #handle { path = Path,
- is_read = IsReader, is_write = IsWriter }} ->
- #file { reader_count = RCount, has_writer = HasWriter } = File =
- get({Path, fhc_file}),
- RCount1 = case IsReader of
- true -> RCount - 1;
- false -> RCount
- end,
- HasWriter1 = HasWriter andalso not IsWriter,
- case RCount1 =:= 0 andalso not HasWriter1 of
- true -> erase({Path, fhc_file});
- false -> put({Path, fhc_file},
- File #file { reader_count = RCount1,
- has_writer = HasWriter1 })
- end,
- ok;
- {_Error, _Handle} = Result ->
- Result
- end.
-
-maybe_seek(NewOffset, Handle = #handle { hdl = Hdl, offset = Offset,
- at_eof = AtEoF }) ->
- {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Offset, NewOffset),
- case (case NeedsSeek of
- true -> prim_file:position(Hdl, NewOffset);
- false -> {ok, Offset}
- end) of
- {ok, Offset1} = Result ->
- {Result, Handle #handle { offset = Offset1, at_eof = AtEoF1 }};
- {error, _} = Error ->
- {Error, Handle}
- end.
-
-needs_seek( AtEoF, _CurOffset, cur ) -> {AtEoF, false};
-needs_seek( AtEoF, _CurOffset, {cur, 0}) -> {AtEoF, false};
-needs_seek( true, _CurOffset, eof ) -> {true , false};
-needs_seek( true, _CurOffset, {eof, 0}) -> {true , false};
-needs_seek( false, _CurOffset, eof ) -> {true , true };
-needs_seek( false, _CurOffset, {eof, 0}) -> {true , true };
-needs_seek( AtEoF, 0, bof ) -> {AtEoF, false};
-needs_seek( AtEoF, 0, {bof, 0}) -> {AtEoF, false};
-needs_seek( AtEoF, CurOffset, CurOffset) -> {AtEoF, false};
-needs_seek( true, CurOffset, {bof, DesiredOffset})
- when DesiredOffset >= CurOffset ->
- {true, true};
-needs_seek( true, _CurOffset, {cur, DesiredOffset})
- when DesiredOffset > 0 ->
- {true, true};
-needs_seek( true, CurOffset, DesiredOffset) %% same as {bof, DO}
- when is_integer(DesiredOffset) andalso DesiredOffset >= CurOffset ->
- {true, true};
-%% because we can't really track size, we could well end up at EoF and not know
-needs_seek(_AtEoF, _CurOffset, _DesiredOffset) ->
- {false, true}.
-
-write_buffer(Handle = #handle { write_buffer = [] }) ->
- {ok, Handle};
-write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
- write_buffer = WriteBuffer,
- write_buffer_size = DataSize,
- at_eof = true }) ->
- case prim_file:write(Hdl, lists:reverse(WriteBuffer)) of
- ok ->
- Offset1 = Offset + DataSize,
- {ok, Handle #handle { offset = Offset1, is_dirty = true,
- write_buffer = [], write_buffer_size = 0 }};
- {error, _} = Error ->
- {Error, Handle}
- end.
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(total_limit, #fhc_state{limit = Limit}) -> Limit;
-i(total_used, #fhc_state{open_count = C1, obtain_count = C2}) -> C1 + C2;
-i(sockets_limit, #fhc_state{obtain_limit = Limit}) -> Limit;
-i(sockets_used, #fhc_state{obtain_count = Count}) -> Count;
-i(Item, _) -> throw({bad_argument, Item}).
-
-%%----------------------------------------------------------------------------
-%% gen_server2 callbacks
-%%----------------------------------------------------------------------------
-
-init([AlarmSet, AlarmClear]) ->
- Limit = case application:get_env(file_handles_high_watermark) of
- {ok, Watermark} when (is_integer(Watermark) andalso
- Watermark > 0) ->
- Watermark;
- _ ->
- case ulimit() of
- unknown -> ?FILE_HANDLES_LIMIT_OTHER;
- Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS])
- end
- end,
- ObtainLimit = obtain_limit(Limit),
- error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
- [Limit, ObtainLimit]),
- Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
- Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
- {ok, #fhc_state { elders = Elders,
- limit = Limit,
- open_count = 0,
- open_pending = pending_new(),
- obtain_limit = ObtainLimit,
- obtain_count = 0,
- obtain_pending = pending_new(),
- clients = Clients,
- timer_ref = undefined,
- alarm_set = AlarmSet,
- alarm_clear = AlarmClear }}.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- {release, _, _} -> 5;
- _ -> 0
- end.
-
-handle_call({open, Pid, Requested, EldestUnusedSince}, From,
- State = #fhc_state { open_count = Count,
- open_pending = Pending,
- elders = Elders,
- clients = Clients })
- when EldestUnusedSince =/= undefined ->
- true = ets:insert(Elders, {Pid, EldestUnusedSince}),
- Item = #pending { kind = open,
- pid = Pid,
- requested = Requested,
- from = From },
- ok = track_client(Pid, Clients),
- case needs_reduce(State #fhc_state { open_count = Count + Requested }) of
- true -> case ets:lookup(Clients, Pid) of
- [#cstate { opened = 0 }] ->
- true = ets:update_element(
- Clients, Pid, {#cstate.blocked, true}),
- {noreply,
- reduce(State #fhc_state {
- open_pending = pending_in(Item, Pending) })};
- [#cstate { opened = Opened }] ->
- true = ets:update_element(
- Clients, Pid,
- {#cstate.pending_closes, Opened}),
- {reply, close, State}
- end;
- false -> {noreply, run_pending_item(Item, State)}
- end;
-
-handle_call({obtain, N, Pid}, From, State = #fhc_state {
- obtain_count = Count,
- obtain_pending = Pending,
- clients = Clients }) ->
- ok = track_client(Pid, Clients),
- Item = #pending { kind = obtain, pid = Pid, requested = N, from = From },
- Enqueue = fun () ->
- true = ets:update_element(Clients, Pid,
- {#cstate.blocked, true}),
- State #fhc_state {
- obtain_pending = pending_in(Item, Pending) }
- end,
- {noreply,
- case obtain_limit_reached(State) of
- true -> Enqueue();
- false -> case needs_reduce(State #fhc_state {
- obtain_count = Count + N }) of
- true -> reduce(Enqueue());
- false -> adjust_alarm(
- State, run_pending_item(Item, State))
- end
- end};
-
-handle_call({set_limit, Limit}, _From, State) ->
- {reply, ok, adjust_alarm(
- State, maybe_reduce(
- process_pending(
- State #fhc_state {
- limit = Limit,
- obtain_limit = obtain_limit(Limit) })))};
-
-handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) ->
- {reply, Limit, State};
-
-handle_call({info, Items}, _From, State) ->
- {reply, infos(Items, State), State}.
-
-handle_cast({register_callback, Pid, MFA},
- State = #fhc_state { clients = Clients }) ->
- ok = track_client(Pid, Clients),
- true = ets:update_element(Clients, Pid, {#cstate.callback, MFA}),
- {noreply, State};
-
-handle_cast({update, Pid, EldestUnusedSince},
- State = #fhc_state { elders = Elders })
- when EldestUnusedSince =/= undefined ->
- true = ets:insert(Elders, {Pid, EldestUnusedSince}),
- %% don't call maybe_reduce from here otherwise we can create a
- %% storm of messages
- {noreply, State};
-
-handle_cast({release, N, Pid}, State) ->
- {noreply, adjust_alarm(State, process_pending(
- update_counts(obtain, Pid, -N, State)))};
-
-handle_cast({close, Pid, EldestUnusedSince},
- State = #fhc_state { elders = Elders, clients = Clients }) ->
- true = case EldestUnusedSince of
- undefined -> ets:delete(Elders, Pid);
- _ -> ets:insert(Elders, {Pid, EldestUnusedSince})
- end,
- ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}),
- {noreply, adjust_alarm(State, process_pending(
- update_counts(open, Pid, -1, State)))};
-
-handle_cast({transfer, N, FromPid, ToPid}, State) ->
- ok = track_client(ToPid, State#fhc_state.clients),
- {noreply, process_pending(
- update_counts(obtain, ToPid, +N,
- update_counts(obtain, FromPid, -N, State)))}.
-
-handle_info(check_counts, State) ->
- {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
-
-handle_info({'DOWN', _MRef, process, Pid, _Reason},
- State = #fhc_state { elders = Elders,
- open_count = OpenCount,
- open_pending = OpenPending,
- obtain_count = ObtainCount,
- obtain_pending = ObtainPending,
- clients = Clients }) ->
- [#cstate { opened = Opened, obtained = Obtained }] =
- ets:lookup(Clients, Pid),
- true = ets:delete(Clients, Pid),
- true = ets:delete(Elders, Pid),
- FilterFun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end,
- {noreply, adjust_alarm(
- State,
- process_pending(
- State #fhc_state {
- open_count = OpenCount - Opened,
- open_pending = filter_pending(FilterFun, OpenPending),
- obtain_count = ObtainCount - Obtained,
- obtain_pending = filter_pending(FilterFun, ObtainPending) }))}.
-
-terminate(_Reason, State = #fhc_state { clients = Clients,
- elders = Elders }) ->
- ets:delete(Clients),
- ets:delete(Elders),
- State.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-%% pending queue abstraction helpers
-%%----------------------------------------------------------------------------
-
-queue_fold(Fun, Init, Q) ->
- case queue:out(Q) of
- {empty, _Q} -> Init;
- {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
- end.
-
-filter_pending(Fun, {Count, Queue}) ->
- {Delta, Queue1} =
- queue_fold(
- fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) ->
- case Fun(Item) of
- true -> {DeltaN, queue:in(Item, QueueN)};
- false -> {DeltaN - Requested, QueueN}
- end
- end, {0, queue:new()}, Queue),
- {Count + Delta, Queue1}.
-
-pending_new() ->
- {0, queue:new()}.
-
-pending_in(Item = #pending { requested = Requested }, {Count, Queue}) ->
- {Count + Requested, queue:in(Item, Queue)}.
-
-pending_out({0, _Queue} = Pending) ->
- {empty, Pending};
-pending_out({N, Queue}) ->
- {{value, #pending { requested = Requested }} = Result, Queue1} =
- queue:out(Queue),
- {Result, {N - Requested, Queue1}}.
-
-pending_count({Count, _Queue}) ->
- Count.
-
-pending_is_empty({0, _Queue}) ->
- true;
-pending_is_empty({_N, _Queue}) ->
- false.
-
-%%----------------------------------------------------------------------------
-%% server helpers
-%%----------------------------------------------------------------------------
-
-obtain_limit(infinity) -> infinity;
-obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of
- OLimit when OLimit < 0 -> 0;
- OLimit -> OLimit
- end.
-
-obtain_limit_reached(#fhc_state { obtain_limit = Limit,
- obtain_count = Count}) ->
- Limit =/= infinity andalso Count >= Limit.
-
-adjust_alarm(OldState = #fhc_state { alarm_set = AlarmSet,
- alarm_clear = AlarmClear }, NewState) ->
- case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of
- {false, true} -> AlarmSet({file_descriptor_limit, []});
- {true, false} -> AlarmClear(file_descriptor_limit);
- _ -> ok
- end,
- NewState.
-
-process_pending(State = #fhc_state { limit = infinity }) ->
- State;
-process_pending(State) ->
- process_open(process_obtain(State)).
-
-process_open(State = #fhc_state { limit = Limit,
- open_pending = Pending,
- open_count = OpenCount,
- obtain_count = ObtainCount }) ->
- {Pending1, State1} =
- process_pending(Pending, Limit - (ObtainCount + OpenCount), State),
- State1 #fhc_state { open_pending = Pending1 }.
-
-process_obtain(State = #fhc_state { limit = Limit,
- obtain_pending = Pending,
- obtain_limit = ObtainLimit,
- obtain_count = ObtainCount,
- open_count = OpenCount }) ->
- Quota = lists:min([ObtainLimit - ObtainCount,
- Limit - (ObtainCount + OpenCount)]),
- {Pending1, State1} = process_pending(Pending, Quota, State),
- State1 #fhc_state { obtain_pending = Pending1 }.
-
-process_pending(Pending, Quota, State) when Quota =< 0 ->
- {Pending, State};
-process_pending(Pending, Quota, State) ->
- case pending_out(Pending) of
- {empty, _Pending} ->
- {Pending, State};
- {{value, #pending { requested = Requested }}, _Pending1}
- when Requested > Quota ->
- {Pending, State};
- {{value, #pending { requested = Requested } = Item}, Pending1} ->
- process_pending(Pending1, Quota - Requested,
- run_pending_item(Item, State))
- end.
-
-run_pending_item(#pending { kind = Kind,
- pid = Pid,
- requested = Requested,
- from = From },
- State = #fhc_state { clients = Clients }) ->
- gen_server2:reply(From, ok),
- true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
- update_counts(Kind, Pid, Requested, State).
-
-update_counts(Kind, Pid, Delta,
- State = #fhc_state { open_count = OpenCount,
- obtain_count = ObtainCount,
- clients = Clients }) ->
- {OpenDelta, ObtainDelta} = update_counts1(Kind, Pid, Delta, Clients),
- State #fhc_state { open_count = OpenCount + OpenDelta,
- obtain_count = ObtainCount + ObtainDelta }.
-
-update_counts1(open, Pid, Delta, Clients) ->
- ets:update_counter(Clients, Pid, {#cstate.opened, Delta}),
- {Delta, 0};
-update_counts1(obtain, Pid, Delta, Clients) ->
- ets:update_counter(Clients, Pid, {#cstate.obtained, Delta}),
- {0, Delta}.
-
-maybe_reduce(State) ->
- case needs_reduce(State) of
- true -> reduce(State);
- false -> State
- end.
-
-needs_reduce(#fhc_state { limit = Limit,
- open_count = OpenCount,
- open_pending = OpenPending,
- obtain_count = ObtainCount,
- obtain_limit = ObtainLimit,
- obtain_pending = ObtainPending }) ->
- Limit =/= infinity
- andalso ((OpenCount + ObtainCount > Limit)
- orelse (not pending_is_empty(OpenPending))
- orelse (ObtainCount < ObtainLimit
- andalso not pending_is_empty(ObtainPending))).
-
-reduce(State = #fhc_state { open_pending = OpenPending,
- obtain_pending = ObtainPending,
- elders = Elders,
- clients = Clients,
- timer_ref = TRef }) ->
- Now = now(),
- {CStates, Sum, ClientCount} =
- ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
- [#cstate { pending_closes = PendingCloses,
- opened = Opened,
- blocked = Blocked } = CState] =
- ets:lookup(Clients, Pid),
- case Blocked orelse PendingCloses =:= Opened of
- true -> Accs;
- false -> {[CState | CStatesAcc],
- SumAcc + timer:now_diff(Now, Eldest),
- CountAcc + 1}
- end
- end, {[], 0, 0}, Elders),
- case CStates of
- [] -> ok;
- _ -> case (Sum / ClientCount) -
- (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of
- AverageAge when AverageAge > 0 ->
- notify_age(CStates, AverageAge);
- _ ->
- notify_age0(Clients, CStates,
- pending_count(OpenPending) +
- pending_count(ObtainPending))
- end
- end,
- case TRef of
- undefined -> TRef1 = erlang:send_after(
- ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER,
- check_counts),
- State #fhc_state { timer_ref = TRef1 };
- _ -> State
- end.
-
-notify_age(CStates, AverageAge) ->
- lists:foreach(
- fun (#cstate { callback = undefined }) -> ok;
- (#cstate { callback = {M, F, A} }) -> apply(M, F, A ++ [AverageAge])
- end, CStates).
-
-notify_age0(Clients, CStates, Required) ->
- case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of
- [] -> ok;
- Notifications -> S = random:uniform(length(Notifications)),
- {L1, L2} = lists:split(S, Notifications),
- notify(Clients, Required, L2 ++ L1)
- end.
-
-notify(_Clients, _Required, []) ->
- ok;
-notify(_Clients, Required, _Notifications) when Required =< 0 ->
- ok;
-notify(Clients, Required, [#cstate{ pid = Pid,
- callback = {M, F, A},
- opened = Opened } | Notifications]) ->
- apply(M, F, A ++ [0]),
- ets:update_element(Clients, Pid, {#cstate.pending_closes, Opened}),
- notify(Clients, Required - Opened, Notifications).
-
-track_client(Pid, Clients) ->
- case ets:insert_new(Clients, #cstate { pid = Pid,
- callback = undefined,
- opened = 0,
- obtained = 0,
- blocked = false,
- pending_closes = 0 }) of
- true -> _MRef = erlang:monitor(process, Pid),
- ok;
- false -> ok
- end.
-
-
-%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS
-%% environment variable, on Linux set `ulimit -n`.
-ulimit() ->
- case proplists:get_value(max_fds, erlang:system_info(check_io)) of
- MaxFds when is_integer(MaxFds) andalso MaxFds > 1 ->
- case os:type() of
- {win32, _OsName} ->
- %% On Windows max_fds is twice the number of open files:
- %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466
- MaxFds div 2;
- _Any ->
- %% For other operating systems trust Erlang.
- MaxFds
- end;
- _ ->
- unknown
- end.
diff --git a/src/gatherer.erl b/src/gatherer.erl
deleted file mode 100644
index c13298ca..00000000
--- a/src/gatherer.erl
+++ /dev/null
@@ -1,145 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(gatherer).
-
--behaviour(gen_server2).
-
--export([start_link/0, stop/1, fork/1, finish/1, in/2, sync_in/2, out/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(stop/1 :: (pid()) -> 'ok').
--spec(fork/1 :: (pid()) -> 'ok').
--spec(finish/1 :: (pid()) -> 'ok').
--spec(in/2 :: (pid(), any()) -> 'ok').
--spec(sync_in/2 :: (pid(), any()) -> 'ok').
--spec(out/1 :: (pid()) -> {'value', any()} | 'empty').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
-
-%%----------------------------------------------------------------------------
-
--record(gstate, { forks, values, blocked }).
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
-
-stop(Pid) ->
- gen_server2:call(Pid, stop, infinity).
-
-fork(Pid) ->
- gen_server2:call(Pid, fork, infinity).
-
-finish(Pid) ->
- gen_server2:cast(Pid, finish).
-
-in(Pid, Value) ->
- gen_server2:cast(Pid, {in, Value}).
-
-sync_in(Pid, Value) ->
- gen_server2:call(Pid, {in, Value}, infinity).
-
-out(Pid) ->
- gen_server2:call(Pid, out, infinity).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, #gstate { forks = 0, values = queue:new(), blocked = queue:new() },
- hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State};
-
-handle_call(fork, _From, State = #gstate { forks = Forks }) ->
- {reply, ok, State #gstate { forks = Forks + 1 }, hibernate};
-
-handle_call({in, Value}, From, State) ->
- {noreply, in(Value, From, State), hibernate};
-
-handle_call(out, From, State = #gstate { forks = Forks,
- values = Values,
- blocked = Blocked }) ->
- case queue:out(Values) of
- {empty, _} when Forks == 0 ->
- {reply, empty, State, hibernate};
- {empty, _} ->
- {noreply, State #gstate { blocked = queue:in(From, Blocked) },
- hibernate};
- {{value, {PendingIn, Value}}, NewValues} ->
- reply(PendingIn, ok),
- {reply, {value, Value}, State #gstate { values = NewValues },
- hibernate}
- end;
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-handle_cast(finish, State = #gstate { forks = Forks, blocked = Blocked }) ->
- NewForks = Forks - 1,
- NewBlocked = case NewForks of
- 0 -> [gen_server2:reply(From, empty) ||
- From <- queue:to_list(Blocked)],
- queue:new();
- _ -> Blocked
- end,
- {noreply, State #gstate { forks = NewForks, blocked = NewBlocked },
- hibernate};
-
-handle_cast({in, Value}, State) ->
- {noreply, in(Value, undefined, State), hibernate};
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_cast, Msg}, State}.
-
-handle_info(Msg, State) ->
- {stop, {unexpected_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, State) ->
- State.
-
-%%----------------------------------------------------------------------------
-
-in(Value, From, State = #gstate { values = Values, blocked = Blocked }) ->
- case queue:out(Blocked) of
- {empty, _} ->
- State #gstate { values = queue:in({From, Value}, Values) };
- {{value, PendingOut}, NewBlocked} ->
- reply(From, ok),
- gen_server2:reply(PendingOut, {value, Value}),
- State #gstate { blocked = NewBlocked }
- end.
-
-reply(undefined, _Reply) -> ok;
-reply(From, Reply) -> gen_server2:reply(From, Reply).
diff --git a/src/gen_server2.erl b/src/gen_server2.erl
deleted file mode 100644
index 6690d181..00000000
--- a/src/gen_server2.erl
+++ /dev/null
@@ -1,1249 +0,0 @@
-%% This file is a copy of gen_server.erl from the R13B-1 Erlang/OTP
-%% distribution, with the following modifications:
-%%
-%% 1) the module name is gen_server2
-%%
-%% 2) more efficient handling of selective receives in callbacks
-%% gen_server2 processes drain their message queue into an internal
-%% buffer before invoking any callback module functions. Messages are
-%% dequeued from the buffer for processing. Thus the effective message
-%% queue of a gen_server2 process is the concatenation of the internal
-%% buffer and the real message queue.
-%% As a result of the draining, any selective receive invoked inside a
-%% callback is less likely to have to scan a large message queue.
-%%
-%% 3) gen_server2:cast is guaranteed to be order-preserving
-%% The original code could reorder messages when communicating with a
-%% process on a remote node that was not currently connected.
-%%
-%% 4) The callback module can optionally implement prioritise_call/4,
-%% prioritise_cast/3 and prioritise_info/3. These functions take
-%% Message, From, Length and State or just Message, Length and State
-%% (where Length is the current number of messages waiting to be
-%% processed) and return a single integer representing the priority
-%% attached to the message, or 'drop' to ignore it (for
-%% prioritise_cast/3 and prioritise_info/3 only). Messages with
-%% higher priorities are processed before requests with lower
-%% priorities. The default priority is 0.
-%%
-%% 5) The callback module can optionally implement
-%% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be
-%% called immediately prior to and post hibernation, respectively. If
-%% handle_pre_hibernate returns {hibernate, NewState} then the process
-%% will hibernate. If the module does not implement
-%% handle_pre_hibernate/1 then the default action is to hibernate.
-%%
-%% 6) init can return a 4th arg, {backoff, InitialTimeout,
-%% MinimumTimeout, DesiredHibernatePeriod} (all in milliseconds,
-%% 'infinity' does not make sense here). Then, on all callbacks which
-%% can return a timeout (including init), timeout can be
-%% 'hibernate'. When this is the case, the current timeout value will
-%% be used (initially, the InitialTimeout supplied from init). After
-%% this timeout has occurred, hibernation will occur as normal. Upon
-%% awaking, a new current timeout value will be calculated.
-%%
-%% The purpose is that the gen_server2 takes care of adjusting the
-%% current timeout value such that the process will increase the
-%% timeout value repeatedly if it is unable to sleep for the
-%% DesiredHibernatePeriod. If it is able to sleep for the
-%% DesiredHibernatePeriod it will decrease the current timeout down to
-%% the MinimumTimeout, so that the process is put to sleep sooner (and
-%% hopefully stays asleep for longer). In short, should a process
-%% using this receive a burst of messages, it should not hibernate
-%% between those messages, but as the messages become less frequent,
-%% the process will not only hibernate, it will do so sooner after
-%% each message.
-%%
-%% When using this backoff mechanism, normal timeout values (i.e. not
-%% 'hibernate') can still be used, and if they are used then the
-%% handle_info(timeout, State) will be called as normal. In this case,
-%% returning 'hibernate' from handle_info(timeout, State) will not
-%% hibernate the process immediately, as it would if backoff wasn't
-%% being used. Instead it'll wait for the current timeout as described
-%% above.
-%%
-%% 7) The callback module can return from any of the handle_*
-%% functions, a {become, Module, State} triple, or a {become, Module,
-%% State, Timeout} quadruple. This allows the gen_server to
-%% dynamically change the callback module. The State is the new state
-%% which will be passed into any of the callback functions in the new
-%% module. Note there is no form also encompassing a reply, thus if
-%% you wish to reply in handle_call/3 and change the callback module,
-%% you need to use gen_server2:reply/2 to issue the reply manually.
-%%
-%% 8) The callback module can optionally implement
-%% format_message_queue/2 which is the equivalent of format_status/2
-%% but where the second argument is specifically the priority_queue
-%% which contains the prioritised message_queue.
-%%
-%% 9) The function with_state/2 can be used to debug a process with
-%% heavyweight state (without needing to copy the entire state out of
-%% process as sys:get_status/1 would). Pass through a function which
-%% can be invoked on the state, get back the result. The state is not
-%% modified.
-
-%% All modifications are (C) 2009-2013 GoPivotal, Inc.
-
-%% ``The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved via the world wide web at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-%% AB. All Rights Reserved.''
-%%
-%% $Id$
-%%
--module(gen_server2).
-
-%%% ---------------------------------------------------
-%%%
-%%% The idea behind THIS server is that the user module
-%%% provides (different) functions to handle different
-%%% kind of inputs.
-%%% If the Parent process terminates the Module:terminate/2
-%%% function is called.
-%%%
-%%% The user module should export:
-%%%
-%%% init(Args)
-%%% ==> {ok, State}
-%%% {ok, State, Timeout}
-%%% {ok, State, Timeout, Backoff}
-%%% ignore
-%%% {stop, Reason}
-%%%
-%%% handle_call(Msg, {From, Tag}, State)
-%%%
-%%% ==> {reply, Reply, State}
-%%% {reply, Reply, State, Timeout}
-%%% {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, Reply, State}
-%%% Reason = normal | shutdown | Term terminate(State) is called
-%%%
-%%% handle_cast(Msg, State)
-%%%
-%%% ==> {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term terminate(State) is called
-%%%
-%%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ...
-%%%
-%%% ==> {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% terminate(Reason, State) Let the user module clean up
-%%% Reason = normal | shutdown | {shutdown, Term} | Term
-%%% always called when server terminates
-%%%
-%%% ==> ok | Term
-%%%
-%%% handle_pre_hibernate(State)
-%%%
-%%% ==> {hibernate, State}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% handle_post_hibernate(State)
-%%%
-%%% ==> {noreply, State}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% The work flow (of the server) can be described as follows:
-%%%
-%%% User module Generic
-%%% ----------- -------
-%%% start -----> start
-%%% init <----- .
-%%%
-%%% loop
-%%% handle_call <----- .
-%%% -----> reply
-%%%
-%%% handle_cast <----- .
-%%%
-%%% handle_info <----- .
-%%%
-%%% terminate <----- .
-%%%
-%%% -----> reply
-%%%
-%%%
-%%% ---------------------------------------------------
-
-%% API
--export([start/3, start/4,
- start_link/3, start_link/4,
- call/2, call/3,
- cast/2, reply/2,
- abcast/2, abcast/3,
- multi_call/2, multi_call/3, multi_call/4,
- with_state/2,
- enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]).
-
-%% System exports
--export([system_continue/3,
- system_terminate/4,
- system_code_change/4,
- format_status/2]).
-
-%% Internal exports
--export([init_it/6]).
-
--import(error_logger, [format/2]).
-
-%% State record
--record(gs2_state, {parent, name, state, mod, time,
- timeout_state, queue, debug, prioritisers}).
-
--ifdef(use_specs).
-
-%%%=========================================================================
-%%% Specs. These exist only to shut up dialyzer's warnings
-%%%=========================================================================
-
--type(gs2_state() :: #gs2_state{}).
-
--spec(handle_common_termination/3 ::
- (any(), atom(), gs2_state()) -> no_return()).
--spec(hibernate/1 :: (gs2_state()) -> no_return()).
--spec(pre_hibernate/1 :: (gs2_state()) -> no_return()).
--spec(system_terminate/4 :: (_, _, _, gs2_state()) -> no_return()).
-
--type(millis() :: non_neg_integer()).
-
-%%%=========================================================================
-%%% API
-%%%=========================================================================
-
--callback init(Args :: term()) ->
- {ok, State :: term()} |
- {ok, State :: term(), timeout() | hibernate} |
- {ok, State :: term(), timeout() | hibernate,
- {backoff, millis(), millis(), millis()}} |
- ignore |
- {stop, Reason :: term()}.
--callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
- State :: term()) ->
- {reply, Reply :: term(), NewState :: term()} |
- {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(),
- Reply :: term(), NewState :: term()}.
--callback handle_cast(Request :: term(), State :: term()) ->
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(), NewState :: term()}.
--callback handle_info(Info :: term(), State :: term()) ->
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(), NewState :: term()}.
--callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
- State :: term()) ->
- ok | term().
--callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
- Extra :: term()) ->
- {ok, NewState :: term()} | {error, Reason :: term()}.
-
-%% It's not possible to define "optional" -callbacks, so putting specs
-%% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
-%% in warnings (the same applied for the behaviour_info before).
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{init,1},{handle_call,3},{handle_cast,2},{handle_info,2},
- {terminate,2},{code_change,3}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%% -----------------------------------------------------------------
-%%% Starts a generic server.
-%%% start(Mod, Args, Options)
-%%% start(Name, Mod, Args, Options)
-%%% start_link(Mod, Args, Options)
-%%% start_link(Name, Mod, Args, Options) where:
-%%% Name ::= {local, atom()} | {global, atom()}
-%%% Mod ::= atom(), callback module implementing the 'real' server
-%%% Args ::= term(), init arguments (to Mod:init/1)
-%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
-%%% Flag ::= trace | log | {logfile, File} | statistics | debug
-%%% (debug == log && statistics)
-%%% Returns: {ok, Pid} |
-%%% {error, {already_started, Pid}} |
-%%% {error, Reason}
-%%% -----------------------------------------------------------------
-start(Mod, Args, Options) ->
- gen:start(?MODULE, nolink, Mod, Args, Options).
-
-start(Name, Mod, Args, Options) ->
- gen:start(?MODULE, nolink, Name, Mod, Args, Options).
-
-start_link(Mod, Args, Options) ->
- gen:start(?MODULE, link, Mod, Args, Options).
-
-start_link(Name, Mod, Args, Options) ->
- gen:start(?MODULE, link, Name, Mod, Args, Options).
-
-
-%% -----------------------------------------------------------------
-%% Make a call to a generic server.
-%% If the server is located at another node, that node will
-%% be monitored.
-%% If the client is trapping exits and is linked server termination
-%% is handled here (? Shall we do that here (or rely on timeouts) ?).
-%% -----------------------------------------------------------------
-call(Name, Request) ->
- case catch gen:call(Name, '$gen_call', Request) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, call, [Name, Request]}})
- end.
-
-call(Name, Request, Timeout) ->
- case catch gen:call(Name, '$gen_call', Request, Timeout) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, call, [Name, Request, Timeout]}})
- end.
-
-%% -----------------------------------------------------------------
-%% Make a cast to a generic server.
-%% -----------------------------------------------------------------
-cast({global,Name}, Request) ->
- catch global:send(Name, cast_msg(Request)),
- ok;
-cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) ->
- do_cast(Dest, Request);
-cast(Dest, Request) when is_atom(Dest) ->
- do_cast(Dest, Request);
-cast(Dest, Request) when is_pid(Dest) ->
- do_cast(Dest, Request).
-
-do_cast(Dest, Request) ->
- do_send(Dest, cast_msg(Request)),
- ok.
-
-cast_msg(Request) -> {'$gen_cast',Request}.
-
-%% -----------------------------------------------------------------
-%% Send a reply to the client.
-%% -----------------------------------------------------------------
-reply({To, Tag}, Reply) ->
- catch To ! {Tag, Reply}.
-
-%% -----------------------------------------------------------------
-%% Asyncronous broadcast, returns nothing, it's just send'n pray
-%% -----------------------------------------------------------------
-abcast(Name, Request) when is_atom(Name) ->
- do_abcast([node() | nodes()], Name, cast_msg(Request)).
-
-abcast(Nodes, Name, Request) when is_list(Nodes), is_atom(Name) ->
- do_abcast(Nodes, Name, cast_msg(Request)).
-
-do_abcast([Node|Nodes], Name, Msg) when is_atom(Node) ->
- do_send({Name,Node},Msg),
- do_abcast(Nodes, Name, Msg);
-do_abcast([], _,_) -> abcast.
-
-%%% -----------------------------------------------------------------
-%%% Make a call to servers at several nodes.
-%%% Returns: {[Replies],[BadNodes]}
-%%% A Timeout can be given
-%%%
-%%% A middleman process is used in case late answers arrives after
-%%% the timeout. If they would be allowed to glog the callers message
-%%% queue, it would probably become confused. Late answers will
-%%% now arrive to the terminated middleman and so be discarded.
-%%% -----------------------------------------------------------------
-multi_call(Name, Req)
- when is_atom(Name) ->
- do_multi_call([node() | nodes()], Name, Req, infinity).
-
-multi_call(Nodes, Name, Req)
- when is_list(Nodes), is_atom(Name) ->
- do_multi_call(Nodes, Name, Req, infinity).
-
-multi_call(Nodes, Name, Req, infinity) ->
- do_multi_call(Nodes, Name, Req, infinity);
-multi_call(Nodes, Name, Req, Timeout)
- when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 ->
- do_multi_call(Nodes, Name, Req, Timeout).
-
-%% -----------------------------------------------------------------
-%% Apply a function to a generic server's state.
-%% -----------------------------------------------------------------
-with_state(Name, Fun) ->
- case catch gen:call(Name, '$with_state', Fun, infinity) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, with_state, [Name, Fun]}})
- end.
-
-%%-----------------------------------------------------------------
-%% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>, <Backoff>) ->_
-%%
-%% Description: Makes an existing process into a gen_server.
-%% The calling process will enter the gen_server receive
-%% loop and become a gen_server process.
-%% The process *must* have been started using one of the
-%% start functions in proc_lib, see proc_lib(3).
-%% The user is responsible for any initialization of the
-%% process, including registering a name for it.
-%%-----------------------------------------------------------------
-enter_loop(Mod, Options, State) ->
- enter_loop(Mod, Options, State, self(), infinity, undefined).
-
-enter_loop(Mod, Options, State, Backoff = {backoff, _, _ , _}) ->
- enter_loop(Mod, Options, State, self(), infinity, Backoff);
-
-enter_loop(Mod, Options, State, ServerName = {_, _}) ->
- enter_loop(Mod, Options, State, ServerName, infinity, undefined);
-
-enter_loop(Mod, Options, State, Timeout) ->
- enter_loop(Mod, Options, State, self(), Timeout, undefined).
-
-enter_loop(Mod, Options, State, ServerName, Backoff = {backoff, _, _, _}) ->
- enter_loop(Mod, Options, State, ServerName, infinity, Backoff);
-
-enter_loop(Mod, Options, State, ServerName, Timeout) ->
- enter_loop(Mod, Options, State, ServerName, Timeout, undefined).
-
-enter_loop(Mod, Options, State, ServerName, Timeout, Backoff) ->
- Name = get_proc_name(ServerName),
- Parent = get_parent(),
- Debug = debug_options(Name, Options),
- Queue = priority_queue:new(),
- Backoff1 = extend_backoff(Backoff),
- loop(find_prioritisers(
- #gs2_state { parent = Parent, name = Name, state = State,
- mod = Mod, time = Timeout, timeout_state = Backoff1,
- queue = Queue, debug = Debug })).
-
-%%%========================================================================
-%%% Gen-callback functions
-%%%========================================================================
-
-%%% ---------------------------------------------------
-%%% Initiate the new process.
-%%% Register the name using the Rfunc function
-%%% Calls the Mod:init/Args function.
-%%% Finally an acknowledge is sent to Parent and the main
-%%% loop is entered.
-%%% ---------------------------------------------------
-init_it(Starter, self, Name, Mod, Args, Options) ->
- init_it(Starter, self(), Name, Mod, Args, Options);
-init_it(Starter, Parent, Name0, Mod, Args, Options) ->
- Name = name(Name0),
- Debug = debug_options(Name, Options),
- Queue = priority_queue:new(),
- GS2State = find_prioritisers(
- #gs2_state { parent = Parent,
- name = Name,
- mod = Mod,
- queue = Queue,
- debug = Debug }),
- case catch Mod:init(Args) of
- {ok, State} ->
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = infinity,
- timeout_state = undefined });
- {ok, State, Timeout} ->
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = Timeout,
- timeout_state = undefined });
- {ok, State, Timeout, Backoff = {backoff, _, _, _}} ->
- Backoff1 = extend_backoff(Backoff),
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = Timeout,
- timeout_state = Backoff1 });
- {stop, Reason} ->
- %% For consistency, we must make sure that the
- %% registered name (if any) is unregistered before
- %% the parent process is notified about the failure.
- %% (Otherwise, the parent process could get
- %% an 'already_started' error if it immediately
- %% tried starting the process again.)
- unregister_name(Name0),
- proc_lib:init_ack(Starter, {error, Reason}),
- exit(Reason);
- ignore ->
- unregister_name(Name0),
- proc_lib:init_ack(Starter, ignore),
- exit(normal);
- {'EXIT', Reason} ->
- unregister_name(Name0),
- proc_lib:init_ack(Starter, {error, Reason}),
- exit(Reason);
- Else ->
- Error = {bad_return_value, Else},
- proc_lib:init_ack(Starter, {error, Error}),
- exit(Error)
- end.
-
-name({local,Name}) -> Name;
-name({global,Name}) -> Name;
-%% name(Pid) when is_pid(Pid) -> Pid;
-%% when R12 goes away, drop the line beneath and uncomment the line above
-name(Name) -> Name.
-
-unregister_name({local,Name}) ->
- _ = (catch unregister(Name));
-unregister_name({global,Name}) ->
- _ = global:unregister_name(Name);
-unregister_name(Pid) when is_pid(Pid) ->
- Pid;
-%% Under R12 let's just ignore it, as we have a single term as Name.
-%% On R13 it will never get here, as we get tuple with 'local/global' atom.
-unregister_name(_Name) -> ok.
-
-extend_backoff(undefined) ->
- undefined;
-extend_backoff({backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod}) ->
- {backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod, now()}.
-
-%%%========================================================================
-%%% Internal functions
-%%%========================================================================
-%%% ---------------------------------------------------
-%%% The MAIN loop.
-%%% ---------------------------------------------------
-loop(GS2State = #gs2_state { time = hibernate,
- timeout_state = undefined }) ->
- pre_hibernate(GS2State);
-loop(GS2State) ->
- process_next_msg(drain(GS2State)).
-
-drain(GS2State) ->
- receive
- Input -> drain(in(Input, GS2State))
- after 0 -> GS2State
- end.
-
-process_next_msg(GS2State = #gs2_state { time = Time,
- timeout_state = TimeoutState,
- queue = Queue }) ->
- case priority_queue:out(Queue) of
- {{value, Msg}, Queue1} ->
- process_msg(Msg, GS2State #gs2_state { queue = Queue1 });
- {empty, Queue1} ->
- {Time1, HibOnTimeout}
- = case {Time, TimeoutState} of
- {hibernate, {backoff, Current, _Min, _Desired, _RSt}} ->
- {Current, true};
- {hibernate, _} ->
- %% wake_hib/7 will set Time to hibernate. If
- %% we were woken and didn't receive a msg
- %% then we will get here and need a sensible
- %% value for Time1, otherwise we crash.
- %% R13B1 always waits infinitely when waking
- %% from hibernation, so that's what we do
- %% here too.
- {infinity, false};
- _ -> {Time, false}
- end,
- receive
- Input ->
- %% Time could be 'hibernate' here, so *don't* call loop
- process_next_msg(
- drain(in(Input, GS2State #gs2_state { queue = Queue1 })))
- after Time1 ->
- case HibOnTimeout of
- true ->
- pre_hibernate(
- GS2State #gs2_state { queue = Queue1 });
- false ->
- process_msg(timeout,
- GS2State #gs2_state { queue = Queue1 })
- end
- end
- end.
-
-wake_hib(GS2State = #gs2_state { timeout_state = TS }) ->
- TimeoutState1 = case TS of
- undefined ->
- undefined;
- {SleptAt, TimeoutState} ->
- adjust_timeout_state(SleptAt, now(), TimeoutState)
- end,
- post_hibernate(
- drain(GS2State #gs2_state { timeout_state = TimeoutState1 })).
-
-hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) ->
- TS = case TimeoutState of
- undefined -> undefined;
- {backoff, _, _, _, _} -> {now(), TimeoutState}
- end,
- proc_lib:hibernate(?MODULE, wake_hib,
- [GS2State #gs2_state { timeout_state = TS }]).
-
-pre_hibernate(GS2State = #gs2_state { state = State,
- mod = Mod }) ->
- case erlang:function_exported(Mod, handle_pre_hibernate, 1) of
- true ->
- case catch Mod:handle_pre_hibernate(State) of
- {hibernate, NState} ->
- hibernate(GS2State #gs2_state { state = NState } );
- Reply ->
- handle_common_termination(Reply, pre_hibernate, GS2State)
- end;
- false ->
- hibernate(GS2State)
- end.
-
-post_hibernate(GS2State = #gs2_state { state = State,
- mod = Mod }) ->
- case erlang:function_exported(Mod, handle_post_hibernate, 1) of
- true ->
- case catch Mod:handle_post_hibernate(State) of
- {noreply, NState} ->
- process_next_msg(GS2State #gs2_state { state = NState,
- time = infinity });
- {noreply, NState, Time} ->
- process_next_msg(GS2State #gs2_state { state = NState,
- time = Time });
- Reply ->
- handle_common_termination(Reply, post_hibernate, GS2State)
- end;
- false ->
- %% use hibernate here, not infinity. This matches
- %% R13B. The key is that we should be able to get through
- %% to process_msg calling sys:handle_system_msg with Time
- %% still set to hibernate, iff that msg is the very msg
- %% that woke us up (or the first msg we receive after
- %% waking up).
- process_next_msg(GS2State #gs2_state { time = hibernate })
- end.
-
-adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
- DesiredHibPeriod, RandomState}) ->
- NapLengthMicros = timer:now_diff(AwokeAt, SleptAt),
- CurrentMicros = CurrentTO * 1000,
- MinimumMicros = MinimumTO * 1000,
- DesiredHibMicros = DesiredHibPeriod * 1000,
- GapBetweenMessagesMicros = NapLengthMicros + CurrentMicros,
- Base =
- %% If enough time has passed between the last two messages then we
- %% should consider sleeping sooner. Otherwise stay awake longer.
- case GapBetweenMessagesMicros > (MinimumMicros + DesiredHibMicros) of
- true -> lists:max([MinimumTO, CurrentTO div 2]);
- false -> CurrentTO
- end,
- {Extra, RandomState1} = random:uniform_s(Base, RandomState),
- CurrentTO1 = Base + Extra,
- {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
-
-in({'$gen_cast', Msg} = Input,
- GS2State = #gs2_state { prioritisers = {_, F, _} }) ->
- in(Input, F(Msg, GS2State), GS2State);
-in({'$gen_call', From, Msg} = Input,
- GS2State = #gs2_state { prioritisers = {F, _, _} }) ->
- in(Input, F(Msg, From, GS2State), GS2State);
-in({'$with_state', _From, _Fun} = Input, GS2State) ->
- in(Input, 0, GS2State);
-in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
- in(Input, infinity, GS2State);
-in({system, _From, _Req} = Input, GS2State) ->
- in(Input, infinity, GS2State);
-in(Input, GS2State = #gs2_state { prioritisers = {_, _, F} }) ->
- in(Input, F(Input, GS2State), GS2State).
-
-in(_Input, drop, GS2State) ->
- GS2State;
-
-in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
- GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }.
-
-process_msg({system, From, Req},
- GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
- %% gen_server puts Hib on the end as the 7th arg, but that version
- %% of the fun seems not to be documented so leaving out for now.
- sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State);
-process_msg({'$with_state', From, Fun},
- GS2State = #gs2_state{state = State}) ->
- reply(From, catch Fun(State)),
- loop(GS2State);
-process_msg({'EXIT', Parent, Reason} = Msg,
- GS2State = #gs2_state { parent = Parent }) ->
- terminate(Reason, Msg, GS2State);
-process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
- handle_msg(Msg, GS2State);
-process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
- Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
- handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
-
-%%% ---------------------------------------------------
-%%% Send/recive functions
-%%% ---------------------------------------------------
-do_send(Dest, Msg) ->
- catch erlang:send(Dest, Msg).
-
-do_multi_call(Nodes, Name, Req, infinity) ->
- Tag = make_ref(),
- Monitors = send_nodes(Nodes, Name, Tag, Req),
- rec_nodes(Tag, Monitors, Name, undefined);
-do_multi_call(Nodes, Name, Req, Timeout) ->
- Tag = make_ref(),
- Caller = self(),
- Receiver =
- spawn(
- fun () ->
- %% Middleman process. Should be unsensitive to regular
- %% exit signals. The sychronization is needed in case
- %% the receiver would exit before the caller started
- %% the monitor.
- process_flag(trap_exit, true),
- Mref = erlang:monitor(process, Caller),
- receive
- {Caller,Tag} ->
- Monitors = send_nodes(Nodes, Name, Tag, Req),
- TimerId = erlang:start_timer(Timeout, self(), ok),
- Result = rec_nodes(Tag, Monitors, Name, TimerId),
- exit({self(),Tag,Result});
- {'DOWN',Mref,_,_,_} ->
- %% Caller died before sending us the go-ahead.
- %% Give up silently.
- exit(normal)
- end
- end),
- Mref = erlang:monitor(process, Receiver),
- Receiver ! {self(),Tag},
- receive
- {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
- Result;
- {'DOWN',Mref,_,_,Reason} ->
- %% The middleman code failed. Or someone did
- %% exit(_, kill) on the middleman process => Reason==killed
- exit(Reason)
- end.
-
-send_nodes(Nodes, Name, Tag, Req) ->
- send_nodes(Nodes, Name, Tag, Req, []).
-
-send_nodes([Node|Tail], Name, Tag, Req, Monitors)
- when is_atom(Node) ->
- Monitor = start_monitor(Node, Name),
- %% Handle non-existing names in rec_nodes.
- catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
- send_nodes(Tail, Name, Tag, Req, [Monitor | Monitors]);
-send_nodes([_Node|Tail], Name, Tag, Req, Monitors) ->
- %% Skip non-atom Node
- send_nodes(Tail, Name, Tag, Req, Monitors);
-send_nodes([], _Name, _Tag, _Req, Monitors) ->
- Monitors.
-
-%% Against old nodes:
-%% If no reply has been delivered within 2 secs. (per node) check that
-%% the server really exists and wait for ever for the answer.
-%%
-%% Against contemporary nodes:
-%% Wait for reply, server 'DOWN', or timeout from TimerId.
-
-rec_nodes(Tag, Nodes, Name, TimerId) ->
- rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
-
-rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
- receive
- {'DOWN', R, _, _, _} ->
- rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- unmonitor(R),
- rec_nodes(Tag, Tail, Name, Badnodes,
- [{N,Reply}|Replies], Time, TimerId);
- {timeout, TimerId, _} ->
- unmonitor(R),
- %% Collect all replies that already have arrived
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
- %% R6 node
- receive
- {nodedown, N} ->
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, Badnodes,
- [{N,Reply}|Replies], 2000, TimerId);
- {timeout, TimerId, _} ->
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- %% Collect all replies that already have arrived
- rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
- after Time ->
- case rpc:call(N, erlang, whereis, [Name]) of
- Pid when is_pid(Pid) -> % It exists try again.
- rec_nodes(Tag, [N|Tail], Name, Badnodes,
- Replies, infinity, TimerId);
- _ -> % badnode
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, [N|Badnodes],
- Replies, 2000, TimerId)
- end
- end;
-rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
- case catch erlang:cancel_timer(TimerId) of
- false -> % It has already sent it's message
- receive
- {timeout, TimerId, _} -> ok
- after 0 ->
- ok
- end;
- _ -> % Timer was cancelled, or TimerId was 'undefined'
- ok
- end,
- {Replies, Badnodes}.
-
-%% Collect all replies that already have arrived
-rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
- receive
- {'DOWN', R, _, _, _} ->
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- unmonitor(R),
- rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
- after 0 ->
- unmonitor(R),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
- %% R6 node
- receive
- {nodedown, N} ->
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
- after 0 ->
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
- {Replies, Badnodes}.
-
-
-%%% ---------------------------------------------------
-%%% Monitor functions
-%%% ---------------------------------------------------
-
-start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
- if node() =:= nonode@nohost, Node =/= nonode@nohost ->
- Ref = make_ref(),
- self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
- {Node, Ref};
- true ->
- case catch erlang:monitor(process, {Name, Node}) of
- {'EXIT', _} ->
- %% Remote node is R6
- monitor_node(Node, true),
- Node;
- Ref when is_reference(Ref) ->
- {Node, Ref}
- end
- end.
-
-%% Cancels a monitor started with Ref=erlang:monitor(_, _).
-unmonitor(Ref) when is_reference(Ref) ->
- erlang:demonitor(Ref),
- receive
- {'DOWN', Ref, _, _, _} ->
- true
- after 0 ->
- true
- end.
-
-%%% ---------------------------------------------------
-%%% Message handling functions
-%%% ---------------------------------------------------
-
-dispatch({'$gen_cast', Msg}, Mod, State) ->
- Mod:handle_cast(Msg, State);
-dispatch(Info, Mod, State) ->
- Mod:handle_info(Info, State).
-
-common_reply(_Name, From, Reply, _NState, [] = _Debug) ->
- reply(From, Reply),
- [];
-common_reply(Name, {To, _Tag} = From, Reply, NState, Debug) ->
- reply(From, Reply),
- sys:handle_debug(Debug, fun print_event/3, Name, {out, Reply, To, NState}).
-
-common_noreply(_Name, _NState, [] = _Debug) ->
- [];
-common_noreply(Name, NState, Debug) ->
- sys:handle_debug(Debug, fun print_event/3, Name, {noreply, NState}).
-
-common_become(_Name, _Mod, _NState, [] = _Debug) ->
- [];
-common_become(Name, Mod, NState, Debug) ->
- sys:handle_debug(Debug, fun print_event/3, Name, {become, Mod, NState}).
-
-handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod,
- state = State,
- name = Name,
- debug = Debug }) ->
- case catch Mod:handle_call(Msg, From, State) of
- {reply, Reply, NState} ->
- Debug1 = common_reply(Name, From, Reply, NState, Debug),
- loop(GS2State #gs2_state { state = NState,
- time = infinity,
- debug = Debug1 });
- {reply, Reply, NState, Time1} ->
- Debug1 = common_reply(Name, From, Reply, NState, Debug),
- loop(GS2State #gs2_state { state = NState,
- time = Time1,
- debug = Debug1});
- {stop, Reason, Reply, NState} ->
- {'EXIT', R} =
- (catch terminate(Reason, Msg,
- GS2State #gs2_state { state = NState })),
- common_reply(Name, From, Reply, NState, Debug),
- exit(R);
- Other ->
- handle_common_reply(Other, Msg, GS2State)
- end;
-handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) ->
- Reply = (catch dispatch(Msg, Mod, State)),
- handle_common_reply(Reply, Msg, GS2State).
-
-handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name,
- debug = Debug}) ->
- case Reply of
- {noreply, NState} ->
- Debug1 = common_noreply(Name, NState, Debug),
- loop(GS2State #gs2_state {state = NState,
- time = infinity,
- debug = Debug1});
- {noreply, NState, Time1} ->
- Debug1 = common_noreply(Name, NState, Debug),
- loop(GS2State #gs2_state {state = NState,
- time = Time1,
- debug = Debug1});
- {become, Mod, NState} ->
- Debug1 = common_become(Name, Mod, NState, Debug),
- loop(find_prioritisers(
- GS2State #gs2_state { mod = Mod,
- state = NState,
- time = infinity,
- debug = Debug1 }));
- {become, Mod, NState, Time1} ->
- Debug1 = common_become(Name, Mod, NState, Debug),
- loop(find_prioritisers(
- GS2State #gs2_state { mod = Mod,
- state = NState,
- time = Time1,
- debug = Debug1 }));
- _ ->
- handle_common_termination(Reply, Msg, GS2State)
- end.
-
-handle_common_termination(Reply, Msg, GS2State) ->
- case Reply of
- {stop, Reason, NState} ->
- terminate(Reason, Msg, GS2State #gs2_state { state = NState });
- {'EXIT', What} ->
- terminate(What, Msg, GS2State);
- _ ->
- terminate({bad_return_value, Reply}, Msg, GS2State)
- end.
-
-%%-----------------------------------------------------------------
-%% Callback functions for system messages handling.
-%%-----------------------------------------------------------------
-system_continue(Parent, Debug, GS2State) ->
- loop(GS2State #gs2_state { parent = Parent, debug = Debug }).
-
-system_terminate(Reason, _Parent, Debug, GS2State) ->
- terminate(Reason, [], GS2State #gs2_state { debug = Debug }).
-
-system_code_change(GS2State = #gs2_state { mod = Mod,
- state = State },
- _Module, OldVsn, Extra) ->
- case catch Mod:code_change(OldVsn, State, Extra) of
- {ok, NewState} ->
- NewGS2State = find_prioritisers(
- GS2State #gs2_state { state = NewState }),
- {ok, [NewGS2State]};
- Else ->
- Else
- end.
-
-%%-----------------------------------------------------------------
-%% Format debug messages. Print them as the call-back module sees
-%% them, not as the real erlang messages. Use trace for that.
-%%-----------------------------------------------------------------
-print_event(Dev, {in, Msg}, Name) ->
- case Msg of
- {'$gen_call', {From, _Tag}, Call} ->
- io:format(Dev, "*DBG* ~p got call ~p from ~w~n",
- [Name, Call, From]);
- {'$gen_cast', Cast} ->
- io:format(Dev, "*DBG* ~p got cast ~p~n",
- [Name, Cast]);
- _ ->
- io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg])
- end;
-print_event(Dev, {out, Msg, To, State}, Name) ->
- io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n",
- [Name, Msg, To, State]);
-print_event(Dev, {noreply, State}, Name) ->
- io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]);
-print_event(Dev, Event, Name) ->
- io:format(Dev, "*DBG* ~p dbg ~p~n", [Name, Event]).
-
-
-%%% ---------------------------------------------------
-%%% Terminate the server.
-%%% ---------------------------------------------------
-
-terminate(Reason, Msg, #gs2_state { name = Name,
- mod = Mod,
- state = State,
- debug = Debug }) ->
- case catch Mod:terminate(Reason, State) of
- {'EXIT', R} ->
- error_info(R, Reason, Name, Msg, State, Debug),
- exit(R);
- _ ->
- case Reason of
- normal ->
- exit(normal);
- shutdown ->
- exit(shutdown);
- {shutdown,_}=Shutdown ->
- exit(Shutdown);
- _ ->
- error_info(Reason, undefined, Name, Msg, State, Debug),
- exit(Reason)
- end
- end.
-
-error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) ->
- %% OTP-5811 Don't send an error report if it's the system process
- %% application_controller which is terminating - let init take care
- %% of it instead
- ok;
-error_info(Reason, RootCause, Name, Msg, State, Debug) ->
- Reason1 = error_reason(Reason),
- Fmt =
- "** Generic server ~p terminating~n"
- "** Last message in was ~p~n"
- "** When Server state == ~p~n"
- "** Reason for termination == ~n** ~p~n",
- case RootCause of
- undefined -> format(Fmt, [Name, Msg, State, Reason1]);
- _ -> format(Fmt ++ "** In 'terminate' callback "
- "with reason ==~n** ~p~n",
- [Name, Msg, State, Reason1,
- error_reason(RootCause)])
- end,
- sys:print_log(Debug),
- ok.
-
-error_reason({undef,[{M,F,A}|MFAs]} = Reason) ->
- case code:is_loaded(M) of
- false -> {'module could not be loaded',[{M,F,A}|MFAs]};
- _ -> case erlang:function_exported(M, F, length(A)) of
- true -> Reason;
- false -> {'function not exported',[{M,F,A}|MFAs]}
- end
- end;
-error_reason(Reason) ->
- Reason.
-
-%%% ---------------------------------------------------
-%%% Misc. functions.
-%%% ---------------------------------------------------
-
-opt(Op, [{Op, Value}|_]) ->
- {ok, Value};
-opt(Op, [_|Options]) ->
- opt(Op, Options);
-opt(_, []) ->
- false.
-
-debug_options(Name, Opts) ->
- case opt(debug, Opts) of
- {ok, Options} -> dbg_options(Name, Options);
- _ -> dbg_options(Name, [])
- end.
-
-dbg_options(Name, []) ->
- Opts =
- case init:get_argument(generic_debug) of
- error ->
- [];
- _ ->
- [log, statistics]
- end,
- dbg_opts(Name, Opts);
-dbg_options(Name, Opts) ->
- dbg_opts(Name, Opts).
-
-dbg_opts(Name, Opts) ->
- case catch sys:debug_options(Opts) of
- {'EXIT',_} ->
- format("~p: ignoring erroneous debug options - ~p~n",
- [Name, Opts]),
- [];
- Dbg ->
- Dbg
- end.
-
-get_proc_name(Pid) when is_pid(Pid) ->
- Pid;
-get_proc_name({local, Name}) ->
- case process_info(self(), registered_name) of
- {registered_name, Name} ->
- Name;
- {registered_name, _Name} ->
- exit(process_not_registered);
- [] ->
- exit(process_not_registered)
- end;
-get_proc_name({global, Name}) ->
- case whereis_name(Name) of
- undefined ->
- exit(process_not_registered_globally);
- Pid when Pid =:= self() ->
- Name;
- _Pid ->
- exit(process_not_registered_globally)
- end.
-
-get_parent() ->
- case get('$ancestors') of
- [Parent | _] when is_pid(Parent)->
- Parent;
- [Parent | _] when is_atom(Parent)->
- name_to_pid(Parent);
- _ ->
- exit(process_was_not_started_by_proc_lib)
- end.
-
-name_to_pid(Name) ->
- case whereis(Name) of
- undefined ->
- case whereis_name(Name) of
- undefined ->
- exit(could_not_find_registerd_name);
- Pid ->
- Pid
- end;
- Pid ->
- Pid
- end.
-
-whereis_name(Name) ->
- case ets:lookup(global_names, Name) of
- [{_Name, Pid, _Method, _RPid, _Ref}] ->
- if node(Pid) == node() ->
- case is_process_alive(Pid) of
- true -> Pid;
- false -> undefined
- end;
- true ->
- Pid
- end;
- [] -> undefined
- end.
-
-find_prioritisers(GS2State = #gs2_state { mod = Mod }) ->
- PCall = function_exported_or_default(Mod, 'prioritise_call', 4,
- fun (_Msg, _From, _State) -> 0 end),
- PCast = function_exported_or_default(Mod, 'prioritise_cast', 3,
- fun (_Msg, _State) -> 0 end),
- PInfo = function_exported_or_default(Mod, 'prioritise_info', 3,
- fun (_Msg, _State) -> 0 end),
- GS2State #gs2_state { prioritisers = {PCall, PCast, PInfo} }.
-
-function_exported_or_default(Mod, Fun, Arity, Default) ->
- case erlang:function_exported(Mod, Fun, Arity) of
- true -> case Arity of
- 3 -> fun (Msg, GS2State = #gs2_state { queue = Queue,
- state = State }) ->
- Length = priority_queue:len(Queue),
- case catch Mod:Fun(Msg, Length, State) of
- drop ->
- drop;
- Res when is_integer(Res) ->
- Res;
- Err ->
- handle_common_termination(Err, Msg, GS2State)
- end
- end;
- 4 -> fun (Msg, From, GS2State = #gs2_state { queue = Queue,
- state = State }) ->
- Length = priority_queue:len(Queue),
- case catch Mod:Fun(Msg, From, Length, State) of
- Res when is_integer(Res) ->
- Res;
- Err ->
- handle_common_termination(Err, Msg, GS2State)
- end
- end
- end;
- false -> Default
- end.
-
-%%-----------------------------------------------------------------
-%% Status information
-%%-----------------------------------------------------------------
-format_status(Opt, StatusData) ->
- [PDict, SysState, Parent, Debug,
- #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] =
- StatusData,
- NameTag = if is_pid(Name) ->
- pid_to_list(Name);
- is_atom(Name) ->
- Name
- end,
- Header = lists:concat(["Status for generic server ", NameTag]),
- Log = sys:get_debug(log, Debug, []),
- Specfic = callback(Mod, format_status, [Opt, [PDict, State]],
- fun () -> [{data, [{"State", State}]}] end),
- Messages = callback(Mod, format_message_queue, [Opt, Queue],
- fun () -> priority_queue:to_list(Queue) end),
- [{header, Header},
- {data, [{"Status", SysState},
- {"Parent", Parent},
- {"Logged events", Log},
- {"Queued messages", Messages}]} |
- Specfic].
-
-callback(Mod, FunName, Args, DefaultThunk) ->
- case erlang:function_exported(Mod, FunName, length(Args)) of
- true -> case catch apply(Mod, FunName, Args) of
- {'EXIT', _} -> DefaultThunk();
- Success -> Success
- end;
- false -> DefaultThunk()
- end.
diff --git a/src/gm.erl b/src/gm.erl
deleted file mode 100644
index 1cf077e0..00000000
--- a/src/gm.erl
+++ /dev/null
@@ -1,1493 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(gm).
-
-%% Guaranteed Multicast
-%% ====================
-%%
-%% This module provides the ability to create named groups of
-%% processes to which members can be dynamically added and removed,
-%% and for messages to be broadcast within the group that are
-%% guaranteed to reach all members of the group during the lifetime of
-%% the message. The lifetime of a message is defined as being, at a
-%% minimum, the time from which the message is first sent to any
-%% member of the group, up until the time at which it is known by the
-%% member who published the message that the message has reached all
-%% group members.
-%%
-%% The guarantee given is that provided a message, once sent, makes it
-%% to members who do not all leave the group, the message will
-%% continue to propagate to all group members.
-%%
-%% Another way of stating the guarantee is that if member P publishes
-%% messages m and m', then for all members P', if P' is a member of
-%% the group prior to the publication of m, and P' receives m', then
-%% P' will receive m.
-%%
-%% Note that only local-ordering is enforced: i.e. if member P sends
-%% message m and then message m', then for-all members P', if P'
-%% receives m and m', then they will receive m' after m. Causality
-%% ordering is _not_ enforced. I.e. if member P receives message m
-%% and as a result publishes message m', there is no guarantee that
-%% other members P' will receive m before m'.
-%%
-%%
-%% API Use
-%% -------
-%%
-%% Mnesia must be started. Use the idempotent create_tables/0 function
-%% to create the tables required.
-%%
-%% start_link/4
-%% Provide the group name, the callback module name, and any arguments
-%% you wish to be passed into the callback module's functions. The
-%% joined/2 function will be called when we have joined the group,
-%% with the arguments passed to start_link and a list of the current
-%% members of the group. See the callbacks specs and the comments
-%% below for further details of the callback functions.
-%%
-%% leave/1
-%% Provide the Pid. Removes the Pid from the group. The callback
-%% terminate/2 function will be called.
-%%
-%% broadcast/2
-%% Provide the Pid and a Message. The message will be sent to all
-%% members of the group as per the guarantees given above. This is a
-%% cast and the function call will return immediately. There is no
-%% guarantee that the message will reach any member of the group.
-%%
-%% confirmed_broadcast/2
-%% Provide the Pid and a Message. As per broadcast/2 except that this
-%% is a call, not a cast, and only returns 'ok' once the Message has
-%% reached every member of the group. Do not call
-%% confirmed_broadcast/2 directly from the callback module otherwise
-%% you will deadlock the entire group.
-%%
-%% info/1
-%% Provide the Pid. Returns a proplist with various facts, including
-%% the group name and the current group members.
-%%
-%% validate_members/2
-%% Check whether a given member list agrees with the chosen member's
-%% view. Any differences will be communicated via the members_changed
-%% callback. If there are no differences then there will be no reply.
-%% Note that members will not necessarily share the same view.
-%%
-%% forget_group/1
-%% Provide the group name. Removes its mnesia record. Makes no attempt
-%% to ensure the group is empty.
-%%
-%% Implementation Overview
-%% -----------------------
-%%
-%% One possible means of implementation would be a fan-out from the
-%% sender to every member of the group. This would require that the
-%% group is fully connected, and, in the event that the original
-%% sender of the message disappears from the group before the message
-%% has made it to every member of the group, raises questions as to
-%% who is responsible for sending on the message to new group members.
-%% In particular, the issue is with [ Pid ! Msg || Pid <- Members ] -
-%% if the sender dies part way through, who is responsible for
-%% ensuring that the remaining Members receive the Msg? In the event
-%% that within the group, messages sent are broadcast from a subset of
-%% the members, the fan-out arrangement has the potential to
-%% substantially impact the CPU and network workload of such members,
-%% as such members would have to accommodate the cost of sending each
-%% message to every group member.
-%%
-%% Instead, if the members of the group are arranged in a chain, then
-%% it becomes easier to reason about who within the group has received
-%% each message and who has not. It eases issues of responsibility: in
-%% the event of a group member disappearing, the nearest upstream
-%% member of the chain is responsible for ensuring that messages
-%% continue to propagate down the chain. It also results in equal
-%% distribution of sending and receiving workload, even if all
-%% messages are being sent from just a single group member. This
-%% configuration has the further advantage that it is not necessary
-%% for every group member to know of every other group member, and
-%% even that a group member does not have to be accessible from all
-%% other group members.
-%%
-%% Performance is kept high by permitting pipelining and all
-%% communication between joined group members is asynchronous. In the
-%% chain A -> B -> C -> D, if A sends a message to the group, it will
-%% not directly contact C or D. However, it must know that D receives
-%% the message (in addition to B and C) before it can consider the
-%% message fully sent. A simplistic implementation would require that
-%% D replies to C, C replies to B and B then replies to A. This would
-%% result in a propagation delay of twice the length of the chain. It
-%% would also require, in the event of the failure of C, that D knows
-%% to directly contact B and issue the necessary replies. Instead, the
-%% chain forms a ring: D sends the message on to A: D does not
-%% distinguish A as the sender, merely as the next member (downstream)
-%% within the chain (which has now become a ring). When A receives
-%% from D messages that A sent, it knows that all members have
-%% received the message. However, the message is not dead yet: if C
-%% died as B was sending to C, then B would need to detect the death
-%% of C and forward the message on to D instead: thus every node has
-%% to remember every message published until it is told that it can
-%% forget about the message. This is essential not just for dealing
-%% with failure of members, but also for the addition of new members.
-%%
-%% Thus once A receives the message back again, it then sends to B an
-%% acknowledgement for the message, indicating that B can now forget
-%% about the message. B does so, and forwards the ack to C. C forgets
-%% the message, and forwards the ack to D, which forgets the message
-%% and finally forwards the ack back to A. At this point, A takes no
-%% further action: the message and its acknowledgement have made it to
-%% every member of the group. The message is now dead, and any new
-%% member joining the group at this point will not receive the
-%% message.
-%%
-%% We therefore have two roles:
-%%
-%% 1. The sender, who upon receiving their own messages back, must
-%% then send out acknowledgements, and upon receiving their own
-%% acknowledgements back perform no further action.
-%%
-%% 2. The other group members who upon receiving messages and
-%% acknowledgements must update their own internal state accordingly
-%% (the sending member must also do this in order to be able to
-%% accommodate failures), and forwards messages on to their downstream
-%% neighbours.
-%%
-%%
-%% Implementation: It gets trickier
-%% --------------------------------
-%%
-%% Chain A -> B -> C -> D
-%%
-%% A publishes a message which B receives. A now dies. B and D will
-%% detect the death of A, and will link up, thus the chain is now B ->
-%% C -> D. B forwards A's message on to C, who forwards it to D, who
-%% forwards it to B. Thus B is now responsible for A's messages - both
-%% publications and acknowledgements that were in flight at the point
-%% at which A died. Even worse is that this is transitive: after B
-%% forwards A's message to C, B dies as well. Now C is not only
-%% responsible for B's in-flight messages, but is also responsible for
-%% A's in-flight messages.
-%%
-%% Lemma 1: A member can only determine which dead members they have
-%% inherited responsibility for if there is a total ordering on the
-%% conflicting additions and subtractions of members from the group.
-%%
-%% Consider the simultaneous death of B and addition of B' that
-%% transitions a chain from A -> B -> C to A -> B' -> C. Either B' or
-%% C is responsible for in-flight messages from B. It is easy to
-%% ensure that at least one of them thinks they have inherited B, but
-%% if we do not ensure that exactly one of them inherits B, then we
-%% could have B' converting publishes to acks, which then will crash C
-%% as C does not believe it has issued acks for those messages.
-%%
-%% More complex scenarios are easy to concoct: A -> B -> C -> D -> E
-%% becoming A -> C' -> E. Who has inherited which of B, C and D?
-%%
-%% However, for non-conflicting membership changes, only a partial
-%% ordering is required. For example, A -> B -> C becoming A -> A' ->
-%% B. The addition of A', between A and B can have no conflicts with
-%% the death of C: it is clear that A has inherited C's messages.
-%%
-%% For ease of implementation, we adopt the simple solution, of
-%% imposing a total order on all membership changes.
-%%
-%% On the death of a member, it is ensured the dead member's
-%% neighbours become aware of the death, and the upstream neighbour
-%% now sends to its new downstream neighbour its state, including the
-%% messages pending acknowledgement. The downstream neighbour can then
-%% use this to calculate which publishes and acknowledgements it has
-%% missed out on, due to the death of its old upstream. Thus the
-%% downstream can catch up, and continues the propagation of messages
-%% through the group.
-%%
-%% Lemma 2: When a member is joining, it must synchronously
-%% communicate with its upstream member in order to receive its
-%% starting state atomically with its addition to the group.
-%%
-%% New members must start with the same state as their nearest
-%% upstream neighbour. This ensures that it is not surprised by
-%% acknowledgements they are sent, and that should their downstream
-%% neighbour die, they are able to send the correct state to their new
-%% downstream neighbour to ensure it can catch up. Thus in the
-%% transition A -> B -> C becomes A -> A' -> B -> C becomes A -> A' ->
-%% C, A' must start with the state of A, so that it can send C the
-%% correct state when B dies, allowing C to detect any missed
-%% messages.
-%%
-%% If A' starts by adding itself to the group membership, A could then
-%% die, without A' having received the necessary state from A. This
-%% would leave A' responsible for in-flight messages from A, but
-%% having the least knowledge of all, of those messages. Thus A' must
-%% start by synchronously calling A, which then immediately sends A'
-%% back its state. A then adds A' to the group. If A dies at this
-%% point then A' will be able to see this (as A' will fail to appear
-%% in the group membership), and thus A' will ignore the state it
-%% receives from A, and will simply repeat the process, trying to now
-%% join downstream from some other member. This ensures that should
-%% the upstream die as soon as the new member has been joined, the new
-%% member is guaranteed to receive the correct state, allowing it to
-%% correctly process messages inherited due to the death of its
-%% upstream neighbour.
-%%
-%% The canonical definition of the group membership is held by a
-%% distributed database. Whilst this allows the total ordering of
-%% changes to be achieved, it is nevertheless undesirable to have to
-%% query this database for the current view, upon receiving each
-%% message. Instead, we wish for members to be able to cache a view of
-%% the group membership, which then requires a cache invalidation
-%% mechanism. Each member maintains its own view of the group
-%% membership. Thus when the group's membership changes, members may
-%% need to become aware of such changes in order to be able to
-%% accurately process messages they receive. Because of the
-%% requirement of a total ordering of conflicting membership changes,
-%% it is not possible to use the guaranteed broadcast mechanism to
-%% communicate these changes: to achieve the necessary ordering, it
-%% would be necessary for such messages to be published by exactly one
-%% member, which can not be guaranteed given that such a member could
-%% die.
-%%
-%% The total ordering we enforce on membership changes gives rise to a
-%% view version number: every change to the membership creates a
-%% different view, and the total ordering permits a simple
-%% monotonically increasing view version number.
-%%
-%% Lemma 3: If a message is sent from a member that holds view version
-%% N, it can be correctly processed by any member receiving the
-%% message with a view version >= N.
-%%
-%% Initially, let us suppose that each view contains the ordering of
-%% every member that was ever part of the group. Dead members are
-%% marked as such. Thus we have a ring of members, some of which are
-%% dead, and are thus inherited by the nearest alive downstream
-%% member.
-%%
-%% In the chain A -> B -> C, all three members initially have view
-%% version 1, which reflects reality. B publishes a message, which is
-%% forwarded by C to A. B now dies, which A notices very quickly. Thus A
-%% updates the view, creating version 2. It now forwards B's
-%% publication, sending that message to its new downstream neighbour,
-%% C. This happens before C is aware of the death of B. C must become
-%% aware of the view change before it interprets the message it has
-%% received, otherwise it will fail to learn of the death of B, and
-%% thus will not realise it has inherited B's messages (and will
-%% likely crash).
-%%
-%% Thus very simply, we have that each subsequent view contains more
-%% information than the preceding view.
-%%
-%% However, to avoid the views growing indefinitely, we need to be
-%% able to delete members which have died _and_ for which no messages
-%% are in-flight. This requires that upon inheriting a dead member, we
-%% know the last publication sent by the dead member (this is easy: we
-%% inherit a member because we are the nearest downstream member which
-%% implies that we know at least as much as everyone else about the
-%% publications of the dead member), and we know the earliest message
-%% for which the acknowledgement is still in flight.
-%%
-%% In the chain A -> B -> C, when B dies, A will send to C its state
-%% (as C is the new downstream from A), allowing C to calculate which
-%% messages it has missed out on (described above). At this point, C
-%% also inherits B's messages. If that state from A also includes the
-%% last message published by B for which an acknowledgement has been
-%% seen, then C knows exactly which further acknowledgements it must
-%% receive (also including issuing acknowledgements for publications
-%% still in-flight that it receives), after which it is known there
-%% are no more messages in flight for B, thus all evidence that B was
-%% ever part of the group can be safely removed from the canonical
-%% group membership.
-%%
-%% Thus, for every message that a member sends, it includes with that
-%% message its view version. When a member receives a message it will
-%% update its view from the canonical copy, should its view be older
-%% than the view version included in the message it has received.
-%%
-%% The state held by each member therefore includes the messages from
-%% each publisher pending acknowledgement, the last publication seen
-%% from that publisher, and the last acknowledgement from that
-%% publisher. In the case of the member's own publications or
-%% inherited members, this last acknowledgement seen state indicates
-%% the last acknowledgement retired, rather than sent.
-%%
-%%
-%% Proof sketch
-%% ------------
-%%
-%% We need to prove that with the provided operational semantics, we
-%% can never reach a state that is not well formed from a well-formed
-%% starting state.
-%%
-%% Operational semantics (small step): straight-forward message
-%% sending, process monitoring, state updates.
-%%
-%% Well formed state: dead members inherited by exactly one non-dead
-%% member; for every entry in anyone's pending-acks, either (the
-%% publication of the message is in-flight downstream from the member
-%% and upstream from the publisher) or (the acknowledgement of the
-%% message is in-flight downstream from the publisher and upstream
-%% from the member).
-%%
-%% Proof by induction on the applicable operational semantics.
-%%
-%%
-%% Related work
-%% ------------
-%%
-%% The ring configuration and double traversal of messages around the
-%% ring is similar (though developed independently) to the LCR
-%% protocol by [Levy 2008]. However, LCR differs in several
-%% ways. Firstly, by using vector clocks, it enforces a total order of
-%% message delivery, which is unnecessary for our purposes. More
-%% significantly, it is built on top of a "group communication system"
-%% which performs the group management functions, taking
-%% responsibility away from the protocol as to how to cope with safely
-%% adding and removing members. When membership changes do occur, the
-%% protocol stipulates that every member must perform communication
-%% with every other member of the group, to ensure all outstanding
-%% deliveries complete, before the entire group transitions to the new
-%% view. This, in total, requires two sets of all-to-all synchronous
-%% communications.
-%%
-%% This is not only rather inefficient, but also does not explain what
-%% happens upon the failure of a member during this process. It does
-%% though entirely avoid the need for inheritance of responsibility of
-%% dead members that our protocol incorporates.
-%%
-%% In [Marandi et al 2010], a Paxos-based protocol is described. This
-%% work explicitly focuses on the efficiency of communication. LCR
-%% (and our protocol too) are more efficient, but at the cost of
-%% higher latency. The Ring-Paxos protocol is itself built on top of
-%% IP-multicast, which rules it out for many applications where
-%% point-to-point communication is all that can be required. They also
-%% have an excellent related work section which I really ought to
-%% read...
-%%
-%%
-%% [Levy 2008] The Complexity of Reliable Distributed Storage, 2008.
-%% [Marandi et al 2010] Ring Paxos: A High-Throughput Atomic Broadcast
-%% Protocol
-
-
--behaviour(gen_server2).
-
--export([create_tables/0, start_link/4, leave/1, broadcast/2,
- confirmed_broadcast/2, info/1, validate_members/2, forget_group/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3, prioritise_info/3]).
-
--ifndef(use_specs).
--export([behaviour_info/1]).
--endif.
-
--export([table_definitions/0]).
-
--define(GROUP_TABLE, gm_group).
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
--define(BROADCAST_TIMER, 25).
--define(VERSION_START, 0).
--define(SETS, ordsets).
--define(DICT, orddict).
-
--record(state,
- { self,
- left,
- right,
- group_name,
- module,
- view,
- pub_count,
- members_state,
- callback_args,
- confirms,
- broadcast_buffer,
- broadcast_timer,
- txn_executor
- }).
-
--record(gm_group, { name, version, members }).
-
--record(view_member, { id, aliases, left, right }).
-
--record(member, { pending_ack, last_pub, last_ack }).
-
--define(TABLE, {?GROUP_TABLE, [{record_name, gm_group},
- {attributes, record_info(fields, gm_group)}]}).
--define(TABLE_MATCH, {match, #gm_group { _ = '_' }}).
-
--define(TAG, '$gm').
-
--ifdef(use_specs).
-
--export_type([group_name/0]).
-
--type(group_name() :: any()).
--type(txn_fun() :: fun((fun(() -> any())) -> any())).
-
--spec(create_tables/0 :: () -> 'ok' | {'aborted', any()}).
--spec(start_link/4 :: (group_name(), atom(), any(), txn_fun()) ->
- rabbit_types:ok_pid_or_error()).
--spec(leave/1 :: (pid()) -> 'ok').
--spec(broadcast/2 :: (pid(), any()) -> 'ok').
--spec(confirmed_broadcast/2 :: (pid(), any()) -> 'ok').
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(validate_members/2 :: (pid(), [pid()]) -> 'ok').
--spec(forget_group/1 :: (group_name()) -> 'ok').
-
-%% The joined, members_changed and handle_msg callbacks can all return
-%% any of the following terms:
-%%
-%% 'ok' - the callback function returns normally
-%%
-%% {'stop', Reason} - the callback indicates the member should stop
-%% with reason Reason and should leave the group.
-%%
-%% {'become', Module, Args} - the callback indicates that the callback
-%% module should be changed to Module and that the callback functions
-%% should now be passed the arguments Args. This allows the callback
-%% module to be dynamically changed.
-
-%% Called when we've successfully joined the group. Supplied with Args
-%% provided in start_link, plus current group members.
--callback joined(Args :: term(), Members :: [pid()]) ->
- ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
-
-%% Supplied with Args provided in start_link, the list of new members
-%% and the list of members previously known to us that have since
-%% died. Note that if a member joins and dies very quickly, it's
-%% possible that we will never see that member appear in either births
-%% or deaths. However we are guaranteed that (1) we will see a member
-%% joining either in the births here, or in the members passed to
-%% joined/2 before receiving any messages from it; and (2) we will not
-%% see members die that we have not seen born (or supplied in the
-%% members to joined/2).
--callback members_changed(Args :: term(), Births :: [pid()],
- Deaths :: [pid()], Live :: [pid()]) ->
- ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
-
-%% Supplied with Args provided in start_link, the sender, and the
-%% message. This does get called for messages injected by this member,
-%% however, in such cases, there is no special significance of this
-%% invocation: it does not indicate that the message has made it to
-%% any other members, let alone all other members.
--callback handle_msg(Args :: term(), From :: pid(), Message :: term()) ->
- ok | {stop, Reason :: term()} | {become, Module :: atom(), Args :: any()}.
-
-%% Called on gm member termination as per rules in gen_server, with
-%% the Args provided in start_link plus the termination Reason.
--callback terminate(Args :: term(), Reason :: term()) ->
- ok | term().
-
--else.
-
-behaviour_info(callbacks) ->
- [{joined, 2}, {members_changed, 4}, {handle_msg, 3}, {terminate, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-create_tables() ->
- create_tables([?TABLE]).
-
-create_tables([]) ->
- ok;
-create_tables([{Table, Attributes} | Tables]) ->
- case mnesia:create_table(Table, Attributes) of
- {atomic, ok} -> create_tables(Tables);
- {aborted, {already_exists, gm_group}} -> create_tables(Tables);
- Err -> Err
- end.
-
-table_definitions() ->
- {Name, Attributes} = ?TABLE,
- [{Name, [?TABLE_MATCH | Attributes]}].
-
-start_link(GroupName, Module, Args, TxnFun) ->
- gen_server2:start_link(?MODULE, [GroupName, Module, Args, TxnFun], []).
-
-leave(Server) ->
- gen_server2:cast(Server, leave).
-
-broadcast(Server, Msg) ->
- gen_server2:cast(Server, {broadcast, Msg}).
-
-confirmed_broadcast(Server, Msg) ->
- gen_server2:call(Server, {confirmed_broadcast, Msg}, infinity).
-
-info(Server) ->
- gen_server2:call(Server, info, infinity).
-
-validate_members(Server, Members) ->
- gen_server2:cast(Server, {validate_members, Members}).
-
-forget_group(GroupName) ->
- {atomic, ok} = mnesia:sync_transaction(
- fun () ->
- mnesia:delete({?GROUP_TABLE, GroupName})
- end),
- ok.
-
-init([GroupName, Module, Args, TxnFun]) ->
- {MegaSecs, Secs, MicroSecs} = now(),
- random:seed(MegaSecs, Secs, MicroSecs),
- Self = make_member(GroupName),
- gen_server2:cast(self(), join),
- {ok, #state { self = Self,
- left = {Self, undefined},
- right = {Self, undefined},
- group_name = GroupName,
- module = Module,
- view = undefined,
- pub_count = -1,
- members_state = undefined,
- callback_args = Args,
- confirms = queue:new(),
- broadcast_buffer = [],
- broadcast_timer = undefined,
- txn_executor = TxnFun }, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-
-handle_call({confirmed_broadcast, _Msg}, _From,
- State = #state { members_state = undefined }) ->
- reply(not_joined, State);
-
-handle_call({confirmed_broadcast, Msg}, _From,
- State = #state { self = Self,
- right = {Self, undefined},
- module = Module,
- callback_args = Args }) ->
- handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
- ok, State});
-
-handle_call({confirmed_broadcast, Msg}, From, State) ->
- internal_broadcast(Msg, From, State);
-
-handle_call(info, _From,
- State = #state { members_state = undefined }) ->
- reply(not_joined, State);
-
-handle_call(info, _From, State = #state { group_name = GroupName,
- module = Module,
- view = View }) ->
- reply([{group_name, GroupName},
- {module, Module},
- {group_members, get_pids(alive_view_members(View))}], State);
-
-handle_call({add_on_right, _NewMember}, _From,
- State = #state { members_state = undefined }) ->
- reply(not_ready, State);
-
-handle_call({add_on_right, NewMember}, _From,
- State = #state { self = Self,
- group_name = GroupName,
- view = View,
- members_state = MembersState,
- module = Module,
- callback_args = Args,
- txn_executor = TxnFun }) ->
- {MembersState1, Group} =
- record_new_member_in_group(
- GroupName, Self, NewMember,
- fun (Group1) ->
- View1 = group_to_view(Group1),
- MembersState1 = remove_erased_members(MembersState, View1),
- ok = send_right(NewMember, View1,
- {catchup, Self,
- prepare_members_state(MembersState1)}),
- MembersState1
- end, TxnFun),
- View2 = group_to_view(Group),
- case validate_view(Self, View2) of
- ok ->
- State1 = State #state { view = View2,
- members_state = MembersState1 },
- State2 = check_neighbours(State1),
- Result = callback_view_changed(Args, Module, View, View2),
- handle_callback_result({Result, {ok, Group}, State2});
- Err ->
- {{stop, Err}, State}
- end.
-
-
-handle_cast({?TAG, ReqVer, Msg},
- State = #state { self = Self,
- view = View,
- members_state = MembersState,
- group_name = GroupName,
- module = Module,
- callback_args = Args }) ->
- {Result, State1} =
- case needs_view_update(ReqVer, View) of
- true -> View1 = group_to_view(read_group(GroupName)),
- case validate_view(Self, View1) of
- ok ->
- MemberState1 = remove_erased_members(MembersState,
- View1),
- {callback_view_changed(Args, Module, View, View1),
- check_neighbours(
- State #state { view = View1,
- members_state = MemberState1 })};
- Err ->
- {{stop, Err}, State}
- end;
- false -> {ok, State}
- end,
- handle_callback_result(
- if_callback_success(
- Result, fun handle_msg_true/3, fun handle_msg_false/3, Msg, State1));
-
-handle_cast({broadcast, _Msg}, State = #state { members_state = undefined }) ->
- noreply(State);
-
-handle_cast({broadcast, Msg},
- State = #state { self = Self,
- right = {Self, undefined},
- module = Module,
- callback_args = Args }) ->
- handle_callback_result({Module:handle_msg(Args, get_pid(Self), Msg),
- State});
-
-handle_cast({broadcast, Msg}, State) ->
- internal_broadcast(Msg, none, State);
-
-handle_cast(join, State = #state { self = Self,
- group_name = GroupName,
- members_state = undefined,
- module = Module,
- callback_args = Args,
- txn_executor = TxnFun }) ->
- View = join_group(Self, GroupName, TxnFun),
- MembersState =
- case alive_view_members(View) of
- [Self] -> blank_member_state();
- _ -> undefined
- end,
- State1 = check_neighbours(State #state { view = View,
- members_state = MembersState }),
- handle_callback_result(
- {Module:joined(Args, get_pids(all_known_members(View))), State1});
-
-handle_cast({validate_members, OldMembers},
- State = #state { view = View,
- module = Module,
- callback_args = Args }) ->
- NewMembers = get_pids(all_known_members(View)),
- Births = NewMembers -- OldMembers,
- Deaths = OldMembers -- NewMembers,
- case {Births, Deaths} of
- {[], []} -> noreply(State);
- _ -> Result = Module:members_changed(
- Args, Births, Deaths, NewMembers),
- handle_callback_result({Result, State})
- end;
-
-handle_cast(leave, State) ->
- {stop, normal, State}.
-
-
-handle_info(flush, State) ->
- noreply(
- flush_broadcast_buffer(State #state { broadcast_timer = undefined }));
-
-handle_info(timeout, State) ->
- noreply(flush_broadcast_buffer(State));
-
-handle_info({'DOWN', MRef, process, _Pid, Reason},
- State = #state { self = Self,
- left = Left,
- right = Right,
- group_name = GroupName,
- view = View,
- module = Module,
- callback_args = Args,
- confirms = Confirms,
- txn_executor = TxnFun }) ->
- Member = case {Left, Right} of
- {{Member1, MRef}, _} -> Member1;
- {_, {Member1, MRef}} -> Member1;
- _ -> undefined
- end,
- case {Member, Reason} of
- {undefined, _} ->
- noreply(State);
- {_, {shutdown, ring_shutdown}} ->
- noreply(State);
- _ ->
- View1 =
- group_to_view(record_dead_member_in_group(Member,
- GroupName, TxnFun)),
- {Result, State2} =
- case alive_view_members(View1) of
- [Self] ->
- {Result1, State1} = maybe_erase_aliases(State, View1),
- {Result1, State1 #state {
- members_state = blank_member_state(),
- confirms = purge_confirms(Confirms) }};
- _ ->
- State1 = State #state { view = View1 },
- case validate_view(Self, View1) of
- ok ->
- %% here we won't be pointing out any deaths:
- %% the concern is that there may be births
- %% which we'd otherwise miss.
- {callback_view_changed(
- Args, Module, View, View1),
- check_neighbours(State1)};
- Err ->
- {{stop, Err}, State1}
- end
- end,
- handle_callback_result({Result, State2})
- end.
-
-
-terminate(Reason, State = #state { module = Module,
- callback_args = Args }) ->
- flush_broadcast_buffer(State),
- Module:terminate(Args, Reason).
-
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-prioritise_info(flush, _Len, _State) ->
- 1;
-prioritise_info({'DOWN', _MRef, process, _Pid, _Reason}, _Len,
- #state { members_state = MS }) when MS /= undefined ->
- 1;
-prioritise_info(_, _Len, _State) ->
- 0.
-
-
-handle_msg(check_neighbours, State) ->
- %% no-op - it's already been done by the calling handle_cast
- {ok, State};
-
-handle_msg({catchup, Left, MembersStateLeft},
- State = #state { self = Self,
- left = {Left, _MRefL},
- right = {Right, _MRefR},
- view = View,
- members_state = undefined }) ->
- ok = send_right(Right, View, {catchup, Self, MembersStateLeft}),
- MembersStateLeft1 = build_members_state(MembersStateLeft),
- {ok, State #state { members_state = MembersStateLeft1 }};
-
-handle_msg({catchup, Left, MembersStateLeft},
- State = #state { self = Self,
- left = {Left, _MRefL},
- view = View,
- members_state = MembersState })
- when MembersState =/= undefined ->
- MembersStateLeft1 = build_members_state(MembersStateLeft),
- AllMembers = lists:usort(?DICT:fetch_keys(MembersState) ++
- ?DICT:fetch_keys(MembersStateLeft1)),
- {MembersState1, Activity} =
- lists:foldl(
- fun (Id, MembersStateActivity) ->
- #member { pending_ack = PALeft, last_ack = LA } =
- find_member_or_blank(Id, MembersStateLeft1),
- with_member_acc(
- fun (#member { pending_ack = PA } = Member, Activity1) ->
- case is_member_alias(Id, Self, View) of
- true ->
- {_AcksInFlight, Pubs, _PA1} =
- find_prefix_common_suffix(PALeft, PA),
- {Member #member { last_ack = LA },
- activity_cons(Id, pubs_from_queue(Pubs),
- [], Activity1)};
- false ->
- {Acks, _Common, Pubs} =
- find_prefix_common_suffix(PA, PALeft),
- {Member,
- activity_cons(Id, pubs_from_queue(Pubs),
- acks_from_queue(Acks),
- Activity1)}
- end
- end, Id, MembersStateActivity)
- end, {MembersState, activity_nil()}, AllMembers),
- handle_msg({activity, Left, activity_finalise(Activity)},
- State #state { members_state = MembersState1 });
-
-handle_msg({catchup, _NotLeft, _MembersState}, State) ->
- {ok, State};
-
-handle_msg({activity, Left, Activity},
- State = #state { self = Self,
- left = {Left, _MRefL},
- view = View,
- members_state = MembersState,
- confirms = Confirms })
- when MembersState =/= undefined ->
- {MembersState1, {Confirms1, Activity1}} =
- lists:foldl(
- fun ({Id, Pubs, Acks}, MembersStateConfirmsActivity) ->
- with_member_acc(
- fun (Member = #member { pending_ack = PA,
- last_pub = LP,
- last_ack = LA },
- {Confirms2, Activity2}) ->
- case is_member_alias(Id, Self, View) of
- true ->
- {ToAck, PA1} =
- find_common(queue_from_pubs(Pubs), PA,
- queue:new()),
- LA1 = last_ack(Acks, LA),
- AckNums = acks_from_queue(ToAck),
- Confirms3 = maybe_confirm(
- Self, Id, Confirms2, AckNums),
- {Member #member { pending_ack = PA1,
- last_ack = LA1 },
- {Confirms3,
- activity_cons(
- Id, [], AckNums, Activity2)}};
- false ->
- PA1 = apply_acks(Acks, join_pubs(PA, Pubs)),
- LA1 = last_ack(Acks, LA),
- LP1 = last_pub(Pubs, LP),
- {Member #member { pending_ack = PA1,
- last_pub = LP1,
- last_ack = LA1 },
- {Confirms2,
- activity_cons(Id, Pubs, Acks, Activity2)}}
- end
- end, Id, MembersStateConfirmsActivity)
- end, {MembersState, {Confirms, activity_nil()}}, Activity),
- State1 = State #state { members_state = MembersState1,
- confirms = Confirms1 },
- Activity3 = activity_finalise(Activity1),
- ok = maybe_send_activity(Activity3, State1),
- {Result, State2} = maybe_erase_aliases(State1, View),
- if_callback_success(
- Result, fun activity_true/3, fun activity_false/3, Activity3, State2);
-
-handle_msg({activity, _NotLeft, _Activity}, State) ->
- {ok, State}.
-
-
-noreply(State) ->
- {noreply, ensure_broadcast_timer(State), flush_timeout(State)}.
-
-reply(Reply, State) ->
- {reply, Reply, ensure_broadcast_timer(State), flush_timeout(State)}.
-
-flush_timeout(#state{broadcast_buffer = []}) -> hibernate;
-flush_timeout(_) -> 0.
-
-ensure_broadcast_timer(State = #state { broadcast_buffer = [],
- broadcast_timer = undefined }) ->
- State;
-ensure_broadcast_timer(State = #state { broadcast_buffer = [],
- broadcast_timer = TRef }) ->
- erlang:cancel_timer(TRef),
- State #state { broadcast_timer = undefined };
-ensure_broadcast_timer(State = #state { broadcast_timer = undefined }) ->
- TRef = erlang:send_after(?BROADCAST_TIMER, self(), flush),
- State #state { broadcast_timer = TRef };
-ensure_broadcast_timer(State) ->
- State.
-
-internal_broadcast(Msg, From, State = #state { self = Self,
- pub_count = PubCount,
- module = Module,
- confirms = Confirms,
- callback_args = Args,
- broadcast_buffer = Buffer }) ->
- PubCount1 = PubCount + 1,
- Result = Module:handle_msg(Args, get_pid(Self), Msg),
- Buffer1 = [{PubCount1, Msg} | Buffer],
- Confirms1 = case From of
- none -> Confirms;
- _ -> queue:in({PubCount1, From}, Confirms)
- end,
- State1 = State #state { pub_count = PubCount1,
- confirms = Confirms1,
- broadcast_buffer = Buffer1 },
- case From =/= none of
- true ->
- handle_callback_result({Result, flush_broadcast_buffer(State1)});
- false ->
- handle_callback_result(
- {Result, State1 #state { broadcast_buffer = Buffer1 }})
- end.
-
-flush_broadcast_buffer(State = #state { broadcast_buffer = [] }) ->
- State;
-flush_broadcast_buffer(State = #state { self = Self,
- members_state = MembersState,
- broadcast_buffer = Buffer,
- pub_count = PubCount }) ->
- [{PubCount, _Msg}|_] = Buffer, %% ASSERTION match on PubCount
- Pubs = lists:reverse(Buffer),
- Activity = activity_cons(Self, Pubs, [], activity_nil()),
- ok = maybe_send_activity(activity_finalise(Activity), State),
- MembersState1 = with_member(
- fun (Member = #member { pending_ack = PA }) ->
- PA1 = queue:join(PA, queue:from_list(Pubs)),
- Member #member { pending_ack = PA1,
- last_pub = PubCount }
- end, Self, MembersState),
- State #state { members_state = MembersState1,
- broadcast_buffer = [] }.
-
-
-%% ---------------------------------------------------------------------------
-%% View construction and inspection
-%% ---------------------------------------------------------------------------
-
-needs_view_update(ReqVer, {Ver, _View}) -> Ver < ReqVer.
-
-view_version({Ver, _View}) -> Ver.
-
-is_member_alive({dead, _Member}) -> false;
-is_member_alive(_) -> true.
-
-is_member_alias(Self, Self, _View) ->
- true;
-is_member_alias(Member, Self, View) ->
- ?SETS:is_element(Member,
- ((fetch_view_member(Self, View)) #view_member.aliases)).
-
-dead_member_id({dead, Member}) -> Member.
-
-store_view_member(VMember = #view_member { id = Id }, {Ver, View}) ->
- {Ver, ?DICT:store(Id, VMember, View)}.
-
-with_view_member(Fun, View, Id) ->
- store_view_member(Fun(fetch_view_member(Id, View)), View).
-
-fetch_view_member(Id, {_Ver, View}) -> ?DICT:fetch(Id, View).
-
-find_view_member(Id, {_Ver, View}) -> ?DICT:find(Id, View).
-
-blank_view(Ver) -> {Ver, ?DICT:new()}.
-
-alive_view_members({_Ver, View}) -> ?DICT:fetch_keys(View).
-
-all_known_members({_Ver, View}) ->
- ?DICT:fold(
- fun (Member, #view_member { aliases = Aliases }, Acc) ->
- ?SETS:to_list(Aliases) ++ [Member | Acc]
- end, [], View).
-
-group_to_view(#gm_group { members = Members, version = Ver }) ->
- Alive = lists:filter(fun is_member_alive/1, Members),
- [_|_] = Alive, %% ASSERTION - can't have all dead members
- add_aliases(link_view(Alive ++ Alive ++ Alive, blank_view(Ver)), Members).
-
-link_view([Left, Middle, Right | Rest], View) ->
- case find_view_member(Middle, View) of
- error ->
- link_view(
- [Middle, Right | Rest],
- store_view_member(#view_member { id = Middle,
- aliases = ?SETS:new(),
- left = Left,
- right = Right }, View));
- {ok, _} ->
- View
- end;
-link_view(_, View) ->
- View.
-
-validate_view(Self, View) ->
- case lists:member(Self, alive_view_members(View)) of
- true -> ok;
- false -> %% Another node removed us from the view. No safe
- %% recovery is possible so we shut the node down
- rabbit_log:info("Fatal network partition detected.~n"
- "Node committing suicide.~n"),
- init:stop(),
- {shutdown, partial_partition_detected}
- end.
-
-add_aliases(View, Members) ->
- Members1 = ensure_alive_suffix(Members),
- {EmptyDeadSet, View1} =
- lists:foldl(
- fun (Member, {DeadAcc, ViewAcc}) ->
- case is_member_alive(Member) of
- true ->
- {?SETS:new(),
- with_view_member(
- fun (VMember =
- #view_member { aliases = Aliases }) ->
- VMember #view_member {
- aliases = ?SETS:union(Aliases, DeadAcc) }
- end, ViewAcc, Member)};
- false ->
- {?SETS:add_element(dead_member_id(Member), DeadAcc),
- ViewAcc}
- end
- end, {?SETS:new(), View}, Members1),
- 0 = ?SETS:size(EmptyDeadSet), %% ASSERTION
- View1.
-
-ensure_alive_suffix(Members) ->
- queue:to_list(ensure_alive_suffix1(queue:from_list(Members))).
-
-ensure_alive_suffix1(MembersQ) ->
- {{value, Member}, MembersQ1} = queue:out_r(MembersQ),
- case is_member_alive(Member) of
- true -> MembersQ;
- false -> ensure_alive_suffix1(queue:in_r(Member, MembersQ1))
- end.
-
-
-%% ---------------------------------------------------------------------------
-%% View modification
-%% ---------------------------------------------------------------------------
-
-join_group(Self, GroupName, TxnFun) ->
- join_group(Self, GroupName, read_group(GroupName), TxnFun).
-
-join_group(Self, GroupName, {error, not_found}, TxnFun) ->
- join_group(Self, GroupName,
- prune_or_create_group(Self, GroupName, TxnFun), TxnFun);
-join_group(Self, _GroupName, #gm_group { members = [Self] } = Group, _TxnFun) ->
- group_to_view(Group);
-join_group(Self, GroupName, #gm_group { members = Members } = Group, TxnFun) ->
- case lists:member(Self, Members) of
- true ->
- group_to_view(Group);
- false ->
- case lists:filter(fun is_member_alive/1, Members) of
- [] ->
- join_group(Self, GroupName,
- prune_or_create_group(Self, GroupName, TxnFun));
- Alive ->
- Left = lists:nth(random:uniform(length(Alive)), Alive),
- Handler =
- fun () ->
- join_group(
- Self, GroupName,
- record_dead_member_in_group(
- Left, GroupName, TxnFun),
- TxnFun)
- end,
- try
- case gen_server2:call(
- get_pid(Left), {add_on_right, Self}, infinity) of
- {ok, Group1} -> group_to_view(Group1);
- not_ready -> join_group(Self, GroupName, TxnFun)
- end
- catch
- exit:{R, _}
- when R =:= noproc; R =:= normal; R =:= shutdown ->
- Handler();
- exit:{{R, _}, _}
- when R =:= nodedown; R =:= shutdown ->
- Handler()
- end
- end
- end.
-
-read_group(GroupName) ->
- case mnesia:dirty_read(?GROUP_TABLE, GroupName) of
- [] -> {error, not_found};
- [Group] -> Group
- end.
-
-prune_or_create_group(Self, GroupName, TxnFun) ->
- Group = TxnFun(
- fun () ->
- GroupNew = #gm_group { name = GroupName,
- members = [Self],
- version = get_version(Self) },
- case mnesia:read({?GROUP_TABLE, GroupName}) of
- [] ->
- mnesia:write(GroupNew),
- GroupNew;
- [Group1 = #gm_group { members = Members }] ->
- case lists:any(fun is_member_alive/1, Members) of
- true -> Group1;
- false -> mnesia:write(GroupNew),
- GroupNew
- end
- end
- end),
- Group.
-
-record_dead_member_in_group(Member, GroupName, TxnFun) ->
- Group =
- TxnFun(
- fun () -> [Group1 = #gm_group { members = Members, version = Ver }] =
- mnesia:read({?GROUP_TABLE, GroupName}),
- case lists:splitwith(
- fun (Member1) -> Member1 =/= Member end, Members) of
- {_Members1, []} -> %% not found - already recorded dead
- Group1;
- {Members1, [Member | Members2]} ->
- Members3 = Members1 ++ [{dead, Member} | Members2],
- Group2 = Group1 #gm_group { members = Members3,
- version = Ver + 1 },
- mnesia:write(Group2),
- Group2
- end
- end),
- Group.
-
-record_new_member_in_group(GroupName, Left, NewMember, Fun, TxnFun) ->
- {Result, Group} =
- TxnFun(
- fun () ->
- [#gm_group { members = Members, version = Ver } = Group1] =
- mnesia:read({?GROUP_TABLE, GroupName}),
- {Prefix, [Left | Suffix]} =
- lists:splitwith(fun (M) -> M =/= Left end, Members),
- Members1 = Prefix ++ [Left, NewMember | Suffix],
- Group2 = Group1 #gm_group { members = Members1,
- version = Ver + 1 },
- Result = Fun(Group2),
- mnesia:write(Group2),
- {Result, Group2}
- end),
- {Result, Group}.
-
-erase_members_in_group(Members, GroupName, TxnFun) ->
- DeadMembers = [{dead, Id} || Id <- Members],
- Group =
- TxnFun(
- fun () ->
- [Group1 = #gm_group { members = [_|_] = Members1,
- version = Ver }] =
- mnesia:read({?GROUP_TABLE, GroupName}),
- case Members1 -- DeadMembers of
- Members1 -> Group1;
- Members2 -> Group2 =
- Group1 #gm_group { members = Members2,
- version = Ver + 1 },
- mnesia:write(Group2),
- Group2
- end
- end),
- Group.
-
-maybe_erase_aliases(State = #state { self = Self,
- group_name = GroupName,
- view = View0,
- members_state = MembersState,
- module = Module,
- callback_args = Args,
- txn_executor = TxnFun }, View) ->
- #view_member { aliases = Aliases } = fetch_view_member(Self, View),
- {Erasable, MembersState1}
- = ?SETS:fold(
- fun (Id, {ErasableAcc, MembersStateAcc} = Acc) ->
- #member { last_pub = LP, last_ack = LA } =
- find_member_or_blank(Id, MembersState),
- case can_erase_view_member(Self, Id, LA, LP) of
- true -> {[Id | ErasableAcc],
- erase_member(Id, MembersStateAcc)};
- false -> Acc
- end
- end, {[], MembersState}, Aliases),
- State1 = State #state { members_state = MembersState1 },
- case Erasable of
- [] -> {ok, State1 #state { view = View }};
- _ -> View1 = group_to_view(
- erase_members_in_group(Erasable, GroupName, TxnFun)),
- {callback_view_changed(Args, Module, View0, View1),
- check_neighbours(State1 #state { view = View1 })}
- end.
-
-can_erase_view_member(Self, Self, _LA, _LP) -> false;
-can_erase_view_member(_Self, _Id, N, N) -> true;
-can_erase_view_member(_Self, _Id, _LA, _LP) -> false.
-
-
-%% ---------------------------------------------------------------------------
-%% View monitoring and maintanence
-%% ---------------------------------------------------------------------------
-
-ensure_neighbour(_Ver, Self, {Self, undefined}, Self) ->
- {Self, undefined};
-ensure_neighbour(Ver, Self, {Self, undefined}, RealNeighbour) ->
- ok = gen_server2:cast(get_pid(RealNeighbour),
- {?TAG, Ver, check_neighbours}),
- {RealNeighbour, maybe_monitor(RealNeighbour, Self)};
-ensure_neighbour(_Ver, _Self, {RealNeighbour, MRef}, RealNeighbour) ->
- {RealNeighbour, MRef};
-ensure_neighbour(Ver, Self, {RealNeighbour, MRef}, Neighbour) ->
- true = erlang:demonitor(MRef),
- Msg = {?TAG, Ver, check_neighbours},
- ok = gen_server2:cast(get_pid(RealNeighbour), Msg),
- ok = case Neighbour of
- Self -> ok;
- _ -> gen_server2:cast(get_pid(Neighbour), Msg)
- end,
- {Neighbour, maybe_monitor(Neighbour, Self)}.
-
-maybe_monitor( Self, Self) -> undefined;
-maybe_monitor(Other, _Self) -> erlang:monitor(process, get_pid(Other)).
-
-check_neighbours(State = #state { self = Self,
- left = Left,
- right = Right,
- view = View,
- broadcast_buffer = Buffer }) ->
- #view_member { left = VLeft, right = VRight }
- = fetch_view_member(Self, View),
- Ver = view_version(View),
- Left1 = ensure_neighbour(Ver, Self, Left, VLeft),
- Right1 = ensure_neighbour(Ver, Self, Right, VRight),
- Buffer1 = case Right1 of
- {Self, undefined} -> [];
- _ -> Buffer
- end,
- State1 = State #state { left = Left1, right = Right1,
- broadcast_buffer = Buffer1 },
- ok = maybe_send_catchup(Right, State1),
- State1.
-
-maybe_send_catchup(Right, #state { right = Right }) ->
- ok;
-maybe_send_catchup(_Right, #state { self = Self,
- right = {Self, undefined} }) ->
- ok;
-maybe_send_catchup(_Right, #state { members_state = undefined }) ->
- ok;
-maybe_send_catchup(_Right, #state { self = Self,
- right = {Right, _MRef},
- view = View,
- members_state = MembersState }) ->
- send_right(Right, View,
- {catchup, Self, prepare_members_state(MembersState)}).
-
-
-%% ---------------------------------------------------------------------------
-%% Catch_up delta detection
-%% ---------------------------------------------------------------------------
-
-find_prefix_common_suffix(A, B) ->
- {Prefix, A1} = find_prefix(A, B, queue:new()),
- {Common, Suffix} = find_common(A1, B, queue:new()),
- {Prefix, Common, Suffix}.
-
-%% Returns the elements of A that occur before the first element of B,
-%% plus the remainder of A.
-find_prefix(A, B, Prefix) ->
- case {queue:out(A), queue:out(B)} of
- {{{value, Val}, _A1}, {{value, Val}, _B1}} ->
- {Prefix, A};
- {{empty, A1}, {{value, _A}, _B1}} ->
- {Prefix, A1};
- {{{value, {NumA, _MsgA} = Val}, A1},
- {{value, {NumB, _MsgB}}, _B1}} when NumA < NumB ->
- find_prefix(A1, B, queue:in(Val, Prefix));
- {_, {empty, _B1}} ->
- {A, Prefix} %% Prefix well be empty here
- end.
-
-%% A should be a prefix of B. Returns the commonality plus the
-%% remainder of B.
-find_common(A, B, Common) ->
- case {queue:out(A), queue:out(B)} of
- {{{value, Val}, A1}, {{value, Val}, B1}} ->
- find_common(A1, B1, queue:in(Val, Common));
- {{empty, _A}, _} ->
- {Common, B}
- end.
-
-
-%% ---------------------------------------------------------------------------
-%% Members helpers
-%% ---------------------------------------------------------------------------
-
-with_member(Fun, Id, MembersState) ->
- store_member(
- Id, Fun(find_member_or_blank(Id, MembersState)), MembersState).
-
-with_member_acc(Fun, Id, {MembersState, Acc}) ->
- {MemberState, Acc1} = Fun(find_member_or_blank(Id, MembersState), Acc),
- {store_member(Id, MemberState, MembersState), Acc1}.
-
-find_member_or_blank(Id, MembersState) ->
- case ?DICT:find(Id, MembersState) of
- {ok, Result} -> Result;
- error -> blank_member()
- end.
-
-erase_member(Id, MembersState) -> ?DICT:erase(Id, MembersState).
-
-blank_member() ->
- #member { pending_ack = queue:new(), last_pub = -1, last_ack = -1 }.
-
-blank_member_state() -> ?DICT:new().
-
-store_member(Id, MemberState, MembersState) ->
- ?DICT:store(Id, MemberState, MembersState).
-
-prepare_members_state(MembersState) -> ?DICT:to_list(MembersState).
-
-build_members_state(MembersStateList) -> ?DICT:from_list(MembersStateList).
-
-make_member(GroupName) ->
- {case read_group(GroupName) of
- #gm_group { version = Version } -> Version;
- {error, not_found} -> ?VERSION_START
- end, self()}.
-
-remove_erased_members(MembersState, View) ->
- lists:foldl(fun (Id, MembersState1) ->
- store_member(Id, find_member_or_blank(Id, MembersState),
- MembersState1)
- end, blank_member_state(), all_known_members(View)).
-
-get_version({Version, _Pid}) -> Version.
-
-get_pid({_Version, Pid}) -> Pid.
-
-get_pids(Ids) -> [Pid || {_Version, Pid} <- Ids].
-
-%% ---------------------------------------------------------------------------
-%% Activity assembly
-%% ---------------------------------------------------------------------------
-
-activity_nil() -> queue:new().
-
-activity_cons( _Id, [], [], Tail) -> Tail;
-activity_cons(Sender, Pubs, Acks, Tail) -> queue:in({Sender, Pubs, Acks}, Tail).
-
-activity_finalise(Activity) -> queue:to_list(Activity).
-
-maybe_send_activity([], _State) ->
- ok;
-maybe_send_activity(Activity, #state { self = Self,
- right = {Right, _MRefR},
- view = View }) ->
- send_right(Right, View, {activity, Self, Activity}).
-
-send_right(Right, View, Msg) ->
- ok = gen_server2:cast(get_pid(Right), {?TAG, view_version(View), Msg}).
-
-callback(Args, Module, Activity) ->
- Result =
- lists:foldl(
- fun ({Id, Pubs, _Acks}, {Args1, Module1, ok}) ->
- lists:foldl(fun ({_PubNum, Pub}, Acc = {Args2, Module2, ok}) ->
- case Module2:handle_msg(
- Args2, get_pid(Id), Pub) of
- ok ->
- Acc;
- {become, Module3, Args3} ->
- {Args3, Module3, ok};
- {stop, _Reason} = Error ->
- Error
- end;
- (_, Error = {stop, _Reason}) ->
- Error
- end, {Args1, Module1, ok}, Pubs);
- (_, Error = {stop, _Reason}) ->
- Error
- end, {Args, Module, ok}, Activity),
- case Result of
- {Args, Module, ok} -> ok;
- {Args1, Module1, ok} -> {become, Module1, Args1};
- {stop, _Reason} = Error -> Error
- end.
-
-callback_view_changed(Args, Module, OldView, NewView) ->
- OldMembers = all_known_members(OldView),
- NewMembers = all_known_members(NewView),
- Births = NewMembers -- OldMembers,
- Deaths = OldMembers -- NewMembers,
- case {Births, Deaths} of
- {[], []} -> ok;
- _ -> Module:members_changed(
- Args, get_pids(Births), get_pids(Deaths),
- get_pids(NewMembers))
- end.
-
-handle_callback_result({Result, State}) ->
- if_callback_success(
- Result, fun no_reply_true/3, fun no_reply_false/3, undefined, State);
-handle_callback_result({Result, Reply, State}) ->
- if_callback_success(
- Result, fun reply_true/3, fun reply_false/3, Reply, State).
-
-no_reply_true (_Result, _Undefined, State) -> noreply(State).
-no_reply_false({stop, Reason}, _Undefined, State) -> {stop, Reason, State}.
-
-reply_true (_Result, Reply, State) -> reply(Reply, State).
-reply_false({stop, Reason}, Reply, State) -> {stop, Reason, Reply, State}.
-
-handle_msg_true (_Result, Msg, State) -> handle_msg(Msg, State).
-handle_msg_false(Result, _Msg, State) -> {Result, State}.
-
-activity_true(_Result, Activity, State = #state { module = Module,
- callback_args = Args }) ->
- {callback(Args, Module, Activity), State}.
-activity_false(Result, _Activity, State) ->
- {Result, State}.
-
-if_callback_success(ok, True, _False, Arg, State) ->
- True(ok, Arg, State);
-if_callback_success(
- {become, Module, Args} = Result, True, _False, Arg, State) ->
- True(Result, Arg, State #state { module = Module,
- callback_args = Args });
-if_callback_success({stop, _Reason} = Result, _True, False, Arg, State) ->
- False(Result, Arg, State).
-
-maybe_confirm(_Self, _Id, Confirms, []) ->
- Confirms;
-maybe_confirm(Self, Self, Confirms, [PubNum | PubNums]) ->
- case queue:out(Confirms) of
- {empty, _Confirms} ->
- Confirms;
- {{value, {PubNum, From}}, Confirms1} ->
- gen_server2:reply(From, ok),
- maybe_confirm(Self, Self, Confirms1, PubNums);
- {{value, {PubNum1, _From}}, _Confirms} when PubNum1 > PubNum ->
- maybe_confirm(Self, Self, Confirms, PubNums)
- end;
-maybe_confirm(_Self, _Id, Confirms, _PubNums) ->
- Confirms.
-
-purge_confirms(Confirms) ->
- [gen_server2:reply(From, ok) || {_PubNum, From} <- queue:to_list(Confirms)],
- queue:new().
-
-
-%% ---------------------------------------------------------------------------
-%% Msg transformation
-%% ---------------------------------------------------------------------------
-
-acks_from_queue(Q) -> [PubNum || {PubNum, _Msg} <- queue:to_list(Q)].
-
-pubs_from_queue(Q) -> queue:to_list(Q).
-
-queue_from_pubs(Pubs) -> queue:from_list(Pubs).
-
-apply_acks( [], Pubs) -> Pubs;
-apply_acks(List, Pubs) -> {_, Pubs1} = queue:split(length(List), Pubs),
- Pubs1.
-
-join_pubs(Q, []) -> Q;
-join_pubs(Q, Pubs) -> queue:join(Q, queue_from_pubs(Pubs)).
-
-last_ack( [], LA) -> LA;
-last_ack(List, LA) -> LA1 = lists:last(List),
- true = LA1 > LA, %% ASSERTION
- LA1.
-
-last_pub( [], LP) -> LP;
-last_pub(List, LP) -> {PubNum, _Msg} = lists:last(List),
- true = PubNum > LP, %% ASSERTION
- PubNum.
diff --git a/src/gm_soak_test.erl b/src/gm_soak_test.erl
deleted file mode 100644
index 701cb0f7..00000000
--- a/src/gm_soak_test.erl
+++ /dev/null
@@ -1,133 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(gm_soak_test).
-
--export([test/0]).
--export([joined/2, members_changed/4, handle_msg/3, terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% ---------------------------------------------------------------------------
-%% Soak test
-%% ---------------------------------------------------------------------------
-
-get_state() ->
- get(state).
-
-with_state(Fun) ->
- put(state, Fun(get_state())).
-
-inc() ->
- case 1 + get(count) of
- 100000 -> Now = now(),
- Start = put(ts, Now),
- Diff = timer:now_diff(Now, Start),
- Rate = 100000 / (Diff / 1000000),
- io:format("~p seeing ~p msgs/sec~n", [self(), Rate]),
- put(count, 0);
- N -> put(count, N)
- end.
-
-joined([], Members) ->
- io:format("Joined ~p (~p members)~n", [self(), length(Members)]),
- put(state, dict:from_list([{Member, empty} || Member <- Members])),
- put(count, 0),
- put(ts, now()),
- ok.
-
-members_changed([], Births, Deaths, _Live) ->
- with_state(
- fun (State) ->
- State1 =
- lists:foldl(
- fun (Born, StateN) ->
- false = dict:is_key(Born, StateN),
- dict:store(Born, empty, StateN)
- end, State, Births),
- lists:foldl(
- fun (Died, StateN) ->
- true = dict:is_key(Died, StateN),
- dict:store(Died, died, StateN)
- end, State1, Deaths)
- end),
- ok.
-
-handle_msg([], From, {test_msg, Num}) ->
- inc(),
- with_state(
- fun (State) ->
- ok = case dict:find(From, State) of
- {ok, died} ->
- exit({{from, From},
- {received_posthumous_delivery, Num}});
- {ok, empty} -> ok;
- {ok, Num} -> ok;
- {ok, Num1} when Num < Num1 ->
- exit({{from, From},
- {duplicate_delivery_of, Num},
- {expecting, Num1}});
- {ok, Num1} ->
- exit({{from, From},
- {received_early, Num},
- {expecting, Num1}});
- error ->
- exit({{from, From},
- {received_premature_delivery, Num}})
- end,
- dict:store(From, Num + 1, State)
- end),
- ok.
-
-terminate([], Reason) ->
- io:format("Left ~p (~p)~n", [self(), Reason]),
- ok.
-
-spawn_member() ->
- spawn_link(
- fun () ->
- {MegaSecs, Secs, MicroSecs} = now(),
- random:seed(MegaSecs, Secs, MicroSecs),
- %% start up delay of no more than 10 seconds
- timer:sleep(random:uniform(10000)),
- {ok, Pid} = gm:start_link(
- ?MODULE, ?MODULE, [],
- fun rabbit_misc:execute_mnesia_transaction/1),
- Start = random:uniform(10000),
- send_loop(Pid, Start, Start + random:uniform(10000)),
- gm:leave(Pid),
- spawn_more()
- end).
-
-spawn_more() ->
- [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))].
-
-send_loop(_Pid, Target, Target) ->
- ok;
-send_loop(Pid, Count, Target) when Target > Count ->
- case random:uniform(3) of
- 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count});
- _ -> gm:broadcast(Pid, {test_msg, Count})
- end,
- timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms
- send_loop(Pid, Count + 1, Target).
-
-test() ->
- ok = gm:create_tables(),
- spawn_member(),
- spawn_member().
diff --git a/src/gm_speed_test.erl b/src/gm_speed_test.erl
deleted file mode 100644
index 0f65a792..00000000
--- a/src/gm_speed_test.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(gm_speed_test).
-
--export([test/3]).
--export([joined/2, members_changed/4, handle_msg/3, terminate/2]).
--export([wile_e_coyote/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% callbacks
-
-joined(Owner, _Members) ->
- Owner ! joined,
- ok.
-
-members_changed(_Owner, _Births, _Deaths, _Live) ->
- ok.
-
-handle_msg(Owner, _From, ping) ->
- Owner ! ping,
- ok.
-
-terminate(Owner, _Reason) ->
- Owner ! terminated,
- ok.
-
-%% other
-
-wile_e_coyote(Time, WriteUnit) ->
- {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- receive joined -> ok end,
- timer:sleep(1000), %% wait for all to join
- timer:send_after(Time, stop),
- Start = now(),
- {Sent, Received} = loop(Pid, WriteUnit, 0, 0),
- End = now(),
- ok = gm:leave(Pid),
- receive terminated -> ok end,
- Elapsed = timer:now_diff(End, Start) / 1000000,
- io:format("Sending rate: ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n",
- [Sent/Elapsed, Received/Elapsed]),
- ok.
-
-loop(Pid, WriteUnit, Sent, Received) ->
- case read(Received) of
- {stop, Received1} -> {Sent, Received1};
- {ok, Received1} -> ok = write(Pid, WriteUnit),
- loop(Pid, WriteUnit, Sent + WriteUnit, Received1)
- end.
-
-read(Count) ->
- receive
- ping -> read(Count + 1);
- stop -> {stop, Count}
- after 5 ->
- {ok, Count}
- end.
-
-write(_Pid, 0) -> ok;
-write(Pid, N) -> ok = gm:broadcast(Pid, ping),
- write(Pid, N - 1).
-
-test(Time, WriteUnit, Nodes) ->
- ok = gm:create_tables(),
- [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes].
diff --git a/src/gm_tests.erl b/src/gm_tests.erl
deleted file mode 100644
index 9a348076..00000000
--- a/src/gm_tests.erl
+++ /dev/null
@@ -1,186 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(gm_tests).
-
--export([test_join_leave/0,
- test_broadcast/0,
- test_confirmed_broadcast/0,
- test_member_death/0,
- test_receive_in_order/0,
- all_tests/0]).
--export([joined/2, members_changed/4, handle_msg/3, terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
--define(RECEIVE_OR_THROW(Body, Bool, Error),
- receive Body ->
- true = Bool,
- passed
- after 1000 ->
- throw(Error)
- end).
-
-joined(Pid, Members) ->
- Pid ! {joined, self(), Members},
- ok.
-
-members_changed(Pid, Births, Deaths, _Live) ->
- Pid ! {members_changed, self(), Births, Deaths},
- ok.
-
-handle_msg(Pid, From, Msg) ->
- Pid ! {msg, self(), From, Msg},
- ok.
-
-terminate(Pid, Reason) ->
- Pid ! {termination, self(), Reason},
- ok.
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
- passed = test_join_leave(),
- passed = test_broadcast(),
- passed = test_confirmed_broadcast(),
- passed = test_member_death(),
- passed = test_receive_in_order(),
- passed.
-
-test_join_leave() ->
- with_two_members(fun (_Pid, _Pid2) -> passed end).
-
-test_broadcast() ->
- test_broadcast(fun gm:broadcast/2).
-
-test_confirmed_broadcast() ->
- test_broadcast(fun gm:confirmed_broadcast/2).
-
-test_member_death() ->
- with_two_members(
- fun (Pid, Pid2) ->
- {ok, Pid3} = gm:start_link(
- ?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
- timeout_joining_gm_group_3),
- passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
- passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
-
- unlink(Pid3),
- exit(Pid3, kill),
-
- %% Have to do some broadcasts to ensure that all members
- %% find out about the death.
- passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))(
- Pid, Pid2),
-
- passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
- passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
-
- passed
- end).
-
-test_receive_in_order() ->
- with_two_members(
- fun (Pid, Pid2) ->
- Numbers = lists:seq(1,1000),
- [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
- || N <- Numbers],
- passed = receive_numbers(
- Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
- passed = receive_numbers(
- Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
- passed = receive_numbers(
- Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
- passed = receive_numbers(
- Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
- passed
- end).
-
-test_broadcast(Fun) ->
- with_two_members(test_broadcast_fun(Fun)).
-
-test_broadcast_fun(Fun) ->
- fun (Pid, Pid2) ->
- ok = Fun(Pid, magic_message),
- passed = receive_or_throw({msg, Pid, Pid, magic_message},
- timeout_waiting_for_msg),
- passed = receive_or_throw({msg, Pid2, Pid, magic_message},
- timeout_waiting_for_msg)
- end.
-
-with_two_members(Fun) ->
- ok = gm:create_tables(),
-
- {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
-
- {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
- passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
-
- passed = Fun(Pid, Pid2),
-
- ok = gm:leave(Pid),
- passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
- passed =
- receive_termination(Pid, normal, timeout_waiting_for_termination_1),
-
- ok = gm:leave(Pid2),
- passed =
- receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
-
- receive X -> throw({unexpected_message, X})
- after 0 -> passed
- end.
-
-receive_or_throw(Pattern, Error) ->
- ?RECEIVE_OR_THROW(Pattern, true, Error).
-
-receive_birth(From, Born, Error) ->
- ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
- ([Born] == Birth) andalso ([] == Death),
- Error).
-
-receive_death(From, Died, Error) ->
- ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
- ([] == Birth) andalso ([Died] == Death),
- Error).
-
-receive_joined(From, Members, Error) ->
- ?RECEIVE_OR_THROW({joined, From, Members1},
- lists:usort(Members) == lists:usort(Members1),
- Error).
-
-receive_termination(From, Reason, Error) ->
- ?RECEIVE_OR_THROW({termination, From, Reason1},
- Reason == Reason1,
- Error).
-
-receive_numbers(_Pid, _Sender, _Error, []) ->
- passed;
-receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
- ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
- M == N,
- Error),
- receive_numbers(Pid, Sender, Error, Numbers).
diff --git a/src/lqueue.erl b/src/lqueue.erl
deleted file mode 100644
index 4ff7cc0b..00000000
--- a/src/lqueue.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(lqueue).
-
--export([new/0, is_empty/1, len/1, in/2, in_r/2, out/1, out_r/1, join/2,
- foldl/3, foldr/3, from_list/1, to_list/1, peek/1, peek_r/1]).
-
--define(QUEUE, queue).
-
--ifdef(use_specs).
-
--export_type([?MODULE/0]).
-
--opaque(?MODULE() :: {non_neg_integer(), ?QUEUE()}).
--type(value() :: any()).
--type(result() :: 'empty' | {'value', value()}).
-
--spec(new/0 :: () -> ?MODULE()).
--spec(is_empty/1 :: (?MODULE()) -> boolean()).
--spec(len/1 :: (?MODULE()) -> non_neg_integer()).
--spec(in/2 :: (value(), ?MODULE()) -> ?MODULE()).
--spec(in_r/2 :: (value(), ?MODULE()) -> ?MODULE()).
--spec(out/1 :: (?MODULE()) -> {result(), ?MODULE()}).
--spec(out_r/1 :: (?MODULE()) -> {result(), ?MODULE()}).
--spec(join/2 :: (?MODULE(), ?MODULE()) -> ?MODULE()).
--spec(foldl/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
--spec(foldr/3 :: (fun ((value(), B) -> B), B, ?MODULE()) -> B).
--spec(from_list/1 :: ([value()]) -> ?MODULE()).
--spec(to_list/1 :: (?MODULE()) -> [value()]).
--spec(peek/1 :: (?MODULE()) -> result()).
--spec(peek_r/1 :: (?MODULE()) -> result()).
-
--endif.
-
-new() -> {0, ?QUEUE:new()}.
-
-is_empty({0, _Q}) -> true;
-is_empty(_) -> false.
-
-in(V, {L, Q}) -> {L+1, ?QUEUE:in(V, Q)}.
-
-in_r(V, {L, Q}) -> {L+1, ?QUEUE:in_r(V, Q)}.
-
-out({0, _Q} = Q) -> {empty, Q};
-out({L, Q}) -> {Result, Q1} = ?QUEUE:out(Q),
- {Result, {L-1, Q1}}.
-
-out_r({0, _Q} = Q) -> {empty, Q};
-out_r({L, Q}) -> {Result, Q1} = ?QUEUE:out_r(Q),
- {Result, {L-1, Q1}}.
-
-join({L1, Q1}, {L2, Q2}) -> {L1 + L2, ?QUEUE:join(Q1, Q2)}.
-
-to_list({_L, Q}) -> ?QUEUE:to_list(Q).
-
-from_list(L) -> {length(L), ?QUEUE:from_list(L)}.
-
-foldl(Fun, Init, Q) ->
- case out(Q) of
- {empty, _Q} -> Init;
- {{value, V}, Q1} -> foldl(Fun, Fun(V, Init), Q1)
- end.
-
-foldr(Fun, Init, Q) ->
- case out_r(Q) of
- {empty, _Q} -> Init;
- {{value, V}, Q1} -> foldr(Fun, Fun(V, Init), Q1)
- end.
-
-len({L, _Q}) -> L.
-
-peek({ 0, _Q}) -> empty;
-peek({_L, Q}) -> ?QUEUE:peek(Q).
-
-peek_r({ 0, _Q}) -> empty;
-peek_r({_L, Q}) -> ?QUEUE:peek_r(Q).
diff --git a/src/mirrored_supervisor.erl b/src/mirrored_supervisor.erl
deleted file mode 100644
index 3b16c53a..00000000
--- a/src/mirrored_supervisor.erl
+++ /dev/null
@@ -1,505 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(mirrored_supervisor).
-
-%% Mirrored Supervisor
-%% ===================
-%%
-%% This module implements a new type of supervisor. It acts like a
-%% normal supervisor, but at creation time you also provide the name
-%% of a process group to join. All the supervisors within the
-%% process group act like a single large distributed supervisor:
-%%
-%% * A process with a given child_id will only exist on one
-%% supervisor within the group.
-%%
-%% * If one supervisor fails, children may migrate to surviving
-%% supervisors within the group.
-%%
-%% In almost all cases you will want to use the module name for the
-%% process group. Using multiple process groups with the same module
-%% name is supported. Having multiple module names for the same
-%% process group will lead to undefined behaviour.
-%%
-%% Motivation
-%% ----------
-%%
-%% Sometimes you have processes which:
-%%
-%% * Only need to exist once per cluster.
-%%
-%% * Does not contain much state (or can reconstruct its state easily).
-%%
-%% * Needs to be restarted elsewhere should it be running on a node
-%% which fails.
-%%
-%% By creating a mirrored supervisor group with one supervisor on
-%% each node, that's what you get.
-%%
-%%
-%% API use
-%% -------
-%%
-%% This is basically the same as for supervisor, except that:
-%%
-%% 1) start_link(Module, Args) becomes
-%% start_link(Group, Module, Args).
-%%
-%% 2) start_link({local, Name}, Module, Args) becomes
-%% start_link({local, Name}, Group, Module, Args).
-%%
-%% 3) start_link({global, Name}, Module, Args) is not available.
-%%
-%% 4) The restart strategy simple_one_for_one is not available.
-%%
-%% 5) Mnesia is used to hold global state. At some point your
-%% application should invoke create_tables() (or table_definitions()
-%% if it wants to manage table creation itself).
-%%
-%% Internals
-%% ---------
-%%
-%% Each mirrored_supervisor consists of three processes - the overall
-%% supervisor, the delegate supervisor and the mirroring server. The
-%% overall supervisor supervises the other two processes. Its pid is
-%% the one returned from start_link; the pids of the other two
-%% processes are effectively hidden in the API.
-%%
-%% The delegate supervisor is in charge of supervising all the child
-%% processes that are added to the supervisor as usual.
-%%
-%% The mirroring server intercepts calls to the supervisor API
-%% (directed at the overall supervisor), does any special handling,
-%% and forwards everything to the delegate supervisor.
-%%
-%% This module implements all three, hence init/1 is somewhat overloaded.
-%%
-%% The mirroring server creates and joins a process group on
-%% startup. It monitors all the existing members of this group, and
-%% broadcasts a "hello" message to them so that they can monitor it in
-%% turn. When it receives a 'DOWN' message, it checks to see if it's
-%% the "first" server in the group and restarts all the child
-%% processes from the dead supervisor if so.
-%%
-%% In the future we might load balance this.
-%%
-%% Startup is slightly fiddly. The mirroring server needs to know the
-%% Pid of the overall supervisor, but we don't have that until it has
-%% started. Therefore we set this after the fact. We also start any
-%% children we found in Module:init() at this point, since starting
-%% children requires knowing the overall supervisor pid.
-
--define(SUPERVISOR, supervisor2).
--define(GEN_SERVER, gen_server2).
--define(PG2, pg2_fixed).
-
--define(TABLE, mirrored_sup_childspec).
--define(TABLE_DEF,
- {?TABLE,
- [{record_name, mirrored_sup_childspec},
- {type, ordered_set},
- {attributes, record_info(fields, mirrored_sup_childspec)}]}).
--define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
-
--export([start_link/3, start_link/4,
- start_child/2, restart_child/2,
- delete_child/2, terminate_child/2,
- which_children/1, count_children/1, check_childspecs/1]).
-
--behaviour(?GEN_SERVER).
--behaviour(?SUPERVISOR).
-
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
- handle_cast/2]).
-
--export([start_internal/2]).
--export([create_tables/0, table_definitions/0]).
-
--record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
-
--record(state, {overall,
- delegate,
- group,
- initial_childspecs}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
-%%--------------------------------------------------------------------------
-%% Callback behaviour
-%%--------------------------------------------------------------------------
-
--callback init(Args :: term()) ->
- {ok, {{RestartStrategy :: supervisor2:strategy(),
- MaxR :: non_neg_integer(),
- MaxT :: non_neg_integer()},
- [ChildSpec :: supervisor2:child_spec()]}}
- | ignore.
-
-%%--------------------------------------------------------------------------
-%% Specs
-%%--------------------------------------------------------------------------
-
--type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
--type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
-
--type group_name() :: any().
-
--spec start_link(GroupName, Module, Args) -> startlink_ret() when
- GroupName :: group_name(),
- Module :: module(),
- Args :: term().
-
--spec start_link(SupName, GroupName, Module, Args) -> startlink_ret() when
- SupName :: supervisor2:sup_name(),
- GroupName :: group_name(),
- Module :: module(),
- Args :: term().
-
--spec start_internal(Group, ChildSpecs) -> Result when
- Group :: group_name(),
- ChildSpecs :: [supervisor2:child_spec()],
- Result :: {'ok', pid()} | {'error', term()}.
-
--spec create_tables() -> Result when
- Result :: 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) -> [{init,1}];
-behaviour_info(_Other) -> undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Group, Mod, Args) ->
- start_link0([], Group, init(Mod, Args)).
-
-start_link({local, SupName}, Group, Mod, Args) ->
- start_link0([{local, SupName}], Group, init(Mod, Args));
-
-start_link({global, _SupName}, _Group, _Mod, _Args) ->
- erlang:error(badarg).
-
-start_link0(Prefix, Group, Init) ->
- case apply(?SUPERVISOR, start_link,
- Prefix ++ [?MODULE, {overall, Group, Init}]) of
- {ok, Pid} -> case catch call(Pid, {init, Pid}) of
- ok -> {ok, Pid};
- E -> E
- end;
- Other -> Other
- end.
-
-init(Mod, Args) ->
- case Mod:init(Args) of
- {ok, {{Bad, _, _}, _ChildSpecs}} when
- Bad =:= simple_one_for_one orelse
- Bad =:= simple_one_for_one_terminate -> erlang:error(badarg);
- Init -> Init
- end.
-
-start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
-delete_child(Sup, Id) -> find_call(Sup, Id, {delete_child, Id}).
-restart_child(Sup, Id) -> find_call(Sup, Id, {msg, restart_child, [Id]}).
-terminate_child(Sup, Id) -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
-which_children(Sup) -> fold(which_children, Sup, fun lists:append/2).
-count_children(Sup) -> fold(count_children, Sup, fun add_proplists/2).
-check_childspecs(Specs) -> ?SUPERVISOR:check_childspecs(Specs).
-
-call(Sup, Msg) -> ?GEN_SERVER:call(mirroring(Sup), Msg, infinity).
-cast(Sup, Msg) -> ?GEN_SERVER:cast(mirroring(Sup), Msg).
-
-find_call(Sup, Id, Msg) ->
- Group = call(Sup, group),
- MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
- key = {Group, Id},
- _ = '_'},
- %% If we did this inside a tx we could still have failover
- %% immediately after the tx - we can't be 100% here. So we may as
- %% well dirty_select.
- case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
- [Mirror] -> call(Mirror, Msg);
- [] -> {error, not_found}
- end.
-
-fold(FunAtom, Sup, AggFun) ->
- Group = call(Sup, group),
- lists:foldl(AggFun, [],
- [apply(?SUPERVISOR, FunAtom, [D]) ||
- M <- ?PG2:get_members(Group),
- D <- [delegate(M)]]).
-
-child(Sup, Id) ->
- [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
- Id1 =:= Id],
- Pid.
-
-delegate(Sup) -> child(Sup, delegate).
-mirroring(Sup) -> child(Sup, mirroring).
-
-%%----------------------------------------------------------------------------
-
-start_internal(Group, ChildSpecs) ->
- ?GEN_SERVER:start_link(?MODULE, {mirroring, Group, ChildSpecs},
- [{timeout, infinity}]).
-
-%%----------------------------------------------------------------------------
-
-init({overall, _Group, ignore}) -> ignore;
-init({overall, Group, {ok, {Restart, ChildSpecs}}}) ->
- %% Important: Delegate MUST start before Mirroring so that when we
- %% shut down from above it shuts down last, so Mirroring does not
- %% see it die.
- %%
- %% See comment in handle_info('DOWN', ...) below
- {ok, {{one_for_all, 0, 1},
- [{delegate, {?SUPERVISOR, start_link, [?MODULE, {delegate, Restart}]},
- temporary, 16#ffffffff, supervisor, [?SUPERVISOR]},
- {mirroring, {?MODULE, start_internal, [Group, ChildSpecs]},
- permanent, 16#ffffffff, worker, [?MODULE]}]}};
-
-
-init({delegate, Restart}) ->
- {ok, {Restart, []}};
-
-init({mirroring, Group, ChildSpecs}) ->
- {ok, #state{group = Group, initial_childspecs = ChildSpecs}}.
-
-handle_call({init, Overall}, _From,
- State = #state{overall = undefined,
- delegate = undefined,
- group = Group,
- initial_childspecs = ChildSpecs}) ->
- process_flag(trap_exit, true),
- ?PG2:create(Group),
- ok = ?PG2:join(Group, Overall),
- Rest = ?PG2:get_members(Group) -- [Overall],
- case Rest of
- [] -> {atomic, _} = mnesia:transaction(fun() -> delete_all(Group) end);
- _ -> ok
- end,
- [begin
- ?GEN_SERVER:cast(mirroring(Pid), {ensure_monitoring, Overall}),
- erlang:monitor(process, Pid)
- end || Pid <- Rest],
- Delegate = delegate(Overall),
- erlang:monitor(process, Delegate),
- State1 = State#state{overall = Overall, delegate = Delegate},
- case errors([maybe_start(Group, Overall, Delegate, S) || S <- ChildSpecs]) of
- [] -> {reply, ok, State1};
- Errors -> {stop, {shutdown, Errors}, State1}
- end;
-
-handle_call({start_child, ChildSpec}, _From,
- State = #state{overall = Overall,
- delegate = Delegate,
- group = Group}) ->
- {reply, case maybe_start(Group, Overall, Delegate, ChildSpec) of
- already_in_mnesia -> {error, already_present};
- {already_in_mnesia, Pid} -> {error, {already_started, Pid}};
- Else -> Else
- end, State};
-
-handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
- group = Group}) ->
- {reply, stop(Group, Delegate, Id), State};
-
-handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
- {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
-
-handle_call(group, _From, State = #state{group = Group}) ->
- {reply, Group, State};
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-handle_cast({ensure_monitoring, Pid}, State) ->
- erlang:monitor(process, Pid),
- {noreply, State};
-
-handle_cast({die, Reason}, State = #state{group = Group}) ->
- tell_all_peers_to_die(Group, Reason),
- {stop, Reason, State};
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_cast, Msg}, State}.
-
-handle_info({'DOWN', _Ref, process, Pid, Reason},
- State = #state{overall = Pid, group = Group}) ->
- %% Since the delegate is temporary, its death won't cause us to
- %% die. Since the overall supervisor kills processes in reverse
- %% order when shutting down "from above" and we started after the
- %% delegate, if we see the delegate die then that means it died
- %% "from below" i.e. due to the behaviour of its children, not
- %% because the whole app was being torn down.
- %%
- %% Therefore if we get here we know we need to cause the entire
- %% mirrored sup to shut down, not just fail over.
- tell_all_peers_to_die(Group, Reason),
- {stop, Reason, State};
-
-handle_info({'DOWN', _Ref, process, Pid, _Reason},
- State = #state{delegate = Delegate, group = Group,
- overall = O}) ->
- %% TODO load balance this
- %% No guarantee pg2 will have received the DOWN before us.
- R = case lists:sort(?PG2:get_members(Group)) -- [Pid] of
- [O | _] -> {atomic, ChildSpecs} =
- mnesia:transaction(
- fun() -> update_all(O, Pid) end),
- [start(Delegate, ChildSpec) || ChildSpec <- ChildSpecs];
- _ -> []
- end,
- case errors(R) of
- [] -> {noreply, State};
- Errors -> {stop, {shutdown, Errors}, State}
- end;
-
-handle_info(Info, State) ->
- {stop, {unexpected_info, Info}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-
-tell_all_peers_to_die(Group, Reason) ->
- [cast(P, {die, Reason}) || P <- ?PG2:get_members(Group) -- [self()]].
-
-maybe_start(Group, Overall, Delegate, ChildSpec) ->
- case mnesia:transaction(
- fun() -> check_start(Group, Overall, Delegate, ChildSpec) end) of
- {atomic, start} -> start(Delegate, ChildSpec);
- {atomic, undefined} -> already_in_mnesia;
- {atomic, Pid} -> {already_in_mnesia, Pid};
- %% If we are torn down while in the transaction...
- {aborted, E} -> {error, E}
- end.
-
-check_start(Group, Overall, Delegate, ChildSpec) ->
- case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
- [] -> write(Group, Overall, ChildSpec),
- start;
- [S] -> #mirrored_sup_childspec{key = {Group, Id},
- mirroring_pid = Pid} = S,
- case Overall of
- Pid -> child(Delegate, Id);
- _ -> case supervisor(Pid) of
- dead -> write(Group, Overall, ChildSpec),
- start;
- Delegate0 -> child(Delegate0, Id)
- end
- end
- end.
-
-supervisor(Pid) -> with_exit_handler(fun() -> dead end,
- fun() -> delegate(Pid) end).
-
-write(Group, Overall, ChildSpec) ->
- ok = mnesia:write(
- #mirrored_sup_childspec{key = {Group, id(ChildSpec)},
- mirroring_pid = Overall,
- childspec = ChildSpec}),
- ChildSpec.
-
-delete(Group, Id) ->
- ok = mnesia:delete({?TABLE, {Group, Id}}).
-
-start(Delegate, ChildSpec) ->
- apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
-
-stop(Group, Delegate, Id) ->
- case mnesia:transaction(fun() -> check_stop(Group, Delegate, Id) end) of
- {atomic, deleted} -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
- {atomic, running} -> {error, running};
- {aborted, E} -> {error, E}
- end.
-
-check_stop(Group, Delegate, Id) ->
- case child(Delegate, Id) of
- undefined -> delete(Group, Id),
- deleted;
- _ -> running
- end.
-
-id({Id, _, _, _, _, _}) -> Id.
-
-update_all(Overall, OldOverall) ->
- MatchHead = #mirrored_sup_childspec{mirroring_pid = OldOverall,
- key = '$1',
- childspec = '$2',
- _ = '_'},
- [write(Group, Overall, C) ||
- [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
-
-delete_all(Group) ->
- MatchHead = #mirrored_sup_childspec{key = {Group, '_'},
- childspec = '$1',
- _ = '_'},
- [delete(Group, id(C)) ||
- C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
-
-errors(Results) -> [E || {error, E} <- Results].
-
-%%----------------------------------------------------------------------------
-
-create_tables() -> create_tables([?TABLE_DEF]).
-
-create_tables([]) ->
- ok;
-create_tables([{Table, Attributes} | Ts]) ->
- case mnesia:create_table(Table, Attributes) of
- {atomic, ok} -> create_tables(Ts);
- {aborted, {already_exists, ?TABLE}} -> create_tables(Ts);
- Err -> Err
- end.
-
-table_definitions() ->
- {Name, Attributes} = ?TABLE_DEF,
- [{Name, [?TABLE_MATCH | Attributes]}].
-
-%%----------------------------------------------------------------------------
-
-with_exit_handler(Handler, Thunk) ->
- try
- Thunk()
- catch
- exit:{R, _} when R =:= noproc; R =:= nodedown;
- R =:= normal; R =:= shutdown ->
- Handler();
- exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
- Handler()
- end.
-
-add_proplists(P1, P2) ->
- add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
-add_proplists([], P2, Acc) -> P2 ++ Acc;
-add_proplists(P1, [], Acc) -> P1 ++ Acc;
-add_proplists([{K, V1} | P1], [{K, V2} | P2], Acc) ->
- add_proplists(P1, P2, [{K, V1 + V2} | Acc]);
-add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
- add_proplists(P1, P2, [KV | Acc]);
-add_proplists(P1, [KV | P2], Acc) ->
- add_proplists(P1, P2, [KV | Acc]).
diff --git a/src/mirrored_supervisor_tests.erl b/src/mirrored_supervisor_tests.erl
deleted file mode 100644
index 780ef11d..00000000
--- a/src/mirrored_supervisor_tests.erl
+++ /dev/null
@@ -1,339 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(mirrored_supervisor_tests).
-
--compile([export_all]).
-
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
- handle_cast/2]).
-
--behaviour(gen_server).
--behaviour(mirrored_supervisor).
-
--define(MS, mirrored_supervisor).
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
- passed = test_migrate(),
- passed = test_migrate_twice(),
- passed = test_already_there(),
- passed = test_delete_restart(),
- passed = test_which_children(),
- passed = test_large_group(),
- passed = test_childspecs_at_init(),
- passed = test_anonymous_supervisors(),
- passed = test_no_migration_on_shutdown(),
- passed = test_start_idempotence(),
- passed = test_unsupported(),
- passed = test_ignore(),
- passed = test_startup_failure(),
- passed.
-
-%% Simplest test
-test_migrate() ->
- with_sups(fun([A, _]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [a, b]).
-
-%% Is migration transitive?
-test_migrate_twice() ->
- with_sups(fun([A, B]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- {ok, C} = start_sup(c),
- Pid2 = pid_of(worker),
- kill_registered(B, Pid2),
- Pid3 = pid_of(worker),
- false = (Pid1 =:= Pid3),
- kill(C)
- end, [a, b]).
-
-%% Can't start the same child twice
-test_already_there() ->
- with_sups(fun([_, _]) ->
- S = childspec(worker),
- {ok, Pid} = ?MS:start_child(a, S),
- {error, {already_started, Pid}} = ?MS:start_child(b, S)
- end, [a, b]).
-
-%% Deleting and restarting should work as per a normal supervisor
-test_delete_restart() ->
- with_sups(fun([_, _]) ->
- S = childspec(worker),
- {ok, Pid1} = ?MS:start_child(a, S),
- {error, running} = ?MS:delete_child(a, worker),
- ok = ?MS:terminate_child(a, worker),
- ok = ?MS:delete_child(a, worker),
- {ok, Pid2} = ?MS:start_child(b, S),
- false = (Pid1 =:= Pid2),
- ok = ?MS:terminate_child(b, worker),
- {ok, Pid3} = ?MS:restart_child(b, worker),
- Pid3 = pid_of(worker),
- false = (Pid2 =:= Pid3),
- %% Not the same supervisor as the worker is on
- ok = ?MS:terminate_child(a, worker),
- ok = ?MS:delete_child(a, worker),
- {ok, Pid4} = ?MS:start_child(a, S),
- false = (Pid3 =:= Pid4)
- end, [a, b]).
-
-test_which_children() ->
- with_sups(
- fun([A, B] = Both) ->
- ?MS:start_child(A, childspec(worker)),
- assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
- ok = ?MS:terminate_child(a, worker),
- assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
- {ok, _} = ?MS:restart_child(a, worker),
- assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
- ?MS:start_child(B, childspec(worker2)),
- assert_wc(Both, fun (C) -> 2 = length(C) end)
- end, [a, b]).
-
-assert_wc(Sups, Fun) ->
- [Fun(?MS:which_children(Sup)) || Sup <- Sups].
-
-wc_pid(Child) ->
- {worker, Pid, worker, [mirrored_supervisor_tests]} = Child,
- Pid.
-
-%% Not all the members of the group should actually do the failover
-test_large_group() ->
- with_sups(fun([A, _, _, _]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [a, b, c, d]).
-
-%% Do childspecs work when returned from init?
-test_childspecs_at_init() ->
- S = childspec(worker),
- with_sups(fun([A, _]) ->
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [{a, [S]}, {b, [S]}]).
-
-test_anonymous_supervisors() ->
- with_sups(fun([A, _B]) ->
- ?MS:start_child(A, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [anon, anon]).
-
-%% When a mirrored_supervisor terminates, we should not migrate, but
-%% the whole supervisor group should shut down. To test this we set up
-%% a situation where the gen_server will only fail if it's running
-%% under the supervisor called 'evil'. It should not migrate to
-%% 'good' and survive, rather the whole group should go away.
-test_no_migration_on_shutdown() ->
- with_sups(fun([Evil, _]) ->
- ?MS:start_child(Evil, childspec(worker)),
- try
- call(worker, ping, 1000, 100),
- exit(worker_should_not_have_migrated)
- catch exit:{timeout_waiting_for_server, _, _} ->
- ok
- end
- end, [evil, good]).
-
-test_start_idempotence() ->
- with_sups(fun([_]) ->
- CS = childspec(worker),
- {ok, Pid} = ?MS:start_child(a, CS),
- {error, {already_started, Pid}} = ?MS:start_child(a, CS),
- ?MS:terminate_child(a, worker),
- {error, already_present} = ?MS:start_child(a, CS)
- end, [a]).
-
-test_unsupported() ->
- try
- ?MS:start_link({global, foo}, get_group(group), ?MODULE,
- {sup, one_for_one, []}),
- exit(no_global)
- catch error:badarg ->
- ok
- end,
- try
- ?MS:start_link({local, foo}, get_group(group), ?MODULE,
- {sup, simple_one_for_one, []}),
- exit(no_sofo)
- catch error:badarg ->
- ok
- end,
- passed.
-
-%% Just test we don't blow up
-test_ignore() ->
- ?MS:start_link({local, foo}, get_group(group), ?MODULE,
- {sup, fake_strategy_for_ignore, []}),
- passed.
-
-test_startup_failure() ->
- [test_startup_failure(F) || F <- [want_error, want_exit]],
- passed.
-
-test_startup_failure(Fail) ->
- process_flag(trap_exit, true),
- ?MS:start_link(get_group(group), ?MODULE,
- {sup, one_for_one, [childspec(Fail)]}),
- receive
- {'EXIT', _, shutdown} ->
- ok
- after 1000 ->
- exit({did_not_exit, Fail})
- end,
- process_flag(trap_exit, false).
-
-%% ---------------------------------------------------------------------------
-
-with_sups(Fun, Sups) ->
- inc_group(),
- Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
- Fun(Pids),
- [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
- timer:sleep(500),
- passed.
-
-start_sup(Spec) ->
- start_sup(Spec, group).
-
-start_sup({Name, ChildSpecs}, Group) ->
- {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
- %% We are not a supervisor, when we kill the supervisor we do not
- %% want to die!
- unlink(Pid),
- {ok, Pid};
-
-start_sup(Name, Group) ->
- start_sup({Name, []}, Group).
-
-start_sup0(anon, Group, ChildSpecs) ->
- ?MS:start_link(Group, ?MODULE, {sup, one_for_one, ChildSpecs});
-
-start_sup0(Name, Group, ChildSpecs) ->
- ?MS:start_link({local, Name}, Group, ?MODULE,
- {sup, one_for_one, ChildSpecs}).
-
-childspec(Id) ->
- {Id, {?MODULE, start_gs, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
-
-start_gs(want_error) ->
- {error, foo};
-
-start_gs(want_exit) ->
- exit(foo);
-
-start_gs(Id) ->
- gen_server:start_link({local, Id}, ?MODULE, server, []).
-
-pid_of(Id) ->
- {received, Pid, ping} = call(Id, ping),
- Pid.
-
-inc_group() ->
- Count = case get(counter) of
- undefined -> 0;
- C -> C
- end + 1,
- put(counter, Count).
-
-get_group(Group) ->
- {Group, get(counter)}.
-
-call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
-
-call(Id, Msg, 0, _Decr) ->
- exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
-
-call(Id, Msg, MaxDelay, Decr) ->
- try
- gen_server:call(Id, Msg, infinity)
- catch exit:_ -> timer:sleep(Decr),
- call(Id, Msg, MaxDelay - Decr, Decr)
- end.
-
-kill(Pid) -> kill(Pid, []).
-kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
-kill(Pid, Waits) ->
- erlang:monitor(process, Pid),
- [erlang:monitor(process, P) || P <- Waits],
- exit(Pid, bang),
- kill_wait(Pid),
- [kill_wait(P) || P <- Waits].
-
-kill_registered(Pid, Child) ->
- {registered_name, Name} = erlang:process_info(Child, registered_name),
- kill(Pid, Child),
- false = (Child =:= whereis(Name)),
- ok.
-
-kill_wait(Pid) ->
- receive
- {'DOWN', _Ref, process, Pid, _Reason} ->
- ok
- end.
-
-%% ---------------------------------------------------------------------------
-%% Dumb gen_server we can supervise
-%% ---------------------------------------------------------------------------
-
-init({sup, fake_strategy_for_ignore, _ChildSpecs}) ->
- ignore;
-
-init({sup, Strategy, ChildSpecs}) ->
- {ok, {{Strategy, 0, 1}, ChildSpecs}};
-
-init(server) ->
- {ok, state}.
-
-handle_call(Msg, _From, State) ->
- die_if_my_supervisor_is_evil(),
- {reply, {received, self(), Msg}, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-die_if_my_supervisor_is_evil() ->
- try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
- false -> ok;
- _ -> exit(doooom)
- catch
- exit:{noproc, _} -> ok
- end.
diff --git a/src/mnesia_sync.erl b/src/mnesia_sync.erl
deleted file mode 100644
index 78c566e1..00000000
--- a/src/mnesia_sync.erl
+++ /dev/null
@@ -1,77 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(mnesia_sync).
-
-%% mnesia:sync_transaction/3 fails to guarantee that the log is flushed to disk
-%% at commit. This module is an attempt to minimise the risk of data loss by
-%% performing a coalesced log fsync. Unfortunately this is performed regardless
-%% of whether or not the log was appended to.
-
--behaviour(gen_server).
-
--export([sync/0]).
-
--export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--define(SERVER, ?MODULE).
-
--record(state, {waiting, disc_node}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(sync/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
-
-sync() ->
- gen_server:call(?SERVER, sync, infinity).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, #state{disc_node = mnesia:system_info(use_dir), waiting = []}}.
-
-handle_call(sync, _From, #state{disc_node = false} = State) ->
- {reply, ok, State};
-handle_call(sync, From, #state{waiting = Waiting} = State) ->
- {noreply, State#state{waiting = [From | Waiting]}, 0};
-handle_call(Request, _From, State) ->
- {stop, {unhandled_call, Request}, State}.
-
-handle_cast(Request, State) ->
- {stop, {unhandled_cast, Request}, State}.
-
-handle_info(timeout, #state{waiting = Waiting} = State) ->
- ok = disk_log:sync(latest_log),
- [gen_server:reply(From, ok) || From <- Waiting],
- {noreply, State#state{waiting = []}};
-handle_info(Message, State) ->
- {stop, {unhandled_info, Message}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/mochijson2.erl b/src/mochijson2.erl
deleted file mode 100644
index bddb52cc..00000000
--- a/src/mochijson2.erl
+++ /dev/null
@@ -1,893 +0,0 @@
-%% This file is a copy of `mochijson2.erl' from mochiweb, revision
-%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
-%% `LICENSE-MIT-Mochi'.
-
-%% @author Bob Ippolito <bob@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
-%% with binaries as strings, arrays as lists (without an {array, _})
-%% wrapper and it only knows how to decode UTF-8 (and ASCII).
-%%
-%% JSON terms are decoded as follows (javascript -> erlang):
-%% <ul>
-%% <li>{"key": "value"} ->
-%% {struct, [{&lt;&lt;"key">>, &lt;&lt;"value">>}]}</li>
-%% <li>["array", 123, 12.34, true, false, null] ->
-%% [&lt;&lt;"array">>, 123, 12.34, true, false, null]
-%% </li>
-%% </ul>
-%% <ul>
-%% <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
-%% <li>Objects decode to {struct, PropList}</li>
-%% <li>Numbers decode to integer or float</li>
-%% <li>true, false, null decode to their respective terms.</li>
-%% </ul>
-%% The encoder will accept the same format that the decoder will produce,
-%% but will also allow additional cases for leniency:
-%% <ul>
-%% <li>atoms other than true, false, null will be considered UTF-8
-%% strings (even as a proplist key)
-%% </li>
-%% <li>{json, IoList} will insert IoList directly into the output
-%% with no validation
-%% </li>
-%% <li>{array, Array} will be encoded as Array
-%% (legacy mochijson style)
-%% </li>
-%% <li>A non-empty raw proplist will be encoded as an object as long
-%% as the first pair does not have an atom key of json, struct,
-%% or array
-%% </li>
-%% </ul>
-
--module(mochijson2).
--author('bob@mochimedia.com').
--export([encoder/1, encode/1]).
--export([decoder/1, decode/1, decode/2]).
-
-%% This is a macro to placate syntax highlighters..
--define(Q, $\").
--define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
- column=N+S#decoder.column}).
--define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
- column=1+S#decoder.column}).
--define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
- column=1,
- line=1+S#decoder.line}).
--define(INC_CHAR(S, C),
- case C of
- $\n ->
- S#decoder{column=1,
- line=1+S#decoder.line,
- offset=1+S#decoder.offset};
- _ ->
- S#decoder{column=1+S#decoder.column,
- offset=1+S#decoder.offset}
- end).
--define(IS_WHITESPACE(C),
- (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
-
-%% @type json_string() = atom | binary()
-%% @type json_number() = integer() | float()
-%% @type json_array() = [json_term()]
-%% @type json_object() = {struct, [{json_string(), json_term()}]}
-%% @type json_eep18_object() = {[{json_string(), json_term()}]}
-%% @type json_iolist() = {json, iolist()}
-%% @type json_term() = json_string() | json_number() | json_array() |
-%% json_object() | json_eep18_object() | json_iolist()
-
--record(encoder, {handler=null,
- utf8=false}).
-
--record(decoder, {object_hook=null,
- offset=0,
- line=1,
- column=1,
- state=null}).
-
-%% @spec encoder([encoder_option()]) -> function()
-%% @doc Create an encoder/1 with the given options.
-%% @type encoder_option() = handler_option() | utf8_option()
-%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
-encoder(Options) ->
- State = parse_encoder_options(Options, #encoder{}),
- fun (O) -> json_encode(O, State) end.
-
-%% @spec encode(json_term()) -> iolist()
-%% @doc Encode the given as JSON to an iolist.
-encode(Any) ->
- json_encode(Any, #encoder{}).
-
-%% @spec decoder([decoder_option()]) -> function()
-%% @doc Create a decoder/1 with the given options.
-decoder(Options) ->
- State = parse_decoder_options(Options, #decoder{}),
- fun (O) -> json_decode(O, State) end.
-
-%% @spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term()
-%% @doc Decode the given iolist to Erlang terms using the given object format
-%% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
-%% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
-%% returns them as-is.
-decode(S, Options) ->
- json_decode(S, parse_decoder_options(Options, #decoder{})).
-
-%% @spec decode(iolist()) -> json_term()
-%% @doc Decode the given iolist to Erlang terms.
-decode(S) ->
- json_decode(S, #decoder{}).
-
-%% Internal API
-
-parse_encoder_options([], State) ->
- State;
-parse_encoder_options([{handler, Handler} | Rest], State) ->
- parse_encoder_options(Rest, State#encoder{handler=Handler});
-parse_encoder_options([{utf8, Switch} | Rest], State) ->
- parse_encoder_options(Rest, State#encoder{utf8=Switch}).
-
-parse_decoder_options([], State) ->
- State;
-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
- parse_decoder_options(Rest, State#decoder{object_hook=Hook});
-parse_decoder_options([{format, Format} | Rest], State)
- when Format =:= struct orelse Format =:= eep18 orelse Format =:= proplist ->
- parse_decoder_options(Rest, State#decoder{object_hook=Format}).
-
-json_encode(true, _State) ->
- <<"true">>;
-json_encode(false, _State) ->
- <<"false">>;
-json_encode(null, _State) ->
- <<"null">>;
-json_encode(I, _State) when is_integer(I) ->
- integer_to_list(I);
-json_encode(F, _State) when is_float(F) ->
- mochinum:digits(F);
-json_encode(S, State) when is_binary(S); is_atom(S) ->
- json_encode_string(S, State);
-json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
- K =/= array andalso
- K =/= json) ->
- json_encode_proplist(Props, State);
-json_encode({struct, Props}, State) when is_list(Props) ->
- json_encode_proplist(Props, State);
-json_encode({Props}, State) when is_list(Props) ->
- json_encode_proplist(Props, State);
-json_encode({}, State) ->
- json_encode_proplist([], State);
-json_encode(Array, State) when is_list(Array) ->
- json_encode_array(Array, State);
-json_encode({array, Array}, State) when is_list(Array) ->
- json_encode_array(Array, State);
-json_encode({json, IoList}, _State) ->
- IoList;
-json_encode(Bad, #encoder{handler=null}) ->
- exit({json_encode, {bad_term, Bad}});
-json_encode(Bad, State=#encoder{handler=Handler}) ->
- json_encode(Handler(Bad), State).
-
-json_encode_array([], _State) ->
- <<"[]">>;
-json_encode_array(L, State) ->
- F = fun (O, Acc) ->
- [$,, json_encode(O, State) | Acc]
- end,
- [$, | Acc1] = lists:foldl(F, "[", L),
- lists:reverse([$\] | Acc1]).
-
-json_encode_proplist([], _State) ->
- <<"{}">>;
-json_encode_proplist(Props, State) ->
- F = fun ({K, V}, Acc) ->
- KS = json_encode_string(K, State),
- VS = json_encode(V, State),
- [$,, VS, $:, KS | Acc]
- end,
- [$, | Acc1] = lists:foldl(F, "{", Props),
- lists:reverse([$\} | Acc1]).
-
-json_encode_string(A, State) when is_atom(A) ->
- L = atom_to_list(A),
- case json_string_is_safe(L) of
- true ->
- [?Q, L, ?Q];
- false ->
- json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
- end;
-json_encode_string(B, State) when is_binary(B) ->
- case json_bin_is_safe(B) of
- true ->
- [?Q, B, ?Q];
- false ->
- json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
- end;
-json_encode_string(I, _State) when is_integer(I) ->
- [?Q, integer_to_list(I), ?Q];
-json_encode_string(L, State) when is_list(L) ->
- case json_string_is_safe(L) of
- true ->
- [?Q, L, ?Q];
- false ->
- json_encode_string_unicode(L, State, [?Q])
- end.
-
-json_string_is_safe([]) ->
- true;
-json_string_is_safe([C | Rest]) ->
- case C of
- ?Q ->
- false;
- $\\ ->
- false;
- $\b ->
- false;
- $\f ->
- false;
- $\n ->
- false;
- $\r ->
- false;
- $\t ->
- false;
- C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
- false;
- C when C < 16#7f ->
- json_string_is_safe(Rest);
- _ ->
- false
- end.
-
-json_bin_is_safe(<<>>) ->
- true;
-json_bin_is_safe(<<C, Rest/binary>>) ->
- case C of
- ?Q ->
- false;
- $\\ ->
- false;
- $\b ->
- false;
- $\f ->
- false;
- $\n ->
- false;
- $\r ->
- false;
- $\t ->
- false;
- C when C >= 0, C < $\s; C >= 16#7f ->
- false;
- C when C < 16#7f ->
- json_bin_is_safe(Rest)
- end.
-
-json_encode_string_unicode([], _State, Acc) ->
- lists:reverse([$\" | Acc]);
-json_encode_string_unicode([C | Cs], State, Acc) ->
- Acc1 = case C of
- ?Q ->
- [?Q, $\\ | Acc];
- %% Escaping solidus is only useful when trying to protect
- %% against "</script>" injection attacks which are only
- %% possible when JSON is inserted into a HTML document
- %% in-line. mochijson2 does not protect you from this, so
- %% if you do insert directly into HTML then you need to
- %% uncomment the following case or escape the output of encode.
- %%
- %% $/ ->
- %% [$/, $\\ | Acc];
- %%
- $\\ ->
- [$\\, $\\ | Acc];
- $\b ->
- [$b, $\\ | Acc];
- $\f ->
- [$f, $\\ | Acc];
- $\n ->
- [$n, $\\ | Acc];
- $\r ->
- [$r, $\\ | Acc];
- $\t ->
- [$t, $\\ | Acc];
- C when C >= 0, C < $\s ->
- [unihex(C) | Acc];
- C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
- [xmerl_ucs:to_utf8(C) | Acc];
- C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
- [unihex(C) | Acc];
- C when C < 16#7f ->
- [C | Acc];
- _ ->
- exit({json_encode, {bad_char, C}})
- end,
- json_encode_string_unicode(Cs, State, Acc1).
-
-hexdigit(C) when C >= 0, C =< 9 ->
- C + $0;
-hexdigit(C) when C =< 15 ->
- C + $a - 10.
-
-unihex(C) when C < 16#10000 ->
- <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
- Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
- [$\\, $u | Digits];
-unihex(C) when C =< 16#10FFFF ->
- N = C - 16#10000,
- S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
- S2 = 16#dc00 bor (N band 16#3ff),
- [unihex(S1), unihex(S2)].
-
-json_decode(L, S) when is_list(L) ->
- json_decode(iolist_to_binary(L), S);
-json_decode(B, S) ->
- {Res, S1} = decode1(B, S),
- {eof, _} = tokenize(B, S1#decoder{state=trim}),
- Res.
-
-decode1(B, S=#decoder{state=null}) ->
- case tokenize(B, S#decoder{state=any}) of
- {{const, C}, S1} ->
- {C, S1};
- {start_array, S1} ->
- decode_array(B, S1);
- {start_object, S1} ->
- decode_object(B, S1)
- end.
-
-make_object(V, #decoder{object_hook=N}) when N =:= null orelse N =:= struct ->
- V;
-make_object({struct, P}, #decoder{object_hook=eep18}) ->
- {P};
-make_object({struct, P}, #decoder{object_hook=proplist}) ->
- P;
-make_object(V, #decoder{object_hook=Hook}) ->
- Hook(V).
-
-decode_object(B, S) ->
- decode_object(B, S#decoder{state=key}, []).
-
-decode_object(B, S=#decoder{state=key}, Acc) ->
- case tokenize(B, S) of
- {end_object, S1} ->
- V = make_object({struct, lists:reverse(Acc)}, S1),
- {V, S1#decoder{state=null}};
- {{const, K}, S1} ->
- {colon, S2} = tokenize(B, S1),
- {V, S3} = decode1(B, S2#decoder{state=null}),
- decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
- end;
-decode_object(B, S=#decoder{state=comma}, Acc) ->
- case tokenize(B, S) of
- {end_object, S1} ->
- V = make_object({struct, lists:reverse(Acc)}, S1),
- {V, S1#decoder{state=null}};
- {comma, S1} ->
- decode_object(B, S1#decoder{state=key}, Acc)
- end.
-
-decode_array(B, S) ->
- decode_array(B, S#decoder{state=any}, []).
-
-decode_array(B, S=#decoder{state=any}, Acc) ->
- case tokenize(B, S) of
- {end_array, S1} ->
- {lists:reverse(Acc), S1#decoder{state=null}};
- {start_array, S1} ->
- {Array, S2} = decode_array(B, S1),
- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
- {start_object, S1} ->
- {Array, S2} = decode_object(B, S1),
- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
- {{const, Const}, S1} ->
- decode_array(B, S1#decoder{state=comma}, [Const | Acc])
- end;
-decode_array(B, S=#decoder{state=comma}, Acc) ->
- case tokenize(B, S) of
- {end_array, S1} ->
- {lists:reverse(Acc), S1#decoder{state=null}};
- {comma, S1} ->
- decode_array(B, S1#decoder{state=any}, Acc)
- end.
-
-tokenize_string(B, S=#decoder{offset=O}) ->
- case tokenize_string_fast(B, O) of
- {escape, O1} ->
- Length = O1 - O,
- S1 = ?ADV_COL(S, Length),
- <<_:O/binary, Head:Length/binary, _/binary>> = B,
- tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
- O1 ->
- Length = O1 - O,
- <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
- {{const, String}, ?ADV_COL(S, Length + 1)}
- end.
-
-tokenize_string_fast(B, O) ->
- case B of
- <<_:O/binary, ?Q, _/binary>> ->
- O;
- <<_:O/binary, $\\, _/binary>> ->
- {escape, O};
- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
- tokenize_string_fast(B, 1 + O);
- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
- tokenize_string_fast(B, 2 + O);
- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
- tokenize_string_fast(B, 3 + O);
- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
- tokenize_string_fast(B, 4 + O);
- _ ->
- throw(invalid_utf8)
- end.
-
-tokenize_string(B, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, ?Q, _/binary>> ->
- {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
- <<_:O/binary, "\\\"", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
- <<_:O/binary, "\\\\", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
- <<_:O/binary, "\\/", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
- <<_:O/binary, "\\b", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
- <<_:O/binary, "\\f", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
- <<_:O/binary, "\\n", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
- <<_:O/binary, "\\r", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
- <<_:O/binary, "\\t", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
- <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
- C = erlang:list_to_integer([C3, C2, C1, C0], 16),
- if C > 16#D7FF, C < 16#DC00 ->
- %% coalesce UTF-16 surrogate pair
- <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
- D = erlang:list_to_integer([D3,D2,D1,D0], 16),
- [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
- D:16/big-unsigned-integer>>),
- Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
- tokenize_string(B, ?ADV_COL(S, 12), Acc1);
- true ->
- Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
- tokenize_string(B, ?ADV_COL(S, 6), Acc1)
- end;
- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
- tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
- _ ->
- throw(invalid_utf8)
- end.
-
-tokenize_number(B, S) ->
- case tokenize_number(B, sign, S, []) of
- {{int, Int}, S1} ->
- {{const, list_to_integer(Int)}, S1};
- {{float, Float}, S1} ->
- {{const, list_to_float(Float)}, S1}
- end.
-
-tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
- case B of
- <<_:O/binary, $-, _/binary>> ->
- tokenize_number(B, int, ?INC_COL(S), [$-]);
- _ ->
- tokenize_number(B, int, S, [])
- end;
-tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, $0, _/binary>> ->
- tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
- <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
- tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
- end;
-tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
- _ ->
- tokenize_number(B, frac, S, Acc)
- end;
-tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
- tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
- tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
- _ ->
- {{int, lists:reverse(Acc)}, S}
- end;
-tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
- tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
- _ ->
- {{float, lists:reverse(Acc)}, S}
- end;
-tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
- tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
- _ ->
- tokenize_number(B, eint, S, Acc)
- end;
-tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
- end;
-tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
- _ ->
- {{float, lists:reverse(Acc)}, S}
- end.
-
-tokenize(B, S=#decoder{offset=O}) ->
- case B of
- <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
- tokenize(B, ?INC_CHAR(S, C));
- <<_:O/binary, "{", _/binary>> ->
- {start_object, ?INC_COL(S)};
- <<_:O/binary, "}", _/binary>> ->
- {end_object, ?INC_COL(S)};
- <<_:O/binary, "[", _/binary>> ->
- {start_array, ?INC_COL(S)};
- <<_:O/binary, "]", _/binary>> ->
- {end_array, ?INC_COL(S)};
- <<_:O/binary, ",", _/binary>> ->
- {comma, ?INC_COL(S)};
- <<_:O/binary, ":", _/binary>> ->
- {colon, ?INC_COL(S)};
- <<_:O/binary, "null", _/binary>> ->
- {{const, null}, ?ADV_COL(S, 4)};
- <<_:O/binary, "true", _/binary>> ->
- {{const, true}, ?ADV_COL(S, 4)};
- <<_:O/binary, "false", _/binary>> ->
- {{const, false}, ?ADV_COL(S, 5)};
- <<_:O/binary, "\"", _/binary>> ->
- tokenize_string(B, ?INC_COL(S));
- <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
- orelse C =:= $- ->
- tokenize_number(B, S);
- <<_:O/binary>> ->
- trim = S#decoder.state,
- {eof, S}
- end.
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-%% testing constructs borrowed from the Yaws JSON implementation.
-
-%% Create an object from a list of Key/Value pairs.
-
-obj_new() ->
- {struct, []}.
-
-is_obj({struct, Props}) ->
- F = fun ({K, _}) when is_binary(K) -> true end,
- lists:all(F, Props).
-
-obj_from_list(Props) ->
- Obj = {struct, Props},
- ?assert(is_obj(Obj)),
- Obj.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-
-equiv({struct, Props1}, {struct, Props2}) ->
- equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
- equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
-equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-
-equiv_object(Props1, Props2) ->
- L1 = lists:keysort(1, Props1),
- L2 = lists:keysort(1, Props2),
- Pairs = lists:zip(L1, L2),
- true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
- equiv(K1, K2) and equiv(V1, V2)
- end, Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-
-equiv_list([], []) ->
- true;
-equiv_list([V1 | L1], [V2 | L2]) ->
- equiv(V1, V2) andalso equiv_list(L1, L2).
-
-decode_test() ->
- [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
- <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
-
-e2j_vec_test() ->
- test_one(e2j_test_vec(utf8), 1).
-
-test_one([], _N) ->
- %% io:format("~p tests passed~n", [N-1]),
- ok;
-test_one([{E, J} | Rest], N) ->
- %% io:format("[~p] ~p ~p~n", [N, E, J]),
- true = equiv(E, decode(J)),
- true = equiv(E, decode(encode(E))),
- test_one(Rest, 1+N).
-
-e2j_test_vec(utf8) ->
- [
- {1, "1"},
- {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
- {-1, "-1"},
- {-3.1416, "-3.14160"},
- {12.0e10, "1.20000e+11"},
- {1.234E+10, "1.23400e+10"},
- {-1.234E-10, "-1.23400e-10"},
- {10.0, "1.0e+01"},
- {123.456, "1.23456E+2"},
- {10.0, "1e1"},
- {<<"foo">>, "\"foo\""},
- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
- {<<"">>, "\"\""},
- {<<"\n\n\n">>, "\"\\n\\n\\n\""},
- {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
- {obj_new(), "{}"},
- {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
- {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
- "{\"foo\":\"bar\",\"baz\":123}"},
- {[], "[]"},
- {[[]], "[[]]"},
- {[1, <<"foo">>], "[1,\"foo\"]"},
-
- %% json array in a json object
- {obj_from_list([{<<"foo">>, [123]}]),
- "{\"foo\":[123]}"},
-
- %% json object in a json object
- {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
- "{\"foo\":{\"bar\":true}}"},
-
- %% fold evaluation order
- {obj_from_list([{<<"foo">>, []},
- {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
- {<<"alice">>, <<"bob">>}]),
- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
- %% json object in a json array
- {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
- "[-123,\"foo\",{\"bar\":[]},null]"}
- ].
-
-%% test utf8 encoding
-encoder_utf8_test() ->
- %% safe conversion case (default)
- [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
- encode(<<1,"\321\202\320\265\321\201\321\202">>),
-
- %% raw utf8 output (optional)
- Enc = mochijson2:encoder([{utf8, true}]),
- [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
- Enc(<<1,"\321\202\320\265\321\201\321\202">>).
-
-input_validation_test() ->
- Good = [
- {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
- {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
- {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
- ],
- lists:foreach(fun({CodePoint, UTF8}) ->
- Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
- Expect = decode(UTF8)
- end, Good),
-
- Bad = [
- %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
- <<?Q, 16#80, ?Q>>,
- %% missing continuations, last byte in each should be 80-BF
- <<?Q, 16#C2, 16#7F, ?Q>>,
- <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
- <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
- %% we don't support code points > 10FFFF per RFC 3629
- <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
- %% escape characters trigger a different code path
- <<?Q, $\\, $\n, 16#80, ?Q>>
- ],
- lists:foreach(
- fun(X) ->
- ok = try decode(X) catch invalid_utf8 -> ok end,
- %% could be {ucs,{bad_utf8_character_code}} or
- %% {json_encode,{bad_char,_}}
- {'EXIT', _} = (catch encode(X))
- end, Bad).
-
-inline_json_test() ->
- ?assertEqual(<<"\"iodata iodata\"">>,
- iolist_to_binary(
- encode({json, [<<"\"iodata">>, " iodata\""]}))),
- ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
- decode(
- encode({struct,
- [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
- ok.
-
-big_unicode_test() ->
- UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
- ?assertEqual(
- <<"\"\\ud834\\udd20\"">>,
- iolist_to_binary(encode(UTF8Seq))),
- ?assertEqual(
- UTF8Seq,
- decode(iolist_to_binary(encode(UTF8Seq)))),
- ok.
-
-custom_decoder_test() ->
- ?assertEqual(
- {struct, [{<<"key">>, <<"value">>}]},
- (decoder([]))("{\"key\": \"value\"}")),
- F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
- ?assertEqual(
- win,
- (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
- ok.
-
-atom_test() ->
- %% JSON native atoms
- [begin
- ?assertEqual(A, decode(atom_to_list(A))),
- ?assertEqual(iolist_to_binary(atom_to_list(A)),
- iolist_to_binary(encode(A)))
- end || A <- [true, false, null]],
- %% Atom to string
- ?assertEqual(
- <<"\"foo\"">>,
- iolist_to_binary(encode(foo))),
- ?assertEqual(
- <<"\"\\ud834\\udd20\"">>,
- iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
- ok.
-
-key_encode_test() ->
- %% Some forms are accepted as keys that would not be strings in other
- %% cases
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{foo, 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{"foo", 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{foo, 1}]))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{<<"foo">>, 1}]))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{"foo", 1}]))),
- ?assertEqual(
- <<"{\"\\ud834\\udd20\":1}">>,
- iolist_to_binary(
- encode({struct, [{[16#0001d120], 1}]}))),
- ?assertEqual(
- <<"{\"1\":1}">>,
- iolist_to_binary(encode({struct, [{1, 1}]}))),
- ok.
-
-unsafe_chars_test() ->
- Chars = "\"\\\b\f\n\r\t",
- [begin
- ?assertEqual(false, json_string_is_safe([C])),
- ?assertEqual(false, json_bin_is_safe(<<C>>)),
- ?assertEqual(<<C>>, decode(encode(<<C>>)))
- end || C <- Chars],
- ?assertEqual(
- false,
- json_string_is_safe([16#0001d120])),
- ?assertEqual(
- false,
- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
- ?assertEqual(
- [16#0001d120],
- xmerl_ucs:from_utf8(
- binary_to_list(
- decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
- ?assertEqual(
- false,
- json_string_is_safe([16#110000])),
- ?assertEqual(
- false,
- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
- %% solidus can be escaped but isn't unsafe by default
- ?assertEqual(
- <<"/">>,
- decode(<<"\"\\/\"">>)),
- ok.
-
-int_test() ->
- ?assertEqual(0, decode("0")),
- ?assertEqual(1, decode("1")),
- ?assertEqual(11, decode("11")),
- ok.
-
-large_int_test() ->
- ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
- iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
- ?assertEqual(<<"2147483649214748364921474836492147483649">>,
- iolist_to_binary(encode(2147483649214748364921474836492147483649))),
- ok.
-
-float_test() ->
- ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
- ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
- ok.
-
-handler_test() ->
- ?assertEqual(
- {'EXIT',{json_encode,{bad_term,{x,y}}}},
- catch encode({x,y})),
- F = fun ({x,y}) -> [] end,
- ?assertEqual(
- <<"[]">>,
- iolist_to_binary((encoder([{handler, F}]))({x, y}))),
- ok.
-
-encode_empty_test_() ->
- [{A, ?_assertEqual(<<"{}">>, iolist_to_binary(encode(B)))}
- || {A, B} <- [{"eep18 {}", {}},
- {"eep18 {[]}", {[]}},
- {"{struct, []}", {struct, []}}]].
-
-encode_test_() ->
- P = [{<<"k">>, <<"v">>}],
- JSON = iolist_to_binary(encode({struct, P})),
- [{atom_to_list(F),
- ?_assertEqual(JSON, iolist_to_binary(encode(decode(JSON, [{format, F}]))))}
- || F <- [struct, eep18, proplist]].
-
-format_test_() ->
- P = [{<<"k">>, <<"v">>}],
- JSON = iolist_to_binary(encode({struct, P})),
- [{atom_to_list(F),
- ?_assertEqual(A, decode(JSON, [{format, F}]))}
- || {F, A} <- [{struct, {struct, P}},
- {eep18, {P}},
- {proplist, P}]].
-
--endif.
diff --git a/src/mochinum.erl b/src/mochinum.erl
deleted file mode 100644
index 4ea7a22a..00000000
--- a/src/mochinum.erl
+++ /dev/null
@@ -1,358 +0,0 @@
-%% This file is a copy of `mochijson2.erl' from mochiweb, revision
-%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
-%% `LICENSE-MIT-Mochi'.
-
-%% @copyright 2007 Mochi Media, Inc.
-%% @author Bob Ippolito <bob@mochimedia.com>
-
-%% @doc Useful numeric algorithms for floats that cover some deficiencies
-%% in the math module. More interesting is digits/1, which implements
-%% the algorithm from:
-%% http://www.cs.indiana.edu/~burger/fp/index.html
-%% See also "Printing Floating-Point Numbers Quickly and Accurately"
-%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
-%% Design and Implementation.
-
--module(mochinum).
--author("Bob Ippolito <bob@mochimedia.com>").
--export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
-
-%% IEEE 754 Float exponent bias
--define(FLOAT_BIAS, 1022).
--define(MIN_EXP, -1074).
--define(BIG_POW, 4503599627370496).
-
-%% External API
-
-%% @spec digits(number()) -> string()
-%% @doc Returns a string that accurately represents the given integer or float
-%% using a conservative amount of digits. Great for generating
-%% human-readable output, or compact ASCII serializations for floats.
-digits(N) when is_integer(N) ->
- integer_to_list(N);
-digits(0.0) ->
- "0.0";
-digits(Float) ->
- {Frac1, Exp1} = frexp_int(Float),
- [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
- {Place, Digits} = transform_digits(Place0, Digits0),
- R = insert_decimal(Place, Digits),
- case Float < 0 of
- true ->
- [$- | R];
- _ ->
- R
- end.
-
-%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
-%% @doc Return the fractional and exponent part of an IEEE 754 double,
-%% equivalent to the libc function of the same name.
-%% F = Frac * pow(2, Exp).
-frexp(F) ->
- frexp1(unpack(F)).
-
-%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
-%% @doc Moderately efficient way to exponentiate integers.
-%% int_pow(10, 2) = 100.
-int_pow(_X, 0) ->
- 1;
-int_pow(X, N) when N > 0 ->
- int_pow(X, N, 1).
-
-%% @spec int_ceil(F::float()) -> integer()
-%% @doc Return the ceiling of F as an integer. The ceiling is defined as
-%% F when F == trunc(F);
-%% trunc(F) when F &lt; 0;
-%% trunc(F) + 1 when F &gt; 0.
-int_ceil(X) ->
- T = trunc(X),
- case (X - T) of
- Pos when Pos > 0 -> T + 1;
- _ -> T
- end.
-
-
-%% Internal API
-
-int_pow(X, N, R) when N < 2 ->
- R * X;
-int_pow(X, N, R) ->
- int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
-
-insert_decimal(0, S) ->
- "0." ++ S;
-insert_decimal(Place, S) when Place > 0 ->
- L = length(S),
- case Place - L of
- 0 ->
- S ++ ".0";
- N when N < 0 ->
- {S0, S1} = lists:split(L + N, S),
- S0 ++ "." ++ S1;
- N when N < 6 ->
- %% More places than digits
- S ++ lists:duplicate(N, $0) ++ ".0";
- _ ->
- insert_decimal_exp(Place, S)
- end;
-insert_decimal(Place, S) when Place > -6 ->
- "0." ++ lists:duplicate(abs(Place), $0) ++ S;
-insert_decimal(Place, S) ->
- insert_decimal_exp(Place, S).
-
-insert_decimal_exp(Place, S) ->
- [C | S0] = S,
- S1 = case S0 of
- [] ->
- "0";
- _ ->
- S0
- end,
- Exp = case Place < 0 of
- true ->
- "e-";
- false ->
- "e+"
- end,
- [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
-
-
-digits1(Float, Exp, Frac) ->
- Round = ((Frac band 1) =:= 0),
- case Exp >= 0 of
- true ->
- BExp = 1 bsl Exp,
- case (Frac =/= ?BIG_POW) of
- true ->
- scale((Frac * BExp * 2), 2, BExp, BExp,
- Round, Round, Float);
- false ->
- scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
- Round, Round, Float)
- end;
- false ->
- case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
- true ->
- scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
- Round, Round, Float);
- false ->
- scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
- Round, Round, Float)
- end
- end.
-
-scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
- Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
- %% Note that the scheme implementation uses a 326 element look-up table
- %% for int_pow(10, N) where we do not.
- case Est >= 0 of
- true ->
- fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
- LowOk, HighOk);
- false ->
- Scale = int_pow(10, -Est),
- fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
- LowOk, HighOk)
- end.
-
-fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
- TooLow = case HighOk of
- true ->
- (R + MPlus) >= S;
- false ->
- (R + MPlus) > S
- end,
- case TooLow of
- true ->
- [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
- false ->
- [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
- end.
-
-generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
- D = R0 div S,
- R = R0 rem S,
- TC1 = case LowOk of
- true ->
- R =< MMinus;
- false ->
- R < MMinus
- end,
- TC2 = case HighOk of
- true ->
- (R + MPlus) >= S;
- false ->
- (R + MPlus) > S
- end,
- case TC1 of
- false ->
- case TC2 of
- false ->
- [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
- LowOk, HighOk)];
- true ->
- [D + 1]
- end;
- true ->
- case TC2 of
- false ->
- [D];
- true ->
- case R * 2 < S of
- true ->
- [D];
- false ->
- [D + 1]
- end
- end
- end.
-
-unpack(Float) ->
- <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
- {Sign, Exp, Frac}.
-
-frexp1({_Sign, 0, 0}) ->
- {0.0, 0};
-frexp1({Sign, 0, Frac}) ->
- Exp = log2floor(Frac),
- <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
- {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
-frexp1({Sign, Exp, Frac}) ->
- <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
- {Frac1, Exp - ?FLOAT_BIAS}.
-
-log2floor(Int) ->
- log2floor(Int, 0).
-
-log2floor(0, N) ->
- N;
-log2floor(Int, N) ->
- log2floor(Int bsr 1, 1 + N).
-
-
-transform_digits(Place, [0 | Rest]) ->
- transform_digits(Place, Rest);
-transform_digits(Place, Digits) ->
- {Place, [$0 + D || D <- Digits]}.
-
-
-frexp_int(F) ->
- case unpack(F) of
- {_Sign, 0, Frac} ->
- {Frac, ?MIN_EXP};
- {_Sign, Exp, Frac} ->
- {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
- end.
-
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-int_ceil_test() ->
- ?assertEqual(1, int_ceil(0.0001)),
- ?assertEqual(0, int_ceil(0.0)),
- ?assertEqual(1, int_ceil(0.99)),
- ?assertEqual(1, int_ceil(1.0)),
- ?assertEqual(-1, int_ceil(-1.5)),
- ?assertEqual(-2, int_ceil(-2.0)),
- ok.
-
-int_pow_test() ->
- ?assertEqual(1, int_pow(1, 1)),
- ?assertEqual(1, int_pow(1, 0)),
- ?assertEqual(1, int_pow(10, 0)),
- ?assertEqual(10, int_pow(10, 1)),
- ?assertEqual(100, int_pow(10, 2)),
- ?assertEqual(1000, int_pow(10, 3)),
- ok.
-
-digits_test() ->
- ?assertEqual("0",
- digits(0)),
- ?assertEqual("0.0",
- digits(0.0)),
- ?assertEqual("1.0",
- digits(1.0)),
- ?assertEqual("-1.0",
- digits(-1.0)),
- ?assertEqual("0.1",
- digits(0.1)),
- ?assertEqual("0.01",
- digits(0.01)),
- ?assertEqual("0.001",
- digits(0.001)),
- ?assertEqual("1.0e+6",
- digits(1000000.0)),
- ?assertEqual("0.5",
- digits(0.5)),
- ?assertEqual("4503599627370496.0",
- digits(4503599627370496.0)),
- %% small denormalized number
- %% 4.94065645841246544177e-324 =:= 5.0e-324
- <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
- ?assertEqual("5.0e-324",
- digits(SmallDenorm)),
- ?assertEqual(SmallDenorm,
- list_to_float(digits(SmallDenorm))),
- %% large denormalized number
- %% 2.22507385850720088902e-308
- <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
- ?assertEqual("2.225073858507201e-308",
- digits(BigDenorm)),
- ?assertEqual(BigDenorm,
- list_to_float(digits(BigDenorm))),
- %% small normalized number
- %% 2.22507385850720138309e-308
- <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
- ?assertEqual("2.2250738585072014e-308",
- digits(SmallNorm)),
- ?assertEqual(SmallNorm,
- list_to_float(digits(SmallNorm))),
- %% large normalized number
- %% 1.79769313486231570815e+308
- <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
- ?assertEqual("1.7976931348623157e+308",
- digits(LargeNorm)),
- ?assertEqual(LargeNorm,
- list_to_float(digits(LargeNorm))),
- %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
- ?assertEqual("5.0e-324",
- digits(math:pow(2, -1074))),
- ok.
-
-frexp_test() ->
- %% zero
- ?assertEqual({0.0, 0}, frexp(0.0)),
- %% one
- ?assertEqual({0.5, 1}, frexp(1.0)),
- %% negative one
- ?assertEqual({-0.5, 1}, frexp(-1.0)),
- %% small denormalized number
- %% 4.94065645841246544177e-324
- <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
- ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
- %% large denormalized number
- %% 2.22507385850720088902e-308
- <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
- ?assertEqual(
- {0.99999999999999978, -1022},
- frexp(BigDenorm)),
- %% small normalized number
- %% 2.22507385850720138309e-308
- <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
- ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
- %% large normalized number
- %% 1.79769313486231570815e+308
- <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
- ?assertEqual(
- {0.99999999999999989, 1024},
- frexp(LargeNorm)),
- %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
- ?assertEqual(
- {0.5, -1073},
- frexp(math:pow(2, -1074))),
- ok.
-
--endif.
diff --git a/src/pg2_fixed.erl b/src/pg2_fixed.erl
deleted file mode 100644
index 8926b83b..00000000
--- a/src/pg2_fixed.erl
+++ /dev/null
@@ -1,400 +0,0 @@
-%% This is the version of pg2 from R14B02, which contains the fix
-%% described at
-%% http://erlang.2086793.n4.nabble.com/pg2-still-busted-in-R13B04-td2230601.html.
-%% The changes are a search-and-replace to rename the module and avoid
-%% clashes with other versions of pg2, and also a simple rewrite of
-%% "andalso" and "orelse" expressions to case statements where the second
-%% operand is not a boolean since R12B does not allow this.
-
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(pg2_fixed).
-
--export([create/1, delete/1, join/2, leave/2]).
--export([get_members/1, get_local_members/1]).
--export([get_closest_pid/1, which_groups/0]).
--export([start/0,start_link/0,init/1,handle_call/3,handle_cast/2,handle_info/2,
- terminate/2]).
-
-%%% As of R13B03 monitors are used instead of links.
-
-%%%
-%%% Exported functions
-%%%
-
--spec start_link() -> {'ok', pid()} | {'error', term()}.
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
--spec start() -> {'ok', pid()} | {'error', term()}.
-
-start() ->
- ensure_started().
-
--spec create(term()) -> 'ok'.
-
-create(Name) ->
- ensure_started(),
- case ets:member(pg2_fixed_table, {group, Name}) of
- false ->
- global:trans({{?MODULE, Name}, self()},
- fun() ->
- gen_server:multi_call(?MODULE, {create, Name})
- end),
- ok;
- true ->
- ok
- end.
-
--type name() :: term().
-
--spec delete(name()) -> 'ok'.
-
-delete(Name) ->
- ensure_started(),
- global:trans({{?MODULE, Name}, self()},
- fun() ->
- gen_server:multi_call(?MODULE, {delete, Name})
- end),
- ok.
-
--spec join(name(), pid()) -> 'ok' | {'error', {'no_such_group', term()}}.
-
-join(Name, Pid) when is_pid(Pid) ->
- ensure_started(),
- case ets:member(pg2_fixed_table, {group, Name}) of
- false ->
- {error, {no_such_group, Name}};
- true ->
- global:trans({{?MODULE, Name}, self()},
- fun() ->
- gen_server:multi_call(?MODULE,
- {join, Name, Pid})
- end),
- ok
- end.
-
--spec leave(name(), pid()) -> 'ok' | {'error', {'no_such_group', name()}}.
-
-leave(Name, Pid) when is_pid(Pid) ->
- ensure_started(),
- case ets:member(pg2_fixed_table, {group, Name}) of
- false ->
- {error, {no_such_group, Name}};
- true ->
- global:trans({{?MODULE, Name}, self()},
- fun() ->
- gen_server:multi_call(?MODULE,
- {leave, Name, Pid})
- end),
- ok
- end.
-
--type get_members_ret() :: [pid()] | {'error', {'no_such_group', name()}}.
-
--spec get_members(name()) -> get_members_ret().
-
-get_members(Name) ->
- ensure_started(),
- case ets:member(pg2_fixed_table, {group, Name}) of
- true ->
- group_members(Name);
- false ->
- {error, {no_such_group, Name}}
- end.
-
--spec get_local_members(name()) -> get_members_ret().
-
-get_local_members(Name) ->
- ensure_started(),
- case ets:member(pg2_fixed_table, {group, Name}) of
- true ->
- local_group_members(Name);
- false ->
- {error, {no_such_group, Name}}
- end.
-
--spec which_groups() -> [name()].
-
-which_groups() ->
- ensure_started(),
- all_groups().
-
--type gcp_error_reason() :: {'no_process', term()} | {'no_such_group', term()}.
-
--spec get_closest_pid(term()) -> pid() | {'error', gcp_error_reason()}.
-
-get_closest_pid(Name) ->
- case get_local_members(Name) of
- [Pid] ->
- Pid;
- [] ->
- {_,_,X} = erlang:now(),
- case get_members(Name) of
- [] -> {error, {no_process, Name}};
- Members ->
- lists:nth((X rem length(Members))+1, Members)
- end;
- Members when is_list(Members) ->
- {_,_,X} = erlang:now(),
- lists:nth((X rem length(Members))+1, Members);
- Else ->
- Else
- end.
-
-%%%
-%%% Callback functions from gen_server
-%%%
-
--record(state, {}).
-
--spec init([]) -> {'ok', #state{}}.
-
-init([]) ->
- Ns = nodes(),
- net_kernel:monitor_nodes(true),
- lists:foreach(fun(N) ->
- {?MODULE, N} ! {new_pg2_fixed, node()},
- self() ! {nodeup, N}
- end, Ns),
- pg2_fixed_table = ets:new(pg2_fixed_table, [ordered_set, protected, named_table]),
- {ok, #state{}}.
-
--type call() :: {'create', name()}
- | {'delete', name()}
- | {'join', name(), pid()}
- | {'leave', name(), pid()}.
-
--spec handle_call(call(), _, #state{}) ->
- {'reply', 'ok', #state{}}.
-
-handle_call({create, Name}, _From, S) ->
- assure_group(Name),
- {reply, ok, S};
-handle_call({join, Name, Pid}, _From, S) ->
- case ets:member(pg2_fixed_table, {group, Name}) of
- true -> join_group(Name, Pid);
- _ -> ok
- end,
- {reply, ok, S};
-handle_call({leave, Name, Pid}, _From, S) ->
- case ets:member(pg2_fixed_table, {group, Name}) of
- true -> leave_group(Name, Pid);
- _ -> ok
- end,
- {reply, ok, S};
-handle_call({delete, Name}, _From, S) ->
- delete_group(Name),
- {reply, ok, S};
-handle_call(Request, From, S) ->
- error_logger:warning_msg("The pg2_fixed server received an unexpected message:\n"
- "handle_call(~p, ~p, _)\n",
- [Request, From]),
- {noreply, S}.
-
--type all_members() :: [[name(),...]].
--type cast() :: {'exchange', node(), all_members()}
- | {'del_member', name(), pid()}.
-
--spec handle_cast(cast(), #state{}) -> {'noreply', #state{}}.
-
-handle_cast({exchange, _Node, List}, S) ->
- store(List),
- {noreply, S};
-handle_cast(_, S) ->
- %% Ignore {del_member, Name, Pid}.
- {noreply, S}.
-
--spec handle_info(tuple(), #state{}) -> {'noreply', #state{}}.
-
-handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
- member_died(MonitorRef),
- {noreply, S};
-handle_info({nodeup, Node}, S) ->
- gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
- {noreply, S};
-handle_info({new_pg2_fixed, Node}, S) ->
- gen_server:cast({?MODULE, Node}, {exchange, node(), all_members()}),
- {noreply, S};
-handle_info(_, S) ->
- {noreply, S}.
-
--spec terminate(term(), #state{}) -> 'ok'.
-
-terminate(_Reason, _S) ->
- true = ets:delete(pg2_fixed_table),
- ok.
-
-%%%
-%%% Local functions
-%%%
-
-%%% One ETS table, pg2_fixed_table, is used for bookkeeping. The type of the
-%%% table is ordered_set, and the fast matching of partially
-%%% instantiated keys is used extensively.
-%%%
-%%% {{group, Name}}
-%%% Process group Name.
-%%% {{ref, Pid}, RPid, MonitorRef, Counter}
-%%% {{ref, MonitorRef}, Pid}
-%%% Each process has one monitor. Sometimes a process is spawned to
-%%% monitor the pid (RPid). Counter is incremented when the Pid joins
-%%% some group.
-%%% {{member, Name, Pid}, GroupCounter}
-%%% {{local_member, Name, Pid}}
-%%% Pid is a member of group Name, GroupCounter is incremented when the
-%%% Pid joins the group Name.
-%%% {{pid, Pid, Name}}
-%%% Pid is a member of group Name.
-
-store(List) ->
- _ = [case assure_group(Name) of
- true ->
- [join_group(Name, P) || P <- Members -- group_members(Name)];
- _ ->
- ok
- end || [Name, Members] <- List],
- ok.
-
-assure_group(Name) ->
- Key = {group, Name},
- ets:member(pg2_fixed_table, Key) orelse true =:= ets:insert(pg2_fixed_table, {Key}).
-
-delete_group(Name) ->
- _ = [leave_group(Name, Pid) || Pid <- group_members(Name)],
- true = ets:delete(pg2_fixed_table, {group, Name}),
- ok.
-
-member_died(Ref) ->
- [{{ref, Ref}, Pid}] = ets:lookup(pg2_fixed_table, {ref, Ref}),
- Names = member_groups(Pid),
- _ = [leave_group(Name, P) ||
- Name <- Names,
- P <- member_in_group(Pid, Name)],
- %% Kept for backward compatibility with links. Can be removed, eventually.
- _ = [gen_server:abcast(nodes(), ?MODULE, {del_member, Name, Pid}) ||
- Name <- Names],
- ok.
-
-join_group(Name, Pid) ->
- Ref_Pid = {ref, Pid},
- try _ = ets:update_counter(pg2_fixed_table, Ref_Pid, {4, +1})
- catch _:_ ->
- {RPid, Ref} = do_monitor(Pid),
- true = ets:insert(pg2_fixed_table, {Ref_Pid, RPid, Ref, 1}),
- true = ets:insert(pg2_fixed_table, {{ref, Ref}, Pid})
- end,
- Member_Name_Pid = {member, Name, Pid},
- try _ = ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, +1, 1, 1})
- catch _:_ ->
- true = ets:insert(pg2_fixed_table, {Member_Name_Pid, 1}),
- _ = [ets:insert(pg2_fixed_table, {{local_member, Name, Pid}}) ||
- node(Pid) =:= node()],
- true = ets:insert(pg2_fixed_table, {{pid, Pid, Name}})
- end.
-
-leave_group(Name, Pid) ->
- Member_Name_Pid = {member, Name, Pid},
- try ets:update_counter(pg2_fixed_table, Member_Name_Pid, {2, -1, 0, 0}) of
- N ->
- if
- N =:= 0 ->
- true = ets:delete(pg2_fixed_table, {pid, Pid, Name}),
- _ = [ets:delete(pg2_fixed_table, {local_member, Name, Pid}) ||
- node(Pid) =:= node()],
- true = ets:delete(pg2_fixed_table, Member_Name_Pid);
- true ->
- ok
- end,
- Ref_Pid = {ref, Pid},
- case ets:update_counter(pg2_fixed_table, Ref_Pid, {4, -1}) of
- 0 ->
- [{Ref_Pid,RPid,Ref,0}] = ets:lookup(pg2_fixed_table, Ref_Pid),
- true = ets:delete(pg2_fixed_table, {ref, Ref}),
- true = ets:delete(pg2_fixed_table, Ref_Pid),
- true = erlang:demonitor(Ref, [flush]),
- kill_monitor_proc(RPid, Pid);
- _ ->
- ok
- end
- catch _:_ ->
- ok
- end.
-
-all_members() ->
- [[G, group_members(G)] || G <- all_groups()].
-
-group_members(Name) ->
- [P ||
- [P, N] <- ets:match(pg2_fixed_table, {{member, Name, '$1'},'$2'}),
- _ <- lists:seq(1, N)].
-
-local_group_members(Name) ->
- [P ||
- [Pid] <- ets:match(pg2_fixed_table, {{local_member, Name, '$1'}}),
- P <- member_in_group(Pid, Name)].
-
-member_in_group(Pid, Name) ->
- case ets:lookup(pg2_fixed_table, {member, Name, Pid}) of
- [] -> [];
- [{{member, Name, Pid}, N}] ->
- lists:duplicate(N, Pid)
- end.
-
-member_groups(Pid) ->
- [Name || [Name] <- ets:match(pg2_fixed_table, {{pid, Pid, '$1'}})].
-
-all_groups() ->
- [N || [N] <- ets:match(pg2_fixed_table, {{group,'$1'}})].
-
-ensure_started() ->
- case whereis(?MODULE) of
- undefined ->
- C = {pg2_fixed, {?MODULE, start_link, []}, permanent,
- 1000, worker, [?MODULE]},
- supervisor:start_child(kernel_safe_sup, C);
- Pg2_FixedPid ->
- {ok, Pg2_FixedPid}
- end.
-
-
-kill_monitor_proc(RPid, Pid) ->
- case RPid of
- Pid -> ok;
- _ -> exit(RPid, kill)
- end.
-
-%% When/if erlang:monitor() returns before trying to connect to the
-%% other node this function can be removed.
-do_monitor(Pid) ->
- case (node(Pid) =:= node()) orelse lists:member(node(Pid), nodes()) of
- true ->
- %% Assume the node is still up
- {Pid, erlang:monitor(process, Pid)};
- false ->
- F = fun() ->
- Ref = erlang:monitor(process, Pid),
- receive
- {'DOWN', Ref, process, Pid, _Info} ->
- exit(normal)
- end
- end,
- erlang:spawn_monitor(F)
- end.
diff --git a/src/pg_local.erl b/src/pg_local.erl
deleted file mode 100644
index f535b136..00000000
--- a/src/pg_local.erl
+++ /dev/null
@@ -1,213 +0,0 @@
-%% This file is a copy of pg2.erl from the R13B-3 Erlang/OTP
-%% distribution, with the following modifications:
-%%
-%% 1) Process groups are node-local only.
-%%
-%% 2) Groups are created/deleted implicitly.
-%%
-%% 3) 'join' and 'leave' are asynchronous.
-%%
-%% 4) the type specs of the exported non-callback functions have been
-%% extracted into a separate, guarded section, and rewritten in
-%% old-style spec syntax, for better compatibility with older
-%% versions of Erlang/OTP. The remaining type specs have been
-%% removed.
-
-%% All modifications are (C) 2010-2013 GoPivotal, Inc.
-
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(pg_local).
-
--export([join/2, leave/2, get_members/1]).
--export([sync/0]). %% intended for testing only; not part of official API
--export([start/0, start_link/0, init/1, handle_call/3, handle_cast/2,
- handle_info/2, terminate/2]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(name() :: term()).
-
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(start/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(join/2 :: (name(), pid()) -> 'ok').
--spec(leave/2 :: (name(), pid()) -> 'ok').
--spec(get_members/1 :: (name()) -> [pid()]).
-
--spec(sync/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%%% As of R13B03 monitors are used instead of links.
-
-%%%
-%%% Exported functions
-%%%
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-start() ->
- ensure_started().
-
-join(Name, Pid) when is_pid(Pid) ->
- ensure_started(),
- gen_server:cast(?MODULE, {join, Name, Pid}).
-
-leave(Name, Pid) when is_pid(Pid) ->
- ensure_started(),
- gen_server:cast(?MODULE, {leave, Name, Pid}).
-
-get_members(Name) ->
- ensure_started(),
- group_members(Name).
-
-sync() ->
- ensure_started(),
- gen_server:call(?MODULE, sync, infinity).
-
-%%%
-%%% Callback functions from gen_server
-%%%
-
--record(state, {}).
-
-init([]) ->
- pg_local_table = ets:new(pg_local_table, [ordered_set, protected, named_table]),
- {ok, #state{}}.
-
-handle_call(sync, _From, S) ->
- {reply, ok, S};
-
-handle_call(Request, From, S) ->
- error_logger:warning_msg("The pg_local server received an unexpected message:\n"
- "handle_call(~p, ~p, _)\n",
- [Request, From]),
- {noreply, S}.
-
-handle_cast({join, Name, Pid}, S) ->
- join_group(Name, Pid),
- {noreply, S};
-handle_cast({leave, Name, Pid}, S) ->
- leave_group(Name, Pid),
- {noreply, S};
-handle_cast(_, S) ->
- {noreply, S}.
-
-handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S) ->
- member_died(MonitorRef),
- {noreply, S};
-handle_info(_, S) ->
- {noreply, S}.
-
-terminate(_Reason, _S) ->
- true = ets:delete(pg_local_table),
- ok.
-
-%%%
-%%% Local functions
-%%%
-
-%%% One ETS table, pg_local_table, is used for bookkeeping. The type of the
-%%% table is ordered_set, and the fast matching of partially
-%%% instantiated keys is used extensively.
-%%%
-%%% {{ref, Pid}, MonitorRef, Counter}
-%%% {{ref, MonitorRef}, Pid}
-%%% Each process has one monitor. Counter is incremented when the
-%%% Pid joins some group.
-%%% {{member, Name, Pid}, _}
-%%% Pid is a member of group Name, GroupCounter is incremented when the
-%%% Pid joins the group Name.
-%%% {{pid, Pid, Name}}
-%%% Pid is a member of group Name.
-
-member_died(Ref) ->
- [{{ref, Ref}, Pid}] = ets:lookup(pg_local_table, {ref, Ref}),
- Names = member_groups(Pid),
- _ = [leave_group(Name, P) ||
- Name <- Names,
- P <- member_in_group(Pid, Name)],
- ok.
-
-join_group(Name, Pid) ->
- Ref_Pid = {ref, Pid},
- try _ = ets:update_counter(pg_local_table, Ref_Pid, {3, +1})
- catch _:_ ->
- Ref = erlang:monitor(process, Pid),
- true = ets:insert(pg_local_table, {Ref_Pid, Ref, 1}),
- true = ets:insert(pg_local_table, {{ref, Ref}, Pid})
- end,
- Member_Name_Pid = {member, Name, Pid},
- try _ = ets:update_counter(pg_local_table, Member_Name_Pid, {2, +1})
- catch _:_ ->
- true = ets:insert(pg_local_table, {Member_Name_Pid, 1}),
- true = ets:insert(pg_local_table, {{pid, Pid, Name}})
- end.
-
-leave_group(Name, Pid) ->
- Member_Name_Pid = {member, Name, Pid},
- try ets:update_counter(pg_local_table, Member_Name_Pid, {2, -1}) of
- N ->
- if
- N =:= 0 ->
- true = ets:delete(pg_local_table, {pid, Pid, Name}),
- true = ets:delete(pg_local_table, Member_Name_Pid);
- true ->
- ok
- end,
- Ref_Pid = {ref, Pid},
- case ets:update_counter(pg_local_table, Ref_Pid, {3, -1}) of
- 0 ->
- [{Ref_Pid,Ref,0}] = ets:lookup(pg_local_table, Ref_Pid),
- true = ets:delete(pg_local_table, {ref, Ref}),
- true = ets:delete(pg_local_table, Ref_Pid),
- true = erlang:demonitor(Ref, [flush]),
- ok;
- _ ->
- ok
- end
- catch _:_ ->
- ok
- end.
-
-group_members(Name) ->
- [P ||
- [P, N] <- ets:match(pg_local_table, {{member, Name, '$1'},'$2'}),
- _ <- lists:seq(1, N)].
-
-member_in_group(Pid, Name) ->
- [{{member, Name, Pid}, N}] = ets:lookup(pg_local_table, {member, Name, Pid}),
- lists:duplicate(N, Pid).
-
-member_groups(Pid) ->
- [Name || [Name] <- ets:match(pg_local_table, {{pid, Pid, '$1'}})].
-
-ensure_started() ->
- case whereis(?MODULE) of
- undefined ->
- C = {pg_local, {?MODULE, start_link, []}, permanent,
- 16#ffffffff, worker, [?MODULE]},
- supervisor:start_child(kernel_safe_sup, C);
- PgLocalPid ->
- {ok, PgLocalPid}
- end.
diff --git a/src/pmon.erl b/src/pmon.erl
deleted file mode 100644
index b9db66fb..00000000
--- a/src/pmon.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(pmon).
-
--export([new/0, monitor/2, monitor_all/2, demonitor/2, is_monitored/2, erase/2,
- monitored/1, is_empty/1]).
-
--compile({no_auto_import, [monitor/2]}).
-
--ifdef(use_specs).
-
-%%----------------------------------------------------------------------------
-
--export_type([?MODULE/0]).
-
--opaque(?MODULE() :: dict()).
-
--type(item() :: pid() | {atom(), node()}).
-
--spec(new/0 :: () -> ?MODULE()).
--spec(monitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitor_all/2 :: ([item()], ?MODULE()) -> ?MODULE()).
--spec(demonitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(is_monitored/2 :: (item(), ?MODULE()) -> boolean()).
--spec(erase/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitored/1 :: (?MODULE()) -> [item()]).
--spec(is_empty/1 :: (?MODULE()) -> boolean()).
-
--endif.
-
-new() -> dict:new().
-
-monitor(Item, M) ->
- case dict:is_key(Item, M) of
- true -> M;
- false -> dict:store(Item, erlang:monitor(process, Item), M)
- end.
-
-monitor_all([], M) -> M; %% optimisation
-monitor_all([Item], M) -> monitor(Item, M); %% optimisation
-monitor_all(Items, M) -> lists:foldl(fun monitor/2, M, Items).
-
-demonitor(Item, M) ->
- case dict:find(Item, M) of
- {ok, MRef} -> erlang:demonitor(MRef),
- dict:erase(Item, M);
- error -> M
- end.
-
-is_monitored(Item, M) -> dict:is_key(Item, M).
-
-erase(Item, M) -> dict:erase(Item, M).
-
-monitored(M) -> dict:fetch_keys(M).
-
-is_empty(M) -> dict:size(M) == 0.
diff --git a/src/priority_queue.erl b/src/priority_queue.erl
deleted file mode 100644
index 6995c3be..00000000
--- a/src/priority_queue.erl
+++ /dev/null
@@ -1,194 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% Priority queues have essentially the same interface as ordinary
-%% queues, except that a) there is an in/3 that takes a priority, and
-%% b) we have only implemented the core API we need.
-%%
-%% Priorities should be integers - the higher the value the higher the
-%% priority - but we don't actually check that.
-%%
-%% in/2 inserts items with priority 0.
-%%
-%% We optimise the case where a priority queue is being used just like
-%% an ordinary queue. When that is the case we represent the priority
-%% queue as an ordinary queue. We could just call into the 'queue'
-%% module for that, but for efficiency we implement the relevant
-%% functions directly in here, thus saving on inter-module calls and
-%% eliminating a level of boxing.
-%%
-%% When the queue contains items with non-zero priorities, it is
-%% represented as a sorted kv list with the inverted Priority as the
-%% key and an ordinary queue as the value. Here again we use our own
-%% ordinary queue implemention for efficiency, often making recursive
-%% calls into the same function knowing that ordinary queues represent
-%% a base case.
-
-
--module(priority_queue).
-
--export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, in/2, in/3,
- out/1, join/2]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([q/0]).
-
--type(q() :: pqueue()).
--type(priority() :: integer() | 'infinity').
--type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
--type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
-
--spec(new/0 :: () -> pqueue()).
--spec(is_queue/1 :: (any()) -> boolean()).
--spec(is_empty/1 :: (pqueue()) -> boolean()).
--spec(len/1 :: (pqueue()) -> non_neg_integer()).
--spec(to_list/1 :: (pqueue()) -> [{priority(), any()}]).
--spec(in/2 :: (any(), pqueue()) -> pqueue()).
--spec(in/3 :: (any(), priority(), pqueue()) -> pqueue()).
--spec(out/1 :: (pqueue()) -> {empty | {value, any()}, pqueue()}).
--spec(join/2 :: (pqueue(), pqueue()) -> pqueue()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-new() ->
- {queue, [], [], 0}.
-
-is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) ->
- true;
-is_queue({pqueue, Queues}) when is_list(Queues) ->
- lists:all(fun ({infinity, Q}) -> is_queue(Q);
- ({P, Q}) -> is_integer(P) andalso is_queue(Q)
- end, Queues);
-is_queue(_) ->
- false.
-
-is_empty({queue, [], [], 0}) ->
- true;
-is_empty(_) ->
- false.
-
-len({queue, _R, _F, L}) ->
- L;
-len({pqueue, Queues}) ->
- lists:sum([len(Q) || {_, Q} <- Queues]).
-
-to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
- [{0, V} || V <- Out ++ lists:reverse(In, [])];
-to_list({pqueue, Queues}) ->
- [{maybe_negate_priority(P), V} || {P, Q} <- Queues,
- {0, V} <- to_list(Q)].
-
-in(Item, Q) ->
- in(Item, 0, Q).
-
-in(X, 0, {queue, [_] = In, [], 1}) ->
- {queue, [X], In, 2};
-in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
- {queue, [X|In], Out, Len + 1};
-in(X, Priority, _Q = {queue, [], [], 0}) ->
- in(X, Priority, {pqueue, []});
-in(X, Priority, Q = {queue, _, _, _}) ->
- in(X, Priority, {pqueue, [{0, Q}]});
-in(X, Priority, {pqueue, Queues}) ->
- P = maybe_negate_priority(Priority),
- {pqueue, case lists:keysearch(P, 1, Queues) of
- {value, {_, Q}} ->
- lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
- false when P == infinity ->
- [{P, {queue, [X], [], 1}} | Queues];
- false ->
- case Queues of
- [{infinity, InfQueue} | Queues1] ->
- [{infinity, InfQueue} |
- lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
- _ ->
- lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
- end
- end}.
-
-out({queue, [], [], 0} = Q) ->
- {empty, Q};
-out({queue, [V], [], 1}) ->
- {{value, V}, {queue, [], [], 0}};
-out({queue, [Y|In], [], Len}) ->
- [V|Out] = lists:reverse(In, []),
- {{value, V}, {queue, [Y], Out}, Len - 1};
-out({queue, In, [V], Len}) when is_list(In) ->
- {{value,V}, r2f(In, Len - 1)};
-out({queue, In,[V|Out], Len}) when is_list(In) ->
- {{value, V}, {queue, In, Out, Len - 1}};
-out({pqueue, [{P, Q} | Queues]}) ->
- {R, Q1} = out(Q),
- NewQ = case is_empty(Q1) of
- true -> case Queues of
- [] -> {queue, [], [], 0};
- [{0, OnlyQ}] -> OnlyQ;
- [_|_] -> {pqueue, Queues}
- end;
- false -> {pqueue, [{P, Q1} | Queues]}
- end,
- {R, NewQ}.
-
-join(A, {queue, [], [], 0}) ->
- A;
-join({queue, [], [], 0}, B) ->
- B;
-join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
- {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
-join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
- {Pre, Post} =
- lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
- Post1 = case Post of
- [] -> [ {0, A} ];
- [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
- _ -> [ {0, A} | Post ]
- end,
- {pqueue, Pre ++ Post1};
-join({pqueue, APQ}, B = {queue, _, _, _}) ->
- {Pre, Post} =
- lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
- Post1 = case Post of
- [] -> [ {0, B} ];
- [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
- _ -> [ {0, B} | Post ]
- end,
- {pqueue, Pre ++ Post1};
-join({pqueue, APQ}, {pqueue, BPQ}) ->
- {pqueue, merge(APQ, BPQ, [])}.
-
-merge([], BPQ, Acc) ->
- lists:reverse(Acc, BPQ);
-merge(APQ, [], Acc) ->
- lists:reverse(Acc, APQ);
-merge([{P, A}|As], [{P, B}|Bs], Acc) ->
- merge(As, Bs, [ {P, join(A, B)} | Acc ]);
-merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
- merge(As, Bs, [ {PA, A} | Acc ]);
-merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
- merge(As, Bs, [ {PB, B} | Acc ]).
-
-r2f([], 0) -> {queue, [], [], 0};
-r2f([_] = R, 1) -> {queue, [], R, 1};
-r2f([X,Y], 2) -> {queue, [X], [Y], 2};
-r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}.
-
-maybe_negate_priority(infinity) -> infinity;
-maybe_negate_priority(P) -> -P.
diff --git a/src/rabbit.erl b/src/rabbit.erl
deleted file mode 100644
index cb9e6376..00000000
--- a/src/rabbit.erl
+++ /dev/null
@@ -1,776 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit).
-
--behaviour(application).
-
--export([start/0, boot/0, stop/0,
- stop_and_halt/0, await_startup/0, status/0, is_running/0,
- is_running/1, environment/0, rotate_logs/1, force_event_refresh/0,
- start_fhc/0]).
-
--export([start/2, stop/1]).
-
--export([log_location/1]). %% for testing
-
-%%---------------------------------------------------------------------------
-%% Boot steps.
--export([maybe_insert_default_data/0, boot_delegate/0, recover/0]).
-
--rabbit_boot_step({pre_boot, [{description, "rabbit boot start"}]}).
-
--rabbit_boot_step({codec_correctness_check,
- [{description, "codec correctness check"},
- {mfa, {rabbit_binary_generator,
- check_empty_frame_size,
- []}},
- {requires, pre_boot},
- {enables, external_infrastructure}]}).
-
--rabbit_boot_step({database,
- [{mfa, {rabbit_mnesia, init, []}},
- {requires, file_handle_cache},
- {enables, external_infrastructure}]}).
-
--rabbit_boot_step({database_sync,
- [{description, "database sync"},
- {mfa, {rabbit_sup, start_child, [mnesia_sync]}},
- {requires, database},
- {enables, external_infrastructure}]}).
-
--rabbit_boot_step({file_handle_cache,
- [{description, "file handle cache server"},
- {mfa, {rabbit, start_fhc, []}},
- {requires, pre_boot},
- {enables, worker_pool}]}).
-
--rabbit_boot_step({worker_pool,
- [{description, "worker pool"},
- {mfa, {rabbit_sup, start_supervisor_child,
- [worker_pool_sup]}},
- {requires, pre_boot},
- {enables, external_infrastructure}]}).
-
--rabbit_boot_step({external_infrastructure,
- [{description, "external infrastructure ready"}]}).
-
--rabbit_boot_step({rabbit_registry,
- [{description, "plugin registry"},
- {mfa, {rabbit_sup, start_child,
- [rabbit_registry]}},
- {requires, external_infrastructure},
- {enables, kernel_ready}]}).
-
--rabbit_boot_step({rabbit_log,
- [{description, "logging server"},
- {mfa, {rabbit_sup, start_restartable_child,
- [rabbit_log]}},
- {requires, external_infrastructure},
- {enables, kernel_ready}]}).
-
--rabbit_boot_step({rabbit_event,
- [{description, "statistics event manager"},
- {mfa, {rabbit_sup, start_restartable_child,
- [rabbit_event]}},
- {requires, external_infrastructure},
- {enables, kernel_ready}]}).
-
--rabbit_boot_step({kernel_ready,
- [{description, "kernel ready"},
- {requires, external_infrastructure}]}).
-
--rabbit_boot_step({rabbit_alarm,
- [{description, "alarm handler"},
- {mfa, {rabbit_alarm, start, []}},
- {requires, kernel_ready},
- {enables, core_initialized}]}).
-
--rabbit_boot_step({rabbit_memory_monitor,
- [{description, "memory monitor"},
- {mfa, {rabbit_sup, start_restartable_child,
- [rabbit_memory_monitor]}},
- {requires, rabbit_alarm},
- {enables, core_initialized}]}).
-
--rabbit_boot_step({guid_generator,
- [{description, "guid generator"},
- {mfa, {rabbit_sup, start_restartable_child,
- [rabbit_guid]}},
- {requires, kernel_ready},
- {enables, core_initialized}]}).
-
--rabbit_boot_step({delegate_sup,
- [{description, "cluster delegate"},
- {mfa, {rabbit, boot_delegate, []}},
- {requires, kernel_ready},
- {enables, core_initialized}]}).
-
--rabbit_boot_step({rabbit_node_monitor,
- [{description, "node monitor"},
- {mfa, {rabbit_sup, start_restartable_child,
- [rabbit_node_monitor]}},
- {requires, rabbit_alarm},
- {enables, core_initialized}]}).
-
--rabbit_boot_step({core_initialized,
- [{description, "core initialized"},
- {requires, kernel_ready}]}).
-
--rabbit_boot_step({empty_db_check,
- [{description, "empty DB check"},
- {mfa, {?MODULE, maybe_insert_default_data, []}},
- {requires, core_initialized},
- {enables, routing_ready}]}).
-
--rabbit_boot_step({recovery,
- [{description, "exchange, queue and binding recovery"},
- {mfa, {rabbit, recover, []}},
- {requires, core_initialized},
- {enables, routing_ready}]}).
-
--rabbit_boot_step({mirror_queue_slave_sup,
- [{description, "mirror queue slave sup"},
- {mfa, {rabbit_sup, start_supervisor_child,
- [rabbit_mirror_queue_slave_sup]}},
- {requires, recovery},
- {enables, routing_ready}]}).
-
--rabbit_boot_step({mirrored_queues,
- [{description, "adding mirrors to queues"},
- {mfa, {rabbit_mirror_queue_misc, on_node_up, []}},
- {requires, mirror_queue_slave_sup},
- {enables, routing_ready}]}).
-
--rabbit_boot_step({routing_ready,
- [{description, "message delivery logic ready"},
- {requires, core_initialized}]}).
-
--rabbit_boot_step({log_relay,
- [{description, "error log relay"},
- {mfa, {rabbit_sup, start_child,
- [rabbit_error_logger_lifecycle,
- supervised_lifecycle,
- [rabbit_error_logger_lifecycle,
- {rabbit_error_logger, start, []},
- {rabbit_error_logger, stop, []}]]}},
- {requires, routing_ready},
- {enables, networking}]}).
-
--rabbit_boot_step({direct_client,
- [{description, "direct client"},
- {mfa, {rabbit_direct, boot, []}},
- {requires, log_relay}]}).
-
--rabbit_boot_step({networking,
- [{mfa, {rabbit_networking, boot, []}},
- {requires, log_relay}]}).
-
--rabbit_boot_step({notify_cluster,
- [{description, "notify cluster nodes"},
- {mfa, {rabbit_node_monitor, notify_node_up, []}},
- {requires, networking}]}).
-
--rabbit_boot_step({background_gc,
- [{description, "background garbage collection"},
- {mfa, {rabbit_sup, start_restartable_child,
- [background_gc]}},
- {enables, networking}]}).
-
-%%---------------------------------------------------------------------------
-
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--define(APPS, [os_mon, mnesia, rabbit]).
-
-%% HiPE compilation uses multiple cores anyway, but some bits are
-%% IO-bound so we can go faster if we parallelise a bit more. In
-%% practice 2 processes seems just as fast as any other number > 1,
-%% and keeps the progress bar realistic-ish.
--define(HIPE_PROCESSES, 2).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(file_suffix() :: binary()).
-%% this really should be an abstract type
--type(log_location() :: 'tty' | 'undefined' | file:filename()).
--type(param() :: atom()).
-
--spec(start/0 :: () -> 'ok').
--spec(boot/0 :: () -> 'ok').
--spec(stop/0 :: () -> 'ok').
--spec(stop_and_halt/0 :: () -> no_return()).
--spec(await_startup/0 :: () -> 'ok').
--spec(status/0 ::
- () -> [{pid, integer()} |
- {running_applications, [{atom(), string(), string()}]} |
- {os, {atom(), atom()}} |
- {erlang_version, string()} |
- {memory, any()}]).
--spec(is_running/0 :: () -> boolean()).
--spec(is_running/1 :: (node()) -> boolean()).
--spec(environment/0 :: () -> [{param(), term()}]).
--spec(rotate_logs/1 :: (file_suffix()) -> rabbit_types:ok_or_error(any())).
--spec(force_event_refresh/0 :: () -> 'ok').
-
--spec(log_location/1 :: ('sasl' | 'kernel') -> log_location()).
-
--spec(start/2 :: ('normal',[]) ->
- {'error',
- {'erlang_version_too_old',
- {'found',[any()]},
- {'required',[any(),...]}}} |
- {'ok',pid()}).
--spec(stop/1 :: (_) -> 'ok').
-
--spec(maybe_insert_default_data/0 :: () -> 'ok').
--spec(boot_delegate/0 :: () -> 'ok').
--spec(recover/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% HiPE compilation happens before we have log handlers - so we have
-%% to io:format/2, it's all we can do.
-
-maybe_hipe_compile() ->
- {ok, Want} = application:get_env(rabbit, hipe_compile),
- Can = code:which(hipe) =/= non_existing,
- case {Want, Can} of
- {true, true} -> hipe_compile(),
- true;
- {true, false} -> false;
- {false, _} -> true
- end.
-
-warn_if_hipe_compilation_failed(true) ->
- ok;
-warn_if_hipe_compilation_failed(false) ->
- error_logger:warning_msg(
- "Not HiPE compiling: HiPE not found in this Erlang installation.~n").
-
-%% HiPE compilation happens before we have log handlers and can take a
-%% long time, so make an exception to our no-stdout policy and display
-%% progress via stdout.
-hipe_compile() ->
- {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
- HipeModules = [HM || HM <- HipeModulesAll, code:which(HM) =/= non_existing],
- Count = length(HipeModules),
- io:format("~nHiPE compiling: |~s|~n |",
- [string:copies("-", Count)]),
- T1 = erlang:now(),
- PidMRefs = [spawn_monitor(fun () -> [begin
- {ok, M} = hipe:c(M, [o3]),
- io:format("#")
- end || M <- Ms]
- end) ||
- Ms <- split(HipeModules, ?HIPE_PROCESSES)],
- [receive
- {'DOWN', MRef, process, _, normal} -> ok;
- {'DOWN', MRef, process, _, Reason} -> exit(Reason)
- end || {_Pid, MRef} <- PidMRefs],
- T2 = erlang:now(),
- io:format("|~n~nCompiled ~B modules in ~Bs~n",
- [Count, timer:now_diff(T2, T1) div 1000000]).
-
-split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
-
-split0([], Ls) -> Ls;
-split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]).
-
-ensure_application_loaded() ->
- %% We end up looking at the rabbit app's env for HiPE and log
- %% handling, so it needs to be loaded. But during the tests, it
- %% may end up getting loaded twice, so guard against that.
- case application:load(rabbit) of
- ok -> ok;
- {error, {already_loaded, rabbit}} -> ok
- end.
-
-start() ->
- start_it(fun() ->
- %% We do not want to HiPE compile or upgrade
- %% mnesia after just restarting the app
- ok = ensure_application_loaded(),
- ok = ensure_working_log_handlers(),
- rabbit_node_monitor:prepare_cluster_status_files(),
- rabbit_mnesia:check_cluster_consistency(),
- ok = app_utils:start_applications(
- app_startup_order(), fun handle_app_error/2),
- ok = log_broker_started(rabbit_plugins:active())
- end).
-
-boot() ->
- start_it(fun() ->
- ok = ensure_application_loaded(),
- Success = maybe_hipe_compile(),
- ok = ensure_working_log_handlers(),
- warn_if_hipe_compilation_failed(Success),
- rabbit_node_monitor:prepare_cluster_status_files(),
- ok = rabbit_upgrade:maybe_upgrade_mnesia(),
- %% It's important that the consistency check happens after
- %% the upgrade, since if we are a secondary node the
- %% primary node will have forgotten us
- rabbit_mnesia:check_cluster_consistency(),
- Plugins = rabbit_plugins:setup(),
- ToBeLoaded = Plugins ++ ?APPS,
- ok = app_utils:load_applications(ToBeLoaded),
- StartupApps = app_utils:app_dependency_order(ToBeLoaded,
- false),
- ok = app_utils:start_applications(
- StartupApps, fun handle_app_error/2),
- ok = log_broker_started(Plugins)
- end).
-
-handle_app_error(App, {bad_return, {_MFA, {'EXIT', {Reason, _}}}}) ->
- throw({could_not_start, App, Reason});
-
-handle_app_error(App, Reason) ->
- throw({could_not_start, App, Reason}).
-
-start_it(StartFun) ->
- Marker = spawn_link(fun() -> receive stop -> ok end end),
- register(rabbit_boot, Marker),
- try
- StartFun()
- catch
- throw:{could_not_start, _App, _Reason}=Err ->
- boot_error(Err, not_available);
- _:Reason ->
- boot_error(Reason, erlang:get_stacktrace())
- after
- unlink(Marker),
- Marker ! stop,
- %% give the error loggers some time to catch up
- timer:sleep(100)
- end.
-
-stop() ->
- case whereis(rabbit_boot) of
- undefined -> ok;
- _ -> await_startup()
- end,
- rabbit_log:info("Stopping RabbitMQ~n"),
- ok = app_utils:stop_applications(app_shutdown_order()).
-
-stop_and_halt() ->
- try
- stop()
- after
- rabbit_misc:local_info_msg("Halting Erlang VM~n", []),
- init:stop()
- end,
- ok.
-
-await_startup() ->
- app_utils:wait_for_applications(app_startup_order()).
-
-status() ->
- S1 = [{pid, list_to_integer(os:getpid())},
- {running_applications, rabbit_misc:which_applications()},
- {os, os:type()},
- {erlang_version, erlang:system_info(system_version)},
- {memory, rabbit_vm:memory()}],
- S2 = rabbit_misc:filter_exit_map(
- fun ({Key, {M, F, A}}) -> {Key, erlang:apply(M, F, A)} end,
- [{vm_memory_high_watermark, {vm_memory_monitor,
- get_vm_memory_high_watermark, []}},
- {vm_memory_limit, {vm_memory_monitor,
- get_memory_limit, []}},
- {disk_free_limit, {rabbit_disk_monitor,
- get_disk_free_limit, []}},
- {disk_free, {rabbit_disk_monitor,
- get_disk_free, []}}]),
- S3 = rabbit_misc:with_exit_handler(
- fun () -> [] end,
- fun () -> [{file_descriptors, file_handle_cache:info()}] end),
- S4 = [{processes, [{limit, erlang:system_info(process_limit)},
- {used, erlang:system_info(process_count)}]},
- {run_queue, erlang:statistics(run_queue)},
- {uptime, begin
- {T,_} = erlang:statistics(wall_clock),
- T div 1000
- end}],
- S1 ++ S2 ++ S3 ++ S4.
-
-is_running() -> is_running(node()).
-
-is_running(Node) -> rabbit_nodes:is_process_running(Node, rabbit).
-
-environment() ->
- lists:keysort(1, [P || P = {K, _} <- application:get_all_env(rabbit),
- K =/= default_pass]).
-
-rotate_logs(BinarySuffix) ->
- Suffix = binary_to_list(BinarySuffix),
- rabbit_misc:local_info_msg("Rotating logs with suffix '~s'~n", [Suffix]),
- log_rotation_result(rotate_logs(log_location(kernel),
- Suffix,
- rabbit_error_logger_file_h),
- rotate_logs(log_location(sasl),
- Suffix,
- rabbit_sasl_report_file_h)).
-
-%%--------------------------------------------------------------------
-
-start(normal, []) ->
- case erts_version_check() of
- ok ->
- {ok, Vsn} = application:get_key(rabbit, vsn),
- error_logger:info_msg("Starting RabbitMQ ~s on Erlang ~s~n~s~n~s~n",
- [Vsn, erlang:system_info(otp_release),
- ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE]),
- {ok, SupPid} = rabbit_sup:start_link(),
- true = register(rabbit, self()),
- print_banner(),
- log_banner(),
- [ok = run_boot_step(Step) || Step <- boot_steps()],
- {ok, SupPid};
- Error ->
- Error
- end.
-
-stop(_State) ->
- ok = rabbit_alarm:stop(),
- ok = case rabbit_mnesia:is_clustered() of
- true -> rabbit_amqqueue:on_node_down(node());
- false -> rabbit_table:clear_ram_only_tables()
- end,
- ok.
-
-%%---------------------------------------------------------------------------
-%% application life cycle
-
-app_startup_order() ->
- ok = app_utils:load_applications(?APPS),
- app_utils:app_dependency_order(?APPS, false).
-
-app_shutdown_order() ->
- Apps = ?APPS ++ rabbit_plugins:active(),
- app_utils:app_dependency_order(Apps, true).
-
-%%---------------------------------------------------------------------------
-%% boot step logic
-
-run_boot_step({_StepName, Attributes}) ->
- case [MFA || {mfa, MFA} <- Attributes] of
- [] ->
- ok;
- MFAs ->
- [try
- apply(M,F,A)
- of
- ok -> ok;
- {error, Reason} -> boot_error(Reason, not_available)
- catch
- _:Reason -> boot_error(Reason, erlang:get_stacktrace())
- end || {M,F,A} <- MFAs],
- ok
- end.
-
-boot_steps() ->
- sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)).
-
-vertices(_Module, Steps) ->
- [{StepName, {StepName, Atts}} || {StepName, Atts} <- Steps].
-
-edges(_Module, Steps) ->
- [case Key of
- requires -> {StepName, OtherStep};
- enables -> {OtherStep, StepName}
- end || {StepName, Atts} <- Steps,
- {Key, OtherStep} <- Atts,
- Key =:= requires orelse Key =:= enables].
-
-sort_boot_steps(UnsortedSteps) ->
- case rabbit_misc:build_acyclic_graph(fun vertices/2, fun edges/2,
- UnsortedSteps) of
- {ok, G} ->
- %% Use topological sort to find a consistent ordering (if
- %% there is one, otherwise fail).
- SortedSteps = lists:reverse(
- [begin
- {StepName, Step} = digraph:vertex(G,
- StepName),
- Step
- end || StepName <- digraph_utils:topsort(G)]),
- digraph:delete(G),
- %% Check that all mentioned {M,F,A} triples are exported.
- case [{StepName, {M,F,A}} ||
- {StepName, Attributes} <- SortedSteps,
- {mfa, {M,F,A}} <- Attributes,
- not erlang:function_exported(M, F, length(A))] of
- [] -> SortedSteps;
- MissingFunctions -> basic_boot_error(
- {missing_functions, MissingFunctions},
- "Boot step functions not exported: ~p~n",
- [MissingFunctions])
- end;
- {error, {vertex, duplicate, StepName}} ->
- basic_boot_error({duplicate_boot_step, StepName},
- "Duplicate boot step name: ~w~n", [StepName]);
- {error, {edge, Reason, From, To}} ->
- basic_boot_error(
- {invalid_boot_step_dependency, From, To},
- "Could not add boot step dependency of ~w on ~w:~n~s",
- [To, From,
- case Reason of
- {bad_vertex, V} ->
- io_lib:format("Boot step not registered: ~w~n", [V]);
- {bad_edge, [First | Rest]} ->
- [io_lib:format("Cyclic dependency: ~w", [First]),
- [io_lib:format(" depends on ~w", [Next]) ||
- Next <- Rest],
- io_lib:format(" depends on ~w~n", [First])]
- end])
- end.
-
--ifdef(use_specs).
--spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()).
--endif.
-boot_error(Term={error, {timeout_waiting_for_tables, _}}, _Stacktrace) ->
- AllNodes = rabbit_mnesia:cluster_nodes(all),
- {Err, Nodes} =
- case AllNodes -- [node()] of
- [] -> {"Timeout contacting cluster nodes. Since RabbitMQ was"
- " shut down forcefully~nit cannot determine which nodes"
- " are timing out.~n", []};
- Ns -> {rabbit_misc:format(
- "Timeout contacting cluster nodes: ~p.~n", [Ns]),
- Ns}
- end,
- basic_boot_error(Term,
- Err ++ rabbit_nodes:diagnostics(Nodes) ++ "~n~n", []);
-boot_error(Reason, Stacktrace) ->
- Fmt = "Error description:~n ~p~n~n" ++
- "Log files (may contain more information):~n ~s~n ~s~n~n",
- Args = [Reason, log_location(kernel), log_location(sasl)],
- boot_error(Reason, Fmt, Args, Stacktrace).
-
--ifdef(use_specs).
--spec(boot_error/4 :: (term(), string(), [any()], not_available | [tuple()])
- -> no_return()).
--endif.
-boot_error(Reason, Fmt, Args, not_available) ->
- basic_boot_error(Reason, Fmt, Args);
-boot_error(Reason, Fmt, Args, Stacktrace) ->
- basic_boot_error(Reason, Fmt ++ "Stack trace:~n ~p~n~n",
- Args ++ [Stacktrace]).
-
-basic_boot_error(Reason, Format, Args) ->
- io:format("~n~nBOOT FAILED~n===========~n~n" ++ Format, Args),
- rabbit_misc:local_info_msg(Format, Args),
- timer:sleep(1000),
- exit({?MODULE, failure_during_boot, Reason}).
-
-%%---------------------------------------------------------------------------
-%% boot step functions
-
-boot_delegate() ->
- {ok, Count} = application:get_env(rabbit, delegate_count),
- rabbit_sup:start_supervisor_child(delegate_sup, [Count]).
-
-recover() ->
- Qs = rabbit_amqqueue:recover(),
- ok = rabbit_binding:recover(rabbit_exchange:recover(),
- [QName || #amqqueue{name = QName} <- Qs]),
- rabbit_amqqueue:start(Qs).
-
-maybe_insert_default_data() ->
- case rabbit_table:is_empty() of
- true -> insert_default_data();
- false -> ok
- end.
-
-insert_default_data() ->
- {ok, DefaultUser} = application:get_env(default_user),
- {ok, DefaultPass} = application:get_env(default_pass),
- {ok, DefaultTags} = application:get_env(default_user_tags),
- {ok, DefaultVHost} = application:get_env(default_vhost),
- {ok, [DefaultConfigurePerm, DefaultWritePerm, DefaultReadPerm]} =
- application:get_env(default_permissions),
- ok = rabbit_vhost:add(DefaultVHost),
- ok = rabbit_auth_backend_internal:add_user(DefaultUser, DefaultPass),
- ok = rabbit_auth_backend_internal:set_tags(DefaultUser, DefaultTags),
- ok = rabbit_auth_backend_internal:set_permissions(DefaultUser,
- DefaultVHost,
- DefaultConfigurePerm,
- DefaultWritePerm,
- DefaultReadPerm),
- ok.
-
-%%---------------------------------------------------------------------------
-%% logging
-
-ensure_working_log_handlers() ->
- Handlers = gen_event:which_handlers(error_logger),
- ok = ensure_working_log_handler(error_logger_tty_h,
- rabbit_error_logger_file_h,
- error_logger_tty_h,
- log_location(kernel),
- Handlers),
-
- ok = ensure_working_log_handler(sasl_report_tty_h,
- rabbit_sasl_report_file_h,
- sasl_report_tty_h,
- log_location(sasl),
- Handlers),
- ok.
-
-ensure_working_log_handler(OldHandler, NewHandler, TTYHandler,
- LogLocation, Handlers) ->
- case LogLocation of
- undefined -> ok;
- tty -> case lists:member(TTYHandler, Handlers) of
- true -> ok;
- false ->
- throw({error, {cannot_log_to_tty,
- TTYHandler, not_installed}})
- end;
- _ -> case lists:member(NewHandler, Handlers) of
- true -> ok;
- false -> case rotate_logs(LogLocation, "",
- OldHandler, NewHandler) of
- ok -> ok;
- {error, Reason} ->
- throw({error, {cannot_log_to_file,
- LogLocation, Reason}})
- end
- end
- end.
-
-log_location(Type) ->
- case application:get_env(rabbit, case Type of
- kernel -> error_logger;
- sasl -> sasl_error_logger
- end) of
- {ok, {file, File}} -> File;
- {ok, false} -> undefined;
- {ok, tty} -> tty;
- {ok, silent} -> undefined;
- {ok, Bad} -> throw({error, {cannot_log_to_file, Bad}});
- _ -> undefined
- end.
-
-rotate_logs(File, Suffix, Handler) ->
- rotate_logs(File, Suffix, Handler, Handler).
-
-rotate_logs(undefined, _Suffix, _OldHandler, _NewHandler) -> ok;
-rotate_logs(tty, _Suffix, _OldHandler, _NewHandler) -> ok;
-rotate_logs(File, Suffix, OldHandler, NewHandler) ->
- gen_event:swap_handler(error_logger,
- {OldHandler, swap},
- {NewHandler, {File, Suffix}}).
-
-log_rotation_result({error, MainLogError}, {error, SaslLogError}) ->
- {error, {{cannot_rotate_main_logs, MainLogError},
- {cannot_rotate_sasl_logs, SaslLogError}}};
-log_rotation_result({error, MainLogError}, ok) ->
- {error, {cannot_rotate_main_logs, MainLogError}};
-log_rotation_result(ok, {error, SaslLogError}) ->
- {error, {cannot_rotate_sasl_logs, SaslLogError}};
-log_rotation_result(ok, ok) ->
- ok.
-
-force_event_refresh() ->
- rabbit_direct:force_event_refresh(),
- rabbit_networking:force_connection_event_refresh(),
- rabbit_channel:force_event_refresh(),
- rabbit_amqqueue:force_event_refresh().
-
-%%---------------------------------------------------------------------------
-%% misc
-
-log_broker_started(Plugins) ->
- rabbit_misc:with_local_io(
- fun() ->
- PluginList = iolist_to_binary([rabbit_misc:format(" * ~s~n", [P])
- || P <- Plugins]),
- error_logger:info_msg(
- "Server startup complete; ~b plugins started.~n~s",
- [length(Plugins), PluginList]),
- io:format(" completed with ~p plugins.~n", [length(Plugins)])
- end).
-
-erts_version_check() ->
- FoundVer = erlang:system_info(version),
- case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of
- true -> ok;
- false -> {error, {erlang_version_too_old,
- {found, FoundVer}, {required, ?ERTS_MINIMUM}}}
- end.
-
-print_banner() ->
- {ok, Product} = application:get_key(id),
- {ok, Version} = application:get_key(vsn),
- io:format("~n ~s ~s. ~s"
- "~n ## ## ~s"
- "~n ## ##"
- "~n ########## Logs: ~s"
- "~n ###### ## ~s"
- "~n ##########"
- "~n Starting broker...",
- [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE,
- log_location(kernel), log_location(sasl)]).
-
-log_banner() ->
- Settings = [{"node", node()},
- {"home dir", home_dir()},
- {"config file(s)", config_files()},
- {"cookie hash", rabbit_nodes:cookie_hash()},
- {"log", log_location(kernel)},
- {"sasl log", log_location(sasl)},
- {"database dir", rabbit_mnesia:dir()}],
- DescrLen = 1 + lists:max([length(K) || {K, _V} <- Settings]),
- Format = fun (K, V) ->
- rabbit_misc:format(
- "~-" ++ integer_to_list(DescrLen) ++ "s: ~s~n", [K, V])
- end,
- Banner = iolist_to_binary(
- [case S of
- {"config file(s)" = K, []} ->
- Format(K, "(none)");
- {"config file(s)" = K, [V0 | Vs]} ->
- [Format(K, V0) | [Format("", V) || V <- Vs]];
- {K, V} ->
- Format(K, V)
- end || S <- Settings]),
- error_logger:info_msg("~s", [Banner]).
-
-home_dir() ->
- case init:get_argument(home) of
- {ok, [[Home]]} -> Home;
- Other -> Other
- end.
-
-config_files() ->
- case init:get_argument(config) of
- {ok, Files} -> [filename:absname(
- filename:rootname(File, ".config") ++ ".config") ||
- [File] <- Files];
- error -> []
- end.
-
-%% We don't want this in fhc since it references rabbit stuff. And we can't put
-%% this in the bootstep directly.
-start_fhc() ->
- rabbit_sup:start_restartable_child(
- file_handle_cache,
- [fun rabbit_alarm:set_alarm/1, fun rabbit_alarm:clear_alarm/1]).
diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl
deleted file mode 100644
index 90be4f80..00000000
--- a/src/rabbit_access_control.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_access_control).
-
--include("rabbit.hrl").
-
--export([check_user_pass_login/2, check_user_login/2,
- check_vhost_access/2, check_resource_access/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([permission_atom/0]).
-
--type(permission_atom() :: 'configure' | 'read' | 'write').
-
--spec(check_user_pass_login/2 ::
- (rabbit_types:username(), rabbit_types:password())
- -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
--spec(check_user_login/2 ::
- (rabbit_types:username(), [{atom(), any()}])
- -> {'ok', rabbit_types:user()} | {'refused', string(), [any()]}).
--spec(check_vhost_access/2 ::
- (rabbit_types:user(), rabbit_types:vhost())
- -> 'ok' | rabbit_types:channel_exit()).
--spec(check_resource_access/3 ::
- (rabbit_types:user(), rabbit_types:r(atom()), permission_atom())
- -> 'ok' | rabbit_types:channel_exit()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-check_user_pass_login(Username, Password) ->
- check_user_login(Username, [{password, Password}]).
-
-check_user_login(Username, AuthProps) ->
- {ok, Modules} = application:get_env(rabbit, auth_backends),
- lists:foldl(
- fun(Module, {refused, _, _}) ->
- case Module:check_user_login(Username, AuthProps) of
- {error, E} ->
- {refused, "~s failed authenticating ~s: ~p~n",
- [Module, Username, E]};
- Else ->
- Else
- end;
- (_, {ok, User}) ->
- {ok, User}
- end, {refused, "No modules checked '~s'", [Username]}, Modules).
-
-check_vhost_access(User = #user{ username = Username,
- auth_backend = Module }, VHostPath) ->
- check_access(
- fun() ->
- %% TODO this could be an andalso shortcut under >R13A
- case rabbit_vhost:exists(VHostPath) of
- false -> false;
- true -> Module:check_vhost_access(User, VHostPath)
- end
- end,
- "~s failed checking vhost access to ~s for ~s: ~p~n",
- [Module, VHostPath, Username],
- "access to vhost '~s' refused for user '~s'",
- [VHostPath, Username]).
-
-check_resource_access(User, R = #resource{kind = exchange, name = <<"">>},
- Permission) ->
- check_resource_access(User, R#resource{name = <<"amq.default">>},
- Permission);
-check_resource_access(User = #user{username = Username, auth_backend = Module},
- Resource, Permission) ->
- check_access(
- fun() -> Module:check_resource_access(User, Resource, Permission) end,
- "~s failed checking resource access to ~p for ~s: ~p~n",
- [Module, Resource, Username],
- "access to ~s refused for user '~s'",
- [rabbit_misc:rs(Resource), Username]).
-
-check_access(Fun, ErrStr, ErrArgs, RefStr, RefArgs) ->
- Allow = case Fun() of
- {error, _} = E ->
- rabbit_log:error(ErrStr, ErrArgs ++ [E]),
- false;
- Else ->
- Else
- end,
- case Allow of
- true ->
- ok;
- false ->
- rabbit_misc:protocol_error(access_refused, RefStr, RefArgs)
- end.
diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl
deleted file mode 100644
index 6607c4f6..00000000
--- a/src/rabbit_alarm.erl
+++ /dev/null
@@ -1,238 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_alarm).
-
--behaviour(gen_event).
-
--export([start_link/0, start/0, stop/0, register/2, set_alarm/1,
- clear_alarm/1, get_alarms/0, on_node_up/1, on_node_down/1]).
-
--export([init/1, handle_call/2, handle_event/2, handle_info/2,
- terminate/2, code_change/3]).
-
--export([remote_conserve_resources/3]). %% Internal use only
-
--define(SERVER, ?MODULE).
-
--record(alarms, {alertees, alarmed_nodes, alarms}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start/0 :: () -> 'ok').
--spec(stop/0 :: () -> 'ok').
--spec(register/2 :: (pid(), rabbit_types:mfargs()) -> boolean()).
--spec(set_alarm/1 :: (any()) -> 'ok').
--spec(clear_alarm/1 :: (any()) -> 'ok').
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_event:start_link({local, ?SERVER}).
-
-start() ->
- ok = rabbit_sup:start_restartable_child(?MODULE),
- ok = gen_event:add_handler(?SERVER, ?MODULE, []),
- {ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark),
- rabbit_sup:start_restartable_child(
- vm_memory_monitor, [MemoryWatermark,
- fun (Alarm) ->
- background_gc:run(),
- set_alarm(Alarm)
- end,
- fun clear_alarm/1]),
- {ok, DiskLimit} = application:get_env(disk_free_limit),
- rabbit_sup:start_restartable_child(rabbit_disk_monitor, [DiskLimit]),
- ok.
-
-stop() -> ok.
-
-register(Pid, AlertMFA) ->
- gen_event:call(?SERVER, ?MODULE, {register, Pid, AlertMFA}, infinity).
-
-set_alarm(Alarm) -> gen_event:notify(?SERVER, {set_alarm, Alarm}).
-clear_alarm(Alarm) -> gen_event:notify(?SERVER, {clear_alarm, Alarm}).
-
-get_alarms() -> gen_event:call(?SERVER, ?MODULE, get_alarms, infinity).
-
-on_node_up(Node) -> gen_event:notify(?SERVER, {node_up, Node}).
-on_node_down(Node) -> gen_event:notify(?SERVER, {node_down, Node}).
-
-remote_conserve_resources(Pid, Source, true) ->
- gen_event:notify({?SERVER, node(Pid)},
- {set_alarm, {{resource_limit, Source, node()}, []}});
-remote_conserve_resources(Pid, Source, false) ->
- gen_event:notify({?SERVER, node(Pid)},
- {clear_alarm, {resource_limit, Source, node()}}).
-
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, #alarms{alertees = dict:new(),
- alarmed_nodes = dict:new(),
- alarms = []}}.
-
-handle_call({register, Pid, AlertMFA}, State) ->
- {ok, 0 < dict:size(State#alarms.alarmed_nodes),
- internal_register(Pid, AlertMFA, State)};
-
-handle_call(get_alarms, State = #alarms{alarms = Alarms}) ->
- {ok, Alarms, State};
-
-handle_call(_Request, State) ->
- {ok, not_understood, State}.
-
-handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
- handle_set_alarm(Alarm, State#alarms{alarms = [Alarm|Alarms]});
-
-handle_event({clear_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
- handle_clear_alarm(Alarm, State#alarms{alarms = lists:keydelete(Alarm, 1,
- Alarms)});
-
-handle_event({node_up, Node}, State) ->
- %% Must do this via notify and not call to avoid possible deadlock.
- ok = gen_event:notify(
- {?SERVER, Node},
- {register, self(), {?MODULE, remote_conserve_resources, []}}),
- {ok, State};
-
-handle_event({node_down, Node}, State) ->
- {ok, maybe_alert(fun dict_unappend_all/3, Node, [], State)};
-
-handle_event({register, Pid, AlertMFA}, State) ->
- {ok, internal_register(Pid, AlertMFA, State)};
-
-handle_event(_Event, State) ->
- {ok, State}.
-
-handle_info({'DOWN', _MRef, process, Pid, _Reason},
- State = #alarms{alertees = Alertees}) ->
- {ok, State#alarms{alertees = dict:erase(Pid, Alertees)}};
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Arg, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-
-dict_unappend_all(Key, _Val, Dict) ->
- dict:erase(Key, Dict).
-
-dict_unappend(Key, Val, Dict) ->
- case lists:delete(Val, dict:fetch(Key, Dict)) of
- [] -> dict:erase(Key, Dict);
- X -> dict:store(Key, X, Dict)
- end.
-
-count_dict_values(Val, Dict) ->
- dict:fold(fun (_Node, List, Count) ->
- Count + case lists:member(Val, List) of
- true -> 1;
- false -> 0
- end
- end, 0, Dict).
-
-maybe_alert(UpdateFun, Node, Source,
- State = #alarms{alarmed_nodes = AN,
- alertees = Alertees}) ->
- AN1 = UpdateFun(Node, Source, AN),
- BeforeSz = count_dict_values(Source, AN),
- AfterSz = count_dict_values(Source, AN1),
-
- %% If we have changed our alarm state, inform the remotes.
- IsLocal = Node =:= node(),
- if IsLocal andalso BeforeSz < AfterSz ->
- ok = alert_remote(true, Alertees, Source);
- IsLocal andalso BeforeSz > AfterSz ->
- ok = alert_remote(false, Alertees, Source);
- true ->
- ok
- end,
- %% If the overall alarm state has changed, inform the locals.
- case {dict:size(AN), dict:size(AN1)} of
- {0, 1} -> ok = alert_local(true, Alertees, Source);
- {1, 0} -> ok = alert_local(false, Alertees, Source);
- {_, _} -> ok
- end,
- State#alarms{alarmed_nodes = AN1}.
-
-alert_local(Alert, Alertees, Source) ->
- alert(Alertees, Source, Alert, fun erlang:'=:='/2).
-
-alert_remote(Alert, Alertees, Source) ->
- alert(Alertees, Source, Alert, fun erlang:'=/='/2).
-
-alert(Alertees, Source, Alert, NodeComparator) ->
- Node = node(),
- dict:fold(fun (Pid, {M, F, A}, ok) ->
- case NodeComparator(Node, node(Pid)) of
- true -> apply(M, F, A ++ [Pid, Source, Alert]);
- false -> ok
- end
- end, ok, Alertees).
-
-internal_register(Pid, {M, F, A} = AlertMFA,
- State = #alarms{alertees = Alertees}) ->
- _MRef = erlang:monitor(process, Pid),
- case dict:find(node(), State#alarms.alarmed_nodes) of
- {ok, Sources} -> [apply(M, F, A ++ [Pid, R, true]) || R <- Sources];
- error -> ok
- end,
- NewAlertees = dict:store(Pid, AlertMFA, Alertees),
- State#alarms{alertees = NewAlertees}.
-
-handle_set_alarm({{resource_limit, Source, Node}, []}, State) ->
- rabbit_log:warning(
- "~s resource limit alarm set on node ~p.~n~n"
- "**********************************************************~n"
- "*** Publishers will be blocked until this alarm clears ***~n"
- "**********************************************************~n",
- [Source, Node]),
- {ok, maybe_alert(fun dict:append/3, Node, Source, State)};
-handle_set_alarm({file_descriptor_limit, []}, State) ->
- rabbit_log:warning(
- "file descriptor limit alarm set.~n~n"
- "********************************************************************~n"
- "*** New connections will not be accepted until this alarm clears ***~n"
- "********************************************************************~n"),
- {ok, State};
-handle_set_alarm(Alarm, State) ->
- rabbit_log:warning("alarm '~p' set~n", [Alarm]),
- {ok, State}.
-
-handle_clear_alarm({resource_limit, Source, Node}, State) ->
- rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
- [Source, Node]),
- {ok, maybe_alert(fun dict_unappend/3, Node, Source, State)};
-handle_clear_alarm(file_descriptor_limit, State) ->
- rabbit_log:warning("file descriptor limit alarm cleared~n"),
- {ok, State};
-handle_clear_alarm(Alarm, State) ->
- rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
- {ok, State}.
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
deleted file mode 100644
index 32feac30..00000000
--- a/src/rabbit_amqqueue.erl
+++ /dev/null
@@ -1,727 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_amqqueue).
-
--export([recover/0, stop/0, start/1, declare/5,
- delete_immediately/1, delete/3, purge/1, forget_all_durable/1]).
--export([pseudo_queue/2]).
--export([lookup/1, not_found_or_absent/1, with/2, with/3, with_or_die/2,
- assert_equivalence/5,
- check_exclusive_access/2, with_exclusive_access_or_die/3,
- stat/1, deliver/2, deliver_flow/2, requeue/3, ack/3, reject/4]).
--export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
--export([force_event_refresh/0, wake_up/1]).
--export([consumers/1, consumers_all/1, consumer_info_keys/0]).
--export([basic_get/4, basic_consume/9, basic_cancel/4]).
--export([notify_sent/2, notify_sent_queue_down/1, resume/2, flush_all/2]).
--export([notify_down_all/2, activate_limit_all/2, credit/5]).
--export([on_node_down/1]).
--export([update/2, store_queue/1, policy_changed/2]).
--export([start_mirroring/1, stop_mirroring/1, sync_mirrors/1,
- cancel_sync_mirrors/1]).
-
-%% internal
--export([internal_declare/2, internal_delete/1, run_backing_queue/3,
- set_ram_duration_target/2, set_maximum_since_use/2]).
-
--include("rabbit.hrl").
--include_lib("stdlib/include/qlc.hrl").
-
--define(INTEGER_ARG_TYPES, [byte, short, signedint, long]).
-
--define(MORE_CONSUMER_CREDIT_AFTER, 50).
-
--define(FAILOVER_WAIT_MILLIS, 100).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([name/0, qmsg/0, routing_result/0]).
-
--type(name() :: rabbit_types:r('queue')).
--type(qpids() :: [pid()]).
--type(qlen() :: rabbit_types:ok(non_neg_integer())).
--type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return())).
--type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}).
--type(msg_id() :: non_neg_integer()).
--type(ok_or_errors() ::
- 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}).
--type(routing_result() :: 'routed' | 'unroutable').
--type(queue_or_absent() :: rabbit_types:amqqueue() |
- {'absent', rabbit_types:amqqueue()}).
--type(not_found_or_absent() :: 'not_found' |
- {'absent', rabbit_types:amqqueue()}).
--spec(recover/0 :: () -> [rabbit_types:amqqueue()]).
--spec(stop/0 :: () -> 'ok').
--spec(start/1 :: ([rabbit_types:amqqueue()]) -> 'ok').
--spec(declare/5 ::
- (name(), boolean(), boolean(),
- rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
- -> {'new' | 'existing' | 'absent' | 'owner_died',
- rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
--spec(internal_declare/2 ::
- (rabbit_types:amqqueue(), boolean())
- -> queue_or_absent() | rabbit_misc:thunk(queue_or_absent())).
--spec(update/2 ::
- (name(),
- fun((rabbit_types:amqqueue()) -> rabbit_types:amqqueue())) -> 'ok').
--spec(lookup/1 ::
- (name()) -> rabbit_types:ok(rabbit_types:amqqueue()) |
- rabbit_types:error('not_found');
- ([name()]) -> [rabbit_types:amqqueue()]).
--spec(not_found_or_absent/1 :: (name()) -> not_found_or_absent()).
--spec(with/2 :: (name(), qfun(A)) ->
- A | rabbit_types:error(not_found_or_absent())).
--spec(with/3 :: (name(), qfun(A), fun((not_found_or_absent()) -> B)) -> A | B).
--spec(with_or_die/2 ::
- (name(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(assert_equivalence/5 ::
- (rabbit_types:amqqueue(), boolean(), boolean(),
- rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
- -> 'ok' | rabbit_types:channel_exit() |
- rabbit_types:connection_exit()).
--spec(check_exclusive_access/2 ::
- (rabbit_types:amqqueue(), pid())
- -> 'ok' | rabbit_types:channel_exit()).
--spec(with_exclusive_access_or_die/3 ::
- (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(list/0 :: () -> [rabbit_types:amqqueue()]).
--spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
--spec(info/2 ::
- (rabbit_types:amqqueue(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
- -> [rabbit_types:infos()]).
--spec(force_event_refresh/0 :: () -> 'ok').
--spec(wake_up/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(consumers/1 ::
- (rabbit_types:amqqueue())
- -> [{pid(), rabbit_types:ctag(), boolean()}]).
--spec(consumer_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(consumers_all/1 ::
- (rabbit_types:vhost())
- -> [{name(), pid(), rabbit_types:ctag(), boolean()}]).
--spec(stat/1 ::
- (rabbit_types:amqqueue())
- -> {'ok', non_neg_integer(), non_neg_integer()}).
--spec(delete_immediately/1 :: (qpids()) -> 'ok').
--spec(delete/3 ::
- (rabbit_types:amqqueue(), 'false', 'false')
- -> qlen();
- (rabbit_types:amqqueue(), 'true' , 'false')
- -> qlen() | rabbit_types:error('in_use');
- (rabbit_types:amqqueue(), 'false', 'true' )
- -> qlen() | rabbit_types:error('not_empty');
- (rabbit_types:amqqueue(), 'true' , 'true' )
- -> qlen() |
- rabbit_types:error('in_use') |
- rabbit_types:error('not_empty')).
--spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()).
--spec(forget_all_durable/1 :: (node()) -> 'ok').
--spec(deliver/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
- {routing_result(), qpids()}).
--spec(deliver_flow/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
- {routing_result(), qpids()}).
--spec(requeue/3 :: (pid(), [msg_id()], pid()) -> 'ok').
--spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
--spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
--spec(notify_down_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(activate_limit_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(basic_get/4 :: (rabbit_types:amqqueue(), pid(), boolean(), pid()) ->
- {'ok', non_neg_integer(), qmsg()} | 'empty').
--spec(credit/5 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(),
- non_neg_integer(), boolean()) -> 'ok').
--spec(basic_consume/9 ::
- (rabbit_types:amqqueue(), boolean(), pid(), pid(), boolean(),
- rabbit_types:ctag(), boolean(), {non_neg_integer(), boolean()} | 'none', any())
- -> rabbit_types:ok_or_error('exclusive_consume_unavailable')).
--spec(basic_cancel/4 ::
- (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok').
--spec(notify_sent/2 :: (pid(), pid()) -> 'ok').
--spec(notify_sent_queue_down/1 :: (pid()) -> 'ok').
--spec(resume/2 :: (pid(), pid()) -> 'ok').
--spec(flush_all/2 :: (qpids(), pid()) -> 'ok').
--spec(internal_delete/1 ::
- (name()) -> rabbit_types:ok_or_error('not_found') |
- rabbit_types:connection_exit() |
- fun (() -> rabbit_types:ok_or_error('not_found') |
- rabbit_types:connection_exit())).
--spec(run_backing_queue/3 ::
- (pid(), atom(),
- (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
--spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
--spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(policy_changed/2 ::
- (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
--spec(start_mirroring/1 :: (pid()) -> 'ok').
--spec(stop_mirroring/1 :: (pid()) -> 'ok').
--spec(sync_mirrors/1 :: (pid()) -> 'ok' | rabbit_types:error('not_mirrored')).
--spec(cancel_sync_mirrors/1 :: (pid()) -> 'ok' | {'ok', 'not_syncing'}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(CONSUMER_INFO_KEYS,
- [queue_name, channel_pid, consumer_tag, ack_required]).
-
-recover() ->
- %% Clear out remnants of old incarnation, in case we restarted
- %% faster than other nodes handled DOWN messages from us.
- on_node_down(node()),
- DurableQueues = find_durable_queues(),
- {ok, BQ} = application:get_env(rabbit, backing_queue_module),
- ok = BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]),
- {ok,_} = supervisor:start_child(
- rabbit_sup,
- {rabbit_amqqueue_sup,
- {rabbit_amqqueue_sup, start_link, []},
- transient, infinity, supervisor, [rabbit_amqqueue_sup]}),
- recover_durable_queues(DurableQueues).
-
-stop() ->
- ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup),
- ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup),
- {ok, BQ} = application:get_env(rabbit, backing_queue_module),
- ok = BQ:stop().
-
-start(Qs) ->
- %% At this point all recovered queues and their bindings are
- %% visible to routing, so now it is safe for them to complete
- %% their initialisation (which may involve interacting with other
- %% queues).
- [Pid ! {self(), go} || #amqqueue{pid = Pid} <- Qs],
- ok.
-
-find_durable_queues() ->
- Node = node(),
- %% TODO: use dirty ops instead
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- qlc:e(qlc:q([Q || Q = #amqqueue{name = Name,
- pid = Pid}
- <- mnesia:table(rabbit_durable_queue),
- mnesia:read(rabbit_queue, Name, read) =:= [],
- node(Pid) == Node]))
- end).
-
-recover_durable_queues(DurableQueues) ->
- Qs = [start_queue_process(node(), Q) || Q <- DurableQueues],
- [Q || Q = #amqqueue{pid = Pid} <- Qs,
- gen_server2:call(Pid, {init, self()}, infinity) == {new, Q}].
-
-declare(QueueName, Durable, AutoDelete, Args, Owner) ->
- ok = check_declare_arguments(QueueName, Args),
- Q0 = rabbit_policy:set(#amqqueue{name = QueueName,
- durable = Durable,
- auto_delete = AutoDelete,
- arguments = Args,
- exclusive_owner = Owner,
- pid = none,
- slave_pids = [],
- sync_slave_pids = [],
- gm_pids = []}),
- {Node, _MNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
- Q1 = start_queue_process(Node, Q0),
- gen_server2:call(Q1#amqqueue.pid, {init, new}, infinity).
-
-internal_declare(Q, true) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () -> ok = store_queue(Q), rabbit_misc:const(Q) end);
-internal_declare(Q = #amqqueue{name = QueueName}, false) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- case mnesia:wread({rabbit_queue, QueueName}) of
- [] ->
- case not_found_or_absent(QueueName) of
- not_found -> Q1 = rabbit_policy:set(Q),
- ok = store_queue(Q1),
- B = add_default_binding(Q1),
- fun () -> B(), Q1 end;
- {absent, _Q} = R -> rabbit_misc:const(R)
- end;
- [ExistingQ = #amqqueue{pid = QPid}] ->
- case rabbit_misc:is_process_alive(QPid) of
- true -> rabbit_misc:const(ExistingQ);
- false -> TailFun = internal_delete(QueueName),
- fun () -> TailFun(), ExistingQ end
- end
- end
- end).
-
-update(Name, Fun) ->
- case mnesia:wread({rabbit_queue, Name}) of
- [Q = #amqqueue{durable = Durable}] ->
- Q1 = Fun(Q),
- ok = mnesia:write(rabbit_queue, Q1, write),
- case Durable of
- true -> ok = mnesia:write(rabbit_durable_queue, Q1, write);
- _ -> ok
- end;
- [] ->
- ok
- end.
-
-store_queue(Q = #amqqueue{durable = true}) ->
- ok = mnesia:write(rabbit_durable_queue,
- Q#amqqueue{slave_pids = [],
- sync_slave_pids = [],
- gm_pids = []}, write),
- ok = mnesia:write(rabbit_queue, Q, write),
- ok;
-store_queue(Q = #amqqueue{durable = false}) ->
- ok = mnesia:write(rabbit_queue, Q, write),
- ok.
-
-policy_changed(Q1, Q2) ->
- rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
- %% Make sure we emit a stats event even if nothing
- %% mirroring-related has changed - the policy may have changed anyway.
- wake_up(Q1).
-
-start_queue_process(Node, Q) ->
- {ok, Pid} = rabbit_amqqueue_sup:start_child(Node, [Q]),
- Q#amqqueue{pid = Pid}.
-
-add_default_binding(#amqqueue{name = QueueName}) ->
- ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>),
- RoutingKey = QueueName#resource.name,
- rabbit_binding:add(#binding{source = ExchangeName,
- destination = QueueName,
- key = RoutingKey,
- args = []}).
-
-lookup([]) -> []; %% optimisation
-lookup([Name]) -> ets:lookup(rabbit_queue, Name); %% optimisation
-lookup(Names) when is_list(Names) ->
- %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
- %% expensive for reasons explained in rabbit_misc:dirty_read/1.
- lists:append([ets:lookup(rabbit_queue, Name) || Name <- Names]);
-lookup(Name) ->
- rabbit_misc:dirty_read({rabbit_queue, Name}).
-
-not_found_or_absent(Name) ->
- %% NB: we assume that the caller has already performed a lookup on
- %% rabbit_queue and not found anything
- case mnesia:read({rabbit_durable_queue, Name}) of
- [] -> not_found;
- [Q] -> {absent, Q} %% Q exists on stopped node
- end.
-
-not_found_or_absent_dirty(Name) ->
- %% We should read from both tables inside a tx, to get a
- %% consistent view. But the chances of an inconsistency are small,
- %% and only affect the error kind.
- case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
- {error, not_found} -> not_found;
- {ok, Q} -> {absent, Q}
- end.
-
-with(Name, F, E) ->
- case lookup(Name) of
- {ok, Q = #amqqueue{pid = QPid}} ->
- %% We check is_process_alive(QPid) in case we receive a
- %% nodedown (for example) in F() that has nothing to do
- %% with the QPid.
- rabbit_misc:with_exit_handler(
- fun () ->
- case rabbit_misc:is_process_alive(QPid) of
- true -> E(not_found_or_absent_dirty(Name));
- false -> timer:sleep(25),
- with(Name, F, E)
- end
- end, fun () -> F(Q) end);
- {error, not_found} ->
- E(not_found_or_absent_dirty(Name))
- end.
-
-with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).
-
-with_or_die(Name, F) ->
- with(Name, F, fun (not_found) -> rabbit_misc:not_found(Name);
- ({absent, Q}) -> rabbit_misc:absent(Q)
- end).
-
-assert_equivalence(#amqqueue{durable = Durable,
- auto_delete = AutoDelete} = Q,
- Durable, AutoDelete, RequiredArgs, Owner) ->
- assert_args_equivalence(Q, RequiredArgs),
- check_exclusive_access(Q, Owner, strict);
-assert_equivalence(#amqqueue{name = QueueName},
- _Durable, _AutoDelete, _RequiredArgs, _Owner) ->
- rabbit_misc:protocol_error(
- precondition_failed, "parameters for ~s not equivalent",
- [rabbit_misc:rs(QueueName)]).
-
-check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).
-
-check_exclusive_access(#amqqueue{exclusive_owner = Owner}, Owner, _MatchType) ->
- ok;
-check_exclusive_access(#amqqueue{exclusive_owner = none}, _ReaderPid, lax) ->
- ok;
-check_exclusive_access(#amqqueue{name = QueueName}, _ReaderPid, _MatchType) ->
- rabbit_misc:protocol_error(
- resource_locked,
- "cannot obtain exclusive access to locked ~s",
- [rabbit_misc:rs(QueueName)]).
-
-with_exclusive_access_or_die(Name, ReaderPid, F) ->
- with_or_die(Name,
- fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end).
-
-assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args},
- RequiredArgs) ->
- rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName,
- [Key || {Key, _Fun} <- args()]).
-
-check_declare_arguments(QueueName, Args) ->
- [case rabbit_misc:table_lookup(Args, Key) of
- undefined -> ok;
- TypeVal -> case Fun(TypeVal, Args) of
- ok -> ok;
- {error, Error} -> rabbit_misc:protocol_error(
- precondition_failed,
- "invalid arg '~s' for ~s: ~255p",
- [Key, rabbit_misc:rs(QueueName),
- Error])
- end
- end || {Key, Fun} <- args()],
- ok.
-
-args() ->
- [{<<"x-expires">>, fun check_expires_arg/2},
- {<<"x-message-ttl">>, fun check_message_ttl_arg/2},
- {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
- {<<"x-max-length">>, fun check_max_length_arg/2}].
-
-check_int_arg({Type, _}, _) ->
- case lists:member(Type, ?INTEGER_ARG_TYPES) of
- true -> ok;
- false -> {error, {unacceptable_type, Type}}
- end.
-
-check_max_length_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok when Val >= 0 -> ok;
- ok -> {error, {value_negative, Val}};
- Error -> Error
- end.
-
-check_expires_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok when Val == 0 -> {error, {value_zero, Val}};
- ok -> rabbit_misc:check_expiry(Val);
- Error -> Error
- end.
-
-check_message_ttl_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok -> rabbit_misc:check_expiry(Val);
- Error -> Error
- end.
-
-check_dlxrk_arg({longstr, _}, Args) ->
- case rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>) of
- undefined -> {error, routing_key_but_no_dlx_defined};
- _ -> ok
- end;
-check_dlxrk_arg({Type, _}, _Args) ->
- {error, {unacceptable_type, Type}}.
-
-list() -> mnesia:dirty_match_object(rabbit_queue, #amqqueue{_ = '_'}).
-
-list(VHostPath) ->
- mnesia:dirty_match_object(
- rabbit_queue,
- #amqqueue{name = rabbit_misc:r(VHostPath, queue), _ = '_'}).
-
-info_keys() -> rabbit_amqqueue_process:info_keys().
-
-map(VHostPath, F) -> rabbit_misc:filter_exit_map(F, list(VHostPath)).
-
-info(#amqqueue{ pid = QPid }) -> delegate:call(QPid, info).
-
-info(#amqqueue{ pid = QPid }, Items) ->
- case delegate:call(QPid, {info, Items}) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-info_all(VHostPath) -> map(VHostPath, fun (Q) -> info(Q) end).
-
-info_all(VHostPath, Items) -> map(VHostPath, fun (Q) -> info(Q, Items) end).
-
-%% We need to account for the idea that queues may be mid-promotion
-%% during force_event_refresh (since it's likely we're doing this in
-%% the first place since a node failed). Therefore we keep poking at
-%% the list of queues until we were able to talk to a live process or
-%% the queue no longer exists.
-force_event_refresh() -> force_event_refresh([Q#amqqueue.name || Q <- list()]).
-
-force_event_refresh(QNames) ->
- Qs = [Q || Q <- list(), lists:member(Q#amqqueue.name, QNames)],
- {_, Bad} = rabbit_misc:multi_call(
- [Q#amqqueue.pid || Q <- Qs], force_event_refresh),
- FailedPids = [Pid || {Pid, _Reason} <- Bad],
- Failed = [Name || #amqqueue{name = Name, pid = Pid} <- Qs,
- lists:member(Pid, FailedPids)],
- case Failed of
- [] -> ok;
- _ -> timer:sleep(?FAILOVER_WAIT_MILLIS),
- force_event_refresh(Failed)
- end.
-
-wake_up(#amqqueue{pid = QPid}) -> gen_server2:cast(QPid, wake_up).
-
-consumers(#amqqueue{ pid = QPid }) -> delegate:call(QPid, consumers).
-
-consumer_info_keys() -> ?CONSUMER_INFO_KEYS.
-
-consumers_all(VHostPath) ->
- ConsumerInfoKeys=consumer_info_keys(),
- lists:append(
- map(VHostPath,
- fun (Q) ->
- [lists:zip(ConsumerInfoKeys,
- [Q#amqqueue.name, ChPid, ConsumerTag, AckRequired]) ||
- {ChPid, ConsumerTag, AckRequired} <- consumers(Q)]
- end)).
-
-stat(#amqqueue{pid = QPid}) -> delegate:call(QPid, stat).
-
-delete_immediately(QPids) ->
- [gen_server2:cast(QPid, delete_immediately) || QPid <- QPids],
- ok.
-
-delete(#amqqueue{ pid = QPid }, IfUnused, IfEmpty) ->
- delegate:call(QPid, {delete, IfUnused, IfEmpty}).
-
-purge(#amqqueue{ pid = QPid }) -> delegate:call(QPid, purge).
-
-deliver(Qs, Delivery) -> deliver(Qs, Delivery, noflow).
-
-deliver_flow(Qs, Delivery) -> deliver(Qs, Delivery, flow).
-
-requeue(QPid, MsgIds, ChPid) -> delegate:call(QPid, {requeue, MsgIds, ChPid}).
-
-ack(QPid, MsgIds, ChPid) -> delegate:cast(QPid, {ack, MsgIds, ChPid}).
-
-reject(QPid, MsgIds, Requeue, ChPid) ->
- delegate:cast(QPid, {reject, MsgIds, Requeue, ChPid}).
-
-notify_down_all(QPids, ChPid) ->
- {_, Bads} = delegate:call(QPids, {notify_down, ChPid}),
- case lists:filter(
- fun ({_Pid, {exit, {R, _}, _}}) -> rabbit_misc:is_abnormal_exit(R);
- ({_Pid, _}) -> false
- end, Bads) of
- [] -> ok;
- Bads1 -> {error, Bads1}
- end.
-
-activate_limit_all(QPids, ChPid) ->
- delegate:cast(QPids, {activate_limit, ChPid}).
-
-credit(#amqqueue{pid = QPid}, ChPid, CTag, Credit, Drain) ->
- delegate:cast(QPid, {credit, ChPid, CTag, Credit, Drain}).
-
-basic_get(#amqqueue{pid = QPid}, ChPid, NoAck, LimiterPid) ->
- delegate:call(QPid, {basic_get, ChPid, NoAck, LimiterPid}).
-
-basic_consume(#amqqueue{pid = QPid}, NoAck, ChPid, LimiterPid, LimiterActive,
- ConsumerTag, ExclusiveConsume, CreditArgs, OkMsg) ->
- delegate:call(QPid, {basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
- ConsumerTag, ExclusiveConsume, CreditArgs, OkMsg}).
-
-basic_cancel(#amqqueue{pid = QPid}, ChPid, ConsumerTag, OkMsg) ->
- delegate:call(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}).
-
-notify_sent(QPid, ChPid) ->
- Key = {consumer_credit_to, QPid},
- put(Key, case get(Key) of
- 1 -> gen_server2:cast(
- QPid, {notify_sent, ChPid,
- ?MORE_CONSUMER_CREDIT_AFTER}),
- ?MORE_CONSUMER_CREDIT_AFTER;
- undefined -> erlang:monitor(process, QPid),
- ?MORE_CONSUMER_CREDIT_AFTER - 1;
- C -> C - 1
- end),
- ok.
-
-notify_sent_queue_down(QPid) ->
- erase({consumer_credit_to, QPid}),
- ok.
-
-resume(QPid, ChPid) -> delegate:cast(QPid, {resume, ChPid}).
-
-flush_all(QPids, ChPid) -> delegate:cast(QPids, {flush, ChPid}).
-
-internal_delete1(QueueName) ->
- ok = mnesia:delete({rabbit_queue, QueueName}),
- %% this 'guarded' delete prevents unnecessary writes to the mnesia
- %% disk log
- case mnesia:wread({rabbit_durable_queue, QueueName}) of
- [] -> ok;
- [_] -> ok = mnesia:delete({rabbit_durable_queue, QueueName})
- end,
- %% we want to execute some things, as decided by rabbit_exchange,
- %% after the transaction.
- rabbit_binding:remove_for_destination(QueueName).
-
-internal_delete(QueueName) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- case {mnesia:wread({rabbit_queue, QueueName}),
- mnesia:wread({rabbit_durable_queue, QueueName})} of
- {[], []} ->
- rabbit_misc:const({error, not_found});
- _ ->
- Deletions = internal_delete1(QueueName),
- T = rabbit_binding:process_deletions(Deletions),
- fun() ->
- ok = T(),
- ok = rabbit_event:notify(queue_deleted,
- [{name, QueueName}])
- end
- end
- end).
-
-forget_all_durable(Node) ->
- %% Note rabbit is not running so we avoid e.g. the worker pool. Also why
- %% we don't invoke the return from rabbit_binding:process_deletions/1.
- {atomic, ok} =
- mnesia:sync_transaction(
- fun () ->
- Qs = mnesia:match_object(rabbit_durable_queue,
- #amqqueue{_ = '_'}, write),
- [rabbit_binding:process_deletions(
- internal_delete1(Name)) ||
- #amqqueue{name = Name, pid = Pid} = Q <- Qs,
- node(Pid) =:= Node,
- rabbit_policy:get(<<"ha-mode">>, Q)
- =:= {error, not_found}],
- ok
- end),
- ok.
-
-run_backing_queue(QPid, Mod, Fun) ->
- gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
-
-set_ram_duration_target(QPid, Duration) ->
- gen_server2:cast(QPid, {set_ram_duration_target, Duration}).
-
-set_maximum_since_use(QPid, Age) ->
- gen_server2:cast(QPid, {set_maximum_since_use, Age}).
-
-start_mirroring(QPid) -> ok = delegate:cast(QPid, start_mirroring).
-stop_mirroring(QPid) -> ok = delegate:cast(QPid, stop_mirroring).
-
-sync_mirrors(QPid) -> delegate:call(QPid, sync_mirrors).
-cancel_sync_mirrors(QPid) -> delegate:call(QPid, cancel_sync_mirrors).
-
-on_node_down(Node) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () -> QsDels =
- qlc:e(qlc:q([{QName, delete_queue(QName)} ||
- #amqqueue{name = QName, pid = Pid,
- slave_pids = []}
- <- mnesia:table(rabbit_queue),
- node(Pid) == Node andalso
- not rabbit_misc:is_process_alive(Pid)])),
- {Qs, Dels} = lists:unzip(QsDels),
- T = rabbit_binding:process_deletions(
- lists:foldl(fun rabbit_binding:combine_deletions/2,
- rabbit_binding:new_deletions(), Dels)),
- fun () ->
- T(),
- lists:foreach(
- fun(QName) ->
- ok = rabbit_event:notify(queue_deleted,
- [{name, QName}])
- end, Qs)
- end
- end).
-
-delete_queue(QueueName) ->
- ok = mnesia:delete({rabbit_queue, QueueName}),
- rabbit_binding:remove_transient_for_destination(QueueName).
-
-pseudo_queue(QueueName, Pid) ->
- #amqqueue{name = QueueName,
- durable = false,
- auto_delete = false,
- arguments = [],
- pid = Pid,
- slave_pids = []}.
-
-deliver([], #delivery{mandatory = false}, _Flow) ->
- %% /dev/null optimisation
- {routed, []};
-
-deliver(Qs, Delivery = #delivery{mandatory = false}, Flow) ->
- %% optimisation: when Mandatory = false, rabbit_amqqueue:deliver
- %% will deliver the message to the queue process asynchronously,
- %% and return true, which means all the QPids will always be
- %% returned. It is therefore safe to use a fire-and-forget cast
- %% here and return the QPids - the semantics is preserved. This
- %% scales much better than the case below.
- {MPids, SPids} = qpids(Qs),
- QPids = MPids ++ SPids,
- case Flow of
- flow -> [credit_flow:send(QPid) || QPid <- QPids];
- noflow -> ok
- end,
-
- %% We let slaves know that they were being addressed as slaves at
- %% the time - if they receive such a message from the channel
- %% after they have become master they should mark the message as
- %% 'delivered' since they do not know what the master may have
- %% done with it.
- MMsg = {deliver, Delivery, false, Flow},
- SMsg = {deliver, Delivery, true, Flow},
- delegate:cast(MPids, MMsg),
- delegate:cast(SPids, SMsg),
- {routed, QPids};
-
-deliver(Qs, Delivery, _Flow) ->
- {MPids, SPids} = qpids(Qs),
- %% see comment above
- MMsg = {deliver, Delivery, false},
- SMsg = {deliver, Delivery, true},
- {MRouted, _} = delegate:call(MPids, MMsg),
- {SRouted, _} = delegate:call(SPids, SMsg),
- case MRouted ++ SRouted of
- [] -> {unroutable, []};
- R -> {routed, [QPid || {QPid, ok} <- R]}
- end.
-
-qpids([]) -> {[], []}; %% optimisation
-qpids([#amqqueue{pid = QPid, slave_pids = SPids}]) -> {[QPid], SPids}; %% opt
-qpids(Qs) ->
- {MPids, SPids} = lists:foldl(fun (#amqqueue{pid = QPid, slave_pids = SPids},
- {MPidAcc, SPidAcc}) ->
- {[QPid | MPidAcc], [SPids | SPidAcc]}
- end, {[], []}, Qs),
- {MPids, lists:append(SPids)}.
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
deleted file mode 100644
index add75d89..00000000
--- a/src/rabbit_amqqueue_process.erl
+++ /dev/null
@@ -1,1462 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_amqqueue_process).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--behaviour(gen_server2).
-
--define(UNSENT_MESSAGE_LIMIT, 200).
--define(SYNC_INTERVAL, 25). %% milliseconds
--define(RAM_DURATION_UPDATE_INTERVAL, 5000).
-
--export([start_link/1, info_keys/0]).
-
--export([init_with_backing_queue_state/7]).
-
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
- handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
- prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
-
-%% Queue's state
--record(q, {q,
- exclusive_consumer,
- has_had_consumers,
- backing_queue,
- backing_queue_state,
- active_consumers,
- expires,
- sync_timer_ref,
- rate_timer_ref,
- expiry_timer_ref,
- stats_timer,
- msg_id_to_channel,
- ttl,
- ttl_timer_ref,
- ttl_timer_expiry,
- senders,
- dlx,
- dlx_routing_key,
- max_length,
- status
- }).
-
--record(consumer, {tag, ack_required}).
-
-%% These are held in our process dictionary
--record(cr, {ch_pid,
- monitor_ref,
- acktags,
- consumer_count,
- %% Queue of {ChPid, #consumer{}} for consumers which have
- %% been blocked for any reason
- blocked_consumers,
- %% The limiter itself
- limiter,
- %% Internal flow control for queue -> writer
- unsent_message_count}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 ::
- (rabbit_types:amqqueue()) -> rabbit_types:ok_pid_or_error()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(init_with_backing_queue_state/7 ::
- (rabbit_types:amqqueue(), atom(), tuple(), any(),
- [rabbit_types:delivery()], pmon:pmon(), dict()) -> #q{}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(STATISTICS_KEYS,
- [name,
- policy,
- exclusive_consumer_pid,
- exclusive_consumer_tag,
- messages_ready,
- messages_unacknowledged,
- messages,
- consumers,
- memory,
- slave_pids,
- synchronised_slave_pids,
- backing_queue_status,
- status
- ]).
-
--define(CREATION_EVENT_KEYS,
- [name,
- durable,
- auto_delete,
- arguments,
- owner_pid
- ]).
-
--define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name]]).
-
-%%----------------------------------------------------------------------------
-
-start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).
-
-info_keys() -> ?INFO_KEYS.
-
-%%----------------------------------------------------------------------------
-
-init(Q) ->
- process_flag(trap_exit, true),
- {ok, init_state(Q#amqqueue{pid = self()}), hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
- RateTRef, Deliveries, Senders, MTC) ->
- case Owner of
- none -> ok;
- _ -> erlang:monitor(process, Owner)
- end,
- State = init_state(Q),
- State1 = State#q{backing_queue = BQ,
- backing_queue_state = BQS,
- rate_timer_ref = RateTRef,
- senders = Senders,
- msg_id_to_channel = MTC},
- State2 = process_args(State1),
- lists:foldl(fun (Delivery, StateN) ->
- deliver_or_enqueue(Delivery, true, StateN)
- end, State2, Deliveries).
-
-init_state(Q) ->
- State = #q{q = Q,
- exclusive_consumer = none,
- has_had_consumers = false,
- active_consumers = queue:new(),
- senders = pmon:new(),
- msg_id_to_channel = gb_trees:empty(),
- status = running},
- rabbit_event:init_stats_timer(State, #q.stats_timer).
-
-terminate(shutdown = R, State = #q{backing_queue = BQ}) ->
- terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
-terminate({shutdown, missing_owner} = Reason, State) ->
- %% if the owner was missing then there will be no queue, so don't emit stats
- terminate_shutdown(terminate_delete(false, Reason, State), State);
-terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) ->
- terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
-terminate(Reason, State) ->
- terminate_shutdown(terminate_delete(true, Reason, State), State).
-
-terminate_delete(EmitStats, Reason,
- State = #q{q = #amqqueue{name = QName},
- backing_queue = BQ}) ->
- fun (BQS) ->
- BQS1 = BQ:delete_and_terminate(Reason, BQS),
- if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer,
- fun() -> emit_stats(State) end);
- true -> ok
- end,
- %% don't care if the internal delete doesn't return 'ok'.
- rabbit_amqqueue:internal_delete(QName),
- BQS1
- end.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-
-declare(Recover, From, State = #q{q = Q,
- backing_queue = undefined,
- backing_queue_state = undefined}) ->
- case rabbit_amqqueue:internal_declare(Q, Recover =/= new) of
- #amqqueue{} = Q1 ->
- case matches(Recover, Q, Q1) of
- true ->
- gen_server2:reply(From, {new, Q}),
- ok = file_handle_cache:register_callback(
- rabbit_amqqueue, set_maximum_since_use, [self()]),
- ok = rabbit_memory_monitor:register(
- self(), {rabbit_amqqueue,
- set_ram_duration_target, [self()]}),
- BQ = backing_queue_module(Q1),
- BQS = bq_init(BQ, Q, Recover),
- recovery_barrier(Recover),
- State1 = process_args(State#q{backing_queue = BQ,
- backing_queue_state = BQS}),
- rabbit_event:notify(queue_created,
- infos(?CREATION_EVENT_KEYS, State1)),
- rabbit_event:if_enabled(State1, #q.stats_timer,
- fun() -> emit_stats(State1) end),
- noreply(State1);
- false ->
- {stop, normal, {existing, Q1}, State}
- end;
- Err ->
- {stop, normal, Err, State}
- end.
-
-matches(new, Q1, Q2) ->
- %% i.e. not policy
- Q1#amqqueue.name =:= Q2#amqqueue.name andalso
- Q1#amqqueue.durable =:= Q2#amqqueue.durable andalso
- Q1#amqqueue.auto_delete =:= Q2#amqqueue.auto_delete andalso
- Q1#amqqueue.exclusive_owner =:= Q2#amqqueue.exclusive_owner andalso
- Q1#amqqueue.arguments =:= Q2#amqqueue.arguments andalso
- Q1#amqqueue.pid =:= Q2#amqqueue.pid andalso
- Q1#amqqueue.slave_pids =:= Q2#amqqueue.slave_pids;
-matches(_, Q, Q) -> true;
-matches(_, _Q, _Q1) -> false.
-
-bq_init(BQ, Q, Recover) ->
- Self = self(),
- BQ:init(Q, Recover =/= new,
- fun (Mod, Fun) ->
- rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
- end).
-
-recovery_barrier(new) ->
- ok;
-recovery_barrier(BarrierPid) ->
- MRef = erlang:monitor(process, BarrierPid),
- receive
- {BarrierPid, go} -> erlang:demonitor(MRef, [flush]);
- {'DOWN', MRef, process, _, _} -> ok
- end.
-
-process_args(State = #q{q = #amqqueue{arguments = Arguments}}) ->
- lists:foldl(
- fun({Arg, Fun}, State1) ->
- case rabbit_misc:table_lookup(Arguments, Arg) of
- {_Type, Val} -> Fun(Val, State1);
- undefined -> State1
- end
- end, State,
- [{<<"x-expires">>, fun init_expires/2},
- {<<"x-dead-letter-exchange">>, fun init_dlx/2},
- {<<"x-dead-letter-routing-key">>, fun init_dlx_routing_key/2},
- {<<"x-message-ttl">>, fun init_ttl/2},
- {<<"x-max-length">>, fun init_max_length/2}]).
-
-init_expires(Expires, State) -> ensure_expiry_timer(State#q{expires = Expires}).
-
-init_ttl(TTL, State) -> drop_expired_msgs(State#q{ttl = TTL}).
-
-init_dlx(DLX, State = #q{q = #amqqueue{name = QName}}) ->
- State#q{dlx = rabbit_misc:r(QName, exchange, DLX)}.
-
-init_dlx_routing_key(RoutingKey, State) ->
- State#q{dlx_routing_key = RoutingKey}.
-
-init_max_length(MaxLen, State) -> State#q{max_length = MaxLen}.
-
-terminate_shutdown(Fun, State) ->
- State1 = #q{backing_queue_state = BQS} =
- lists:foldl(fun (F, S) -> F(S) end, State,
- [fun stop_sync_timer/1,
- fun stop_rate_timer/1,
- fun stop_expiry_timer/1,
- fun stop_ttl_timer/1]),
- case BQS of
- undefined -> State1;
- _ -> ok = rabbit_memory_monitor:deregister(self()),
- QName = qname(State),
- [emit_consumer_deleted(Ch, CTag, QName)
- || {Ch, CTag, _} <- consumers(State1)],
- State1#q{backing_queue_state = Fun(BQS)}
- end.
-
-reply(Reply, NewState) ->
- {NewState1, Timeout} = next_state(NewState),
- {reply, Reply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
-
-noreply(NewState) ->
- {NewState1, Timeout} = next_state(NewState),
- {noreply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
-
-next_state(State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
- assert_invariant(State),
- {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
- State1 = confirm_messages(MsgIds, State#q{backing_queue_state = BQS1}),
- case BQ:needs_timeout(BQS1) of
- false -> {stop_sync_timer(State1), hibernate };
- idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
- timed -> {ensure_sync_timer(State1), 0 }
- end.
-
-backing_queue_module(Q) ->
- case rabbit_mirror_queue_misc:is_mirrored(Q) of
- false -> {ok, BQM} = application:get_env(backing_queue_module),
- BQM;
- true -> rabbit_mirror_queue_master
- end.
-
-ensure_sync_timer(State) ->
- rabbit_misc:ensure_timer(State, #q.sync_timer_ref,
- ?SYNC_INTERVAL, sync_timeout).
-
-stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #q.sync_timer_ref).
-
-ensure_rate_timer(State) ->
- rabbit_misc:ensure_timer(State, #q.rate_timer_ref,
- ?RAM_DURATION_UPDATE_INTERVAL,
- update_ram_duration).
-
-stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #q.rate_timer_ref).
-
-%% We wish to expire only when there are no consumers *and* the expiry
-%% hasn't been refreshed (by queue.declare or basic.get) for the
-%% configured period.
-ensure_expiry_timer(State = #q{expires = undefined}) ->
- State;
-ensure_expiry_timer(State = #q{expires = Expires}) ->
- case is_unused(State) of
- true -> NewState = stop_expiry_timer(State),
- rabbit_misc:ensure_timer(NewState, #q.expiry_timer_ref,
- Expires, maybe_expire);
- false -> State
- end.
-
-stop_expiry_timer(State) -> rabbit_misc:stop_timer(State, #q.expiry_timer_ref).
-
-ensure_ttl_timer(undefined, State) ->
- State;
-ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined}) ->
- After = (case Expiry - now_micros() of
- V when V > 0 -> V + 999; %% always fire later
- _ -> 0
- end) div 1000,
- TRef = erlang:send_after(After, self(), drop_expired),
- State#q{ttl_timer_ref = TRef, ttl_timer_expiry = Expiry};
-ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = TRef,
- ttl_timer_expiry = TExpiry})
- when Expiry + 1000 < TExpiry ->
- case erlang:cancel_timer(TRef) of
- false -> State;
- _ -> ensure_ttl_timer(Expiry, State#q{ttl_timer_ref = undefined})
- end;
-ensure_ttl_timer(_Expiry, State) ->
- State.
-
-stop_ttl_timer(State) -> rabbit_misc:stop_timer(State, #q.ttl_timer_ref).
-
-ensure_stats_timer(State) ->
- rabbit_event:ensure_stats_timer(State, #q.stats_timer, emit_stats).
-
-assert_invariant(State = #q{active_consumers = AC}) ->
- true = (queue:is_empty(AC) orelse is_empty(State)).
-
-is_empty(#q{backing_queue = BQ, backing_queue_state = BQS}) -> BQ:is_empty(BQS).
-
-lookup_ch(ChPid) ->
- case get({ch, ChPid}) of
- undefined -> not_found;
- C -> C
- end.
-
-ch_record(ChPid, LimiterPid) ->
- Key = {ch, ChPid},
- case get(Key) of
- undefined -> MonitorRef = erlang:monitor(process, ChPid),
- Limiter = rabbit_limiter:client(LimiterPid),
- C = #cr{ch_pid = ChPid,
- monitor_ref = MonitorRef,
- acktags = queue:new(),
- consumer_count = 0,
- blocked_consumers = queue:new(),
- limiter = Limiter,
- unsent_message_count = 0},
- put(Key, C),
- C;
- C = #cr{} -> C
- end.
-
-update_ch_record(C = #cr{consumer_count = ConsumerCount,
- acktags = ChAckTags,
- unsent_message_count = UnsentMessageCount}) ->
- case {queue:is_empty(ChAckTags), ConsumerCount, UnsentMessageCount} of
- {true, 0, 0} -> ok = erase_ch_record(C);
- _ -> ok = store_ch_record(C)
- end,
- C.
-
-store_ch_record(C = #cr{ch_pid = ChPid}) ->
- put({ch, ChPid}, C),
- ok.
-
-erase_ch_record(#cr{ch_pid = ChPid, monitor_ref = MonitorRef}) ->
- erlang:demonitor(MonitorRef),
- erase({ch, ChPid}),
- ok.
-
-all_ch_record() -> [C || {{ch, _}, C} <- get()].
-
-block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) ->
- update_ch_record(C#cr{blocked_consumers = queue:in(QEntry, Blocked)}).
-
-is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) ->
- Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter).
-
-maybe_send_drained(WasEmpty, State) ->
- case (not WasEmpty) andalso is_empty(State) of
- true -> [send_drained(C) || C <- all_ch_record()];
- false -> ok
- end,
- State.
-
-send_drained(C = #cr{ch_pid = ChPid, limiter = Limiter}) ->
- case rabbit_limiter:drained(Limiter) of
- {[], Limiter} -> ok;
- {CTagCredit, Limiter2} -> rabbit_channel:send_drained(
- ChPid, CTagCredit),
- update_ch_record(C#cr{limiter = Limiter2})
- end.
-
-deliver_msgs_to_consumers(_DeliverFun, true, State) ->
- {true, State};
-deliver_msgs_to_consumers(DeliverFun, false,
- State = #q{active_consumers = ActiveConsumers}) ->
- case queue:out(ActiveConsumers) of
- {empty, _} ->
- {false, State};
- {{value, QEntry}, Tail} ->
- {Stop, State1} = deliver_msg_to_consumer(
- DeliverFun, QEntry,
- State#q{active_consumers = Tail}),
- deliver_msgs_to_consumers(DeliverFun, Stop, State1)
- end.
-
-deliver_msg_to_consumer(DeliverFun, E = {ChPid, Consumer}, State) ->
- C = lookup_ch(ChPid),
- case is_ch_blocked(C) of
- true -> block_consumer(C, E),
- {false, State};
- false -> case rabbit_limiter:can_send(C#cr.limiter,
- Consumer#consumer.ack_required,
- Consumer#consumer.tag) of
- {suspend, Limiter} ->
- block_consumer(C#cr{limiter = Limiter}, E),
- {false, State};
- {continue, Limiter} ->
- AC1 = queue:in(E, State#q.active_consumers),
- deliver_msg_to_consumer(
- DeliverFun, Consumer, C#cr{limiter = Limiter},
- State#q{active_consumers = AC1})
- end
- end.
-
-deliver_msg_to_consumer(DeliverFun,
- #consumer{tag = ConsumerTag,
- ack_required = AckRequired},
- C = #cr{ch_pid = ChPid,
- acktags = ChAckTags,
- unsent_message_count = Count},
- State = #q{q = #amqqueue{name = QName}}) ->
- {{Message, IsDelivered, AckTag}, Stop, State1} =
- DeliverFun(AckRequired, State),
- rabbit_channel:deliver(ChPid, ConsumerTag, AckRequired,
- {QName, self(), AckTag, IsDelivered, Message}),
- ChAckTags1 = case AckRequired of
- true -> queue:in(AckTag, ChAckTags);
- false -> ChAckTags
- end,
- update_ch_record(C#cr{acktags = ChAckTags1,
- unsent_message_count = Count + 1}),
- {Stop, State1}.
-
-deliver_from_queue_deliver(AckRequired, State) ->
- {Result, State1} = fetch(AckRequired, State),
- {Result, is_empty(State1), State1}.
-
-confirm_messages([], State) ->
- State;
-confirm_messages(MsgIds, State = #q{msg_id_to_channel = MTC}) ->
- {CMs, MTC1} =
- lists:foldl(
- fun(MsgId, {CMs, MTC0}) ->
- case gb_trees:lookup(MsgId, MTC0) of
- {value, {SenderPid, MsgSeqNo}} ->
- {rabbit_misc:gb_trees_cons(SenderPid,
- MsgSeqNo, CMs),
- gb_trees:delete(MsgId, MTC0)};
- none ->
- {CMs, MTC0}
- end
- end, {gb_trees:empty(), MTC}, MsgIds),
- rabbit_misc:gb_trees_foreach(fun rabbit_misc:confirm_to_sender/2, CMs),
- State#q{msg_id_to_channel = MTC1}.
-
-send_or_record_confirm(#delivery{msg_seq_no = undefined}, State) ->
- {never, State};
-send_or_record_confirm(#delivery{sender = SenderPid,
- msg_seq_no = MsgSeqNo,
- message = #basic_message {
- is_persistent = true,
- id = MsgId}},
- State = #q{q = #amqqueue{durable = true},
- msg_id_to_channel = MTC}) ->
- MTC1 = gb_trees:insert(MsgId, {SenderPid, MsgSeqNo}, MTC),
- {eventually, State#q{msg_id_to_channel = MTC1}};
-send_or_record_confirm(#delivery{sender = SenderPid,
- msg_seq_no = MsgSeqNo}, State) ->
- rabbit_misc:confirm_to_sender(SenderPid, [MsgSeqNo]),
- {immediately, State}.
-
-discard(#delivery{sender = SenderPid,
- msg_seq_no = MsgSeqNo,
- message = #basic_message{id = MsgId}}, State) ->
- State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
- case MsgSeqNo of
- undefined -> State;
- _ -> confirm_messages([MsgId], State)
- end,
- BQS1 = BQ:discard(MsgId, SenderPid, BQS),
- State1#q{backing_queue_state = BQS1}.
-
-run_message_queue(State) ->
- {_IsEmpty1, State1} = deliver_msgs_to_consumers(
- fun deliver_from_queue_deliver/2,
- is_empty(State), State),
- State1.
-
-attempt_delivery(Delivery = #delivery{sender = SenderPid, message = Message},
- Props, Delivered, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- case BQ:is_duplicate(Message, BQS) of
- {false, BQS1} ->
- deliver_msgs_to_consumers(
- fun (true, State1 = #q{backing_queue_state = BQS2}) ->
- true = BQ:is_empty(BQS2),
- {AckTag, BQS3} = BQ:publish_delivered(
- Message, Props, SenderPid, BQS2),
- {{Message, Delivered, AckTag},
- true, State1#q{backing_queue_state = BQS3}};
- (false, State1) ->
- {{Message, Delivered, undefined},
- true, discard(Delivery, State1)}
- end, false, State#q{backing_queue_state = BQS1});
- {true, BQS1} ->
- {true, State#q{backing_queue_state = BQS1}}
- end.
-
-deliver_or_enqueue(Delivery = #delivery{message = Message, sender = SenderPid},
- Delivered, State) ->
- {Confirm, State1} = send_or_record_confirm(Delivery, State),
- Props = message_properties(Message, Confirm, State),
- case attempt_delivery(Delivery, Props, Delivered, State1) of
- {true, State2} ->
- State2;
- %% The next one is an optimisation
- {false, State2 = #q{ttl = 0, dlx = undefined}} ->
- discard(Delivery, State2);
- {false, State2 = #q{backing_queue = BQ, backing_queue_state = BQS}} ->
- BQS1 = BQ:publish(Message, Props, Delivered, SenderPid, BQS),
- {Dropped, State3 = #q{backing_queue_state = BQS2}} =
- maybe_drop_head(State2#q{backing_queue_state = BQS1}),
- QLen = BQ:len(BQS2),
- %% optimisation: it would be perfectly safe to always
- %% invoke drop_expired_msgs here, but that is expensive so
- %% we only do that if a new message that might have an
- %% expiry ends up at the head of the queue. If the head
- %% remains unchanged, or if the newly published message
- %% has no expiry and becomes the head of the queue then
- %% the call is unnecessary.
- case {Dropped > 0, QLen =:= 1, Props#message_properties.expiry} of
- {false, false, _} -> State3;
- {true, true, undefined} -> State3;
- {_, _, _} -> drop_expired_msgs(State3)
- end
- end.
-
-maybe_drop_head(State = #q{max_length = undefined}) ->
- {0, State};
-maybe_drop_head(State = #q{max_length = MaxLen,
- backing_queue = BQ,
- backing_queue_state = BQS}) ->
- case BQ:len(BQS) - MaxLen of
- Excess when Excess > 0 ->
- {Excess,
- with_dlx(
- State#q.dlx,
- fun (X) -> dead_letter_maxlen_msgs(X, Excess, State) end,
- fun () ->
- {_, BQS1} = lists:foldl(fun (_, {_, BQS0}) ->
- BQ:drop(false, BQS0)
- end, {ok, BQS},
- lists:seq(1, Excess)),
- State#q{backing_queue_state = BQS1}
- end)};
- _ -> {0, State}
- end.
-
-requeue_and_run(AckTags, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- WasEmpty = BQ:is_empty(BQS),
- {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
- {_Dropped, State1} = maybe_drop_head(State#q{backing_queue_state = BQS1}),
- run_message_queue(maybe_send_drained(WasEmpty, drop_expired_msgs(State1))).
-
-fetch(AckRequired, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {Result, BQS1} = BQ:fetch(AckRequired, BQS),
- State1 = drop_expired_msgs(State#q{backing_queue_state = BQS1}),
- {Result, maybe_send_drained(Result =:= empty, State1)}.
-
-ack(AckTags, ChPid, State) ->
- subtract_acks(ChPid, AckTags, State,
- fun (State1 = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {_Guids, BQS1} = BQ:ack(AckTags, BQS),
- State1#q{backing_queue_state = BQS1}
- end).
-
-requeue(AckTags, ChPid, State) ->
- subtract_acks(ChPid, AckTags, State,
- fun (State1) -> requeue_and_run(AckTags, State1) end).
-
-remove_consumer(ChPid, ConsumerTag, Queue) ->
- queue:filter(fun ({CP, #consumer{tag = CTag}}) ->
- (CP /= ChPid) or (CTag /= ConsumerTag)
- end, Queue).
-
-remove_consumers(ChPid, Queue, QName) ->
- queue:filter(fun ({CP, #consumer{tag = CTag}}) when CP =:= ChPid ->
- emit_consumer_deleted(ChPid, CTag, QName),
- false;
- (_) ->
- true
- end, Queue).
-
-possibly_unblock(State, ChPid, Update) ->
- case lookup_ch(ChPid) of
- not_found -> State;
- C -> C1 = Update(C),
- case is_ch_blocked(C) andalso not is_ch_blocked(C1) of
- false -> update_ch_record(C1),
- State;
- true -> unblock(State, C1)
- end
- end.
-
-unblock(State, C = #cr{limiter = Limiter}) ->
- case lists:partition(
- fun({_ChPid, #consumer{tag = CTag}}) ->
- rabbit_limiter:is_consumer_blocked(Limiter, CTag)
- end, queue:to_list(C#cr.blocked_consumers)) of
- {_, []} ->
- update_ch_record(C),
- State;
- {Blocked, Unblocked} ->
- BlockedQ = queue:from_list(Blocked),
- UnblockedQ = queue:from_list(Unblocked),
- update_ch_record(C#cr{blocked_consumers = BlockedQ}),
- AC1 = queue:join(State#q.active_consumers, UnblockedQ),
- run_message_queue(State#q{active_consumers = AC1})
- end.
-
-should_auto_delete(#q{q = #amqqueue{auto_delete = false}}) -> false;
-should_auto_delete(#q{has_had_consumers = false}) -> false;
-should_auto_delete(State) -> is_unused(State).
-
-handle_ch_down(DownPid, State = #q{exclusive_consumer = Holder,
- senders = Senders}) ->
- Senders1 = case pmon:is_monitored(DownPid, Senders) of
- false -> Senders;
- true -> credit_flow:peer_down(DownPid),
- pmon:demonitor(DownPid, Senders)
- end,
- case lookup_ch(DownPid) of
- not_found ->
- {ok, State#q{senders = Senders1}};
- C = #cr{ch_pid = ChPid,
- acktags = ChAckTags,
- blocked_consumers = Blocked} ->
- QName = qname(State),
- _ = remove_consumers(ChPid, Blocked, QName), %% for stats emission
- ok = erase_ch_record(C),
- State1 = State#q{
- exclusive_consumer = case Holder of
- {ChPid, _} -> none;
- Other -> Other
- end,
- active_consumers = remove_consumers(
- ChPid, State#q.active_consumers,
- QName),
- senders = Senders1},
- case should_auto_delete(State1) of
- true -> {stop, State1};
- false -> {ok, requeue_and_run(queue:to_list(ChAckTags),
- ensure_expiry_timer(State1))}
- end
- end.
-
-check_exclusive_access({_ChPid, _ConsumerTag}, _ExclusiveConsume, _State) ->
- in_use;
-check_exclusive_access(none, false, _State) ->
- ok;
-check_exclusive_access(none, true, State) ->
- case is_unused(State) of
- true -> ok;
- false -> in_use
- end.
-
-consumer_count() ->
- lists:sum([Count || #cr{consumer_count = Count} <- all_ch_record()]).
-
-is_unused(_State) -> consumer_count() == 0.
-
-maybe_send_reply(_ChPid, undefined) -> ok;
-maybe_send_reply(ChPid, Msg) -> ok = rabbit_channel:send_command(ChPid, Msg).
-
-qname(#q{q = #amqqueue{name = QName}}) -> QName.
-
-backing_queue_timeout(State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- State#q{backing_queue_state = BQ:timeout(BQS)}.
-
-subtract_acks(ChPid, AckTags, State, Fun) ->
- case lookup_ch(ChPid) of
- not_found ->
- State;
- C = #cr{acktags = ChAckTags} ->
- update_ch_record(
- C#cr{acktags = subtract_acks(AckTags, [], ChAckTags)}),
- Fun(State)
- end.
-
-subtract_acks([], [], AckQ) ->
- AckQ;
-subtract_acks([], Prefix, AckQ) ->
- queue:join(queue:from_list(lists:reverse(Prefix)), AckQ);
-subtract_acks([T | TL] = AckTags, Prefix, AckQ) ->
- case queue:out(AckQ) of
- {{value, T}, QTail} -> subtract_acks(TL, Prefix, QTail);
- {{value, AT}, QTail} -> subtract_acks(AckTags, [AT | Prefix], QTail)
- end.
-
-message_properties(Message, Confirm, #q{ttl = TTL}) ->
- #message_properties{expiry = calculate_msg_expiry(Message, TTL),
- needs_confirming = Confirm == eventually}.
-
-calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
- #content{properties = Props} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- %% We assert that the expiration must be valid - we check in the channel.
- {ok, MsgTTL} = rabbit_basic:parse_expiration(Props),
- case lists:min([TTL, MsgTTL]) of
- undefined -> undefined;
- T -> now_micros() + T * 1000
- end.
-
-%% Logically this function should invoke maybe_send_drained/2.
-%% However, that is expensive. Since some frequent callers of
-%% drop_expired_msgs/1, in particular deliver_or_enqueue/3, cannot
-%% possibly cause the queue to become empty, we push the
-%% responsibility to the callers. So be cautious when adding new ones.
-drop_expired_msgs(State) ->
- case is_empty(State) of
- true -> State;
- false -> drop_expired_msgs(now_micros(), State)
- end.
-
-drop_expired_msgs(Now, State = #q{backing_queue_state = BQS,
- backing_queue = BQ }) ->
- ExpirePred = fun (#message_properties{expiry = Exp}) -> Now >= Exp end,
- {Props, State1} =
- with_dlx(
- State#q.dlx,
- fun (X) -> dead_letter_expired_msgs(ExpirePred, X, State) end,
- fun () -> {Next, BQS1} = BQ:dropwhile(ExpirePred, BQS),
- {Next, State#q{backing_queue_state = BQS1}} end),
- ensure_ttl_timer(case Props of
- undefined -> undefined;
- #message_properties{expiry = Exp} -> Exp
- end, State1).
-
-with_dlx(undefined, _With, Without) -> Without();
-with_dlx(DLX, With, Without) -> case rabbit_exchange:lookup(DLX) of
- {ok, X} -> With(X);
- {error, not_found} -> Without()
- end.
-
-dead_letter_expired_msgs(ExpirePred, X, State = #q{backing_queue = BQ}) ->
- dead_letter_msgs(fun (DLFun, Acc, BQS1) ->
- BQ:fetchwhile(ExpirePred, DLFun, Acc, BQS1)
- end, expired, X, State).
-
-dead_letter_rejected_msgs(AckTags, X, State = #q{backing_queue = BQ}) ->
- {ok, State1} =
- dead_letter_msgs(
- fun (DLFun, Acc, BQS) ->
- {Acc1, BQS1} = BQ:ackfold(DLFun, Acc, BQS, AckTags),
- {ok, Acc1, BQS1}
- end, rejected, X, State),
- State1.
-
-dead_letter_maxlen_msgs(X, Excess, State = #q{backing_queue = BQ}) ->
- {ok, State1} =
- dead_letter_msgs(
- fun (DLFun, Acc, BQS) ->
- lists:foldl(fun (_, {ok, Acc0, BQS0}) ->
- {{Msg, _, AckTag}, BQS1} =
- BQ:fetch(true, BQS0),
- {ok, DLFun(Msg, AckTag, Acc0), BQS1}
- end, {ok, Acc, BQS}, lists:seq(1, Excess))
- end, maxlen, X, State),
- State1.
-
-dead_letter_msgs(Fun, Reason, X, State = #q{dlx_routing_key = RK,
- backing_queue_state = BQS,
- backing_queue = BQ}) ->
- QName = qname(State),
- {Res, Acks1, BQS1} =
- Fun(fun (Msg, AckTag, Acks) ->
- dead_letter_publish(Msg, Reason, X, RK, QName),
- [AckTag | Acks]
- end, [], BQS),
- {_Guids, BQS2} = BQ:ack(Acks1, BQS1),
- {Res, State#q{backing_queue_state = BQS2}}.
-
-dead_letter_publish(Msg, Reason, X, RK, QName) ->
- DLMsg = make_dead_letter_msg(Msg, Reason, X#exchange.name, RK, QName),
- Delivery = rabbit_basic:delivery(false, DLMsg, undefined),
- {Queues, Cycles} = detect_dead_letter_cycles(
- Reason, DLMsg, rabbit_exchange:route(X, Delivery)),
- lists:foreach(fun log_cycle_once/1, Cycles),
- rabbit_amqqueue:deliver( rabbit_amqqueue:lookup(Queues), Delivery),
- ok.
-
-stop(State) -> stop(noreply, State).
-
-stop(noreply, State) -> {stop, normal, State};
-stop(Reply, State) -> {stop, normal, Reply, State}.
-
-
-detect_dead_letter_cycles(expired,
- #basic_message{content = Content}, Queues) ->
- #content{properties = #'P_basic'{headers = Headers}} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- NoCycles = {Queues, []},
- case Headers of
- undefined ->
- NoCycles;
- _ ->
- case rabbit_misc:table_lookup(Headers, <<"x-death">>) of
- {array, Deaths} ->
- {Cycling, NotCycling} =
- lists:partition(
- fun (#resource{name = Queue}) ->
- is_dead_letter_cycle(Queue, Deaths)
- end, Queues),
- OldQueues = [rabbit_misc:table_lookup(D, <<"queue">>) ||
- {table, D} <- Deaths],
- OldQueues1 = [QName || {longstr, QName} <- OldQueues],
- {NotCycling, [[QName | OldQueues1] ||
- #resource{name = QName} <- Cycling]};
- _ ->
- NoCycles
- end
- end;
-detect_dead_letter_cycles(_Reason, _Msg, Queues) ->
- {Queues, []}.
-
-is_dead_letter_cycle(Queue, Deaths) ->
- {Cycle, Rest} =
- lists:splitwith(
- fun ({table, D}) ->
- {longstr, Queue} =/= rabbit_misc:table_lookup(D, <<"queue">>);
- (_) ->
- true
- end, Deaths),
- %% Is there a cycle, and if so, is it entirely due to expiry?
- case Rest of
- [] -> false;
- [H|_] -> lists:all(
- fun ({table, D}) ->
- {longstr, <<"expired">>} =:=
- rabbit_misc:table_lookup(D, <<"reason">>);
- (_) ->
- false
- end, Cycle ++ [H])
- end.
-
-make_dead_letter_msg(Msg = #basic_message{content = Content,
- exchange_name = Exchange,
- routing_keys = RoutingKeys},
- Reason, DLX, RK, #resource{name = QName}) ->
- {DeathRoutingKeys, HeadersFun1} =
- case RK of
- undefined -> {RoutingKeys, fun (H) -> H end};
- _ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end}
- end,
- ReasonBin = list_to_binary(atom_to_list(Reason)),
- TimeSec = rabbit_misc:now_ms() div 1000,
- PerMsgTTL = per_msg_ttl_header(Content#content.properties),
- HeadersFun2 =
- fun (Headers) ->
- %% The first routing key is the one specified in the
- %% basic.publish; all others are CC or BCC keys.
- RKs = [hd(RoutingKeys) | rabbit_basic:header_routes(Headers)],
- RKs1 = [{longstr, Key} || Key <- RKs],
- Info = [{<<"reason">>, longstr, ReasonBin},
- {<<"queue">>, longstr, QName},
- {<<"time">>, timestamp, TimeSec},
- {<<"exchange">>, longstr, Exchange#resource.name},
- {<<"routing-keys">>, array, RKs1}] ++ PerMsgTTL,
- HeadersFun1(rabbit_basic:prepend_table_header(<<"x-death">>,
- Info, Headers))
- end,
- Content1 = #content{properties = Props} =
- rabbit_basic:map_headers(HeadersFun2, Content),
- Content2 = Content1#content{properties =
- Props#'P_basic'{expiration = undefined}},
- Msg#basic_message{exchange_name = DLX,
- id = rabbit_guid:gen(),
- routing_keys = DeathRoutingKeys,
- content = Content2}.
-
-per_msg_ttl_header(#'P_basic'{expiration = undefined}) ->
- [];
-per_msg_ttl_header(#'P_basic'{expiration = Expiration}) ->
- [{<<"original-expiration">>, longstr, Expiration}];
-per_msg_ttl_header(_) ->
- [].
-
-now_micros() -> timer:now_diff(now(), {0,0,0}).
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(name, #q{q = #amqqueue{name = Name}}) -> Name;
-i(durable, #q{q = #amqqueue{durable = Durable}}) -> Durable;
-i(auto_delete, #q{q = #amqqueue{auto_delete = AutoDelete}}) -> AutoDelete;
-i(arguments, #q{q = #amqqueue{arguments = Arguments}}) -> Arguments;
-i(pid, _) ->
- self();
-i(owner_pid, #q{q = #amqqueue{exclusive_owner = none}}) ->
- '';
-i(owner_pid, #q{q = #amqqueue{exclusive_owner = ExclusiveOwner}}) ->
- ExclusiveOwner;
-i(policy, #q{q = #amqqueue{name = Name}}) ->
- {ok, Q} = rabbit_amqqueue:lookup(Name),
- case rabbit_policy:name(Q) of
- none -> '';
- Policy -> Policy
- end;
-i(exclusive_consumer_pid, #q{exclusive_consumer = none}) ->
- '';
-i(exclusive_consumer_pid, #q{exclusive_consumer = {ChPid, _ConsumerTag}}) ->
- ChPid;
-i(exclusive_consumer_tag, #q{exclusive_consumer = none}) ->
- '';
-i(exclusive_consumer_tag, #q{exclusive_consumer = {_ChPid, ConsumerTag}}) ->
- ConsumerTag;
-i(messages_ready, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
- BQ:len(BQS);
-i(messages_unacknowledged, _) ->
- lists:sum([queue:len(C#cr.acktags) || C <- all_ch_record()]);
-i(messages, State) ->
- lists:sum([i(Item, State) || Item <- [messages_ready,
- messages_unacknowledged]]);
-i(consumers, _) ->
- consumer_count();
-i(memory, _) ->
- {memory, M} = process_info(self(), memory),
- M;
-i(slave_pids, #q{q = #amqqueue{name = Name}}) ->
- {ok, Q = #amqqueue{slave_pids = SPids}} =
- rabbit_amqqueue:lookup(Name),
- case rabbit_mirror_queue_misc:is_mirrored(Q) of
- false -> '';
- true -> SPids
- end;
-i(synchronised_slave_pids, #q{q = #amqqueue{name = Name}}) ->
- {ok, Q = #amqqueue{sync_slave_pids = SSPids}} =
- rabbit_amqqueue:lookup(Name),
- case rabbit_mirror_queue_misc:is_mirrored(Q) of
- false -> '';
- true -> SSPids
- end;
-i(status, #q{status = Status}) ->
- Status;
-i(backing_queue_status, #q{backing_queue_state = BQS, backing_queue = BQ}) ->
- BQ:status(BQS);
-i(Item, _) ->
- throw({bad_argument, Item}).
-
-consumers(#q{active_consumers = ActiveConsumers}) ->
- lists:foldl(fun (C, Acc) -> consumers(C#cr.blocked_consumers, Acc) end,
- consumers(ActiveConsumers, []), all_ch_record()).
-
-consumers(Consumers, Acc) ->
- rabbit_misc:queue_fold(
- fun ({ChPid, #consumer{tag = CTag, ack_required = AckRequired}}, Acc1) ->
- [{ChPid, CTag, AckRequired} | Acc1]
- end, Acc, Consumers).
-
-emit_stats(State) ->
- emit_stats(State, []).
-
-emit_stats(State, Extra) ->
- rabbit_event:notify(queue_stats, Extra ++ infos(?STATISTICS_KEYS, State)).
-
-emit_consumer_created(ChPid, ConsumerTag, Exclusive, AckRequired, QName) ->
- rabbit_event:notify(consumer_created,
- [{consumer_tag, ConsumerTag},
- {exclusive, Exclusive},
- {ack_required, AckRequired},
- {channel, ChPid},
- {queue, QName}]).
-
-emit_consumer_deleted(ChPid, ConsumerTag, QName) ->
- rabbit_event:notify(consumer_deleted,
- [{consumer_tag, ConsumerTag},
- {channel, ChPid},
- {queue, QName}]).
-
-%%----------------------------------------------------------------------------
-
-prioritise_call(Msg, _From, _Len, _State) ->
- case Msg of
- info -> 9;
- {info, _Items} -> 9;
- consumers -> 9;
- stat -> 7;
- _ -> 0
- end.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- delete_immediately -> 8;
- {set_ram_duration_target, _Duration} -> 8;
- {set_maximum_since_use, _Age} -> 8;
- {run_backing_queue, _Mod, _Fun} -> 6;
- _ -> 0
- end.
-
-prioritise_info(Msg, _Len, #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
- case Msg of
- {'DOWN', _, process, DownPid, _} -> 8;
- update_ram_duration -> 8;
- maybe_expire -> 8;
- drop_expired -> 8;
- emit_stats -> 7;
- sync_timeout -> 6;
- _ -> 0
- end.
-
-handle_call({init, Recover}, From,
- State = #q{q = #amqqueue{exclusive_owner = none}}) ->
- declare(Recover, From, State);
-
-handle_call({init, Recover}, From,
- State = #q{q = #amqqueue{exclusive_owner = Owner}}) ->
- case rabbit_misc:is_process_alive(Owner) of
- true -> erlang:monitor(process, Owner),
- declare(Recover, From, State);
- false -> #q{backing_queue = undefined,
- backing_queue_state = undefined,
- q = Q} = State,
- gen_server2:reply(From, {owner_died, Q}),
- BQ = backing_queue_module(Q),
- BQS = bq_init(BQ, Q, Recover),
- %% Rely on terminate to delete the queue.
- {stop, {shutdown, missing_owner},
- State#q{backing_queue = BQ, backing_queue_state = BQS}}
- end;
-
-handle_call(info, _From, State) ->
- reply(infos(?INFO_KEYS, State), State);
-
-handle_call({info, Items}, _From, State) ->
- try
- reply({ok, infos(Items, State)}, State)
- catch Error -> reply({error, Error}, State)
- end;
-
-handle_call(consumers, _From, State) ->
- reply(consumers(State), State);
-
-handle_call({deliver, Delivery, Delivered}, From, State) ->
- %% Synchronous, "mandatory" deliver mode.
- gen_server2:reply(From, ok),
- noreply(deliver_or_enqueue(Delivery, Delivered, State));
-
-handle_call({notify_down, ChPid}, _From, State) ->
- %% we want to do this synchronously, so that auto_deleted queues
- %% are no longer visible by the time we send a response to the
- %% client. The queue is ultimately deleted in terminate/2; if we
- %% return stop with a reply, terminate/2 will be called by
- %% gen_server2 *before* the reply is sent.
- case handle_ch_down(ChPid, State) of
- {ok, State1} -> reply(ok, State1);
- {stop, State1} -> stop(ok, State1)
- end;
-
-handle_call({basic_get, ChPid, NoAck, LimiterPid}, _From,
- State = #q{q = #amqqueue{name = QName}}) ->
- AckRequired = not NoAck,
- State1 = ensure_expiry_timer(State),
- case fetch(AckRequired, State1) of
- {empty, State2} ->
- reply(empty, State2);
- {{Message, IsDelivered, AckTag}, State2} ->
- State3 = #q{backing_queue = BQ, backing_queue_state = BQS} =
- case AckRequired of
- true -> C = #cr{acktags = ChAckTags} =
- ch_record(ChPid, LimiterPid),
- ChAckTags1 = queue:in(AckTag, ChAckTags),
- update_ch_record(C#cr{acktags = ChAckTags1}),
- State2;
- false -> State2
- end,
- Msg = {QName, self(), AckTag, IsDelivered, Message},
- reply({ok, BQ:len(BQS), Msg}, State3)
- end;
-
-handle_call({basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
- ConsumerTag, ExclusiveConsume, CreditArgs, OkMsg},
- _From, State = #q{exclusive_consumer = Holder}) ->
- case check_exclusive_access(Holder, ExclusiveConsume, State) of
- in_use ->
- reply({error, exclusive_consume_unavailable}, State);
- ok ->
- C = #cr{consumer_count = Count,
- limiter = Limiter} = ch_record(ChPid, LimiterPid),
- Limiter1 = case LimiterActive of
- true -> rabbit_limiter:activate(Limiter);
- false -> Limiter
- end,
- Limiter2 = case CreditArgs of
- none -> Limiter1;
- {Crd, Drain} -> rabbit_limiter:credit(
- Limiter1, ConsumerTag, Crd, Drain)
- end,
- C1 = update_ch_record(C#cr{consumer_count = Count + 1,
- limiter = Limiter2}),
- case is_empty(State) of
- true -> send_drained(C1);
- false -> ok
- end,
- Consumer = #consumer{tag = ConsumerTag,
- ack_required = not NoAck},
- ExclusiveConsumer = if ExclusiveConsume -> {ChPid, ConsumerTag};
- true -> Holder
- end,
- State1 = State#q{has_had_consumers = true,
- exclusive_consumer = ExclusiveConsumer},
- ok = maybe_send_reply(ChPid, OkMsg),
- emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume,
- not NoAck, qname(State1)),
- AC1 = queue:in({ChPid, Consumer}, State1#q.active_consumers),
- reply(ok, run_message_queue(State1#q{active_consumers = AC1}))
- end;
-
-handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From,
- State = #q{exclusive_consumer = Holder}) ->
- ok = maybe_send_reply(ChPid, OkMsg),
- case lookup_ch(ChPid) of
- not_found ->
- reply(ok, State);
- C = #cr{consumer_count = Count,
- limiter = Limiter,
- blocked_consumers = Blocked} ->
- emit_consumer_deleted(ChPid, ConsumerTag, qname(State)),
- Blocked1 = remove_consumer(ChPid, ConsumerTag, Blocked),
- Limiter1 = case Count of
- 1 -> rabbit_limiter:deactivate(Limiter);
- _ -> Limiter
- end,
- Limiter2 = rabbit_limiter:forget_consumer(Limiter1, ConsumerTag),
- update_ch_record(C#cr{consumer_count = Count - 1,
- limiter = Limiter2,
- blocked_consumers = Blocked1}),
- State1 = State#q{
- exclusive_consumer = case Holder of
- {ChPid, ConsumerTag} -> none;
- _ -> Holder
- end,
- active_consumers = remove_consumer(
- ChPid, ConsumerTag,
- State#q.active_consumers)},
- case should_auto_delete(State1) of
- false -> reply(ok, ensure_expiry_timer(State1));
- true -> stop(ok, State1)
- end
- end;
-
-handle_call(stat, _From, State) ->
- State1 = #q{backing_queue = BQ, backing_queue_state = BQS} =
- ensure_expiry_timer(State),
- reply({ok, BQ:len(BQS), consumer_count()}, State1);
-
-handle_call({delete, IfUnused, IfEmpty}, _From,
- State = #q{backing_queue_state = BQS, backing_queue = BQ}) ->
- IsEmpty = BQ:is_empty(BQS),
- IsUnused = is_unused(State),
- if
- IfEmpty and not(IsEmpty) -> reply({error, not_empty}, State);
- IfUnused and not(IsUnused) -> reply({error, in_use}, State);
- true -> stop({ok, BQ:len(BQS)}, State)
- end;
-
-handle_call(purge, _From, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {Count, BQS1} = BQ:purge(BQS),
- State1 = State#q{backing_queue_state = BQS1},
- reply({ok, Count}, maybe_send_drained(Count =:= 0, State1));
-
-handle_call({requeue, AckTags, ChPid}, From, State) ->
- gen_server2:reply(From, ok),
- noreply(requeue(AckTags, ChPid, State));
-
-handle_call(sync_mirrors, _From,
- State = #q{backing_queue = rabbit_mirror_queue_master,
- backing_queue_state = BQS}) ->
- S = fun(BQSN) -> State#q{backing_queue_state = BQSN} end,
- HandleInfo = fun (Status) ->
- receive {'$gen_call', From, {info, Items}} ->
- Infos = infos(Items, State#q{status = Status}),
- gen_server2:reply(From, {ok, Infos})
- after 0 ->
- ok
- end
- end,
- EmitStats = fun (Status) ->
- rabbit_event:if_enabled(
- State, #q.stats_timer,
- fun() -> emit_stats(State#q{status = Status}) end)
- end,
- case rabbit_mirror_queue_master:sync_mirrors(HandleInfo, EmitStats, BQS) of
- {ok, BQS1} -> reply(ok, S(BQS1));
- {stop, Reason, BQS1} -> {stop, Reason, S(BQS1)}
- end;
-
-handle_call(sync_mirrors, _From, State) ->
- reply({error, not_mirrored}, State);
-
-%% By definition if we get this message here we do not have to do anything.
-handle_call(cancel_sync_mirrors, _From, State) ->
- reply({ok, not_syncing}, State);
-
-handle_call(force_event_refresh, _From,
- State = #q{exclusive_consumer = Exclusive}) ->
- rabbit_event:notify(queue_created, infos(?CREATION_EVENT_KEYS, State)),
- QName = qname(State),
- case Exclusive of
- none -> [emit_consumer_created(
- Ch, CTag, false, AckRequired, QName) ||
- {Ch, CTag, AckRequired} <- consumers(State)];
- {Ch, CTag} -> [{Ch, CTag, AckRequired}] = consumers(State),
- emit_consumer_created(Ch, CTag, true, AckRequired, QName)
- end,
- reply(ok, State).
-
-handle_cast({run_backing_queue, Mod, Fun},
- State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
- noreply(State#q{backing_queue_state = BQ:invoke(Mod, Fun, BQS)});
-
-handle_cast({deliver, Delivery = #delivery{sender = Sender}, Delivered, Flow},
- State = #q{senders = Senders}) ->
- %% Asynchronous, non-"mandatory" deliver mode.
- Senders1 = case Flow of
- flow -> credit_flow:ack(Sender),
- pmon:monitor(Sender, Senders);
- noflow -> Senders
- end,
- State1 = State#q{senders = Senders1},
- noreply(deliver_or_enqueue(Delivery, Delivered, State1));
-
-handle_cast({ack, AckTags, ChPid}, State) ->
- noreply(ack(AckTags, ChPid, State));
-
-handle_cast({reject, AckTags, true, ChPid}, State) ->
- noreply(requeue(AckTags, ChPid, State));
-
-handle_cast({reject, AckTags, false, ChPid}, State) ->
- noreply(with_dlx(
- State#q.dlx,
- fun (X) -> subtract_acks(ChPid, AckTags, State,
- fun (State1) ->
- dead_letter_rejected_msgs(
- AckTags, X, State1)
- end) end,
- fun () -> ack(AckTags, ChPid, State) end));
-
-handle_cast(delete_immediately, State) ->
- stop(State);
-
-handle_cast({resume, ChPid}, State) ->
- noreply(
- possibly_unblock(State, ChPid,
- fun (C = #cr{limiter = Limiter}) ->
- C#cr{limiter = rabbit_limiter:resume(Limiter)}
- end));
-
-handle_cast({notify_sent, ChPid, Credit}, State) ->
- noreply(
- possibly_unblock(State, ChPid,
- fun (C = #cr{unsent_message_count = Count}) ->
- C#cr{unsent_message_count = Count - Credit}
- end));
-
-handle_cast({activate_limit, ChPid}, State) ->
- noreply(
- possibly_unblock(State, ChPid,
- fun (C = #cr{limiter = Limiter}) ->
- C#cr{limiter = rabbit_limiter:activate(Limiter)}
- end));
-
-handle_cast({flush, ChPid}, State) ->
- ok = rabbit_channel:flushed(ChPid, self()),
- noreply(State);
-
-handle_cast({set_ram_duration_target, Duration},
- State = #q{backing_queue = BQ, backing_queue_state = BQS}) ->
- BQS1 = BQ:set_ram_duration_target(Duration, BQS),
- noreply(State#q{backing_queue_state = BQS1});
-
-handle_cast({set_maximum_since_use, Age}, State) ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- noreply(State);
-
-handle_cast(start_mirroring, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- %% lookup again to get policy for init_with_existing_bq
- {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
- true = BQ =/= rabbit_mirror_queue_master, %% assertion
- BQ1 = rabbit_mirror_queue_master,
- BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
- noreply(State#q{backing_queue = BQ1,
- backing_queue_state = BQS1});
-
-handle_cast(stop_mirroring, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- BQ = rabbit_mirror_queue_master, %% assertion
- {BQ1, BQS1} = BQ:stop_mirroring(BQS),
- noreply(State#q{backing_queue = BQ1,
- backing_queue_state = BQS1});
-
-handle_cast({credit, ChPid, CTag, Credit, Drain},
- State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- Len = BQ:len(BQS),
- rabbit_channel:send_credit_reply(ChPid, Len),
- C = #cr{limiter = Limiter} = lookup_ch(ChPid),
- C1 = C#cr{limiter = rabbit_limiter:credit(Limiter, CTag, Credit, Drain)},
- noreply(case Drain andalso Len == 0 of
- true -> update_ch_record(C1),
- send_drained(C1),
- State;
- false -> case is_ch_blocked(C1) of
- true -> update_ch_record(C1),
- State;
- false -> unblock(State, C1)
- end
- end);
-
-handle_cast(wake_up, State) ->
- noreply(State).
-
-handle_info(maybe_expire, State) ->
- case is_unused(State) of
- true -> stop(State);
- false -> noreply(State#q{expiry_timer_ref = undefined})
- end;
-
-handle_info(drop_expired, State) ->
- WasEmpty = is_empty(State),
- State1 = drop_expired_msgs(State#q{ttl_timer_ref = undefined}),
- noreply(maybe_send_drained(WasEmpty, State1));
-
-handle_info(emit_stats, State) ->
- emit_stats(State),
- %% Don't call noreply/1, we don't want to set timers
- {State1, Timeout} = next_state(rabbit_event:reset_stats_timer(
- State, #q.stats_timer)),
- {noreply, State1, Timeout};
-
-handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
- State = #q{q = #amqqueue{exclusive_owner = DownPid}}) ->
- %% Exclusively owned queues must disappear with their owner. In
- %% the case of clean shutdown we delete the queue synchronously in
- %% the reader - although not required by the spec this seems to
- %% match what people expect (see bug 21824). However we need this
- %% monitor-and-async- delete in case the connection goes away
- %% unexpectedly.
- stop(State);
-
-handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
- case handle_ch_down(DownPid, State) of
- {ok, State1} -> noreply(State1);
- {stop, State1} -> stop(State1)
- end;
-
-handle_info(update_ram_duration, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {RamDuration, BQS1} = BQ:ram_duration(BQS),
- DesiredDuration =
- rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
- BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
- %% Don't call noreply/1, we don't want to set timers
- {State1, Timeout} = next_state(State#q{rate_timer_ref = undefined,
- backing_queue_state = BQS2}),
- {noreply, State1, Timeout};
-
-handle_info(sync_timeout, State) ->
- noreply(backing_queue_timeout(State#q{sync_timer_ref = undefined}));
-
-handle_info(timeout, State) ->
- noreply(backing_queue_timeout(State));
-
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State};
-
-handle_info({bump_credit, Msg}, State) ->
- credit_flow:handle_bump_msg(Msg),
- noreply(State);
-
-handle_info(Info, State) ->
- {stop, {unhandled_info, Info}, State}.
-
-handle_pre_hibernate(State = #q{backing_queue_state = undefined}) ->
- {hibernate, State};
-handle_pre_hibernate(State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- {RamDuration, BQS1} = BQ:ram_duration(BQS),
- DesiredDuration =
- rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
- BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
- BQS3 = BQ:handle_pre_hibernate(BQS2),
- rabbit_event:if_enabled(
- State, #q.stats_timer,
- fun () -> emit_stats(State, [{idle_since, now()}]) end),
- State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3},
- #q.stats_timer),
- {hibernate, stop_rate_timer(State1)}.
-
-format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
-
-log_cycle_once(Queues) ->
- Key = {queue_cycle, Queues},
- case get(Key) of
- true -> ok;
- undefined -> rabbit_log:warning(
- "Message dropped. Dead-letter queues cycle detected" ++
- ": ~p~nThis cycle will NOT be reported again.~n",
- [Queues]),
- put(Key, true)
- end.
diff --git a/src/rabbit_amqqueue_sup.erl b/src/rabbit_amqqueue_sup.erl
deleted file mode 100644
index 0515e82e..00000000
--- a/src/rabbit_amqqueue_sup.erl
+++ /dev/null
@@ -1,52 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_amqqueue_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0, start_child/2]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_child/2 ::
- (node(), [any()]) -> rabbit_types:ok(pid() | undefined) |
- rabbit_types:ok({pid(), any()}) |
- rabbit_types:error(any())).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
-
-start_child(Node, Args) ->
- supervisor2:start_child({?SERVER, Node}, Args).
-
-init([]) ->
- {ok, {{simple_one_for_one_terminate, 10, 10},
- [{rabbit_amqqueue, {rabbit_amqqueue_process, start_link, []},
- temporary, ?MAX_WAIT, worker, [rabbit_amqqueue_process]}]}}.
diff --git a/src/rabbit_auth_backend.erl b/src/rabbit_auth_backend.erl
deleted file mode 100644
index 4ffc8c3a..00000000
--- a/src/rabbit_auth_backend.erl
+++ /dev/null
@@ -1,72 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_backend).
-
--ifdef(use_specs).
-
-%% A description proplist as with auth mechanisms,
-%% exchanges. Currently unused.
--callback description() -> [proplists:property()].
-
-%% Check a user can log in, given a username and a proplist of
-%% authentication information (e.g. [{password, Password}]).
-%%
-%% Possible responses:
-%% {ok, User}
-%% Authentication succeeded, and here's the user record.
-%% {error, Error}
-%% Something went wrong. Log and die.
-%% {refused, Msg, Args}
-%% Client failed authentication. Log and die.
--callback check_user_login(rabbit_types:username(), [term()]) ->
- {'ok', rabbit_types:user()} |
- {'refused', string(), [any()]} |
- {'error', any()}.
-
-%% Given #user and vhost, can a user log in to a vhost?
-%% Possible responses:
-%% true
-%% false
-%% {error, Error}
-%% Something went wrong. Log and die.
--callback check_vhost_access(rabbit_types:user(), rabbit_types:vhost()) ->
- boolean() | {'error', any()}.
-
-
-%% Given #user, resource and permission, can a user access a resource?
-%%
-%% Possible responses:
-%% true
-%% false
-%% {error, Error}
-%% Something went wrong. Log and die.
--callback check_resource_access(rabbit_types:user(),
- rabbit_types:r(atom()),
- rabbit_access_control:permission_atom()) ->
- boolean() | {'error', any()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {check_user_login, 2}, {check_vhost_access, 2},
- {check_resource_access, 3}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl
deleted file mode 100644
index 61919d05..00000000
--- a/src/rabbit_auth_backend_internal.erl
+++ /dev/null
@@ -1,331 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_backend_internal).
--include("rabbit.hrl").
-
--behaviour(rabbit_auth_backend).
-
--export([description/0]).
--export([check_user_login/2, check_vhost_access/2, check_resource_access/3]).
-
--export([add_user/2, delete_user/1, change_password/2, set_tags/2,
- list_users/0, user_info_keys/0, lookup_user/1, clear_password/1]).
--export([make_salt/0, check_password/2, change_password_hash/2,
- hash_password/1]).
--export([set_permissions/5, clear_permissions/2,
- list_permissions/0, list_vhost_permissions/1, list_user_permissions/1,
- list_user_vhost_permissions/2, perms_info_keys/0,
- vhost_perms_info_keys/0, user_perms_info_keys/0,
- user_vhost_perms_info_keys/0]).
-
--ifdef(use_specs).
-
--type(regexp() :: binary()).
-
--spec(add_user/2 :: (rabbit_types:username(), rabbit_types:password()) -> 'ok').
--spec(delete_user/1 :: (rabbit_types:username()) -> 'ok').
--spec(change_password/2 :: (rabbit_types:username(), rabbit_types:password())
- -> 'ok').
--spec(clear_password/1 :: (rabbit_types:username()) -> 'ok').
--spec(make_salt/0 :: () -> binary()).
--spec(check_password/2 :: (rabbit_types:password(),
- rabbit_types:password_hash()) -> boolean()).
--spec(change_password_hash/2 :: (rabbit_types:username(),
- rabbit_types:password_hash()) -> 'ok').
--spec(hash_password/1 :: (rabbit_types:password())
- -> rabbit_types:password_hash()).
--spec(set_tags/2 :: (rabbit_types:username(), [atom()]) -> 'ok').
--spec(list_users/0 :: () -> [rabbit_types:infos()]).
--spec(user_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(lookup_user/1 :: (rabbit_types:username())
- -> rabbit_types:ok(rabbit_types:internal_user())
- | rabbit_types:error('not_found')).
--spec(set_permissions/5 ::(rabbit_types:username(), rabbit_types:vhost(),
- regexp(), regexp(), regexp()) -> 'ok').
--spec(clear_permissions/2 :: (rabbit_types:username(), rabbit_types:vhost())
- -> 'ok').
--spec(list_permissions/0 :: () -> [rabbit_types:infos()]).
--spec(list_vhost_permissions/1 ::
- (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(list_user_permissions/1 ::
- (rabbit_types:username()) -> [rabbit_types:infos()]).
--spec(list_user_vhost_permissions/2 ::
- (rabbit_types:username(), rabbit_types:vhost())
- -> [rabbit_types:infos()]).
--spec(perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(user_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(user_vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(PERMS_INFO_KEYS, [configure, write, read]).
--define(USER_INFO_KEYS, [user, tags]).
-
-%% Implementation of rabbit_auth_backend
-
-description() ->
- [{name, <<"Internal">>},
- {description, <<"Internal user / password database">>}].
-
-check_user_login(Username, []) ->
- internal_check_user_login(Username, fun(_) -> true end);
-check_user_login(Username, [{password, Password}]) ->
- internal_check_user_login(
- Username, fun(#internal_user{password_hash = Hash}) ->
- check_password(Password, Hash)
- end);
-check_user_login(Username, AuthProps) ->
- exit({unknown_auth_props, Username, AuthProps}).
-
-internal_check_user_login(Username, Fun) ->
- Refused = {refused, "user '~s' - invalid credentials", [Username]},
- case lookup_user(Username) of
- {ok, User = #internal_user{tags = Tags}} ->
- case Fun(User) of
- true -> {ok, #user{username = Username,
- tags = Tags,
- auth_backend = ?MODULE,
- impl = User}};
- _ -> Refused
- end;
- {error, not_found} ->
- Refused
- end.
-
-check_vhost_access(#user{username = Username}, VHostPath) ->
- case mnesia:dirty_read({rabbit_user_permission,
- #user_vhost{username = Username,
- virtual_host = VHostPath}}) of
- [] -> false;
- [_R] -> true
- end.
-
-check_resource_access(#user{username = Username},
- #resource{virtual_host = VHostPath, name = Name},
- Permission) ->
- case mnesia:dirty_read({rabbit_user_permission,
- #user_vhost{username = Username,
- virtual_host = VHostPath}}) of
- [] ->
- false;
- [#user_permission{permission = P}] ->
- PermRegexp = case element(permission_index(Permission), P) of
- %% <<"^$">> breaks Emacs' erlang mode
- <<"">> -> <<$^, $$>>;
- RE -> RE
- end,
- case re:run(Name, PermRegexp, [{capture, none}]) of
- match -> true;
- nomatch -> false
- end
- end.
-
-permission_index(configure) -> #permission.configure;
-permission_index(write) -> #permission.write;
-permission_index(read) -> #permission.read.
-
-%%----------------------------------------------------------------------------
-%% Manipulation of the user database
-
-add_user(Username, Password) ->
- rabbit_log:info("Creating user '~s'~n", [Username]),
- R = rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:wread({rabbit_user, Username}) of
- [] ->
- ok = mnesia:write(
- rabbit_user,
- #internal_user{username = Username,
- password_hash =
- hash_password(Password),
- tags = []},
- write);
- _ ->
- mnesia:abort({user_already_exists, Username})
- end
- end),
- R.
-
-delete_user(Username) ->
- rabbit_log:info("Deleting user '~s'~n", [Username]),
- R = rabbit_misc:execute_mnesia_transaction(
- rabbit_misc:with_user(
- Username,
- fun () ->
- ok = mnesia:delete({rabbit_user, Username}),
- [ok = mnesia:delete_object(
- rabbit_user_permission, R, write) ||
- R <- mnesia:match_object(
- rabbit_user_permission,
- #user_permission{user_vhost = #user_vhost{
- username = Username,
- virtual_host = '_'},
- permission = '_'},
- write)],
- ok
- end)),
- R.
-
-change_password(Username, Password) ->
- rabbit_log:info("Changing password for '~s'~n", [Username]),
- change_password_hash(Username, hash_password(Password)).
-
-clear_password(Username) ->
- rabbit_log:info("Clearing password for '~s'~n", [Username]),
- change_password_hash(Username, <<"">>).
-
-change_password_hash(Username, PasswordHash) ->
- R = update_user(Username, fun(User) ->
- User#internal_user{
- password_hash = PasswordHash }
- end),
- R.
-
-hash_password(Cleartext) ->
- Salt = make_salt(),
- Hash = salted_md5(Salt, Cleartext),
- <<Salt/binary, Hash/binary>>.
-
-check_password(Cleartext, <<Salt:4/binary, Hash/binary>>) ->
- Hash =:= salted_md5(Salt, Cleartext);
-check_password(_Cleartext, _Any) ->
- false.
-
-make_salt() ->
- {A1,A2,A3} = now(),
- random:seed(A1, A2, A3),
- Salt = random:uniform(16#ffffffff),
- <<Salt:32>>.
-
-salted_md5(Salt, Cleartext) ->
- Salted = <<Salt/binary, Cleartext/binary>>,
- erlang:md5(Salted).
-
-set_tags(Username, Tags) ->
- rabbit_log:info("Setting user tags for user '~s' to ~p~n", [Username, Tags]),
- R = update_user(Username, fun(User) ->
- User#internal_user{tags = Tags}
- end),
- R.
-
-update_user(Username, Fun) ->
- rabbit_misc:execute_mnesia_transaction(
- rabbit_misc:with_user(
- Username,
- fun () ->
- {ok, User} = lookup_user(Username),
- ok = mnesia:write(rabbit_user, Fun(User), write)
- end)).
-
-list_users() ->
- [[{user, Username}, {tags, Tags}] ||
- #internal_user{username = Username, tags = Tags} <-
- mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})].
-
-user_info_keys() -> ?USER_INFO_KEYS.
-
-lookup_user(Username) ->
- rabbit_misc:dirty_read({rabbit_user, Username}).
-
-validate_regexp(RegexpBin) ->
- Regexp = binary_to_list(RegexpBin),
- case re:compile(Regexp) of
- {ok, _} -> ok;
- {error, Reason} -> throw({error, {invalid_regexp, Regexp, Reason}})
- end.
-
-set_permissions(Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm) ->
- rabbit_log:info("Setting permissions for '~s' in '~s' to '~s', '~s', '~s'~n",
- [Username, VHostPath, ConfigurePerm, WritePerm, ReadPerm]),
- lists:map(fun validate_regexp/1, [ConfigurePerm, WritePerm, ReadPerm]),
- rabbit_misc:execute_mnesia_transaction(
- rabbit_misc:with_user_and_vhost(
- Username, VHostPath,
- fun () -> ok = mnesia:write(
- rabbit_user_permission,
- #user_permission{user_vhost = #user_vhost{
- username = Username,
- virtual_host = VHostPath},
- permission = #permission{
- configure = ConfigurePerm,
- write = WritePerm,
- read = ReadPerm}},
- write)
- end)).
-
-
-clear_permissions(Username, VHostPath) ->
- rabbit_misc:execute_mnesia_transaction(
- rabbit_misc:with_user_and_vhost(
- Username, VHostPath,
- fun () ->
- ok = mnesia:delete({rabbit_user_permission,
- #user_vhost{username = Username,
- virtual_host = VHostPath}})
- end)).
-
-perms_info_keys() -> [user, vhost | ?PERMS_INFO_KEYS].
-vhost_perms_info_keys() -> [user | ?PERMS_INFO_KEYS].
-user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS].
-user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS.
-
-list_permissions() ->
- list_permissions(perms_info_keys(), match_user_vhost('_', '_')).
-
-list_vhost_permissions(VHostPath) ->
- list_permissions(
- vhost_perms_info_keys(),
- rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))).
-
-list_user_permissions(Username) ->
- list_permissions(
- user_perms_info_keys(),
- rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))).
-
-list_user_vhost_permissions(Username, VHostPath) ->
- list_permissions(
- user_vhost_perms_info_keys(),
- rabbit_misc:with_user_and_vhost(
- Username, VHostPath, match_user_vhost(Username, VHostPath))).
-
-filter_props(Keys, Props) -> [T || T = {K, _} <- Props, lists:member(K, Keys)].
-
-list_permissions(Keys, QueryThunk) ->
- [filter_props(Keys, [{user, Username},
- {vhost, VHostPath},
- {configure, ConfigurePerm},
- {write, WritePerm},
- {read, ReadPerm}]) ||
- #user_permission{user_vhost = #user_vhost{username = Username,
- virtual_host = VHostPath},
- permission = #permission{ configure = ConfigurePerm,
- write = WritePerm,
- read = ReadPerm}} <-
- %% TODO: use dirty ops instead
- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
-
-match_user_vhost(Username, VHostPath) ->
- fun () -> mnesia:match_object(
- rabbit_user_permission,
- #user_permission{user_vhost = #user_vhost{
- username = Username,
- virtual_host = VHostPath},
- permission = '_'},
- read)
- end.
diff --git a/src/rabbit_auth_mechanism.erl b/src/rabbit_auth_mechanism.erl
deleted file mode 100644
index 21528b11..00000000
--- a/src/rabbit_auth_mechanism.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_mechanism).
-
--ifdef(use_specs).
-
-%% A description.
--callback description() -> [proplists:property()].
-
-%% If this mechanism is enabled, should it be offered for a given socket?
-%% (primarily so EXTERNAL can be SSL-only)
--callback should_offer(rabbit_net:socket()) -> boolean().
-
-%% Called before authentication starts. Should create a state
-%% object to be passed through all the stages of authentication.
--callback init(rabbit_net:socket()) -> any().
-
-%% Handle a stage of authentication. Possible responses:
-%% {ok, User}
-%% Authentication succeeded, and here's the user record.
-%% {challenge, Challenge, NextState}
-%% Another round is needed. Here's the state I want next time.
-%% {protocol_error, Msg, Args}
-%% Client got the protocol wrong. Log and die.
-%% {refused, Msg, Args}
-%% Client failed authentication. Log and die.
--callback handle_response(binary(), any()) ->
- {'ok', rabbit_types:user()} |
- {'challenge', binary(), any()} |
- {'protocol_error', string(), [any()]} |
- {'refused', string(), [any()]}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {should_offer, 1}, {init, 1}, {handle_response, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_auth_mechanism_amqplain.erl b/src/rabbit_auth_mechanism_amqplain.erl
deleted file mode 100644
index 8e896b45..00000000
--- a/src/rabbit_auth_mechanism_amqplain.erl
+++ /dev/null
@@ -1,55 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_mechanism_amqplain).
--include("rabbit.hrl").
-
--behaviour(rabbit_auth_mechanism).
-
--export([description/0, should_offer/1, init/1, handle_response/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "auth mechanism amqplain"},
- {mfa, {rabbit_registry, register,
- [auth_mechanism, <<"AMQPLAIN">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-%% AMQPLAIN, as used by Qpid Python test suite. The 0-8 spec actually
-%% defines this as PLAIN, but in 0-9 that definition is gone, instead
-%% referring generically to "SASL security mechanism", i.e. the above.
-
-description() ->
- [{description, <<"QPid AMQPLAIN mechanism">>}].
-
-should_offer(_Sock) ->
- true.
-
-init(_Sock) ->
- [].
-
-handle_response(Response, _State) ->
- LoginTable = rabbit_binary_parser:parse_table(Response),
- case {lists:keysearch(<<"LOGIN">>, 1, LoginTable),
- lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of
- {{value, {_, longstr, User}},
- {value, {_, longstr, Pass}}} ->
- rabbit_access_control:check_user_pass_login(User, Pass);
- _ ->
- {protocol_error,
- "AMQPLAIN auth info ~w is missing LOGIN or PASSWORD field",
- [LoginTable]}
- end.
diff --git a/src/rabbit_auth_mechanism_cr_demo.erl b/src/rabbit_auth_mechanism_cr_demo.erl
deleted file mode 100644
index 8699a9fa..00000000
--- a/src/rabbit_auth_mechanism_cr_demo.erl
+++ /dev/null
@@ -1,57 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_mechanism_cr_demo).
--include("rabbit.hrl").
-
--behaviour(rabbit_auth_mechanism).
-
--export([description/0, should_offer/1, init/1, handle_response/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "auth mechanism cr-demo"},
- {mfa, {rabbit_registry, register,
- [auth_mechanism, <<"RABBIT-CR-DEMO">>,
- ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
--record(state, {username = undefined}).
-
-%% Provides equivalent security to PLAIN but demos use of Connection.Secure(Ok)
-%% START-OK: Username
-%% SECURE: "Please tell me your password"
-%% SECURE-OK: "My password is ~s", [Password]
-
-description() ->
- [{description, <<"RabbitMQ Demo challenge-response authentication "
- "mechanism">>}].
-
-should_offer(_Sock) ->
- true.
-
-init(_Sock) ->
- #state{}.
-
-handle_response(Response, State = #state{username = undefined}) ->
- {challenge, <<"Please tell me your password">>,
- State#state{username = Response}};
-
-handle_response(<<"My password is ", Password/binary>>,
- #state{username = Username}) ->
- rabbit_access_control:check_user_pass_login(Username, Password);
-handle_response(Response, _State) ->
- {protocol_error, "Invalid response '~s'", [Response]}.
diff --git a/src/rabbit_auth_mechanism_plain.erl b/src/rabbit_auth_mechanism_plain.erl
deleted file mode 100644
index a7e8fb36..00000000
--- a/src/rabbit_auth_mechanism_plain.erl
+++ /dev/null
@@ -1,73 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_mechanism_plain).
--include("rabbit.hrl").
-
--behaviour(rabbit_auth_mechanism).
-
--export([description/0, should_offer/1, init/1, handle_response/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "auth mechanism plain"},
- {mfa, {rabbit_registry, register,
- [auth_mechanism, <<"PLAIN">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-%% SASL PLAIN, as used by the Qpid Java client and our clients. Also,
-%% apparently, by OpenAMQ.
-
-%% TODO: once the minimum erlang becomes R13B03, reimplement this
-%% using the binary module - that makes use of BIFs to do binary
-%% matching and will thus be much faster.
-
-description() ->
- [{description, <<"SASL PLAIN authentication mechanism">>}].
-
-should_offer(_Sock) ->
- true.
-
-init(_Sock) ->
- [].
-
-handle_response(Response, _State) ->
- case extract_user_pass(Response) of
- {ok, User, Pass} ->
- rabbit_access_control:check_user_pass_login(User, Pass);
- error ->
- {protocol_error, "response ~p invalid", [Response]}
- end.
-
-extract_user_pass(Response) ->
- case extract_elem(Response) of
- {ok, User, Response1} -> case extract_elem(Response1) of
- {ok, Pass, <<>>} -> {ok, User, Pass};
- _ -> error
- end;
- error -> error
- end.
-
-extract_elem(<<0:8, Rest/binary>>) ->
- Count = next_null_pos(Rest, 0),
- <<Elem:Count/binary, Rest1/binary>> = Rest,
- {ok, Elem, Rest1};
-extract_elem(_) ->
- error.
-
-next_null_pos(<<>>, Count) -> Count;
-next_null_pos(<<0:8, _Rest/binary>>, Count) -> Count;
-next_null_pos(<<_:8, Rest/binary>>, Count) -> next_null_pos(Rest, Count + 1).
diff --git a/src/rabbit_autoheal.erl b/src/rabbit_autoheal.erl
deleted file mode 100644
index a5b91867..00000000
--- a/src/rabbit_autoheal.erl
+++ /dev/null
@@ -1,199 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_autoheal).
-
--export([init/0, maybe_start/1, node_down/2, handle_msg/3]).
-
-%% The named process we are running in.
--define(SERVER, rabbit_node_monitor).
-
-%%----------------------------------------------------------------------------
-
-%% In order to autoheal we want to:
-%%
-%% * Find the winning partition
-%% * Stop all nodes in other partitions
-%% * Wait for them all to be stopped
-%% * Start them again
-%%
-%% To keep things simple, we assume all nodes are up. We don't start
-%% unless all nodes are up, and if a node goes down we abandon the
-%% whole process. To further keep things simple we also defer the
-%% decision as to the winning node to the "leader" - arbitrarily
-%% selected as the first node in the cluster.
-%%
-%% To coordinate the restarting nodes we pick a special node from the
-%% winning partition - the "winner". Restarting nodes then stop, tell
-%% the winner they have done so, and wait for it to tell them it is
-%% safe to start again. The winner and the leader are not necessarily
-%% the same node.
-%%
-%% Possible states:
-%%
-%% not_healing
-%% - the default
-%%
-%% {winner_waiting, OutstandingStops, Notify}
-%% - we are the winner and are waiting for all losing nodes to stop
-%% before telling them they can restart
-%%
-%% restarting
-%% - we are restarting. Of course the node monitor immediately dies
-%% then so this state does not last long. We therefore send the
-%% autoheal_safe_to_start message to the rabbit_outside_app_process
-%% instead.
-
-%%----------------------------------------------------------------------------
-
-init() -> not_healing.
-
-maybe_start(not_healing) ->
- case enabled() of
- true -> [Leader | _] = lists:usort(rabbit_mnesia:cluster_nodes(all)),
- send(Leader, {request_start, node()}),
- rabbit_log:info("Autoheal request sent to ~p~n", [Leader]),
- not_healing;
- false -> not_healing
- end;
-maybe_start(State) ->
- State.
-
-enabled() ->
- {ok, autoheal} =:= application:get_env(rabbit, cluster_partition_handling).
-
-node_down(_Node, {winner_waiting, _Nodes, _Notify} = Autoheal) ->
- Autoheal;
-node_down(_Node, not_healing) ->
- not_healing;
-node_down(Node, _State) ->
- rabbit_log:info("Autoheal: aborting - ~p went down~n", [Node]),
- not_healing.
-
-%% By receiving this message we become the leader
-%% TODO should we try to debounce this?
-handle_msg({request_start, Node},
- not_healing, Partitions) ->
- rabbit_log:info("Autoheal request received from ~p~n", [Node]),
- case rabbit_node_monitor:all_rabbit_nodes_up() of
- false -> not_healing;
- true -> AllPartitions = all_partitions(Partitions),
- {Winner, Losers} = make_decision(AllPartitions),
- rabbit_log:info("Autoheal decision~n"
- " * Partitions: ~p~n"
- " * Winner: ~p~n"
- " * Losers: ~p~n",
- [AllPartitions, Winner, Losers]),
- send(Winner, {become_winner, Losers}),
- [send(L, {winner_is, Winner}) || L <- Losers],
- not_healing
- end;
-
-handle_msg({request_start, Node},
- State, _Partitions) ->
- rabbit_log:info("Autoheal request received from ~p when in state ~p; "
- "ignoring~n", [Node, State]),
- State;
-
-handle_msg({become_winner, Losers},
- not_healing, _Partitions) ->
- rabbit_log:info("Autoheal: I am the winner, waiting for ~p to stop~n",
- [Losers]),
- {winner_waiting, Losers, Losers};
-
-handle_msg({become_winner, Losers},
- {winner_waiting, WaitFor, Notify}, _Partitions) ->
- rabbit_log:info("Autoheal: I am the winner, waiting additionally for "
- "~p to stop~n", [Losers]),
- {winner_waiting, lists:usort(Losers ++ WaitFor),
- lists:usort(Losers ++ Notify)};
-
-handle_msg({winner_is, Winner},
- not_healing, _Partitions) ->
- rabbit_log:warning(
- "Autoheal: we were selected to restart; winner is ~p~n", [Winner]),
- rabbit_node_monitor:run_outside_applications(
- fun () ->
- MRef = erlang:monitor(process, {?SERVER, Winner}),
- rabbit:stop(),
- send(Winner, {node_stopped, node()}),
- receive
- {'DOWN', MRef, process, {?SERVER, Winner}, _Reason} -> ok;
- autoheal_safe_to_start -> ok
- end,
- erlang:demonitor(MRef, [flush]),
- rabbit:start()
- end),
- restarting;
-
-%% This is the winner receiving its last notification that a node has
-%% stopped - all nodes can now start again
-handle_msg({node_stopped, Node},
- {winner_waiting, [Node], Notify}, _Partitions) ->
- rabbit_log:info("Autoheal: final node has stopped, starting...~n",[]),
- [{rabbit_outside_app_process, N} ! autoheal_safe_to_start || N <- Notify],
- not_healing;
-
-handle_msg({node_stopped, Node},
- {winner_waiting, WaitFor, Notify}, _Partitions) ->
- {winner_waiting, WaitFor -- [Node], Notify};
-
-handle_msg(_, restarting, _Partitions) ->
- %% ignore, we can contribute no further
- restarting;
-
-handle_msg({node_stopped, _Node}, State, _Partitions) ->
- %% ignore, we already cancelled the autoheal process
- State.
-
-%%----------------------------------------------------------------------------
-
-send(Node, Msg) -> {?SERVER, Node} ! {autoheal_msg, Msg}.
-
-make_decision(AllPartitions) ->
- Sorted = lists:sort([{partition_value(P), P} || P <- AllPartitions]),
- [[Winner | _] | Rest] = lists:reverse([P || {_, P} <- Sorted]),
- {Winner, lists:append(Rest)}.
-
-partition_value(Partition) ->
- Connections = [Res || Node <- Partition,
- Res <- [rpc:call(Node, rabbit_networking,
- connections_local, [])],
- is_list(Res)],
- {length(lists:append(Connections)), length(Partition)}.
-
-%% We have our local understanding of what partitions exist; but we
-%% only know which nodes we have been partitioned from, not which
-%% nodes are partitioned from each other.
-all_partitions(PartitionedWith) ->
- Nodes = rabbit_mnesia:cluster_nodes(all),
- Partitions = [{node(), PartitionedWith} |
- rabbit_node_monitor:partitions(Nodes -- [node()])],
- all_partitions(Partitions, [Nodes]).
-
-all_partitions([], Partitions) ->
- Partitions;
-all_partitions([{Node, CantSee} | Rest], Partitions) ->
- {[Containing], Others} =
- lists:partition(fun (Part) -> lists:member(Node, Part) end, Partitions),
- A = Containing -- CantSee,
- B = Containing -- A,
- Partitions1 = case {A, B} of
- {[], _} -> Partitions;
- {_, []} -> Partitions;
- _ -> [A, B | Others]
- end,
- all_partitions(Rest, Partitions1).
diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl
deleted file mode 100644
index 61b504bc..00000000
--- a/src/rabbit_backing_queue.erl
+++ /dev/null
@@ -1,237 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_backing_queue).
-
--ifdef(use_specs).
-
-%% We can't specify a per-queue ack/state with callback signatures
--type(ack() :: any()).
--type(state() :: any()).
-
--type(msg_ids() :: [rabbit_types:msg_id()]).
--type(fetch_result(Ack) ::
- ('empty' | {rabbit_types:basic_message(), boolean(), Ack})).
--type(drop_result(Ack) ::
- ('empty' | {rabbit_types:msg_id(), Ack})).
--type(attempt_recovery() :: boolean()).
--type(purged_msg_count() :: non_neg_integer()).
--type(async_callback() ::
- fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')).
--type(duration() :: ('undefined' | 'infinity' | number())).
-
--type(msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A)).
--type(msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean())).
-
-%% Called on startup with a list of durable queue names. The queues
-%% aren't being started at this point, but this call allows the
-%% backing queue to perform any checking necessary for the consistency
-%% of those queues, or initialise any other shared resources.
--callback start([rabbit_amqqueue:name()]) -> 'ok'.
-
-%% Called to tear down any state/resources. NB: Implementations should
-%% not depend on this function being called on shutdown and instead
-%% should hook into the rabbit supervision hierarchy.
--callback stop() -> 'ok'.
-
-%% Initialise the backing queue and its state.
-%%
-%% Takes
-%% 1. the amqqueue record
-%% 2. a boolean indicating whether the queue is an existing queue that
-%% should be recovered
-%% 3. an asynchronous callback which accepts a function of type
-%% backing-queue-state to backing-queue-state. This callback
-%% function can be safely invoked from any process, which makes it
-%% useful for passing messages back into the backing queue,
-%% especially as the backing queue does not have control of its own
-%% mailbox.
--callback init(rabbit_types:amqqueue(), attempt_recovery(),
- async_callback()) -> state().
-
-%% Called on queue shutdown when queue isn't being deleted.
--callback terminate(any(), state()) -> state().
-
-%% Called when the queue is terminating and needs to delete all its
-%% content.
--callback delete_and_terminate(any(), state()) -> state().
-
-%% Remove all 'fetchable' messages from the queue, i.e. all messages
-%% except those that have been fetched already and are pending acks.
--callback purge(state()) -> {purged_msg_count(), state()}.
-
-%% Remove all messages in the queue which have been fetched and are
-%% pending acks.
--callback purge_acks(state()) -> state().
-
-%% Publish a message.
--callback publish(rabbit_types:basic_message(),
- rabbit_types:message_properties(), boolean(), pid(),
- state()) -> state().
-
-%% Called for messages which have already been passed straight
-%% out to a client. The queue will be empty for these calls
-%% (i.e. saves the round trip through the backing queue).
--callback publish_delivered(rabbit_types:basic_message(),
- rabbit_types:message_properties(), pid(), state())
- -> {ack(), state()}.
-
-%% Called to inform the BQ about messages which have reached the
-%% queue, but are not going to be further passed to BQ.
--callback discard(rabbit_types:msg_id(), pid(), state()) -> state().
-
-%% Return ids of messages which have been confirmed since the last
-%% invocation of this function (or initialisation).
-%%
-%% Message ids should only appear in the result of drain_confirmed
-%% under the following circumstances:
-%%
-%% 1. The message appears in a call to publish_delivered/4 and the
-%% first argument (ack_required) is false; or
-%% 2. The message is fetched from the queue with fetch/2 and the first
-%% argument (ack_required) is false; or
-%% 3. The message is acked (ack/2 is called for the message); or
-%% 4. The message is fully fsync'd to disk in such a way that the
-%% recovery of the message is guaranteed in the event of a crash of
-%% this rabbit node (excluding hardware failure).
-%%
-%% In addition to the above conditions, a message id may only appear
-%% in the result of drain_confirmed if
-%% #message_properties.needs_confirming = true when the msg was
-%% published (through whichever means) to the backing queue.
-%%
-%% It is legal for the same message id to appear in the results of
-%% multiple calls to drain_confirmed, which means that the backing
-%% queue is not required to keep track of which messages it has
-%% already confirmed. The confirm will be issued to the publisher the
-%% first time the message id appears in the result of
-%% drain_confirmed. All subsequent appearances of that message id will
-%% be ignored.
--callback drain_confirmed(state()) -> {msg_ids(), state()}.
-
-%% Drop messages from the head of the queue while the supplied
-%% predicate on message properties returns true. Returns the first
-%% message properties for which the predictate returned false, or
-%% 'undefined' if the whole backing queue was traversed w/o the
-%% predicate ever returning false.
--callback dropwhile(msg_pred(), state())
- -> {rabbit_types:message_properties() | undefined, state()}.
-
-%% Like dropwhile, except messages are fetched in "require
-%% acknowledgement" mode and are passed, together with their ack tag,
-%% to the supplied function. The function is also fed an
-%% accumulator. The result of fetchwhile is as for dropwhile plus the
-%% accumulator.
--callback fetchwhile(msg_pred(), msg_fun(A), A, state())
- -> {rabbit_types:message_properties() | undefined,
- A, state()}.
-
-%% Produce the next message.
--callback fetch(true, state()) -> {fetch_result(ack()), state()};
- (false, state()) -> {fetch_result(undefined), state()}.
-
-%% Remove the next message.
--callback drop(true, state()) -> {drop_result(ack()), state()};
- (false, state()) -> {drop_result(undefined), state()}.
-
-%% Acktags supplied are for messages which can now be forgotten
-%% about. Must return 1 msg_id per Ack, in the same order as Acks.
--callback ack([ack()], state()) -> {msg_ids(), state()}.
-
-%% Reinsert messages into the queue which have already been delivered
-%% and were pending acknowledgement.
--callback requeue([ack()], state()) -> {msg_ids(), state()}.
-
-%% Fold over messages by ack tag. The supplied function is called with
-%% each message, its ack tag, and an accumulator.
--callback ackfold(msg_fun(A), A, state(), [ack()]) -> {A, state()}.
-
-%% Fold over all the messages in a queue and return the accumulated
-%% results, leaving the queue undisturbed.
--callback fold(fun((rabbit_types:basic_message(),
- rabbit_types:message_properties(),
- boolean(), A) -> {('stop' | 'cont'), A}),
- A, state()) -> {A, state()}.
-
-%% How long is my queue?
--callback len(state()) -> non_neg_integer().
-
-%% Is my queue empty?
--callback is_empty(state()) -> boolean().
-
-%% What's the queue depth, where depth = length + number of pending acks
--callback depth(state()) -> non_neg_integer().
-
-%% For the next three functions, the assumption is that you're
-%% monitoring something like the ingress and egress rates of the
-%% queue. The RAM duration is thus the length of time represented by
-%% the messages held in RAM given the current rates. If you want to
-%% ignore all of this stuff, then do so, and return 0 in
-%% ram_duration/1.
-
-%% The target is to have no more messages in RAM than indicated by the
-%% duration and the current queue rates.
--callback set_ram_duration_target(duration(), state()) -> state().
-
-%% Optionally recalculate the duration internally (likely to be just
-%% update your internal rates), and report how many seconds the
-%% messages in RAM represent given the current rates of the queue.
--callback ram_duration(state()) -> {duration(), state()}.
-
-%% Should 'timeout' be called as soon as the queue process can manage
-%% (either on an empty mailbox, or when a timer fires)?
--callback needs_timeout(state()) -> 'false' | 'timed' | 'idle'.
-
-%% Called (eventually) after needs_timeout returns 'idle' or 'timed'.
-%% Note this may be called more than once for each 'idle' or 'timed'
-%% returned from needs_timeout
--callback timeout(state()) -> state().
-
-%% Called immediately before the queue hibernates.
--callback handle_pre_hibernate(state()) -> state().
-
-%% Exists for debugging purposes, to be able to expose state via
-%% rabbitmqctl list_queues backing_queue_status
--callback status(state()) -> [{atom(), any()}].
-
-%% Passed a function to be invoked with the relevant backing queue's
-%% state. Useful for when the backing queue or other components need
-%% to pass functions into the backing queue.
--callback invoke(atom(), fun ((atom(), A) -> A), state()) -> state().
-
-%% Called prior to a publish or publish_delivered call. Allows the BQ
-%% to signal that it's already seen this message, (e.g. it was published
-%% or discarded previously) and thus the message should be dropped.
--callback is_duplicate(rabbit_types:basic_message(), state())
- -> {boolean(), state()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{start, 1}, {stop, 0}, {init, 3}, {terminate, 2},
- {delete_and_terminate, 2}, {purge, 1}, {purge_acks, 1}, {publish, 5},
- {publish_delivered, 4}, {discard, 3}, {drain_confirmed, 1},
- {dropwhile, 2}, {fetchwhile, 4},
- {fetch, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3}, {len, 1},
- {is_empty, 1}, {depth, 1}, {set_ram_duration_target, 2},
- {ram_duration, 1}, {needs_timeout, 1}, {timeout, 1},
- {handle_pre_hibernate, 1}, {status, 1}, {invoke, 3}, {is_duplicate, 2}] ;
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_backing_queue_qc.erl b/src/rabbit_backing_queue_qc.erl
deleted file mode 100644
index e2bc3247..00000000
--- a/src/rabbit_backing_queue_qc.erl
+++ /dev/null
@@ -1,453 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_backing_queue_qc).
--ifdef(use_proper_qc).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("proper/include/proper.hrl").
-
--behaviour(proper_statem).
-
--define(BQMOD, rabbit_variable_queue).
--define(QUEUE_MAXLEN, 10000).
--define(TIMEOUT_LIMIT, 100).
-
--define(RECORD_INDEX(Key, Record),
- proplists:get_value(
- Key, lists:zip(record_info(fields, Record),
- lists:seq(2, record_info(size, Record))))).
-
--export([initial_state/0, command/1, precondition/2, postcondition/3,
- next_state/3]).
-
--export([prop_backing_queue_test/0, publish_multiple/1, timeout/2]).
-
--record(state, {bqstate,
- len, %% int
- next_seq_id, %% int
- messages, %% gb_trees of seqid => {msg_props, basic_msg}
- acks, %% [{acktag, {seqid, {msg_props, basic_msg}}}]
- confirms, %% set of msgid
- publishing}).%% int
-
-%% Initialise model
-
-initial_state() ->
- #state{bqstate = qc_variable_queue_init(qc_test_queue()),
- len = 0,
- next_seq_id = 0,
- messages = gb_trees:empty(),
- acks = [],
- confirms = gb_sets:new(),
- publishing = 0}.
-
-%% Property
-
-prop_backing_queue_test() ->
- ?FORALL(Cmds, commands(?MODULE, initial_state()),
- backing_queue_test(Cmds)).
-
-backing_queue_test(Cmds) ->
- {ok, FileSizeLimit} =
- application:get_env(rabbit, msg_store_file_size_limit),
- application:set_env(rabbit, msg_store_file_size_limit, 512,
- infinity),
- {ok, MaxJournal} =
- application:get_env(rabbit, queue_index_max_journal_entries),
- application:set_env(rabbit, queue_index_max_journal_entries, 128,
- infinity),
-
- {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds),
-
- application:set_env(rabbit, msg_store_file_size_limit,
- FileSizeLimit, infinity),
- application:set_env(rabbit, queue_index_max_journal_entries,
- MaxJournal, infinity),
-
- ?BQMOD:delete_and_terminate(shutdown, BQ),
- ?WHENFAIL(
- io:format("Result: ~p~n", [Res]),
- aggregate(command_names(Cmds), Res =:= ok)).
-
-%% Commands
-
-%% Command frequencies are tuned so that queues are normally
-%% reasonably short, but they may sometimes exceed
-%% ?QUEUE_MAXLEN. Publish-multiple and purging cause extreme queue
-%% lengths, so these have lower probabilities. Fetches/drops are
-%% sufficiently frequent so that commands that need acktags get decent
-%% coverage.
-
-command(S) ->
- frequency([{10, qc_publish(S)},
- {1, qc_publish_delivered(S)},
- {1, qc_publish_multiple(S)}, %% very slow
- {9, qc_fetch(S)}, %% needed for ack and requeue
- {6, qc_drop(S)}, %%
- {15, qc_ack(S)},
- {15, qc_requeue(S)},
- {3, qc_set_ram_duration_target(S)},
- {1, qc_ram_duration(S)},
- {1, qc_drain_confirmed(S)},
- {1, qc_dropwhile(S)},
- {1, qc_is_empty(S)},
- {1, qc_timeout(S)},
- {1, qc_purge(S)},
- {1, qc_fold(S)}]).
-
-qc_publish(#state{bqstate = BQ}) ->
- {call, ?BQMOD, publish,
- [qc_message(),
- #message_properties{needs_confirming = frequency([{1, true},
- {20, false}]),
- expiry = oneof([undefined | lists:seq(1, 10)])},
- false, self(), BQ]}.
-
-qc_publish_multiple(#state{}) ->
- {call, ?MODULE, publish_multiple, [resize(?QUEUE_MAXLEN, pos_integer())]}.
-
-qc_publish_delivered(#state{bqstate = BQ}) ->
- {call, ?BQMOD, publish_delivered,
- [qc_message(), #message_properties{}, self(), BQ]}.
-
-qc_fetch(#state{bqstate = BQ}) ->
- {call, ?BQMOD, fetch, [boolean(), BQ]}.
-
-qc_drop(#state{bqstate = BQ}) ->
- {call, ?BQMOD, drop, [boolean(), BQ]}.
-
-qc_ack(#state{bqstate = BQ, acks = Acks}) ->
- {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_requeue(#state{bqstate = BQ, acks = Acks}) ->
- {call, ?BQMOD, requeue, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_set_ram_duration_target(#state{bqstate = BQ}) ->
- {call, ?BQMOD, set_ram_duration_target,
- [oneof([0, 1, 2, resize(1000, pos_integer()), infinity]), BQ]}.
-
-qc_ram_duration(#state{bqstate = BQ}) ->
- {call, ?BQMOD, ram_duration, [BQ]}.
-
-qc_drain_confirmed(#state{bqstate = BQ}) ->
- {call, ?BQMOD, drain_confirmed, [BQ]}.
-
-qc_dropwhile(#state{bqstate = BQ}) ->
- {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}.
-
-qc_is_empty(#state{bqstate = BQ}) ->
- {call, ?BQMOD, is_empty, [BQ]}.
-
-qc_timeout(#state{bqstate = BQ}) ->
- {call, ?MODULE, timeout, [BQ, ?TIMEOUT_LIMIT]}.
-
-qc_purge(#state{bqstate = BQ}) ->
- {call, ?BQMOD, purge, [BQ]}.
-
-qc_fold(#state{bqstate = BQ}) ->
- {call, ?BQMOD, fold, [makefoldfun(pos_integer()), foldacc(), BQ]}.
-
-%% Preconditions
-
-%% Create long queues by only allowing publishing
-precondition(#state{publishing = Count}, {call, _Mod, Fun, _Arg})
- when Count > 0, Fun /= publish ->
- false;
-precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg})
- when Fun =:= ack; Fun =:= requeue ->
- length(Acks) > 0;
-precondition(#state{messages = Messages},
- {call, ?BQMOD, publish_delivered, _Arg}) ->
- gb_trees:is_empty(Messages);
-precondition(_S, {call, ?BQMOD, _Fun, _Arg}) ->
- true;
-precondition(_S, {call, ?MODULE, timeout, _Arg}) ->
- true;
-precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) ->
- Len < ?QUEUE_MAXLEN.
-
-%% Model updates
-
-next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Del, _Pid, _BQ]}) ->
- #state{len = Len,
- messages = Messages,
- confirms = Confirms,
- publishing = PublishCount,
- next_seq_id = NextSeq} = S,
- MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
- NeedsConfirm =
- {call, erlang, element,
- [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
- S#state{bqstate = BQ,
- len = Len + 1,
- next_seq_id = NextSeq + 1,
- messages = gb_trees:insert(NextSeq, {MsgProps, Msg}, Messages),
- publishing = {call, erlang, max, [0, {call, erlang, '-',
- [PublishCount, 1]}]},
- confirms = case eval(NeedsConfirm) of
- true -> gb_sets:add(MsgId, Confirms);
- _ -> Confirms
- end};
-
-next_state(S, _BQ, {call, ?MODULE, publish_multiple, [PublishCount]}) ->
- S#state{publishing = PublishCount};
-
-next_state(S, Res,
- {call, ?BQMOD, publish_delivered,
- [Msg, MsgProps, _Pid, _BQ]}) ->
- #state{confirms = Confirms, acks = Acks, next_seq_id = NextSeq} = S,
- AckTag = {call, erlang, element, [1, Res]},
- BQ1 = {call, erlang, element, [2, Res]},
- MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
- NeedsConfirm =
- {call, erlang, element,
- [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
- S#state{bqstate = BQ1,
- next_seq_id = NextSeq + 1,
- confirms = case eval(NeedsConfirm) of
- true -> gb_sets:add(MsgId, Confirms);
- _ -> Confirms
- end,
- acks = [{AckTag, {NextSeq, {MsgProps, Msg}}}|Acks]
- };
-
-next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) ->
- next_state_fetch_and_drop(S, Res, AckReq, 3);
-
-next_state(S, Res, {call, ?BQMOD, drop, [AckReq, _BQ]}) ->
- next_state_fetch_and_drop(S, Res, AckReq, 2);
-
-next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) ->
- #state{acks = AcksState} = S,
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1,
- acks = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _V]}) ->
- #state{messages = Messages, acks = AcksState} = S,
- BQ1 = {call, erlang, element, [2, Res]},
- Messages1 = lists:foldl(fun (AckTag, Msgs) ->
- {SeqId, MsgPropsMsg} =
- proplists:get_value(AckTag, AcksState),
- gb_trees:insert(SeqId, MsgPropsMsg, Msgs)
- end, Messages, AcksArg),
- S#state{bqstate = BQ1,
- len = gb_trees:size(Messages1),
- messages = Messages1,
- acks = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) ->
- S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, ram_duration, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, dropwhile, _Args}) ->
- BQ = {call, erlang, element, [2, Res]},
- #state{messages = Messages} = S,
- Msgs1 = drop_messages(Messages),
- S#state{bqstate = BQ, len = gb_trees:size(Msgs1), messages = Msgs1};
-
-next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) ->
- S;
-
-next_state(S, BQ, {call, ?MODULE, timeout, _Args}) ->
- S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, purge, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1, len = 0, messages = gb_trees:empty()};
-
-next_state(S, Res, {call, ?BQMOD, fold, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1}.
-
-%% Postconditions
-
-postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) ->
- #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
- case Res of
- {{MsgFetched, _IsDelivered, AckTag}, _BQ} ->
- {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
- MsgFetched =:= Msg andalso
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms) andalso
- Len =/= 0;
- {empty, _BQ} ->
- Len =:= 0
- end;
-
-postcondition(S, {call, ?BQMOD, drop, _Args}, Res) ->
- #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
- case Res of
- {{MsgIdFetched, AckTag}, _BQ} ->
- {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
- MsgId = eval({call, erlang, element,
- [?RECORD_INDEX(id, basic_message), Msg]}),
- MsgIdFetched =:= MsgId andalso
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms) andalso
- Len =/= 0;
- {empty, _BQ} ->
- Len =:= 0
- end;
-
-postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) ->
- #state{acks = Acks, confirms = Confrms} = S,
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms);
-
-postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) ->
- {PurgeCount, _BQ} = Res,
- Len =:= PurgeCount;
-
-postcondition(#state{len = Len}, {call, ?BQMOD, is_empty, _Args}, Res) ->
- (Len =:= 0) =:= Res;
-
-postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) ->
- #state{confirms = Confirms} = S,
- {ReportedConfirmed, _BQ} = Res,
- lists:all(fun (M) -> gb_sets:is_element(M, Confirms) end,
- ReportedConfirmed);
-
-postcondition(S, {call, ?BQMOD, fold, [FoldFun, Acc0, _BQ0]}, {Res, _BQ1}) ->
- #state{messages = Messages} = S,
- {_, Model} = lists:foldl(fun ({_SeqId, {_MsgProps, _Msg}}, {stop, Acc}) ->
- {stop, Acc};
- ({_SeqId, {MsgProps, Msg}}, {cont, Acc}) ->
- FoldFun(Msg, MsgProps, false, Acc)
- end, {cont, Acc0}, gb_trees:to_list(Messages)),
- true = Model =:= Res;
-
-postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) ->
- ?BQMOD:len(BQ) =:= Len.
-
-%% Helpers
-
-publish_multiple(_C) ->
- ok.
-
-timeout(BQ, 0) ->
- BQ;
-timeout(BQ, AtMost) ->
- case ?BQMOD:needs_timeout(BQ) of
- false -> BQ;
- _ -> timeout(?BQMOD:timeout(BQ), AtMost - 1)
- end.
-
-qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())).
-
-qc_routing_key() -> noshrink(binary(10)).
-
-qc_delivery_mode() -> oneof([1, 2]).
-
-qc_message() -> qc_message(qc_delivery_mode()).
-
-qc_message(DeliveryMode) ->
- {call, rabbit_basic, message, [qc_default_exchange(),
- qc_routing_key(),
- #'P_basic'{delivery_mode = DeliveryMode},
- qc_message_payload()]}.
-
-qc_default_exchange() ->
- {call, rabbit_misc, r, [<<>>, exchange, <<>>]}.
-
-qc_variable_queue_init(Q) ->
- {call, ?BQMOD, init,
- [Q, false, function(2, ok)]}.
-
-qc_test_q() -> {call, rabbit_misc, r, [<<"/">>, queue, noshrink(binary(16))]}.
-
-qc_test_queue() -> qc_test_queue(boolean()).
-
-qc_test_queue(Durable) ->
- #amqqueue{name = qc_test_q(),
- durable = Durable,
- auto_delete = false,
- arguments = [],
- pid = self()}.
-
-rand_choice([]) -> [];
-rand_choice(List) -> rand_choice(List, [], random:uniform(length(List))).
-
-rand_choice(_List, Selection, 0) ->
- Selection;
-rand_choice(List, Selection, N) ->
- Picked = lists:nth(random:uniform(length(List)), List),
- rand_choice(List -- [Picked], [Picked | Selection],
- N - 1).
-
-makefoldfun(Size) ->
- fun (Msg, _MsgProps, Unacked, Acc) ->
- case {length(Acc) > Size, Unacked} of
- {false, false} -> {cont, [Msg | Acc]};
- {false, true} -> {cont, Acc};
- {true, _} -> {stop, Acc}
- end
- end.
-foldacc() -> [].
-
-dropfun(Props) ->
- Expiry = eval({call, erlang, element,
- [?RECORD_INDEX(expiry, message_properties), Props]}),
- Expiry =/= 1.
-
-drop_messages(Messages) ->
- case gb_trees:is_empty(Messages) of
- true ->
- Messages;
- false -> {_Seq, MsgProps_Msg, M2} = gb_trees:take_smallest(Messages),
- MsgProps = {call, erlang, element, [1, MsgProps_Msg]},
- case dropfun(MsgProps) of
- true -> drop_messages(M2);
- false -> Messages
- end
- end.
-
-next_state_fetch_and_drop(S, Res, AckReq, AckTagIdx) ->
- #state{len = Len, messages = Messages, acks = Acks} = S,
- ResultInfo = {call, erlang, element, [1, Res]},
- BQ1 = {call, erlang, element, [2, Res]},
- AckTag = {call, erlang, element, [AckTagIdx, ResultInfo]},
- S1 = S#state{bqstate = BQ1},
- case gb_trees:is_empty(Messages) of
- true -> S1;
- false -> {SeqId, MsgProp_Msg, M2} = gb_trees:take_smallest(Messages),
- S2 = S1#state{len = Len - 1, messages = M2},
- case AckReq of
- true ->
- S2#state{acks = [{AckTag, {SeqId, MsgProp_Msg}}|Acks]};
- false ->
- S2
- end
- end.
-
--else.
-
--export([prop_disabled/0]).
-
-prop_disabled() ->
- exit({compiled_without_proper,
- "PropEr was not present during compilation of the test module. "
- "Hence all tests are disabled."}).
-
--endif.
diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl
deleted file mode 100644
index 2e825536..00000000
--- a/src/rabbit_basic.erl
+++ /dev/null
@@ -1,276 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_basic).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([publish/4, publish/5, publish/1,
- message/3, message/4, properties/1, prepend_table_header/3,
- extract_headers/1, map_headers/2, delivery/3, header_routes/1,
- parse_expiration/1]).
--export([build_content/2, from_content/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(properties_input() ::
- (rabbit_framing:amqp_property_record() | [{atom(), any()}])).
--type(publish_result() ::
- ({ok, rabbit_amqqueue:routing_result(), [pid()]}
- | rabbit_types:error('not_found'))).
--type(headers() :: rabbit_framing:amqp_table() | 'undefined').
-
--type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
--type(body_input() :: (binary() | [binary()])).
-
--spec(publish/4 ::
- (exchange_input(), rabbit_router:routing_key(), properties_input(),
- body_input()) -> publish_result()).
--spec(publish/5 ::
- (exchange_input(), rabbit_router:routing_key(), boolean(),
- properties_input(), body_input()) -> publish_result()).
--spec(publish/1 ::
- (rabbit_types:delivery()) -> publish_result()).
--spec(delivery/3 ::
- (boolean(), rabbit_types:message(), undefined | integer()) ->
- rabbit_types:delivery()).
--spec(message/4 ::
- (rabbit_exchange:name(), rabbit_router:routing_key(),
- properties_input(), binary()) -> rabbit_types:message()).
--spec(message/3 ::
- (rabbit_exchange:name(), rabbit_router:routing_key(),
- rabbit_types:decoded_content()) ->
- rabbit_types:ok_or_error2(rabbit_types:message(), any())).
--spec(properties/1 ::
- (properties_input()) -> rabbit_framing:amqp_property_record()).
-
--spec(prepend_table_header/3 ::
- (binary(), rabbit_framing:amqp_table(), headers()) -> headers()).
-
--spec(extract_headers/1 :: (rabbit_types:content()) -> headers()).
-
--spec(map_headers/2 :: (fun((headers()) -> headers()), rabbit_types:content())
- -> rabbit_types:content()).
-
--spec(header_routes/1 ::
- (undefined | rabbit_framing:amqp_table()) -> [string()]).
--spec(build_content/2 :: (rabbit_framing:amqp_property_record(),
- binary() | [binary()]) -> rabbit_types:content()).
--spec(from_content/1 :: (rabbit_types:content()) ->
- {rabbit_framing:amqp_property_record(), binary()}).
--spec(parse_expiration/1 ::
- (rabbit_framing:amqp_property_record())
- -> rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any())).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(Exchange, RoutingKeyBin, Properties, Body) ->
- publish(Exchange, RoutingKeyBin, false, Properties, Body).
-
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(X = #exchange{name = XName}, RKey, Mandatory, Props, Body) ->
- Message = message(XName, RKey, properties(Props), Body),
- publish(X, delivery(Mandatory, Message, undefined));
-publish(XName, RKey, Mandatory, Props, Body) ->
- Message = message(XName, RKey, properties(Props), Body),
- publish(delivery(Mandatory, Message, undefined)).
-
-publish(Delivery = #delivery{
- message = #basic_message{exchange_name = XName}}) ->
- case rabbit_exchange:lookup(XName) of
- {ok, X} -> publish(X, Delivery);
- Err -> Err
- end.
-
-publish(X, Delivery) ->
- Qs = rabbit_amqqueue:lookup(rabbit_exchange:route(X, Delivery)),
- {RoutingRes, DeliveredQPids} = rabbit_amqqueue:deliver(Qs, Delivery),
- {ok, RoutingRes, DeliveredQPids}.
-
-delivery(Mandatory, Message, MsgSeqNo) ->
- #delivery{mandatory = Mandatory, sender = self(),
- message = Message, msg_seq_no = MsgSeqNo}.
-
-build_content(Properties, BodyBin) when is_binary(BodyBin) ->
- build_content(Properties, [BodyBin]);
-
-build_content(Properties, PFR) ->
- %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
- {ClassId, _MethodId} =
- rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
- #content{class_id = ClassId,
- properties = Properties,
- properties_bin = none,
- protocol = none,
- payload_fragments_rev = PFR}.
-
-from_content(Content) ->
- #content{class_id = ClassId,
- properties = Props,
- payload_fragments_rev = FragmentsRev} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
- {ClassId, _MethodId} =
- rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
- {Props, list_to_binary(lists:reverse(FragmentsRev))}.
-
-%% This breaks the spec rule forbidding message modification
-strip_header(#content{properties = #'P_basic'{headers = undefined}}
- = DecodedContent, _Key) ->
- DecodedContent;
-strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
- = DecodedContent, Key) ->
- case lists:keysearch(Key, 1, Headers) of
- false -> DecodedContent;
- {value, Found} -> Headers0 = lists:delete(Found, Headers),
- rabbit_binary_generator:clear_encoded_content(
- DecodedContent#content{
- properties = Props#'P_basic'{
- headers = Headers0}})
- end.
-
-message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
- try
- {ok, #basic_message{
- exchange_name = XName,
- content = strip_header(DecodedContent, ?DELETED_HEADER),
- id = rabbit_guid:gen(),
- is_persistent = is_message_persistent(DecodedContent),
- routing_keys = [RoutingKey |
- header_routes(Props#'P_basic'.headers)]}}
- catch
- {error, _Reason} = Error -> Error
- end.
-
-message(XName, RoutingKey, RawProperties, Body) ->
- Properties = properties(RawProperties),
- Content = build_content(Properties, Body),
- {ok, Msg} = message(XName, RoutingKey, Content),
- Msg.
-
-properties(P = #'P_basic'{}) ->
- P;
-properties(P) when is_list(P) ->
- %% Yes, this is O(length(P) * record_info(size, 'P_basic') / 2),
- %% i.e. slow. Use the definition of 'P_basic' directly if
- %% possible!
- lists:foldl(fun ({Key, Value}, Acc) ->
- case indexof(record_info(fields, 'P_basic'), Key) of
- 0 -> throw({unknown_basic_property, Key});
- N -> setelement(N + 1, Acc, Value)
- end
- end, #'P_basic'{}, P).
-
-prepend_table_header(Name, Info, undefined) ->
- prepend_table_header(Name, Info, []);
-prepend_table_header(Name, Info, Headers) ->
- case rabbit_misc:table_lookup(Headers, Name) of
- {array, Existing} ->
- prepend_table(Name, Info, Existing, Headers);
- undefined ->
- prepend_table(Name, Info, [], Headers);
- Other ->
- Headers2 = prepend_table(Name, Info, [], Headers),
- set_invalid_header(Name, Other, Headers2)
- end.
-
-prepend_table(Name, Info, Prior, Headers) ->
- rabbit_misc:set_table_value(Headers, Name, array, [{table, Info} | Prior]).
-
-set_invalid_header(Name, {_, _}=Value, Headers) when is_list(Headers) ->
- case rabbit_misc:table_lookup(Headers, ?INVALID_HEADERS_KEY) of
- undefined ->
- set_invalid([{Name, array, [Value]}], Headers);
- {table, ExistingHdr} ->
- update_invalid(Name, Value, ExistingHdr, Headers);
- Other ->
- %% somehow the x-invalid-headers header is corrupt
- Invalid = [{?INVALID_HEADERS_KEY, array, [Other]}],
- set_invalid_header(Name, Value, set_invalid(Invalid, Headers))
- end.
-
-set_invalid(NewHdr, Headers) ->
- rabbit_misc:set_table_value(Headers, ?INVALID_HEADERS_KEY, table, NewHdr).
-
-update_invalid(Name, Value, ExistingHdr, Header) ->
- Values = case rabbit_misc:table_lookup(ExistingHdr, Name) of
- undefined -> [Value];
- {array, Prior} -> [Value | Prior]
- end,
- NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
- set_invalid(NewHdr, Header).
-
-extract_headers(Content) ->
- #content{properties = #'P_basic'{headers = Headers}} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- Headers.
-
-map_headers(F, Content) ->
- Content1 = rabbit_binary_parser:ensure_content_decoded(Content),
- #content{properties = #'P_basic'{headers = Headers} = Props} = Content1,
- Headers1 = F(Headers),
- rabbit_binary_generator:clear_encoded_content(
- Content1#content{properties = Props#'P_basic'{headers = Headers1}}).
-
-indexof(L, Element) -> indexof(L, Element, 1).
-
-indexof([], _Element, _N) -> 0;
-indexof([Element | _Rest], Element, N) -> N;
-indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1).
-
-is_message_persistent(#content{properties = #'P_basic'{
- delivery_mode = Mode}}) ->
- case Mode of
- 1 -> false;
- 2 -> true;
- undefined -> false;
- Other -> throw({error, {delivery_mode_unknown, Other}})
- end.
-
-%% Extract CC routes from headers
-header_routes(undefined) ->
- [];
-header_routes(HeadersTable) ->
- lists:append(
- [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of
- {array, Routes} -> [Route || {longstr, Route} <- Routes];
- undefined -> [];
- {Type, _Val} -> throw({error, {unacceptable_type_in_header,
- binary_to_list(HeaderKey), Type}})
- end || HeaderKey <- ?ROUTING_HEADERS]).
-
-parse_expiration(#'P_basic'{expiration = undefined}) ->
- {ok, undefined};
-parse_expiration(#'P_basic'{expiration = Expiration}) ->
- case string:to_integer(binary_to_list(Expiration)) of
- {error, no_integer} = E ->
- E;
- {N, ""} ->
- case rabbit_misc:check_expiry(N) of
- ok -> {ok, N};
- E = {error, _} -> E
- end;
- {_, S} ->
- {error, {leftover_string, S}}
- end.
-
diff --git a/src/rabbit_binary_generator.erl b/src/rabbit_binary_generator.erl
deleted file mode 100644
index ae5bbf51..00000000
--- a/src/rabbit_binary_generator.erl
+++ /dev/null
@@ -1,242 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_binary_generator).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([build_simple_method_frame/3,
- build_simple_content_frames/4,
- build_heartbeat_frame/0]).
--export([generate_table/1]).
--export([check_empty_frame_size/0]).
--export([ensure_content_encoded/2, clear_encoded_content/1]).
--export([map_exception/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(frame() :: [binary()]).
-
--spec(build_simple_method_frame/3 ::
- (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
- rabbit_types:protocol())
- -> frame()).
--spec(build_simple_content_frames/4 ::
- (rabbit_channel:channel_number(), rabbit_types:content(),
- non_neg_integer(), rabbit_types:protocol())
- -> [frame()]).
--spec(build_heartbeat_frame/0 :: () -> frame()).
--spec(generate_table/1 :: (rabbit_framing:amqp_table()) -> binary()).
--spec(check_empty_frame_size/0 :: () -> 'ok').
--spec(ensure_content_encoded/2 ::
- (rabbit_types:content(), rabbit_types:protocol()) ->
- rabbit_types:encoded_content()).
--spec(clear_encoded_content/1 ::
- (rabbit_types:content()) -> rabbit_types:unencoded_content()).
--spec(map_exception/3 :: (rabbit_channel:channel_number(),
- rabbit_types:amqp_error() | any(),
- rabbit_types:protocol()) ->
- {rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record()}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
- MethodFields = Protocol:encode_method_fields(MethodRecord),
- MethodName = rabbit_misc:method_record_type(MethodRecord),
- {ClassId, MethodId} = Protocol:method_id(MethodName),
- create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
-
-build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
- #content{class_id = ClassId,
- properties_bin = ContentPropertiesBin,
- payload_fragments_rev = PayloadFragmentsRev} =
- ensure_content_encoded(Content, Protocol),
- {BodySize, ContentFrames} =
- build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
- HeaderFrame = create_frame(2, ChannelInt,
- [<<ClassId:16, 0:16, BodySize:64>>,
- ContentPropertiesBin]),
- [HeaderFrame | ContentFrames].
-
-build_content_frames(FragsRev, FrameMax, ChannelInt) ->
- BodyPayloadMax = if FrameMax == 0 -> iolist_size(FragsRev);
- true -> FrameMax - ?EMPTY_FRAME_SIZE
- end,
- build_content_frames(0, [], BodyPayloadMax, [],
- lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
-
-build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
- [], _BodyPayloadMax, _ChannelInt) ->
- {SizeAcc, lists:reverse(FramesAcc)};
-build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
- Frags, BodyPayloadMax, ChannelInt)
- when FragSizeRem == 0 orelse Frags == [] ->
- Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
- FrameSize = BodyPayloadMax - FragSizeRem,
- build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
- BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
-build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
- [Frag | Frags], BodyPayloadMax, ChannelInt) ->
- Size = size(Frag),
- {NewFragSizeRem, NewFragAcc, NewFrags} =
- if Size == 0 -> {FragSizeRem, FragAcc, Frags};
- Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
- true -> <<Head:FragSizeRem/binary, Tail/binary>> =
- Frag,
- {0, [Head | FragAcc], [Tail | Frags]}
- end,
- build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
- NewFrags, BodyPayloadMax, ChannelInt).
-
-build_heartbeat_frame() ->
- create_frame(?FRAME_HEARTBEAT, 0, <<>>).
-
-create_frame(TypeInt, ChannelInt, Payload) ->
- [<<TypeInt:8, ChannelInt:16, (iolist_size(Payload)):32>>, Payload,
- ?FRAME_END].
-
-%% table_field_to_binary supports the AMQP 0-8/0-9 standard types, S,
-%% I, D, T and F, as well as the QPid extensions b, d, f, l, s, t, x,
-%% and V.
-table_field_to_binary({FName, T, V}) ->
- [short_string_to_binary(FName) | field_value_to_binary(T, V)].
-
-field_value_to_binary(longstr, V) -> ["S", long_string_to_binary(V)];
-field_value_to_binary(signedint, V) -> ["I", <<V:32/signed>>];
-field_value_to_binary(decimal, V) -> {Before, After} = V,
- ["D", Before, <<After:32>>];
-field_value_to_binary(timestamp, V) -> ["T", <<V:64>>];
-field_value_to_binary(table, V) -> ["F", table_to_binary(V)];
-field_value_to_binary(array, V) -> ["A", array_to_binary(V)];
-field_value_to_binary(byte, V) -> ["b", <<V:8/unsigned>>];
-field_value_to_binary(double, V) -> ["d", <<V:64/float>>];
-field_value_to_binary(float, V) -> ["f", <<V:32/float>>];
-field_value_to_binary(long, V) -> ["l", <<V:64/signed>>];
-field_value_to_binary(short, V) -> ["s", <<V:16/signed>>];
-field_value_to_binary(bool, V) -> ["t", if V -> 1; true -> 0 end];
-field_value_to_binary(binary, V) -> ["x", long_string_to_binary(V)];
-field_value_to_binary(void, _V) -> ["V"].
-
-table_to_binary(Table) when is_list(Table) ->
- BinTable = generate_table(Table),
- [<<(size(BinTable)):32>>, BinTable].
-
-array_to_binary(Array) when is_list(Array) ->
- BinArray = generate_array(Array),
- [<<(size(BinArray)):32>>, BinArray].
-
-generate_table(Table) when is_list(Table) ->
- list_to_binary(lists:map(fun table_field_to_binary/1, Table)).
-
-generate_array(Array) when is_list(Array) ->
- list_to_binary(lists:map(fun ({T, V}) -> field_value_to_binary(T, V) end,
- Array)).
-
-short_string_to_binary(String) when is_binary(String) ->
- Len = size(String),
- if Len < 256 -> [<<Len:8>>, String];
- true -> exit(content_properties_shortstr_overflow)
- end;
-short_string_to_binary(String) ->
- Len = length(String),
- if Len < 256 -> [<<Len:8>>, String];
- true -> exit(content_properties_shortstr_overflow)
- end.
-
-long_string_to_binary(String) when is_binary(String) ->
- [<<(size(String)):32>>, String];
-long_string_to_binary(String) ->
- [<<(length(String)):32>>, String].
-
-check_empty_frame_size() ->
- %% Intended to ensure that EMPTY_FRAME_SIZE is defined correctly.
- case iolist_size(create_frame(?FRAME_BODY, 0, <<>>)) of
- ?EMPTY_FRAME_SIZE -> ok;
- ComputedSize -> exit({incorrect_empty_frame_size,
- ComputedSize, ?EMPTY_FRAME_SIZE})
- end.
-
-ensure_content_encoded(Content = #content{properties_bin = PropBin,
- protocol = Protocol}, Protocol)
- when PropBin =/= none ->
- Content;
-ensure_content_encoded(Content = #content{properties = none,
- properties_bin = PropBin,
- protocol = Protocol}, Protocol1)
- when PropBin =/= none ->
- Props = Protocol:decode_properties(Content#content.class_id, PropBin),
- Content#content{properties = Props,
- properties_bin = Protocol1:encode_properties(Props),
- protocol = Protocol1};
-ensure_content_encoded(Content = #content{properties = Props}, Protocol)
- when Props =/= none ->
- Content#content{properties_bin = Protocol:encode_properties(Props),
- protocol = Protocol}.
-
-clear_encoded_content(Content = #content{properties_bin = none,
- protocol = none}) ->
- Content;
-clear_encoded_content(Content = #content{properties = none}) ->
- %% Only clear when we can rebuild the properties_bin later in
- %% accordance to the content record definition comment - maximum
- %% one of properties and properties_bin can be 'none'
- Content;
-clear_encoded_content(Content = #content{}) ->
- Content#content{properties_bin = none, protocol = none}.
-
-%% NB: this function is also used by the Erlang client
-map_exception(Channel, Reason, Protocol) ->
- {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
- lookup_amqp_exception(Reason, Protocol),
- {ClassId, MethodId} = case FailedMethod of
- {_, _} -> FailedMethod;
- none -> {0, 0};
- _ -> Protocol:method_id(FailedMethod)
- end,
- case SuggestedClose orelse (Channel == 0) of
- true -> {0, #'connection.close'{reply_code = ReplyCode,
- reply_text = ReplyText,
- class_id = ClassId,
- method_id = MethodId}};
- false -> {Channel, #'channel.close'{reply_code = ReplyCode,
- reply_text = ReplyText,
- class_id = ClassId,
- method_id = MethodId}}
- end.
-
-lookup_amqp_exception(#amqp_error{name = Name,
- explanation = Expl,
- method = Method},
- Protocol) ->
- {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
- ExplBin = amqp_exception_explanation(Text, Expl),
- {ShouldClose, Code, ExplBin, Method};
-lookup_amqp_exception(Other, Protocol) ->
- rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
- {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
- {ShouldClose, Code, Text, none}.
-
-amqp_exception_explanation(Text, Expl) ->
- ExplBin = list_to_binary(Expl),
- CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
- if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
- true -> CompleteTextBin
- end.
diff --git a/src/rabbit_binary_parser.erl b/src/rabbit_binary_parser.erl
deleted file mode 100644
index dc6d090f..00000000
--- a/src/rabbit_binary_parser.erl
+++ /dev/null
@@ -1,101 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_binary_parser).
-
--include("rabbit.hrl").
-
--export([parse_table/1]).
--export([ensure_content_decoded/1, clear_decoded_content/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(parse_table/1 :: (binary()) -> rabbit_framing:amqp_table()).
--spec(ensure_content_decoded/1 ::
- (rabbit_types:content()) -> rabbit_types:decoded_content()).
--spec(clear_decoded_content/1 ::
- (rabbit_types:content()) -> rabbit_types:undecoded_content()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
-%% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
-
-parse_table(<<>>) ->
- [];
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary, ValueAndRest/binary>>) ->
- {Type, Value, Rest} = parse_field_value(ValueAndRest),
- [{NameString, Type, Value} | parse_table(Rest)].
-
-parse_array(<<>>) ->
- [];
-parse_array(<<ValueAndRest/binary>>) ->
- {Type, Value, Rest} = parse_field_value(ValueAndRest),
- [{Type, Value} | parse_array(Rest)].
-
-parse_field_value(<<"S", VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
- {longstr, V, R};
-
-parse_field_value(<<"I", V:32/signed, R/binary>>) ->
- {signedint, V, R};
-
-parse_field_value(<<"D", Before:8/unsigned, After:32/unsigned, R/binary>>) ->
- {decimal, {Before, After}, R};
-
-parse_field_value(<<"T", V:64/unsigned, R/binary>>) ->
- {timestamp, V, R};
-
-parse_field_value(<<"F", VLen:32/unsigned, Table:VLen/binary, R/binary>>) ->
- {table, parse_table(Table), R};
-
-parse_field_value(<<"A", VLen:32/unsigned, Array:VLen/binary, R/binary>>) ->
- {array, parse_array(Array), R};
-
-parse_field_value(<<"b", V:8/unsigned, R/binary>>) -> {byte, V, R};
-parse_field_value(<<"d", V:64/float, R/binary>>) -> {double, V, R};
-parse_field_value(<<"f", V:32/float, R/binary>>) -> {float, V, R};
-parse_field_value(<<"l", V:64/signed, R/binary>>) -> {long, V, R};
-parse_field_value(<<"s", V:16/signed, R/binary>>) -> {short, V, R};
-parse_field_value(<<"t", V:8/unsigned, R/binary>>) -> {bool, (V /= 0), R};
-
-parse_field_value(<<"x", VLen:32/unsigned, V:VLen/binary, R/binary>>) ->
- {binary, V, R};
-
-parse_field_value(<<"V", R/binary>>) ->
- {void, undefined, R}.
-
-ensure_content_decoded(Content = #content{properties = Props})
- when Props =/= none ->
- Content;
-ensure_content_decoded(Content = #content{properties_bin = PropBin,
- protocol = Protocol})
- when PropBin =/= none ->
- Content#content{properties = Protocol:decode_properties(
- Content#content.class_id, PropBin)}.
-
-clear_decoded_content(Content = #content{properties = none}) ->
- Content;
-clear_decoded_content(Content = #content{properties_bin = none}) ->
- %% Only clear when we can rebuild the properties later in
- %% accordance to the content record definition comment - maximum
- %% one of properties and properties_bin can be 'none'
- Content;
-clear_decoded_content(Content = #content{}) ->
- Content#content{properties = none}.
diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl
deleted file mode 100644
index 91f42e9c..00000000
--- a/src/rabbit_binding.erl
+++ /dev/null
@@ -1,530 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_binding).
--include("rabbit.hrl").
-
--export([recover/2, exists/1, add/1, add/2, remove/1, remove/2, list/1]).
--export([list_for_source/1, list_for_destination/1,
- list_for_source_and_destination/2]).
--export([new_deletions/0, combine_deletions/2, add_deletion/3,
- process_deletions/1]).
--export([info_keys/0, info/1, info/2, info_all/1, info_all/2]).
-%% these must all be run inside a mnesia tx
--export([has_for_source/1, remove_for_source/1,
- remove_for_destination/1, remove_transient_for_destination/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([key/0, deletions/0]).
-
--type(key() :: binary()).
-
--type(bind_errors() :: rabbit_types:error(
- {'resources_missing',
- [{'not_found', (rabbit_types:binding_source() |
- rabbit_types:binding_destination())} |
- {'absent', rabbit_types:amqqueue()}]})).
-
--type(bind_ok_or_error() :: 'ok' | bind_errors() |
- rabbit_types:error(
- 'binding_not_found' |
- {'binding_invalid', string(), [any()]})).
--type(bind_res() :: bind_ok_or_error() | rabbit_misc:thunk(bind_ok_or_error())).
--type(inner_fun() ::
- fun((rabbit_types:exchange(),
- rabbit_types:exchange() | rabbit_types:amqqueue()) ->
- rabbit_types:ok_or_error(rabbit_types:amqp_error()))).
--type(bindings() :: [rabbit_types:binding()]).
-
--opaque(deletions() :: dict()).
-
--spec(recover/2 :: ([rabbit_exchange:name()], [rabbit_amqqueue:name()]) ->
- 'ok').
--spec(exists/1 :: (rabbit_types:binding()) -> boolean() | bind_errors()).
--spec(add/1 :: (rabbit_types:binding()) -> bind_res()).
--spec(add/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
--spec(remove/1 :: (rabbit_types:binding()) -> bind_res()).
--spec(remove/2 :: (rabbit_types:binding(), inner_fun()) -> bind_res()).
--spec(list/1 :: (rabbit_types:vhost()) -> bindings()).
--spec(list_for_source/1 ::
- (rabbit_types:binding_source()) -> bindings()).
--spec(list_for_destination/1 ::
- (rabbit_types:binding_destination()) -> bindings()).
--spec(list_for_source_and_destination/2 ::
- (rabbit_types:binding_source(), rabbit_types:binding_destination()) ->
- bindings()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:binding()) -> rabbit_types:infos()).
--spec(info/2 :: (rabbit_types:binding(), rabbit_types:info_keys()) ->
- rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
- -> [rabbit_types:infos()]).
--spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()).
--spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()).
--spec(remove_for_destination/1 ::
- (rabbit_types:binding_destination()) -> deletions()).
--spec(remove_transient_for_destination/1 ::
- (rabbit_types:binding_destination()) -> deletions()).
--spec(process_deletions/1 :: (deletions()) -> rabbit_misc:thunk('ok')).
--spec(combine_deletions/2 :: (deletions(), deletions()) -> deletions()).
--spec(add_deletion/3 :: (rabbit_exchange:name(),
- {'undefined' | rabbit_types:exchange(),
- 'deleted' | 'not_deleted',
- bindings()}, deletions()) -> deletions()).
--spec(new_deletions/0 :: () -> deletions()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(INFO_KEYS, [source_name, source_kind,
- destination_name, destination_kind,
- routing_key, arguments]).
-
-recover(XNames, QNames) ->
- rabbit_misc:table_filter(
- fun (Route) ->
- mnesia:read({rabbit_semi_durable_route, Route}) =:= []
- end,
- fun (Route, true) ->
- ok = mnesia:write(rabbit_semi_durable_route, Route, write);
- (_Route, false) ->
- ok
- end, rabbit_durable_route),
- XNameSet = sets:from_list(XNames),
- QNameSet = sets:from_list(QNames),
- SelectSet = fun (#resource{kind = exchange}) -> XNameSet;
- (#resource{kind = queue}) -> QNameSet
- end,
- {ok, Gatherer} = gatherer:start_link(),
- [recover_semi_durable_route(Gatherer, R, SelectSet(Dst)) ||
- R = #route{binding = #binding{destination = Dst}} <-
- rabbit_misc:dirty_read_all(rabbit_semi_durable_route)],
- empty = gatherer:out(Gatherer),
- ok = gatherer:stop(Gatherer),
- ok.
-
-recover_semi_durable_route(Gatherer, R = #route{binding = B}, ToRecover) ->
- #binding{source = Src, destination = Dst} = B,
- case sets:is_element(Dst, ToRecover) of
- true -> {ok, X} = rabbit_exchange:lookup(Src),
- ok = gatherer:fork(Gatherer),
- ok = worker_pool:submit_async(
- fun () ->
- recover_semi_durable_route_txn(R, X),
- gatherer:finish(Gatherer)
- end);
- false -> ok
- end.
-
-recover_semi_durable_route_txn(R = #route{binding = B}, X) ->
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:match_object(rabbit_semi_durable_route, R, read) of
- [] -> no_recover;
- _ -> ok = sync_transient_route(R, fun mnesia:write/3),
- rabbit_exchange:serial(X)
- end
- end,
- fun (no_recover, _) -> ok;
- (_Serial, true) -> x_callback(transaction, X, add_binding, B);
- (Serial, false) -> x_callback(Serial, X, add_binding, B)
- end).
-
-exists(Binding) ->
- binding_action(
- Binding, fun (_Src, _Dst, B) ->
- rabbit_misc:const(mnesia:read({rabbit_route, B}) /= [])
- end).
-
-add(Binding) -> add(Binding, fun (_Src, _Dst) -> ok end).
-
-add(Binding, InnerFun) ->
- binding_action(
- Binding,
- fun (Src, Dst, B) ->
- case rabbit_exchange:validate_binding(Src, B) of
- ok ->
- %% this argument is used to check queue exclusivity;
- %% in general, we want to fail on that in preference to
- %% anything else
- case InnerFun(Src, Dst) of
- ok ->
- case mnesia:read({rabbit_route, B}) of
- [] -> add(Src, Dst, B);
- [_] -> fun rabbit_misc:const_ok/0
- end;
- {error, _} = Err ->
- rabbit_misc:const(Err)
- end;
- {error, _} = Err ->
- rabbit_misc:const(Err)
- end
- end).
-
-add(Src, Dst, B) ->
- [SrcDurable, DstDurable] = [durable(E) || E <- [Src, Dst]],
- case (SrcDurable andalso DstDurable andalso
- mnesia:read({rabbit_durable_route, B}) =/= []) of
- false -> ok = sync_route(#route{binding = B}, SrcDurable, DstDurable,
- fun mnesia:write/3),
- x_callback(transaction, Src, add_binding, B),
- Serial = rabbit_exchange:serial(Src),
- fun () ->
- x_callback(Serial, Src, add_binding, B),
- ok = rabbit_event:notify(binding_created, info(B))
- end;
- true -> rabbit_misc:const({error, binding_not_found})
- end.
-
-remove(Binding) -> remove(Binding, fun (_Src, _Dst) -> ok end).
-
-remove(Binding, InnerFun) ->
- binding_action(
- Binding,
- fun (Src, Dst, B) ->
- case mnesia:read(rabbit_route, B, write) of
- [] -> rabbit_misc:const({error, binding_not_found});
- [_] -> case InnerFun(Src, Dst) of
- ok -> remove(Src, Dst, B);
- {error, _} = Err -> rabbit_misc:const(Err)
- end
- end
- end).
-
-remove(Src, Dst, B) ->
- ok = sync_route(#route{binding = B}, durable(Src), durable(Dst),
- fun mnesia:delete_object/3),
- Deletions = maybe_auto_delete(B#binding.source, [B], new_deletions()),
- process_deletions(Deletions).
-
-list(VHostPath) ->
- VHostResource = rabbit_misc:r(VHostPath, '_'),
- Route = #route{binding = #binding{source = VHostResource,
- destination = VHostResource,
- _ = '_'},
- _ = '_'},
- [B || #route{binding = B} <- mnesia:dirty_match_object(rabbit_route,
- Route)].
-
-list_for_source(SrcName) ->
- mnesia:async_dirty(
- fun() ->
- Route = #route{binding = #binding{source = SrcName, _ = '_'}},
- [B || #route{binding = B}
- <- mnesia:match_object(rabbit_route, Route, read)]
- end).
-
-list_for_destination(DstName) ->
- mnesia:async_dirty(
- fun() ->
- Route = #route{binding = #binding{destination = DstName,
- _ = '_'}},
- [reverse_binding(B) ||
- #reverse_route{reverse_binding = B} <-
- mnesia:match_object(rabbit_reverse_route,
- reverse_route(Route), read)]
- end).
-
-list_for_source_and_destination(SrcName, DstName) ->
- mnesia:async_dirty(
- fun() ->
- Route = #route{binding = #binding{source = SrcName,
- destination = DstName,
- _ = '_'}},
- [B || #route{binding = B} <- mnesia:match_object(rabbit_route,
- Route, read)]
- end).
-
-info_keys() -> ?INFO_KEYS.
-
-map(VHostPath, F) ->
- %% TODO: there is scope for optimisation here, e.g. using a
- %% cursor, parallelising the function invocation
- lists:map(F, list(VHostPath)).
-
-infos(Items, B) -> [{Item, i(Item, B)} || Item <- Items].
-
-i(source_name, #binding{source = SrcName}) -> SrcName#resource.name;
-i(source_kind, #binding{source = SrcName}) -> SrcName#resource.kind;
-i(destination_name, #binding{destination = DstName}) -> DstName#resource.name;
-i(destination_kind, #binding{destination = DstName}) -> DstName#resource.kind;
-i(routing_key, #binding{key = RoutingKey}) -> RoutingKey;
-i(arguments, #binding{args = Arguments}) -> Arguments;
-i(Item, _) -> throw({bad_argument, Item}).
-
-info(B = #binding{}) -> infos(?INFO_KEYS, B).
-
-info(B = #binding{}, Items) -> infos(Items, B).
-
-info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end).
-
-info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end).
-
-has_for_source(SrcName) ->
- Match = #route{binding = #binding{source = SrcName, _ = '_'}},
- %% we need to check for durable routes here too in case a bunch of
- %% routes to durable queues have been removed temporarily as a
- %% result of a node failure
- contains(rabbit_route, Match) orelse
- contains(rabbit_semi_durable_route, Match).
-
-remove_for_source(SrcName) ->
- lock_route_tables(),
- Match = #route{binding = #binding{source = SrcName, _ = '_'}},
- remove_routes(
- lists:usort(mnesia:match_object(rabbit_route, Match, write) ++
- mnesia:match_object(rabbit_durable_route, Match, write))).
-
-remove_for_destination(DstName) ->
- remove_for_destination(DstName, fun remove_routes/1).
-
-remove_transient_for_destination(DstName) ->
- remove_for_destination(DstName, fun remove_transient_routes/1).
-
-%%----------------------------------------------------------------------------
-
-durable(#exchange{durable = D}) -> D;
-durable(#amqqueue{durable = D}) -> D.
-
-binding_action(Binding = #binding{source = SrcName,
- destination = DstName,
- args = Arguments}, Fun) ->
- call_with_source_and_destination(
- SrcName, DstName,
- fun (Src, Dst) ->
- SortedArgs = rabbit_misc:sort_field_table(Arguments),
- Fun(Src, Dst, Binding#binding{args = SortedArgs})
- end).
-
-delete_object(Tab, Record, LockKind) ->
- %% this 'guarded' delete prevents unnecessary writes to the mnesia
- %% disk log
- case mnesia:match_object(Tab, Record, LockKind) of
- [] -> ok;
- [_] -> mnesia:delete_object(Tab, Record, LockKind)
- end.
-
-sync_route(R, Fun) -> sync_route(R, true, true, Fun).
-
-sync_route(Route, true, true, Fun) ->
- ok = Fun(rabbit_durable_route, Route, write),
- sync_route(Route, false, true, Fun);
-
-sync_route(Route, false, true, Fun) ->
- ok = Fun(rabbit_semi_durable_route, Route, write),
- sync_route(Route, false, false, Fun);
-
-sync_route(Route, _SrcDurable, false, Fun) ->
- sync_transient_route(Route, Fun).
-
-sync_transient_route(Route, Fun) ->
- ok = Fun(rabbit_route, Route, write),
- ok = Fun(rabbit_reverse_route, reverse_route(Route), write).
-
-call_with_source_and_destination(SrcName, DstName, Fun) ->
- SrcTable = table_for_resource(SrcName),
- DstTable = table_for_resource(DstName),
- ErrFun = fun (Names) ->
- Errs = [not_found_or_absent(Name) || Name <- Names],
- rabbit_misc:const({error, {resources_missing, Errs}})
- end,
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- case {mnesia:read({SrcTable, SrcName}),
- mnesia:read({DstTable, DstName})} of
- {[Src], [Dst]} -> Fun(Src, Dst);
- {[], [_] } -> ErrFun([SrcName]);
- {[_], [] } -> ErrFun([DstName]);
- {[], [] } -> ErrFun([SrcName, DstName])
- end
- end).
-
-table_for_resource(#resource{kind = exchange}) -> rabbit_exchange;
-table_for_resource(#resource{kind = queue}) -> rabbit_queue.
-
-not_found_or_absent(#resource{kind = exchange} = Name) ->
- {not_found, Name};
-not_found_or_absent(#resource{kind = queue} = Name) ->
- case rabbit_amqqueue:not_found_or_absent(Name) of
- not_found -> {not_found, Name};
- {absent, _Q} = R -> R
- end.
-
-contains(Table, MatchHead) ->
- continue(mnesia:select(Table, [{MatchHead, [], ['$_']}], 1, read)).
-
-continue('$end_of_table') -> false;
-continue({[_|_], _}) -> true;
-continue({[], Continuation}) -> continue(mnesia:select(Continuation)).
-
-%% For bulk operations we lock the tables we are operating on in order
-%% to reduce the time complexity. Without the table locks we end up
-%% with num_tables*num_bulk_bindings row-level locks. Taking each lock
-%% takes time proportional to the number of existing locks, thus
-%% resulting in O(num_bulk_bindings^2) complexity.
-%%
-%% The locks need to be write locks since ultimately we end up
-%% removing all these rows.
-%%
-%% The downside of all this is that no other binding operations except
-%% lookup/routing (which uses dirty ops) can take place
-%% concurrently. However, that is the case already since the bulk
-%% operations involve mnesia:match_object calls with a partial key,
-%% which entails taking a table lock.
-lock_route_tables() ->
- [mnesia:lock({table, T}, write) || T <- [rabbit_route,
- rabbit_reverse_route,
- rabbit_semi_durable_route,
- rabbit_durable_route]].
-
-remove_routes(Routes) ->
- %% This partitioning allows us to suppress unnecessary delete
- %% operations on disk tables, which require an fsync.
- {TransientRoutes, DurableRoutes} =
- lists:partition(fun (R) -> mnesia:match_object(
- rabbit_durable_route, R, write) == [] end,
- Routes),
- [ok = sync_transient_route(R, fun mnesia:delete_object/3) ||
- R <- TransientRoutes],
- [ok = sync_route(R, fun mnesia:delete_object/3) ||
- R <- DurableRoutes],
- [R#route.binding || R <- Routes].
-
-remove_transient_routes(Routes) ->
- [begin
- ok = sync_transient_route(R, fun delete_object/3),
- R#route.binding
- end || R <- Routes].
-
-remove_for_destination(DstName, Fun) ->
- lock_route_tables(),
- Match = reverse_route(
- #route{binding = #binding{destination = DstName, _ = '_'}}),
- Routes = [reverse_route(R) || R <- mnesia:match_object(
- rabbit_reverse_route, Match, write)],
- Bindings = Fun(Routes),
- group_bindings_fold(fun maybe_auto_delete/3, new_deletions(),
- lists:keysort(#binding.source, Bindings)).
-
-%% Requires that its input binding list is sorted in exchange-name
-%% order, so that the grouping of bindings (for passing to
-%% group_bindings_and_auto_delete1) works properly.
-group_bindings_fold(_Fun, Acc, []) ->
- Acc;
-group_bindings_fold(Fun, Acc, [B = #binding{source = SrcName} | Bs]) ->
- group_bindings_fold(Fun, SrcName, Acc, Bs, [B]).
-
-group_bindings_fold(
- Fun, SrcName, Acc, [B = #binding{source = SrcName} | Bs], Bindings) ->
- group_bindings_fold(Fun, SrcName, Acc, Bs, [B | Bindings]);
-group_bindings_fold(Fun, SrcName, Acc, Removed, Bindings) ->
- %% Either Removed is [], or its head has a non-matching SrcName.
- group_bindings_fold(Fun, Fun(SrcName, Bindings, Acc), Removed).
-
-maybe_auto_delete(XName, Bindings, Deletions) ->
- {Entry, Deletions1} =
- case mnesia:read({rabbit_exchange, XName}) of
- [] -> {{undefined, not_deleted, Bindings}, Deletions};
- [X] -> case rabbit_exchange:maybe_auto_delete(X) of
- not_deleted ->
- {{X, not_deleted, Bindings}, Deletions};
- {deleted, Deletions2} ->
- {{X, deleted, Bindings},
- combine_deletions(Deletions, Deletions2)}
- end
- end,
- add_deletion(XName, Entry, Deletions1).
-
-reverse_route(#route{binding = Binding}) ->
- #reverse_route{reverse_binding = reverse_binding(Binding)};
-
-reverse_route(#reverse_route{reverse_binding = Binding}) ->
- #route{binding = reverse_binding(Binding)}.
-
-reverse_binding(#reverse_binding{source = SrcName,
- destination = DstName,
- key = Key,
- args = Args}) ->
- #binding{source = SrcName,
- destination = DstName,
- key = Key,
- args = Args};
-
-reverse_binding(#binding{source = SrcName,
- destination = DstName,
- key = Key,
- args = Args}) ->
- #reverse_binding{source = SrcName,
- destination = DstName,
- key = Key,
- args = Args}.
-
-%% ----------------------------------------------------------------------------
-%% Binding / exchange deletion abstraction API
-%% ----------------------------------------------------------------------------
-
-anything_but( NotThis, NotThis, NotThis) -> NotThis;
-anything_but( NotThis, NotThis, This) -> This;
-anything_but( NotThis, This, NotThis) -> This;
-anything_but(_NotThis, This, This) -> This.
-
-new_deletions() -> dict:new().
-
-add_deletion(XName, Entry, Deletions) ->
- dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end,
- Entry, Deletions).
-
-combine_deletions(Deletions1, Deletions2) ->
- dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end,
- Deletions1, Deletions2).
-
-merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) ->
- {anything_but(undefined, X1, X2),
- anything_but(not_deleted, Deleted1, Deleted2),
- [Bindings1 | Bindings2]}.
-
-process_deletions(Deletions) ->
- AugmentedDeletions =
- dict:map(fun (_XName, {X, deleted, Bindings}) ->
- Bs = lists:flatten(Bindings),
- x_callback(transaction, X, delete, Bs),
- {X, deleted, Bs, none};
- (_XName, {X, not_deleted, Bindings}) ->
- Bs = lists:flatten(Bindings),
- x_callback(transaction, X, remove_bindings, Bs),
- {X, not_deleted, Bs, rabbit_exchange:serial(X)}
- end, Deletions),
- fun() ->
- dict:fold(fun (XName, {X, deleted, Bs, Serial}, ok) ->
- ok = rabbit_event:notify(
- exchange_deleted, [{name, XName}]),
- del_notify(Bs),
- x_callback(Serial, X, delete, Bs);
- (_XName, {X, not_deleted, Bs, Serial}, ok) ->
- del_notify(Bs),
- x_callback(Serial, X, remove_bindings, Bs)
- end, ok, AugmentedDeletions)
- end.
-
-del_notify(Bs) -> [rabbit_event:notify(binding_deleted, info(B)) || B <- Bs].
-
-x_callback(Serial, X, F, Bs) ->
- ok = rabbit_exchange:callback(X, F, Serial, [X, Bs]).
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl
deleted file mode 100644
index 6c04f4cd..00000000
--- a/src/rabbit_channel.erl
+++ /dev/null
@@ -1,1656 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_channel).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--behaviour(gen_server2).
-
--export([start_link/11, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
--export([send_command/2, deliver/4, send_credit_reply/2, send_drained/2,
- flushed/2]).
--export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]).
--export([refresh_config_local/0, ready_for_close/1]).
--export([force_event_refresh/0]).
-
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
- handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
- prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
-%% Internal
--export([list_local/0]).
-
--record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid,
- conn_name, limiter, tx, next_tag, unacked_message_q, user,
- virtual_host, most_recently_declared_queue,
- queue_names, queue_monitors, consumer_mapping,
- blocking, queue_consumers, delivering_queues,
- queue_collector_pid, stats_timer, confirm_enabled, publish_seqno,
- unconfirmed, confirmed, capabilities, trace_state}).
-
--define(MAX_PERMISSION_CACHE_SIZE, 12).
-
--define(STATISTICS_KEYS,
- [pid,
- transactional,
- confirm,
- consumer_count,
- messages_unacknowledged,
- messages_unconfirmed,
- messages_uncommitted,
- acks_uncommitted,
- prefetch_count,
- client_flow_blocked]).
-
--define(CREATION_EVENT_KEYS,
- [pid,
- name,
- connection,
- number,
- user,
- vhost]).
-
--define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
-
--define(INCR_STATS(Incs, Measure, State),
- case rabbit_event:stats_level(State, #ch.stats_timer) of
- fine -> incr_stats(Incs, Measure);
- _ -> ok
- end).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([channel_number/0]).
-
--type(channel_number() :: non_neg_integer()).
-
--spec(start_link/11 ::
- (channel_number(), pid(), pid(), pid(), string(),
- rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
- rabbit_framing:amqp_table(), pid(), pid()) ->
- rabbit_types:ok_pid_or_error()).
--spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(do_flow/3 :: (pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(shutdown/1 :: (pid()) -> 'ok').
--spec(send_command/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(deliver/4 ::
- (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg())
- -> 'ok').
--spec(send_credit_reply/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(send_drained/2 :: (pid(), [{rabbit_types:ctag(), non_neg_integer()}])
- -> 'ok').
--spec(flushed/2 :: (pid(), pid()) -> 'ok').
--spec(list/0 :: () -> [pid()]).
--spec(list_local/0 :: () -> [pid()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(info_all/0 :: () -> [rabbit_types:infos()]).
--spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(refresh_config_local/0 :: () -> 'ok').
--spec(ready_for_close/1 :: (pid()) -> 'ok').
--spec(force_event_refresh/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
- VHost, Capabilities, CollectorPid, Limiter) ->
- gen_server2:start_link(
- ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol,
- User, VHost, Capabilities, CollectorPid, Limiter], []).
-
-do(Pid, Method) ->
- do(Pid, Method, none).
-
-do(Pid, Method, Content) ->
- gen_server2:cast(Pid, {method, Method, Content, noflow}).
-
-do_flow(Pid, Method, Content) ->
- credit_flow:send(Pid),
- gen_server2:cast(Pid, {method, Method, Content, flow}).
-
-flush(Pid) ->
- gen_server2:call(Pid, flush, infinity).
-
-shutdown(Pid) ->
- gen_server2:cast(Pid, terminate).
-
-send_command(Pid, Msg) ->
- gen_server2:cast(Pid, {command, Msg}).
-
-deliver(Pid, ConsumerTag, AckRequired, Msg) ->
- gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).
-
-send_credit_reply(Pid, Len) ->
- gen_server2:cast(Pid, {send_credit_reply, Len}).
-
-send_drained(Pid, CTagCredit) ->
- gen_server2:cast(Pid, {send_drained, CTagCredit}).
-
-flushed(Pid, QPid) ->
- gen_server2:cast(Pid, {flushed, QPid}).
-
-list() ->
- rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
- rabbit_channel, list_local, []).
-
-list_local() ->
- pg_local:get_members(rabbit_channels).
-
-info_keys() -> ?INFO_KEYS.
-
-info(Pid) ->
- gen_server2:call(Pid, info, infinity).
-
-info(Pid, Items) ->
- case gen_server2:call(Pid, {info, Items}, infinity) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-info_all() ->
- rabbit_misc:filter_exit_map(fun (C) -> info(C) end, list()).
-
-info_all(Items) ->
- rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
-
-refresh_config_local() ->
- rabbit_misc:upmap(
- fun (C) -> gen_server2:call(C, refresh_config) end, list_local()),
- ok.
-
-ready_for_close(Pid) ->
- gen_server2:cast(Pid, ready_for_close).
-
-force_event_refresh() ->
- [gen_server2:cast(C, force_event_refresh) || C <- list()],
- ok.
-
-%%---------------------------------------------------------------------------
-
-init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
- Capabilities, CollectorPid, LimiterPid]) ->
- process_flag(trap_exit, true),
- ok = pg_local:join(rabbit_channels, self()),
- State = #ch{state = starting,
- protocol = Protocol,
- channel = Channel,
- reader_pid = ReaderPid,
- writer_pid = WriterPid,
- conn_pid = ConnPid,
- conn_name = ConnName,
- limiter = rabbit_limiter:new(LimiterPid),
- tx = none,
- next_tag = 1,
- unacked_message_q = queue:new(),
- user = User,
- virtual_host = VHost,
- most_recently_declared_queue = <<>>,
- queue_names = dict:new(),
- queue_monitors = pmon:new(),
- consumer_mapping = dict:new(),
- blocking = sets:new(),
- queue_consumers = dict:new(),
- delivering_queues = sets:new(),
- queue_collector_pid = CollectorPid,
- confirm_enabled = false,
- publish_seqno = 1,
- unconfirmed = dtree:empty(),
- confirmed = [],
- capabilities = Capabilities,
- trace_state = rabbit_trace:init(VHost)},
- State1 = rabbit_event:init_stats_timer(State, #ch.stats_timer),
- rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State1)),
- rabbit_event:if_enabled(State1, #ch.stats_timer,
- fun() -> emit_stats(State1) end),
- {ok, State1, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-prioritise_call(Msg, _From, _Len, _State) ->
- case Msg of
- info -> 9;
- {info, _Items} -> 9;
- _ -> 0
- end.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- {confirm, _MsgSeqNos, _QPid} -> 5;
- _ -> 0
- end.
-
-prioritise_info(Msg, _Len, _State) ->
- case Msg of
- emit_stats -> 7;
- _ -> 0
- end.
-
-handle_call(flush, _From, State) ->
- reply(ok, State);
-
-handle_call(info, _From, State) ->
- reply(infos(?INFO_KEYS, State), State);
-
-handle_call({info, Items}, _From, State) ->
- try
- reply({ok, infos(Items, State)}, State)
- catch Error -> reply({error, Error}, State)
- end;
-
-handle_call(refresh_config, _From, State = #ch{virtual_host = VHost}) ->
- reply(ok, State#ch{trace_state = rabbit_trace:init(VHost)});
-
-handle_call(_Request, _From, State) ->
- noreply(State).
-
-handle_cast({method, Method, Content, Flow},
- State = #ch{reader_pid = Reader}) ->
- case Flow of
- flow -> credit_flow:ack(Reader);
- noflow -> ok
- end,
- try handle_method(Method, Content, State) of
- {reply, Reply, NewState} ->
- ok = send(Reply, NewState),
- noreply(NewState);
- {noreply, NewState} ->
- noreply(NewState);
- stop ->
- {stop, normal, State}
- catch
- exit:Reason = #amqp_error{} ->
- MethodName = rabbit_misc:method_record_type(Method),
- handle_exception(Reason#amqp_error{method = MethodName}, State);
- _:Reason ->
- {stop, {Reason, erlang:get_stacktrace()}, State}
- end;
-
-handle_cast({flushed, QPid}, State) ->
- {noreply, queue_blocked(QPid, State), hibernate};
-
-handle_cast(ready_for_close, State = #ch{state = closing,
- writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}),
- {stop, normal, State};
-
-handle_cast(terminate, State = #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:flush(WriterPid),
- {stop, normal, State};
-
-handle_cast({command, #'basic.consume_ok'{consumer_tag = CTag} = Msg}, State) ->
- ok = send(Msg, State),
- noreply(consumer_monitor(CTag, State));
-
-handle_cast({command, Msg}, State) ->
- ok = send(Msg, State),
- noreply(State);
-
-handle_cast({deliver, _CTag, _AckReq, _Msg}, State = #ch{state = closing}) ->
- noreply(State);
-handle_cast({deliver, ConsumerTag, AckRequired,
- Msg = {_QName, QPid, _MsgId, Redelivered,
- #basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content}}},
- State = #ch{writer_pid = WriterPid,
- next_tag = DeliveryTag}) ->
- ok = rabbit_writer:send_command_and_notify(
- WriterPid, QPid, self(),
- #'basic.deliver'{consumer_tag = ConsumerTag,
- delivery_tag = DeliveryTag,
- redelivered = Redelivered,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey},
- Content),
- noreply(record_sent(ConsumerTag, AckRequired, Msg, State));
-
-handle_cast({send_credit_reply, Len}, State = #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command(
- WriterPid, #'basic.credit_ok'{available = Len}),
- noreply(State);
-
-handle_cast({send_drained, CTagCredit}, State = #ch{writer_pid = WriterPid}) ->
- [ok = rabbit_writer:send_command(
- WriterPid, #'basic.credit_drained'{consumer_tag = ConsumerTag,
- credit_drained = CreditDrained})
- || {ConsumerTag, CreditDrained} <- CTagCredit],
- noreply(State);
-
-handle_cast(force_event_refresh, State) ->
- rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State)),
- noreply(State);
-
-handle_cast({confirm, MsgSeqNos, From}, State) ->
- State1 = #ch{confirmed = C} = confirm(MsgSeqNos, From, State),
- Timeout = case C of [] -> hibernate; _ -> 0 end,
- %% NB: don't call noreply/1 since we don't want to send confirms.
- {noreply, ensure_stats_timer(State1), Timeout}.
-
-handle_info({bump_credit, Msg}, State) ->
- credit_flow:handle_bump_msg(Msg),
- noreply(State);
-
-handle_info(timeout, State) ->
- noreply(State);
-
-handle_info(emit_stats, State) ->
- emit_stats(State),
- State1 = rabbit_event:reset_stats_timer(State, #ch.stats_timer),
- %% NB: don't call noreply/1 since we don't want to kick off the
- %% stats timer.
- {noreply, send_confirms(State1), hibernate};
-
-handle_info({'DOWN', _MRef, process, QPid, Reason}, State) ->
- State1 = handle_publishing_queue_down(QPid, Reason, State),
- State2 = queue_blocked(QPid, State1),
- State3 = handle_consuming_queue_down(QPid, State2),
- State4 = handle_delivering_queue_down(QPid, State3),
- credit_flow:peer_down(QPid),
- #ch{queue_names = QNames, queue_monitors = QMons} = State4,
- case dict:find(QPid, QNames) of
- {ok, QName} -> erase_queue_stats(QName);
- error -> ok
- end,
- noreply(State4#ch{queue_names = dict:erase(QPid, QNames),
- queue_monitors = pmon:erase(QPid, QMons)});
-
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State}.
-
-handle_pre_hibernate(State) ->
- ok = clear_permission_cache(),
- rabbit_event:if_enabled(
- State, #ch.stats_timer,
- fun () -> emit_stats(State, [{idle_since, now()}]) end),
- {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
-
-terminate(Reason, State) ->
- {Res, _State1} = notify_queues(State),
- case Reason of
- normal -> ok = Res;
- shutdown -> ok = Res;
- {shutdown, _Term} -> ok = Res;
- _ -> ok
- end,
- pg_local:leave(rabbit_channels, self()),
- rabbit_event:if_enabled(State, #ch.stats_timer,
- fun() -> emit_stats(State) end),
- rabbit_event:notify(channel_closed, [{pid, self()}]).
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
-
-%%---------------------------------------------------------------------------
-
-reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.
-
-noreply(NewState) -> {noreply, next_state(NewState), hibernate}.
-
-next_state(State) -> ensure_stats_timer(send_confirms(State)).
-
-ensure_stats_timer(State) ->
- rabbit_event:ensure_stats_timer(State, #ch.stats_timer, emit_stats).
-
-return_ok(State, true, _Msg) -> {noreply, State};
-return_ok(State, false, Msg) -> {reply, Msg, State}.
-
-ok_msg(true, _Msg) -> undefined;
-ok_msg(false, Msg) -> Msg.
-
-send(_Command, #ch{state = closing}) ->
- ok;
-send(Command, #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command(WriterPid, Command).
-
-handle_exception(Reason, State = #ch{protocol = Protocol,
- channel = Channel,
- writer_pid = WriterPid,
- reader_pid = ReaderPid,
- conn_pid = ConnPid}) ->
- %% something bad's happened: notify_queues may not be 'ok'
- {_Result, State1} = notify_queues(State),
- case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
- {Channel, CloseMethod} ->
- rabbit_log:error("connection ~p, channel ~p - soft error:~n~p~n",
- [ConnPid, Channel, Reason]),
- ok = rabbit_writer:send_command(WriterPid, CloseMethod),
- {noreply, State1};
- {0, _} ->
- ReaderPid ! {channel_exit, Channel, Reason},
- {stop, normal, State1}
- end.
-
--ifdef(use_specs).
--spec(precondition_failed/1 :: (string()) -> no_return()).
--endif.
-precondition_failed(Format) -> precondition_failed(Format, []).
-
--ifdef(use_specs).
--spec(precondition_failed/2 :: (string(), [any()]) -> no_return()).
--endif.
-precondition_failed(Format, Params) ->
- rabbit_misc:protocol_error(precondition_failed, Format, Params).
-
-return_queue_declare_ok(#resource{name = ActualName},
- NoWait, MessageCount, ConsumerCount, State) ->
- return_ok(State#ch{most_recently_declared_queue = ActualName}, NoWait,
- #'queue.declare_ok'{queue = ActualName,
- message_count = MessageCount,
- consumer_count = ConsumerCount}).
-
-check_resource_access(User, Resource, Perm) ->
- V = {Resource, Perm},
- Cache = case get(permission_cache) of
- undefined -> [];
- Other -> Other
- end,
- case lists:member(V, Cache) of
- true -> ok;
- false -> ok = rabbit_access_control:check_resource_access(
- User, Resource, Perm),
- CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
- put(permission_cache, [V | CacheTail])
- end.
-
-clear_permission_cache() ->
- erase(permission_cache),
- ok.
-
-check_configure_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, configure).
-
-check_write_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, write).
-
-check_read_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, read).
-
-check_user_id_header(#'P_basic'{user_id = undefined}, _) ->
- ok;
-check_user_id_header(#'P_basic'{user_id = Username},
- #ch{user = #user{username = Username}}) ->
- ok;
-check_user_id_header(#'P_basic'{user_id = Claimed},
- #ch{user = #user{username = Actual,
- tags = Tags}}) ->
- case lists:member(impersonator, Tags) of
- true -> ok;
- false -> precondition_failed(
- "user_id property set to '~s' but authenticated user was "
- "'~s'", [Claimed, Actual])
- end.
-
-check_expiration_header(Props) ->
- case rabbit_basic:parse_expiration(Props) of
- {ok, _} -> ok;
- {error, E} -> precondition_failed("invalid expiration '~s': ~p",
- [Props#'P_basic'.expiration, E])
- end.
-
-check_internal_exchange(#exchange{name = Name, internal = true}) ->
- rabbit_misc:protocol_error(access_refused,
- "cannot publish to internal ~s",
- [rabbit_misc:rs(Name)]);
-check_internal_exchange(_) ->
- ok.
-
-expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = <<>>}) ->
- rabbit_misc:protocol_error(
- not_found, "no previously declared queue", []);
-expand_queue_name_shortcut(<<>>, #ch{virtual_host = VHostPath,
- most_recently_declared_queue = MRDQ}) ->
- rabbit_misc:r(VHostPath, queue, MRDQ);
-expand_queue_name_shortcut(QueueNameBin, #ch{virtual_host = VHostPath}) ->
- rabbit_misc:r(VHostPath, queue, QueueNameBin).
-
-expand_routing_key_shortcut(<<>>, <<>>,
- #ch{most_recently_declared_queue = <<>>}) ->
- rabbit_misc:protocol_error(
- not_found, "no previously declared queue", []);
-expand_routing_key_shortcut(<<>>, <<>>,
- #ch{most_recently_declared_queue = MRDQ}) ->
- MRDQ;
-expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) ->
- RoutingKey.
-
-expand_binding(queue, DestinationNameBin, RoutingKey, State) ->
- {expand_queue_name_shortcut(DestinationNameBin, State),
- expand_routing_key_shortcut(DestinationNameBin, RoutingKey, State)};
-expand_binding(exchange, DestinationNameBin, RoutingKey, State) ->
- {rabbit_misc:r(State#ch.virtual_host, exchange, DestinationNameBin),
- RoutingKey}.
-
-check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
- rabbit_misc:protocol_error(
- access_refused, "operation not permitted on the default exchange", []);
-check_not_default_exchange(_) ->
- ok.
-
-%% check that an exchange/queue name does not contain the reserved
-%% "amq." prefix.
-%%
-%% As per the AMQP 0-9-1 spec, the exclusion of "amq." prefixed names
-%% only applies on actual creation, and not in the cases where the
-%% entity already exists or passive=true.
-%%
-%% NB: We deliberately do not enforce the other constraints on names
-%% required by the spec.
-check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
- rabbit_misc:protocol_error(
- access_refused,
- "~s name '~s' contains reserved prefix 'amq.*'",[Kind, NameBin]);
-check_name(_Kind, NameBin) ->
- NameBin.
-
-queue_blocked(QPid, State = #ch{blocking = Blocking}) ->
- case sets:is_element(QPid, Blocking) of
- false -> State;
- true -> maybe_send_flow_ok(
- State#ch{blocking = sets:del_element(QPid, Blocking)})
- end.
-
-maybe_send_flow_ok(State = #ch{blocking = Blocking}) ->
- case sets:size(Blocking) of
- 0 -> ok = send(#'channel.flow_ok'{active = false}, State);
- _ -> ok
- end,
- State.
-
-record_confirms([], State) ->
- State;
-record_confirms(MXs, State = #ch{confirmed = C}) ->
- State#ch{confirmed = [MXs | C]}.
-
-confirm([], _QPid, State) ->
- State;
-confirm(MsgSeqNos, QPid, State = #ch{unconfirmed = UC}) ->
- {MXs, UC1} = dtree:take(MsgSeqNos, QPid, UC),
- record_confirms(MXs, State#ch{unconfirmed = UC1}).
-
-handle_method(#'channel.open'{}, _, State = #ch{state = starting}) ->
- {reply, #'channel.open_ok'{}, State#ch{state = running}};
-
-handle_method(#'channel.open'{}, _, _State) ->
- rabbit_misc:protocol_error(
- command_invalid, "second 'channel.open' seen", []);
-
-handle_method(_Method, _, #ch{state = starting}) ->
- rabbit_misc:protocol_error(channel_error, "expected 'channel.open'", []);
-
-handle_method(#'channel.close_ok'{}, _, #ch{state = closing}) ->
- stop;
-
-handle_method(#'channel.close'{}, _, State = #ch{writer_pid = WriterPid,
- state = closing}) ->
- ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}),
- {noreply, State};
-
-handle_method(_Method, _, State = #ch{state = closing}) ->
- {noreply, State};
-
-handle_method(#'channel.close'{}, _, State = #ch{reader_pid = ReaderPid}) ->
- {ok, State1} = notify_queues(State),
- %% We issue the channel.close_ok response after a handshake with
- %% the reader, the other half of which is ready_for_close. That
- %% way the reader forgets about the channel before we send the
- %% response (and this channel process terminates). If we didn't do
- %% that, a channel.open for the same channel number, which a
- %% client is entitled to send as soon as it has received the
- %% close_ok, might be received by the reader before it has seen
- %% the termination and hence be sent to the old, now dead/dying
- %% channel process, instead of a new process, and thus lost.
- ReaderPid ! {channel_closing, self()},
- {noreply, State1};
-
-%% Even though the spec prohibits the client from sending commands
-%% while waiting for the reply to a synchronous command, we generally
-%% do allow this...except in the case of a pending tx.commit, where
-%% it could wreak havoc.
-handle_method(_Method, _, #ch{tx = Tx})
- when Tx =:= committing orelse Tx =:= failed ->
- rabbit_misc:protocol_error(
- channel_error, "unexpected command while processing 'tx.commit'", []);
-
-handle_method(#'access.request'{},_, State) ->
- {reply, #'access.request_ok'{ticket = 1}, State};
-
-handle_method(#'basic.publish'{immediate = true}, _Content, _State) ->
- rabbit_misc:protocol_error(not_implemented, "immediate=true", []);
-
-handle_method(#'basic.publish'{exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- mandatory = Mandatory},
- Content, State = #ch{virtual_host = VHostPath,
- tx = Tx,
- confirm_enabled = ConfirmEnabled,
- trace_state = TraceState}) ->
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_write_permitted(ExchangeName, State),
- Exchange = rabbit_exchange:lookup_or_die(ExchangeName),
- check_internal_exchange(Exchange),
- %% We decode the content's properties here because we're almost
- %% certain to want to look at delivery-mode and priority.
- DecodedContent = #content {properties = Props} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- check_user_id_header(Props, State),
- check_expiration_header(Props),
- {MsgSeqNo, State1} =
- case {Tx, ConfirmEnabled} of
- {none, false} -> {undefined, State};
- {_, _} -> SeqNo = State#ch.publish_seqno,
- {SeqNo, State#ch{publish_seqno = SeqNo + 1}}
- end,
- case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
- {ok, Message} ->
- rabbit_trace:tap_in(Message, TraceState),
- Delivery = rabbit_basic:delivery(Mandatory, Message, MsgSeqNo),
- QNames = rabbit_exchange:route(Exchange, Delivery),
- DQ = {Delivery, QNames},
- {noreply, case Tx of
- none -> deliver_to_queues(DQ, State1);
- {Msgs, Acks} -> Msgs1 = queue:in(DQ, Msgs),
- State1#ch{tx = {Msgs1, Acks}}
- end};
- {error, Reason} ->
- precondition_failed("invalid message: ~p", [Reason])
- end;
-
-handle_method(#'basic.nack'{delivery_tag = DeliveryTag,
- multiple = Multiple,
- requeue = Requeue},
- _, State) ->
- reject(DeliveryTag, Requeue, Multiple, State);
-
-handle_method(#'basic.ack'{delivery_tag = DeliveryTag,
- multiple = Multiple},
- _, State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
- {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
- State1 = State#ch{unacked_message_q = Remaining},
- {noreply, case Tx of
- none -> ack(Acked, State1),
- State1;
- {Msgs, Acks} -> Acks1 = ack_cons(ack, Acked, Acks),
- State1#ch{tx = {Msgs, Acks1}}
- end};
-
-handle_method(#'basic.get'{queue = QueueNameBin,
- no_ack = NoAck},
- _, State = #ch{writer_pid = WriterPid,
- conn_pid = ConnPid,
- limiter = Limiter,
- next_tag = DeliveryTag}) ->
- QueueName = expand_queue_name_shortcut(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- case rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) -> rabbit_amqqueue:basic_get(
- Q, self(), NoAck, rabbit_limiter:pid(Limiter))
- end) of
- {ok, MessageCount,
- Msg = {QName, QPid, _MsgId, Redelivered,
- #basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content}}} ->
- ok = rabbit_writer:send_command(
- WriterPid,
- #'basic.get_ok'{delivery_tag = DeliveryTag,
- redelivered = Redelivered,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey,
- message_count = MessageCount},
- Content),
- State1 = monitor_delivering_queue(NoAck, QPid, QName, State),
- {noreply, record_sent(none, not(NoAck), Msg, State1)};
- empty ->
- {reply, #'basic.get_empty'{}, State}
- end;
-
-handle_method(#'basic.consume'{queue = QueueNameBin,
- consumer_tag = ConsumerTag,
- no_local = _, % FIXME: implement
- no_ack = NoAck,
- exclusive = ExclusiveConsume,
- nowait = NoWait,
- arguments = Arguments},
- _, State = #ch{conn_pid = ConnPid,
- limiter = Limiter,
- consumer_mapping = ConsumerMapping}) ->
- case dict:find(ConsumerTag, ConsumerMapping) of
- error ->
- QueueName = expand_queue_name_shortcut(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- ActualConsumerTag =
- case ConsumerTag of
- <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
- "amq.ctag");
- Other -> Other
- end,
-
- %% We get the queue process to send the consume_ok on our
- %% behalf. This is for symmetry with basic.cancel - see
- %% the comment in that method for why.
- case rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) ->
- {rabbit_amqqueue:basic_consume(
- Q, NoAck, self(),
- rabbit_limiter:pid(Limiter),
- rabbit_limiter:is_active(Limiter),
- ActualConsumerTag, ExclusiveConsume,
- parse_credit_args(Arguments),
- ok_msg(NoWait, #'basic.consume_ok'{
- consumer_tag = ActualConsumerTag})),
- Q}
- end) of
- {ok, Q = #amqqueue{pid = QPid, name = QName}} ->
- CM1 = dict:store(ActualConsumerTag, Q, ConsumerMapping),
- State1 = monitor_delivering_queue(
- NoAck, QPid, QName,
- State#ch{consumer_mapping = CM1}),
- {noreply,
- case NoWait of
- true -> consumer_monitor(ActualConsumerTag, State1);
- false -> State1
- end};
- {{error, exclusive_consume_unavailable}, _Q} ->
- rabbit_misc:protocol_error(
- access_refused, "~s in exclusive use",
- [rabbit_misc:rs(QueueName)])
- end;
- {ok, _} ->
- %% Attempted reuse of consumer tag.
- rabbit_misc:protocol_error(
- not_allowed, "attempt to reuse consumer tag '~s'", [ConsumerTag])
- end;
-
-handle_method(#'basic.cancel'{consumer_tag = ConsumerTag,
- nowait = NoWait},
- _, State = #ch{consumer_mapping = ConsumerMapping,
- queue_consumers = QCons}) ->
- OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
- case dict:find(ConsumerTag, ConsumerMapping) of
- error ->
- %% Spec requires we ignore this situation.
- return_ok(State, NoWait, OkMsg);
- {ok, Q = #amqqueue{pid = QPid}} ->
- ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping),
- QCons1 =
- case dict:find(QPid, QCons) of
- error -> QCons;
- {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
- case gb_sets:is_empty(CTags1) of
- true -> dict:erase(QPid, QCons);
- false -> dict:store(QPid, CTags1, QCons)
- end
- end,
- NewState = State#ch{consumer_mapping = ConsumerMapping1,
- queue_consumers = QCons1},
- %% In order to ensure that no more messages are sent to
- %% the consumer after the cancel_ok has been sent, we get
- %% the queue process to send the cancel_ok on our
- %% behalf. If we were sending the cancel_ok ourselves it
- %% might overtake a message sent previously by the queue.
- case rabbit_misc:with_exit_handler(
- fun () -> {error, not_found} end,
- fun () ->
- rabbit_amqqueue:basic_cancel(
- Q, self(), ConsumerTag, ok_msg(NoWait, OkMsg))
- end) of
- ok ->
- {noreply, NewState};
- {error, not_found} ->
- %% Spec requires we ignore this situation.
- return_ok(NewState, NoWait, OkMsg)
- end
- end;
-
-handle_method(#'basic.qos'{global = true}, _, _State) ->
- rabbit_misc:protocol_error(not_implemented, "global=true", []);
-
-handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
- rabbit_misc:protocol_error(not_implemented,
- "prefetch_size!=0 (~w)", [Size]);
-
-handle_method(#'basic.qos'{prefetch_count = 0}, _,
- State = #ch{limiter = Limiter}) ->
- Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
- {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
-
-handle_method(#'basic.qos'{prefetch_count = PrefetchCount}, _,
- State = #ch{limiter = Limiter, unacked_message_q = UAMQ}) ->
- %% TODO queue:len(UAMQ) is not strictly right since that counts
- %% unacked messages from basic.get too. Pretty obscure though.
- Limiter1 = rabbit_limiter:limit_prefetch(Limiter,
- PrefetchCount, queue:len(UAMQ)),
- {reply, #'basic.qos_ok'{},
- maybe_limit_queues(Limiter, Limiter1, State#ch{limiter = Limiter1})};
-
-handle_method(#'basic.recover_async'{requeue = true},
- _, State = #ch{unacked_message_q = UAMQ,
- limiter = Limiter}) ->
- OkFun = fun () -> ok end,
- UAMQL = queue:to_list(UAMQ),
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- rabbit_misc:with_exit_handler(
- OkFun,
- fun () -> rabbit_amqqueue:requeue(QPid, MsgIds, self()) end)
- end, lists:reverse(UAMQL)),
- ok = notify_limiter(Limiter, UAMQL),
- %% No answer required - basic.recover is the newer, synchronous
- %% variant of this method
- {noreply, State#ch{unacked_message_q = queue:new()}};
-
-handle_method(#'basic.recover_async'{requeue = false}, _, _State) ->
- rabbit_misc:protocol_error(not_implemented, "requeue=false", []);
-
-handle_method(#'basic.recover'{requeue = Requeue}, Content, State) ->
- {noreply, State1} = handle_method(#'basic.recover_async'{requeue = Requeue},
- Content, State),
- {reply, #'basic.recover_ok'{}, State1};
-
-handle_method(#'basic.reject'{delivery_tag = DeliveryTag,
- requeue = Requeue},
- _, State) ->
- reject(DeliveryTag, Requeue, false, State);
-
-handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
- type = TypeNameBin,
- passive = false,
- durable = Durable,
- auto_delete = AutoDelete,
- internal = Internal,
- nowait = NoWait,
- arguments = Args},
- _, State = #ch{virtual_host = VHostPath}) ->
- CheckedType = rabbit_exchange:check_type(TypeNameBin),
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- check_configure_permitted(ExchangeName, State),
- X = case rabbit_exchange:lookup(ExchangeName) of
- {ok, FoundX} -> FoundX;
- {error, not_found} ->
- check_name('exchange', ExchangeNameBin),
- AeKey = <<"alternate-exchange">>,
- case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of
- undefined -> ok;
- {error, {invalid_type, Type}} ->
- precondition_failed(
- "invalid type '~s' for arg '~s' in ~s",
- [Type, AeKey, rabbit_misc:rs(ExchangeName)]);
- AName -> check_read_permitted(ExchangeName, State),
- check_write_permitted(AName, State),
- ok
- end,
- rabbit_exchange:declare(ExchangeName,
- CheckedType,
- Durable,
- AutoDelete,
- Internal,
- Args)
- end,
- ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable,
- AutoDelete, Internal, Args),
- return_ok(State, NoWait, #'exchange.declare_ok'{});
-
-handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
- passive = true,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath}) ->
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- _ = rabbit_exchange:lookup_or_die(ExchangeName),
- return_ok(State, NoWait, #'exchange.declare_ok'{});
-
-handle_method(#'exchange.delete'{exchange = ExchangeNameBin,
- if_unused = IfUnused,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath}) ->
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- check_configure_permitted(ExchangeName, State),
- case rabbit_exchange:delete(ExchangeName, IfUnused) of
- {error, not_found} ->
- rabbit_misc:not_found(ExchangeName);
- {error, in_use} ->
- precondition_failed("~s in use", [rabbit_misc:rs(ExchangeName)]);
- ok ->
- return_ok(State, NoWait, #'exchange.delete_ok'{})
- end;
-
-handle_method(#'exchange.bind'{destination = DestinationNameBin,
- source = SourceNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:add/2,
- SourceNameBin, exchange, DestinationNameBin, RoutingKey,
- Arguments, #'exchange.bind_ok'{}, NoWait, State);
-
-handle_method(#'exchange.unbind'{destination = DestinationNameBin,
- source = SourceNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:remove/2,
- SourceNameBin, exchange, DestinationNameBin, RoutingKey,
- Arguments, #'exchange.unbind_ok'{}, NoWait, State);
-
-handle_method(#'queue.declare'{queue = QueueNameBin,
- passive = false,
- durable = Durable,
- exclusive = ExclusiveDeclare,
- auto_delete = AutoDelete,
- nowait = NoWait,
- arguments = Args} = Declare,
- _, State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid,
- queue_collector_pid = CollectorPid}) ->
- Owner = case ExclusiveDeclare of
- true -> ConnPid;
- false -> none
- end,
- ActualNameBin = case QueueNameBin of
- <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
- "amq.gen");
- Other -> check_name('queue', Other)
- end,
- QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin),
- check_configure_permitted(QueueName, State),
- case rabbit_amqqueue:with(
- QueueName,
- fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
- Q, Durable, AutoDelete, Args, Owner),
- rabbit_amqqueue:stat(Q)
- end) of
- {ok, MessageCount, ConsumerCount} ->
- return_queue_declare_ok(QueueName, NoWait, MessageCount,
- ConsumerCount, State);
- {error, not_found} ->
- DlxKey = <<"x-dead-letter-exchange">>,
- case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of
- undefined ->
- ok;
- {error, {invalid_type, Type}} ->
- precondition_failed(
- "invalid type '~s' for arg '~s' in ~s",
- [Type, DlxKey, rabbit_misc:rs(QueueName)]);
- DLX ->
- check_read_permitted(QueueName, State),
- check_write_permitted(DLX, State),
- ok
- end,
- case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete,
- Args, Owner) of
- {new, #amqqueue{pid = QPid}} ->
- %% We need to notify the reader within the channel
- %% process so that we can be sure there are no
- %% outstanding exclusive queues being declared as
- %% the connection shuts down.
- ok = case Owner of
- none -> ok;
- _ -> rabbit_queue_collector:register(
- CollectorPid, QPid)
- end,
- return_queue_declare_ok(QueueName, NoWait, 0, 0, State);
- {existing, _Q} ->
- %% must have been created between the stat and the
- %% declare. Loop around again.
- handle_method(Declare, none, State);
- {absent, Q} ->
- rabbit_misc:absent(Q);
- {owner_died, _Q} ->
- %% Presumably our own days are numbered since the
- %% connection has died. Pretend the queue exists though,
- %% just so nothing fails.
- return_queue_declare_ok(QueueName, NoWait, 0, 0, State)
- end;
- {error, {absent, Q}} ->
- rabbit_misc:absent(Q)
- end;
-
-handle_method(#'queue.declare'{queue = QueueNameBin,
- passive = true,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid}) ->
- QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin),
- {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} =
- rabbit_amqqueue:with_or_die(
- QueueName, fun (Q) -> {rabbit_amqqueue:stat(Q), Q} end),
- ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
- return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount,
- State);
-
-handle_method(#'queue.delete'{queue = QueueNameBin,
- if_unused = IfUnused,
- if_empty = IfEmpty,
- nowait = NoWait},
- _, State = #ch{conn_pid = ConnPid}) ->
- QueueName = expand_queue_name_shortcut(QueueNameBin, State),
- check_configure_permitted(QueueName, State),
- case rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) -> rabbit_amqqueue:delete(Q, IfUnused, IfEmpty) end) of
- {error, in_use} ->
- precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
- {error, not_empty} ->
- precondition_failed("~s not empty", [rabbit_misc:rs(QueueName)]);
- {ok, PurgedMessageCount} ->
- return_ok(State, NoWait,
- #'queue.delete_ok'{message_count = PurgedMessageCount})
- end;
-
-handle_method(#'queue.bind'{queue = QueueNameBin,
- exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:add/2,
- ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
- #'queue.bind_ok'{}, NoWait, State);
-
-handle_method(#'queue.unbind'{queue = QueueNameBin,
- exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:remove/2,
- ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
- #'queue.unbind_ok'{}, false, State);
-
-handle_method(#'queue.purge'{queue = QueueNameBin,
- nowait = NoWait},
- _, State = #ch{conn_pid = ConnPid}) ->
- QueueName = expand_queue_name_shortcut(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) -> rabbit_amqqueue:purge(Q) end),
- return_ok(State, NoWait,
- #'queue.purge_ok'{message_count = PurgedMessageCount});
-
-handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) ->
- precondition_failed("cannot switch from confirm to tx mode");
-
-handle_method(#'tx.select'{}, _, State = #ch{tx = none}) ->
- {reply, #'tx.select_ok'{}, State#ch{tx = new_tx()}};
-
-handle_method(#'tx.select'{}, _, State) ->
- {reply, #'tx.select_ok'{}, State};
-
-handle_method(#'tx.commit'{}, _, #ch{tx = none}) ->
- precondition_failed("channel is not transactional");
-
-handle_method(#'tx.commit'{}, _, State = #ch{tx = {Msgs, Acks},
- limiter = Limiter}) ->
- State1 = rabbit_misc:queue_fold(fun deliver_to_queues/2, State, Msgs),
- Rev = fun (X) -> lists:reverse(lists:sort(X)) end,
- lists:foreach(fun ({ack, A}) -> ack(Rev(A), State1);
- ({Requeue, A}) -> reject(Requeue, Rev(A), Limiter)
- end, lists:reverse(Acks)),
- {noreply, maybe_complete_tx(State1#ch{tx = committing})};
-
-handle_method(#'tx.rollback'{}, _, #ch{tx = none}) ->
- precondition_failed("channel is not transactional");
-
-handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ,
- tx = {_Msgs, Acks}}) ->
- AcksL = lists:append(lists:reverse([lists:reverse(L) || {_, L} <- Acks])),
- UAMQ1 = queue:from_list(lists:usort(AcksL ++ queue:to_list(UAMQ))),
- {reply, #'tx.rollback_ok'{}, State#ch{unacked_message_q = UAMQ1,
- tx = new_tx()}};
-
-handle_method(#'confirm.select'{}, _, #ch{tx = {_, _}}) ->
- precondition_failed("cannot switch from tx to confirm mode");
-
-handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
- return_ok(State#ch{confirm_enabled = true},
- NoWait, #'confirm.select_ok'{});
-
-handle_method(#'channel.flow'{active = true}, _,
- State = #ch{limiter = Limiter}) ->
- Limiter1 = rabbit_limiter:unblock(Limiter),
- {reply, #'channel.flow_ok'{active = true},
- maybe_limit_queues(Limiter, Limiter1, State#ch{limiter = Limiter1})};
-
-handle_method(#'channel.flow'{active = false}, _,
- State = #ch{consumer_mapping = Consumers,
- limiter = Limiter}) ->
- case rabbit_limiter:is_blocked(Limiter) of
- true -> {noreply, maybe_send_flow_ok(State)};
- false -> Limiter1 = rabbit_limiter:block(Limiter),
- State1 = maybe_limit_queues(Limiter, Limiter1,
- State#ch{limiter = Limiter1}),
- %% The semantics of channel.flow{active=false}
- %% require that no messages are delivered after the
- %% channel.flow_ok has been sent. We accomplish that
- %% by "flushing" all messages in flight from the
- %% consumer queues to us. To do this we tell all the
- %% queues to invoke rabbit_channel:flushed/2, which
- %% will send us a {flushed, ...} message that appears
- %% *after* all the {deliver, ...} messages. We keep
- %% track of all the QPids thus asked, and once all of
- %% them have responded (or died) we send the
- %% channel.flow_ok.
- QPids = consumer_queues(Consumers),
- ok = rabbit_amqqueue:flush_all(QPids, self()),
- {noreply, maybe_send_flow_ok(
- State1#ch{blocking = sets:from_list(QPids)})}
- end;
-
-handle_method(#'basic.credit'{consumer_tag = CTag,
- credit = Credit,
- drain = Drain}, _,
- State = #ch{consumer_mapping = Consumers}) ->
- case dict:find(CTag, Consumers) of
- {ok, Q} -> ok = rabbit_amqqueue:credit(
- Q, self(), CTag, Credit, Drain),
- {noreply, State};
- error -> precondition_failed("unknown consumer tag '~s'", [CTag])
- end;
-
-handle_method(_MethodRecord, _Content, _State) ->
- rabbit_misc:protocol_error(
- command_invalid, "unimplemented method", []).
-
-%%----------------------------------------------------------------------------
-
-consumer_monitor(ConsumerTag,
- State = #ch{consumer_mapping = ConsumerMapping,
- queue_monitors = QMons,
- queue_consumers = QCons,
- capabilities = Capabilities}) ->
- case rabbit_misc:table_lookup(
- Capabilities, <<"consumer_cancel_notify">>) of
- {bool, true} ->
- #amqqueue{pid = QPid} = dict:fetch(ConsumerTag, ConsumerMapping),
- QCons1 = dict:update(QPid,
- fun (CTags) ->
- gb_sets:insert(ConsumerTag, CTags)
- end,
- gb_sets:singleton(ConsumerTag),
- QCons),
- State#ch{queue_monitors = pmon:monitor(QPid, QMons),
- queue_consumers = QCons1};
- _ ->
- State
- end.
-
-monitor_delivering_queue(NoAck, QPid, QName,
- State = #ch{queue_names = QNames,
- queue_monitors = QMons,
- delivering_queues = DQ}) ->
- State#ch{queue_names = dict:store(QPid, QName, QNames),
- queue_monitors = pmon:monitor(QPid, QMons),
- delivering_queues = case NoAck of
- true -> DQ;
- false -> sets:add_element(QPid, DQ)
- end}.
-
-handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed = UC}) ->
- case rabbit_misc:is_abnormal_exit(Reason) of
- true -> {MXs, UC1} = dtree:take_all(QPid, UC),
- send_nacks(MXs, State#ch{unconfirmed = UC1});
- false -> {MXs, UC1} = dtree:take(QPid, UC),
- record_confirms(MXs, State#ch{unconfirmed = UC1})
- end.
-
-handle_consuming_queue_down(QPid,
- State = #ch{consumer_mapping = ConsumerMapping,
- queue_consumers = QCons,
- queue_names = QNames}) ->
- ConsumerTags = case dict:find(QPid, QCons) of
- error -> gb_sets:new();
- {ok, CTags} -> CTags
- end,
- ConsumerMapping1 =
- gb_sets:fold(fun (CTag, CMap) ->
- ok = send(#'basic.cancel'{consumer_tag = CTag,
- nowait = true},
- State),
- rabbit_event:notify(
- consumer_deleted,
- [{consumer_tag, CTag},
- {channel, self()},
- {queue, dict:fetch(QPid, QNames)}]),
- dict:erase(CTag, CMap)
- end, ConsumerMapping, ConsumerTags),
- State#ch{consumer_mapping = ConsumerMapping1,
- queue_consumers = dict:erase(QPid, QCons)}.
-
-handle_delivering_queue_down(QPid, State = #ch{delivering_queues = DQ}) ->
- State#ch{delivering_queues = sets:del_element(QPid, DQ)}.
-
-parse_credit_args(Arguments) ->
- case rabbit_misc:table_lookup(Arguments, <<"x-credit">>) of
- {table, T} -> case {rabbit_misc:table_lookup(T, <<"credit">>),
- rabbit_misc:table_lookup(T, <<"drain">>)} of
- {{long, Credit}, {boolean, Drain}} -> {Credit, Drain};
- _ -> none
- end;
- undefined -> none
- end.
-
-binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin,
- RoutingKey, Arguments, ReturnMethod, NoWait,
- State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid }) ->
- {DestinationName, ActualRoutingKey} =
- expand_binding(DestinationType, DestinationNameBin, RoutingKey, State),
- check_write_permitted(DestinationName, State),
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]],
- check_read_permitted(ExchangeName, State),
- case Fun(#binding{source = ExchangeName,
- destination = DestinationName,
- key = ActualRoutingKey,
- args = Arguments},
- fun (_X, Q = #amqqueue{}) ->
- try rabbit_amqqueue:check_exclusive_access(Q, ConnPid)
- catch exit:Reason -> {error, Reason}
- end;
- (_X, #exchange{}) ->
- ok
- end) of
- {error, {resources_missing, [{not_found, Name} | _]}} ->
- rabbit_misc:not_found(Name);
- {error, {resources_missing, [{absent, Q} | _]}} ->
- rabbit_misc:absent(Q);
- {error, binding_not_found} ->
- rabbit_misc:protocol_error(
- not_found, "no binding ~s between ~s and ~s",
- [RoutingKey, rabbit_misc:rs(ExchangeName),
- rabbit_misc:rs(DestinationName)]);
- {error, {binding_invalid, Fmt, Args}} ->
- rabbit_misc:protocol_error(precondition_failed, Fmt, Args);
- {error, #amqp_error{} = Error} ->
- rabbit_misc:protocol_error(Error);
- ok -> return_ok(State, NoWait, ReturnMethod)
- end.
-
-basic_return(#basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content},
- #ch{protocol = Protocol, writer_pid = WriterPid}, Reason) ->
- {_Close, ReplyCode, ReplyText} = Protocol:lookup_amqp_exception(Reason),
- ok = rabbit_writer:send_command(
- WriterPid,
- #'basic.return'{reply_code = ReplyCode,
- reply_text = ReplyText,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey},
- Content).
-
-reject(DeliveryTag, Requeue, Multiple,
- State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
- {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
- State1 = State#ch{unacked_message_q = Remaining},
- {noreply, case Tx of
- none -> reject(Requeue, Acked, State1#ch.limiter),
- State1;
- {Msgs, Acks} -> Acks1 = ack_cons(Requeue, Acked, Acks),
- State1#ch{tx = {Msgs, Acks1}}
- end}.
-
-%% NB: Acked is in youngest-first order
-reject(Requeue, Acked, Limiter) ->
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- rabbit_amqqueue:reject(QPid, MsgIds, Requeue, self())
- end, Acked),
- ok = notify_limiter(Limiter, Acked).
-
-record_sent(ConsumerTag, AckRequired,
- Msg = {QName, QPid, MsgId, Redelivered, _Message},
- State = #ch{unacked_message_q = UAMQ,
- next_tag = DeliveryTag,
- trace_state = TraceState}) ->
- ?INCR_STATS([{queue_stats, QName, 1}], case {ConsumerTag, AckRequired} of
- {none, true} -> get;
- {none, false} -> get_no_ack;
- {_ , true} -> deliver;
- {_ , false} -> deliver_no_ack
- end, State),
- case Redelivered of
- true -> ?INCR_STATS([{queue_stats, QName, 1}], redeliver, State);
- false -> ok
- end,
- rabbit_trace:tap_out(Msg, TraceState),
- UAMQ1 = case AckRequired of
- true -> queue:in({DeliveryTag, ConsumerTag, {QPid, MsgId}},
- UAMQ);
- false -> UAMQ
- end,
- State#ch{unacked_message_q = UAMQ1, next_tag = DeliveryTag + 1}.
-
-%% NB: returns acks in youngest-first order
-collect_acks(Q, 0, true) ->
- {lists:reverse(queue:to_list(Q)), queue:new()};
-collect_acks(Q, DeliveryTag, Multiple) ->
- collect_acks([], [], Q, DeliveryTag, Multiple).
-
-collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) ->
- case queue:out(Q) of
- {{value, UnackedMsg = {CurrentDeliveryTag, _ConsumerTag, _Msg}},
- QTail} ->
- if CurrentDeliveryTag == DeliveryTag ->
- {[UnackedMsg | ToAcc],
- case PrefixAcc of
- [] -> QTail;
- _ -> queue:join(
- queue:from_list(lists:reverse(PrefixAcc)),
- QTail)
- end};
- Multiple ->
- collect_acks([UnackedMsg | ToAcc], PrefixAcc,
- QTail, DeliveryTag, Multiple);
- true ->
- collect_acks(ToAcc, [UnackedMsg | PrefixAcc],
- QTail, DeliveryTag, Multiple)
- end;
- {empty, _} ->
- precondition_failed("unknown delivery tag ~w", [DeliveryTag])
- end.
-
-%% NB: Acked is in youngest-first order
-ack(Acked, State = #ch{queue_names = QNames}) ->
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- ok = rabbit_amqqueue:ack(QPid, MsgIds, self()),
- ?INCR_STATS(case dict:find(QPid, QNames) of
- {ok, QName} -> Count = length(MsgIds),
- [{queue_stats, QName, Count}];
- error -> []
- end, ack, State)
- end, Acked),
- ok = notify_limiter(State#ch.limiter, Acked).
-
-%% {Msgs, Acks}
-%%
-%% Msgs is a queue.
-%%
-%% Acks looks s.t. like this:
-%% [{false,[5,4]},{true,[3]},{ack,[2,1]}, ...]
-%%
-%% Each element is a pair consisting of a tag and a list of
-%% ack'ed/reject'ed msg ids. The tag is one of 'ack' (to ack), 'true'
-%% (reject w requeue), 'false' (reject w/o requeue). The msg ids, as
-%% well as the list overall, are in "most-recent (generally youngest)
-%% ack first" order.
-new_tx() -> {queue:new(), []}.
-
-notify_queues(State = #ch{state = closing}) ->
- {ok, State};
-notify_queues(State = #ch{consumer_mapping = Consumers,
- delivering_queues = DQ }) ->
- QPids = sets:to_list(
- sets:union(sets:from_list(consumer_queues(Consumers)), DQ)),
- {rabbit_amqqueue:notify_down_all(QPids, self()), State#ch{state = closing}}.
-
-foreach_per_queue(_F, []) ->
- ok;
-foreach_per_queue(F, [{_DTag, _CTag, {QPid, MsgId}}]) -> %% common case
- F(QPid, [MsgId]);
-%% NB: UAL should be in youngest-first order; the tree values will
-%% then be in oldest-first order
-foreach_per_queue(F, UAL) ->
- T = lists:foldl(fun ({_DTag, _CTag, {QPid, MsgId}}, T) ->
- rabbit_misc:gb_trees_cons(QPid, MsgId, T)
- end, gb_trees:empty(), UAL),
- rabbit_misc:gb_trees_foreach(F, T).
-
-maybe_limit_queues(OldLimiter, NewLimiter, State) ->
- case ((not rabbit_limiter:is_active(OldLimiter)) andalso
- rabbit_limiter:is_active(NewLimiter)) of
- true -> Queues = consumer_queues(State#ch.consumer_mapping),
- rabbit_amqqueue:activate_limit_all(Queues, self());
- false -> ok
- end,
- State.
-
-consumer_queues(Consumers) ->
- lists:usort([QPid ||
- {_Key, #amqqueue{pid = QPid}} <- dict:to_list(Consumers)]).
-
-%% tell the limiter about the number of acks that have been received
-%% for messages delivered to subscribed consumers, but not acks for
-%% messages sent in a response to a basic.get (identified by their
-%% 'none' consumer tag)
-notify_limiter(Limiter, Acked) ->
- %% optimisation: avoid the potentially expensive 'foldl' in the
- %% common case.
- case rabbit_limiter:is_prefetch_limited(Limiter) of
- false -> ok;
- true -> case lists:foldl(fun ({_, none, _}, Acc) -> Acc;
- ({_, _, _}, Acc) -> Acc + 1
- end, 0, Acked) of
- 0 -> ok;
- Count -> rabbit_limiter:ack(Limiter, Count)
- end
- end.
-
-deliver_to_queues({#delivery{message = #basic_message{exchange_name = XName},
- msg_seq_no = undefined,
- mandatory = false},
- []}, State) -> %% optimisation
- ?INCR_STATS([{exchange_stats, XName, 1}], publish, State),
- State;
-deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
- exchange_name = XName},
- msg_seq_no = MsgSeqNo},
- DelQNames}, State = #ch{queue_names = QNames,
- queue_monitors = QMons}) ->
- Qs = rabbit_amqqueue:lookup(DelQNames),
- {RoutingRes, DeliveredQPids} = rabbit_amqqueue:deliver_flow(Qs, Delivery),
- %% The pmon:monitor_all/2 monitors all queues to which we
- %% delivered. But we want to monitor even queues we didn't deliver
- %% to, since we need their 'DOWN' messages to clean
- %% queue_names. So we also need to monitor each QPid from
- %% queues. But that only gets the masters (which is fine for
- %% cleaning queue_names), so we need the union of both.
- %%
- %% ...and we need to add even non-delivered queues to queue_names
- %% since alternative algorithms to update queue_names less
- %% frequently would in fact be more expensive in the common case.
- {QNames1, QMons1} =
- lists:foldl(fun (#amqqueue{pid = QPid, name = QName},
- {QNames0, QMons0}) ->
- {case dict:is_key(QPid, QNames0) of
- true -> QNames0;
- false -> dict:store(QPid, QName, QNames0)
- end, pmon:monitor(QPid, QMons0)}
- end, {QNames, pmon:monitor_all(DeliveredQPids, QMons)}, Qs),
- State1 = process_routing_result(RoutingRes, DeliveredQPids,
- XName, MsgSeqNo, Message,
- State#ch{queue_names = QNames1,
- queue_monitors = QMons1}),
- ?INCR_STATS([{exchange_stats, XName, 1} |
- [{queue_exchange_stats, {QName, XName}, 1} ||
- QPid <- DeliveredQPids,
- {ok, QName} <- [dict:find(QPid, QNames1)]]],
- publish, State1),
- State1.
-
-process_routing_result(routed, _, _, undefined, _, State) ->
- State;
-process_routing_result(routed, [], XName, MsgSeqNo, _, State) ->
- record_confirms([{MsgSeqNo, XName}], State);
-process_routing_result(routed, QPids, XName, MsgSeqNo, _, State) ->
- State#ch{unconfirmed = dtree:insert(MsgSeqNo, QPids, XName,
- State#ch.unconfirmed)};
-process_routing_result(unroutable, _, XName, MsgSeqNo, Msg, State) ->
- ok = basic_return(Msg, State, no_route),
- ?INCR_STATS([{exchange_stats, XName, 1}], return_unroutable, State),
- case MsgSeqNo of
- undefined -> State;
- _ -> record_confirms([{MsgSeqNo, XName}], State)
- end.
-
-send_nacks([], State) ->
- State;
-send_nacks(_MXs, State = #ch{state = closing,
- tx = none}) -> %% optimisation
- State;
-send_nacks(MXs, State = #ch{tx = none}) ->
- coalesce_and_send([MsgSeqNo || {MsgSeqNo, _} <- MXs],
- fun(MsgSeqNo, Multiple) ->
- #'basic.nack'{delivery_tag = MsgSeqNo,
- multiple = Multiple}
- end, State);
-send_nacks(_MXs, State = #ch{state = closing}) -> %% optimisation
- State#ch{tx = failed};
-send_nacks(_, State) ->
- maybe_complete_tx(State#ch{tx = failed}).
-
-send_confirms(State = #ch{tx = none, confirmed = []}) ->
- State;
-send_confirms(State = #ch{tx = none, confirmed = C}) ->
- MsgSeqNos =
- lists:foldl(
- fun ({MsgSeqNo, XName}, MSNs) ->
- ?INCR_STATS([{exchange_stats, XName, 1}], confirm, State),
- [MsgSeqNo | MSNs]
- end, [], lists:append(C)),
- send_confirms(MsgSeqNos, State#ch{confirmed = []});
-send_confirms(State) ->
- maybe_complete_tx(State).
-
-send_confirms([], State) ->
- State;
-send_confirms(_Cs, State = #ch{state = closing}) -> %% optimisation
- State;
-send_confirms([MsgSeqNo], State) ->
- ok = send(#'basic.ack'{delivery_tag = MsgSeqNo}, State),
- State;
-send_confirms(Cs, State) ->
- coalesce_and_send(Cs, fun(MsgSeqNo, Multiple) ->
- #'basic.ack'{delivery_tag = MsgSeqNo,
- multiple = Multiple}
- end, State).
-
-coalesce_and_send(MsgSeqNos, MkMsgFun, State = #ch{unconfirmed = UC}) ->
- SMsgSeqNos = lists:usort(MsgSeqNos),
- CutOff = case dtree:is_empty(UC) of
- true -> lists:last(SMsgSeqNos) + 1;
- false -> {SeqNo, _XName} = dtree:smallest(UC), SeqNo
- end,
- {Ms, Ss} = lists:splitwith(fun(X) -> X < CutOff end, SMsgSeqNos),
- case Ms of
- [] -> ok;
- _ -> ok = send(MkMsgFun(lists:last(Ms), true), State)
- end,
- [ok = send(MkMsgFun(SeqNo, false), State) || SeqNo <- Ss],
- State.
-
-ack_cons(Tag, Acked, [{Tag, Acks} | L]) -> [{Tag, Acked ++ Acks} | L];
-ack_cons(Tag, Acked, Acks) -> [{Tag, Acked} | Acks].
-
-ack_len(Acks) -> lists:sum([length(L) || {ack, L} <- Acks]).
-
-maybe_complete_tx(State = #ch{tx = {_, _}}) ->
- State;
-maybe_complete_tx(State = #ch{unconfirmed = UC}) ->
- case dtree:is_empty(UC) of
- false -> State;
- true -> complete_tx(State#ch{confirmed = []})
- end.
-
-complete_tx(State = #ch{tx = committing}) ->
- ok = send(#'tx.commit_ok'{}, State),
- State#ch{tx = new_tx()};
-complete_tx(State = #ch{tx = failed}) ->
- {noreply, State1} = handle_exception(
- rabbit_misc:amqp_error(
- precondition_failed, "partial tx completion", [],
- 'tx.commit'),
- State),
- State1#ch{tx = new_tx()}.
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(pid, _) -> self();
-i(connection, #ch{conn_pid = ConnPid}) -> ConnPid;
-i(number, #ch{channel = Channel}) -> Channel;
-i(user, #ch{user = User}) -> User#user.username;
-i(vhost, #ch{virtual_host = VHost}) -> VHost;
-i(transactional, #ch{tx = Tx}) -> Tx =/= none;
-i(confirm, #ch{confirm_enabled = CE}) -> CE;
-i(name, State) -> name(State);
-i(consumer_count, #ch{consumer_mapping = CM}) -> dict:size(CM);
-i(messages_unconfirmed, #ch{unconfirmed = UC}) -> dtree:size(UC);
-i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> queue:len(UAMQ);
-i(messages_uncommitted, #ch{tx = {Msgs, _Acks}}) -> queue:len(Msgs);
-i(messages_uncommitted, #ch{}) -> 0;
-i(acks_uncommitted, #ch{tx = {_Msgs, Acks}}) -> ack_len(Acks);
-i(acks_uncommitted, #ch{}) -> 0;
-i(prefetch_count, #ch{limiter = Limiter}) ->
- rabbit_limiter:get_prefetch_limit(Limiter);
-i(client_flow_blocked, #ch{limiter = Limiter}) ->
- rabbit_limiter:is_blocked(Limiter);
-i(Item, _) ->
- throw({bad_argument, Item}).
-
-name(#ch{conn_name = ConnName, channel = Channel}) ->
- list_to_binary(rabbit_misc:format("~s (~p)", [ConnName, Channel])).
-
-incr_stats(Incs, Measure) ->
- [update_measures(Type, Key, Inc, Measure) || {Type, Key, Inc} <- Incs].
-
-update_measures(Type, Key, Inc, Measure) ->
- Measures = case get({Type, Key}) of
- undefined -> [];
- D -> D
- end,
- Cur = case orddict:find(Measure, Measures) of
- error -> 0;
- {ok, C} -> C
- end,
- put({Type, Key}, orddict:store(Measure, Cur + Inc, Measures)).
-
-emit_stats(State) ->
- emit_stats(State, []).
-
-emit_stats(State, Extra) ->
- Coarse = infos(?STATISTICS_KEYS, State),
- case rabbit_event:stats_level(State, #ch.stats_timer) of
- coarse -> rabbit_event:notify(channel_stats, Extra ++ Coarse);
- fine -> Fine = [{channel_queue_stats,
- [{QName, Stats} ||
- {{queue_stats, QName}, Stats} <- get()]},
- {channel_exchange_stats,
- [{XName, Stats} ||
- {{exchange_stats, XName}, Stats} <- get()]},
- {channel_queue_exchange_stats,
- [{QX, Stats} ||
- {{queue_exchange_stats, QX}, Stats} <- get()]}],
- rabbit_event:notify(channel_stats, Extra ++ Coarse ++ Fine)
- end.
-
-erase_queue_stats(QName) ->
- erase({queue_stats, QName}),
- [erase({queue_exchange_stats, QX}) ||
- {{queue_exchange_stats, QX = {QName0, _}}, _} <- get(),
- QName0 =:= QName].
diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl
deleted file mode 100644
index df2e80ca..00000000
--- a/src/rabbit_channel_sup.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_channel_sup).
-
--behaviour(supervisor2).
-
--export([start_link/1]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([start_link_args/0]).
-
--type(start_link_args() ::
- {'tcp', rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), pid(), string(), rabbit_types:protocol(),
- rabbit_types:user(), rabbit_types:vhost(), rabbit_framing:amqp_table(),
- pid()} |
- {'direct', rabbit_channel:channel_number(), pid(), string(),
- rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
- rabbit_framing:amqp_table(), pid()}).
-
--spec(start_link/1 :: (start_link_args()) -> {'ok', pid(), {pid(), any()}}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link({tcp, Sock, Channel, FrameMax, ReaderPid, ConnName, Protocol, User,
- VHost, Capabilities, Collector}) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE,
- {tcp, Sock, Channel, FrameMax,
- ReaderPid, Protocol}),
- [LimiterPid] = supervisor2:find_child(SupPid, limiter),
- [WriterPid] = supervisor2:find_child(SupPid, writer),
- {ok, ChannelPid} =
- supervisor2:start_child(
- SupPid,
- {channel, {rabbit_channel, start_link,
- [Channel, ReaderPid, WriterPid, ReaderPid, ConnName,
- Protocol, User, VHost, Capabilities, Collector,
- LimiterPid]},
- intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
- {ok, AState} = rabbit_command_assembler:init(Protocol),
- {ok, SupPid, {ChannelPid, AState}};
-start_link({direct, Channel, ClientChannelPid, ConnPid, ConnName, Protocol,
- User, VHost, Capabilities, Collector}) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, direct),
- [LimiterPid] = supervisor2:find_child(SupPid, limiter),
- {ok, ChannelPid} =
- supervisor2:start_child(
- SupPid,
- {channel, {rabbit_channel, start_link,
- [Channel, ClientChannelPid, ClientChannelPid, ConnPid,
- ConnName, Protocol, User, VHost, Capabilities, Collector,
- LimiterPid]},
- intrinsic, ?MAX_WAIT, worker, [rabbit_channel]}),
- {ok, SupPid, {ChannelPid, none}}.
-
-%%----------------------------------------------------------------------------
-
-init(Type) ->
- {ok, {{one_for_all, 0, 1}, child_specs(Type)}}.
-
-child_specs({tcp, Sock, Channel, FrameMax, ReaderPid, Protocol}) ->
- [{writer, {rabbit_writer, start_link,
- [Sock, Channel, FrameMax, Protocol, ReaderPid, true]},
- intrinsic, ?MAX_WAIT, worker, [rabbit_writer]} | child_specs(direct)];
-child_specs(direct) ->
- [{limiter, {rabbit_limiter, start_link, []},
- transient, ?MAX_WAIT, worker, [rabbit_limiter]}].
diff --git a/src/rabbit_channel_sup_sup.erl b/src/rabbit_channel_sup_sup.erl
deleted file mode 100644
index 1d9ba48b..00000000
--- a/src/rabbit_channel_sup_sup.erl
+++ /dev/null
@@ -1,48 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_channel_sup_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0, start_channel/2]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_channel/2 :: (pid(), rabbit_channel_sup:start_link_args()) ->
- {'ok', pid(), {pid(), any()}}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- supervisor2:start_link(?MODULE, []).
-
-start_channel(Pid, Args) ->
- supervisor2:start_child(Pid, [Args]).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, {{simple_one_for_one_terminate, 0, 1},
- [{channel_sup, {rabbit_channel_sup, start_link, []},
- temporary, infinity, supervisor, [rabbit_channel_sup]}]}}.
diff --git a/src/rabbit_client_sup.erl b/src/rabbit_client_sup.erl
deleted file mode 100644
index d6536e16..00000000
--- a/src/rabbit_client_sup.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_client_sup).
-
--behaviour(supervisor2).
-
--export([start_link/1, start_link/2, start_link_worker/2]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_types:mfargs()) ->
- rabbit_types:ok_pid_or_error()).
--spec(start_link/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
- rabbit_types:ok_pid_or_error()).
--spec(start_link_worker/2 :: ({'local', atom()}, rabbit_types:mfargs()) ->
- rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Callback) ->
- supervisor2:start_link(?MODULE, Callback).
-
-start_link(SupName, Callback) ->
- supervisor2:start_link(SupName, ?MODULE, Callback).
-
-start_link_worker(SupName, Callback) ->
- supervisor2:start_link(SupName, ?MODULE, {Callback, worker}).
-
-init({M,F,A}) ->
- {ok, {{simple_one_for_one_terminate, 0, 1},
- [{client, {M,F,A}, temporary, infinity, supervisor, [M]}]}};
-init({{M,F,A}, worker}) ->
- {ok, {{simple_one_for_one_terminate, 0, 1},
- [{client, {M,F,A}, temporary, ?MAX_WAIT, worker, [M]}]}}.
diff --git a/src/rabbit_command_assembler.erl b/src/rabbit_command_assembler.erl
deleted file mode 100644
index 4095ccf1..00000000
--- a/src/rabbit_command_assembler.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_command_assembler).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([analyze_frame/3, init/1, process/2]).
-
-%%----------------------------------------------------------------------------
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([frame/0]).
-
--type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
- ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
- ?FRAME_TRACE | ?FRAME_HEARTBEAT).
--type(protocol() :: rabbit_framing:protocol()).
--type(method() :: rabbit_framing:amqp_method_record()).
--type(class_id() :: rabbit_framing:amqp_class_id()).
--type(weight() :: non_neg_integer()).
--type(body_size() :: non_neg_integer()).
--type(content() :: rabbit_types:undecoded_content()).
-
--type(frame() ::
- {'method', rabbit_framing:amqp_method_name(), binary()} |
- {'content_header', class_id(), weight(), body_size(), binary()} |
- {'content_body', binary()}).
-
--type(state() ::
- {'method', protocol()} |
- {'content_header', method(), class_id(), protocol()} |
- {'content_body', method(), body_size(), class_id(), protocol()}).
-
--spec(analyze_frame/3 :: (frame_type(), binary(), protocol()) ->
- frame() | 'heartbeat' | 'error').
-
--spec(init/1 :: (protocol()) -> {ok, state()}).
--spec(process/2 :: (frame(), state()) ->
- {ok, state()} |
- {ok, method(), state()} |
- {ok, method(), content(), state()} |
- {error, rabbit_types:amqp_error()}).
-
--endif.
-
-%%--------------------------------------------------------------------
-
-analyze_frame(?FRAME_METHOD,
- <<ClassId:16, MethodId:16, MethodFields/binary>>,
- Protocol) ->
- MethodName = Protocol:lookup_method_name({ClassId, MethodId}),
- {method, MethodName, MethodFields};
-analyze_frame(?FRAME_HEADER,
- <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>,
- _Protocol) ->
- {content_header, ClassId, Weight, BodySize, Properties};
-analyze_frame(?FRAME_BODY, Body, _Protocol) ->
- {content_body, Body};
-analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) ->
- heartbeat;
-analyze_frame(_Type, _Body, _Protocol) ->
- error.
-
-init(Protocol) -> {ok, {method, Protocol}}.
-
-process({method, MethodName, FieldsBin}, {method, Protocol}) ->
- try
- Method = Protocol:decode_method_fields(MethodName, FieldsBin),
- case Protocol:method_has_content(MethodName) of
- true -> {ClassId, _MethodId} = Protocol:method_id(MethodName),
- {ok, {content_header, Method, ClassId, Protocol}};
- false -> {ok, Method, {method, Protocol}}
- end
- catch exit:#amqp_error{} = Reason -> {error, Reason}
- end;
-process(_Frame, {method, _Protocol}) ->
- unexpected_frame("expected method frame, "
- "got non method frame instead", [], none);
-process({content_header, ClassId, 0, 0, PropertiesBin},
- {content_header, Method, ClassId, Protocol}) ->
- Content = empty_content(ClassId, PropertiesBin, Protocol),
- {ok, Method, Content, {method, Protocol}};
-process({content_header, ClassId, 0, BodySize, PropertiesBin},
- {content_header, Method, ClassId, Protocol}) ->
- Content = empty_content(ClassId, PropertiesBin, Protocol),
- {ok, {content_body, Method, BodySize, Content, Protocol}};
-process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin},
- {content_header, Method, ClassId, _Protocol}) ->
- unexpected_frame("expected content header for class ~w, "
- "got one for class ~w instead",
- [ClassId, HeaderClassId], Method);
-process(_Frame, {content_header, Method, ClassId, _Protocol}) ->
- unexpected_frame("expected content header for class ~w, "
- "got non content header frame instead", [ClassId], Method);
-process({content_body, FragmentBin},
- {content_body, Method, RemainingSize,
- Content = #content{payload_fragments_rev = Fragments}, Protocol}) ->
- NewContent = Content#content{
- payload_fragments_rev = [FragmentBin | Fragments]},
- case RemainingSize - size(FragmentBin) of
- 0 -> {ok, Method, NewContent, {method, Protocol}};
- Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}}
- end;
-process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) ->
- unexpected_frame("expected content body, "
- "got non content body frame instead", [], Method).
-
-%%--------------------------------------------------------------------
-
-empty_content(ClassId, PropertiesBin, Protocol) ->
- #content{class_id = ClassId,
- properties = none,
- properties_bin = PropertiesBin,
- protocol = Protocol,
- payload_fragments_rev = []}.
-
-unexpected_frame(Format, Params, Method) when is_atom(Method) ->
- {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)};
-unexpected_frame(Format, Params, Method) ->
- unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)).
diff --git a/src/rabbit_connection_sup.erl b/src/rabbit_connection_sup.erl
deleted file mode 100644
index c1fa17aa..00000000
--- a/src/rabbit_connection_sup.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_connection_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0, reader/1]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> {'ok', pid(), pid()}).
--spec(reader/1 :: (pid()) -> pid()).
-
--endif.
-
-%%--------------------------------------------------------------------------
-
-start_link() ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, []),
- {ok, Collector} =
- supervisor2:start_child(
- SupPid,
- {collector, {rabbit_queue_collector, start_link, []},
- intrinsic, ?MAX_WAIT, worker, [rabbit_queue_collector]}),
- %% We need to get channels in the hierarchy here so they get shut
- %% down after the reader, so the reader gets a chance to terminate
- %% them cleanly. But for 1.0 readers we can't start the real
- %% ch_sup_sup (because we don't know if we will be 0-9-1 or 1.0) -
- %% so we add another supervisor into the hierarchy.
- {ok, ChannelSup3Pid} =
- supervisor2:start_child(
- SupPid,
- {channel_sup3, {rabbit_intermediate_sup, start_link, []},
- intrinsic, infinity, supervisor, [rabbit_intermediate_sup]}),
- {ok, ReaderPid} =
- supervisor2:start_child(
- SupPid,
- {reader, {rabbit_reader, start_link,
- [ChannelSup3Pid, Collector,
- rabbit_heartbeat:start_heartbeat_fun(SupPid)]},
- intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}),
- {ok, SupPid, ReaderPid}.
-
-reader(Pid) ->
- hd(supervisor2:find_child(Pid, reader)).
-
-%%--------------------------------------------------------------------------
-
-init([]) ->
- {ok, {{one_for_all, 0, 1}, []}}.
diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl
deleted file mode 100644
index 0b666a36..00000000
--- a/src/rabbit_control_main.erl
+++ /dev/null
@@ -1,728 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_control_main).
--include("rabbit.hrl").
-
--export([start/0, stop/0, action/5, sync_queue/1, cancel_sync_queue/1]).
-
--define(RPC_TIMEOUT, infinity).
--define(EXTERNAL_CHECK_INTERVAL, 1000).
-
--define(QUIET_OPT, "-q").
--define(NODE_OPT, "-n").
--define(VHOST_OPT, "-p").
--define(RAM_OPT, "--ram").
--define(OFFLINE_OPT, "--offline").
-
--define(QUIET_DEF, {?QUIET_OPT, flag}).
--define(NODE_DEF(Node), {?NODE_OPT, {option, Node}}).
--define(VHOST_DEF, {?VHOST_OPT, {option, "/"}}).
--define(RAM_DEF, {?RAM_OPT, flag}).
--define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
-
--define(GLOBAL_DEFS(Node), [?QUIET_DEF, ?NODE_DEF(Node)]).
-
--define(COMMANDS,
- [stop,
- stop_app,
- start_app,
- wait,
- reset,
- force_reset,
- rotate_logs,
-
- {join_cluster, [?RAM_DEF]},
- change_cluster_node_type,
- update_cluster_nodes,
- {forget_cluster_node, [?OFFLINE_DEF]},
- cluster_status,
- {sync_queue, [?VHOST_DEF]},
- {cancel_sync_queue, [?VHOST_DEF]},
-
- add_user,
- delete_user,
- change_password,
- clear_password,
- set_user_tags,
- list_users,
-
- add_vhost,
- delete_vhost,
- list_vhosts,
- {set_permissions, [?VHOST_DEF]},
- {clear_permissions, [?VHOST_DEF]},
- {list_permissions, [?VHOST_DEF]},
- list_user_permissions,
-
- {set_parameter, [?VHOST_DEF]},
- {clear_parameter, [?VHOST_DEF]},
- {list_parameters, [?VHOST_DEF]},
-
- {set_policy, [?VHOST_DEF]},
- {clear_policy, [?VHOST_DEF]},
- {list_policies, [?VHOST_DEF]},
-
- {list_queues, [?VHOST_DEF]},
- {list_exchanges, [?VHOST_DEF]},
- {list_bindings, [?VHOST_DEF]},
- {list_connections, [?VHOST_DEF]},
- list_channels,
- {list_consumers, [?VHOST_DEF]},
- status,
- environment,
- report,
- eval,
-
- close_connection,
- {trace_on, [?VHOST_DEF]},
- {trace_off, [?VHOST_DEF]},
- set_vm_memory_high_watermark
- ]).
-
--define(GLOBAL_QUERIES,
- [{"Connections", rabbit_networking, connection_info_all,
- connection_info_keys},
- {"Channels", rabbit_channel, info_all, info_keys}]).
-
--define(VHOST_QUERIES,
- [{"Queues", rabbit_amqqueue, info_all, info_keys},
- {"Exchanges", rabbit_exchange, info_all, info_keys},
- {"Bindings", rabbit_binding, info_all, info_keys},
- {"Consumers", rabbit_amqqueue, consumers_all, consumer_info_keys},
- {"Permissions", rabbit_auth_backend_internal, list_vhost_permissions,
- vhost_perms_info_keys},
- {"Policies", rabbit_policy, list_formatted, info_keys},
- {"Parameters", rabbit_runtime_parameters, list_formatted, info_keys}]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
--spec(action/5 ::
- (atom(), node(), [string()], [{string(), any()}],
- fun ((string(), [any()]) -> 'ok'))
- -> 'ok').
--spec(usage/0 :: () -> no_return()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start() ->
- {ok, [[NodeStr|_]|_]} = init:get_argument(nodename),
- {Command, Opts, Args} =
- case rabbit_misc:parse_arguments(?COMMANDS, ?GLOBAL_DEFS(NodeStr),
- init:get_plain_arguments())
- of
- {ok, Res} -> Res;
- no_command -> print_error("could not recognise command", []),
- usage()
- end,
- Opts1 = [case K of
- ?NODE_OPT -> {?NODE_OPT, rabbit_nodes:make(V)};
- _ -> {K, V}
- end || {K, V} <- Opts],
- Quiet = proplists:get_bool(?QUIET_OPT, Opts1),
- Node = proplists:get_value(?NODE_OPT, Opts1),
- Inform = case Quiet of
- true -> fun (_Format, _Args1) -> ok end;
- false -> fun (Format, Args1) ->
- io:format(Format ++ " ...~n", Args1)
- end
- end,
- PrintInvalidCommandError =
- fun () ->
- print_error("invalid command '~s'",
- [string:join([atom_to_list(Command) | Args], " ")])
- end,
-
- %% The reason we don't use a try/catch here is that rpc:call turns
- %% thrown errors into normal return values
- case catch action(Command, Node, Args, Opts, Inform) of
- ok ->
- case Quiet of
- true -> ok;
- false -> io:format("...done.~n")
- end,
- rabbit_misc:quit(0);
- {ok, Info} ->
- case Quiet of
- true -> ok;
- false -> io:format("...done (~p).~n", [Info])
- end,
- rabbit_misc:quit(0);
- {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> %% < R15
- PrintInvalidCommandError(),
- usage();
- {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} -> %% >= R15
- PrintInvalidCommandError(),
- usage();
- {'EXIT', {badarg, _}} ->
- print_error("invalid parameter: ~p", [Args]),
- usage();
- {error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
- %% We handle this common case specially to avoid ~p since
- %% that has i18n issues
- print_error("~s: ~s", [Problem, Reason]),
- rabbit_misc:quit(2);
- {error, Reason} ->
- print_error("~p", [Reason]),
- rabbit_misc:quit(2);
- {error_string, Reason} ->
- print_error("~s", [Reason]),
- rabbit_misc:quit(2);
- {badrpc, {'EXIT', Reason}} ->
- print_error("~p", [Reason]),
- rabbit_misc:quit(2);
- {badrpc, Reason} ->
- print_error("unable to connect to node ~w: ~w", [Node, Reason]),
- print_badrpc_diagnostics(Node),
- rabbit_misc:quit(2);
- Other ->
- print_error("~p", [Other]),
- rabbit_misc:quit(2)
- end.
-
-fmt_stderr(Format, Args) -> rabbit_misc:format_stderr(Format ++ "~n", Args).
-
-print_report(Node, {Descr, Module, InfoFun, KeysFun}) ->
- io:format("~s:~n", [Descr]),
- print_report0(Node, {Module, InfoFun, KeysFun}, []).
-
-print_report(Node, {Descr, Module, InfoFun, KeysFun}, VHostArg) ->
- io:format("~s on ~s:~n", [Descr, VHostArg]),
- print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg).
-
-print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) ->
- case rpc_call(Node, Module, InfoFun, VHostArg) of
- [_|_] = Results -> InfoItems = rpc_call(Node, Module, KeysFun, []),
- display_row([atom_to_list(I) || I <- InfoItems]),
- display_info_list(Results, InfoItems);
- _ -> ok
- end,
- io:nl().
-
-print_error(Format, Args) -> fmt_stderr("Error: " ++ Format, Args).
-
-print_badrpc_diagnostics(Node) ->
- fmt_stderr(rabbit_nodes:diagnostics([Node]), []).
-
-stop() ->
- ok.
-
-usage() ->
- io:format("~s", [rabbit_ctl_usage:usage()]),
- rabbit_misc:quit(1).
-
-%%----------------------------------------------------------------------------
-
-action(stop, Node, Args, _Opts, Inform) ->
- Inform("Stopping and halting node ~p", [Node]),
- Res = call(Node, {rabbit, stop_and_halt, []}),
- case {Res, Args} of
- {ok, [PidFile]} -> wait_for_process_death(
- read_pid_file(PidFile, false));
- {ok, [_, _| _]} -> exit({badarg, Args});
- _ -> ok
- end,
- Res;
-
-action(stop_app, Node, [], _Opts, Inform) ->
- Inform("Stopping node ~p", [Node]),
- call(Node, {rabbit, stop, []});
-
-action(start_app, Node, [], _Opts, Inform) ->
- Inform("Starting node ~p", [Node]),
- call(Node, {rabbit, start, []});
-
-action(reset, Node, [], _Opts, Inform) ->
- Inform("Resetting node ~p", [Node]),
- call(Node, {rabbit_mnesia, reset, []});
-
-action(force_reset, Node, [], _Opts, Inform) ->
- Inform("Forcefully resetting node ~p", [Node]),
- call(Node, {rabbit_mnesia, force_reset, []});
-
-action(join_cluster, Node, [ClusterNodeS], Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- NodeType = case proplists:get_bool(?RAM_OPT, Opts) of
- true -> ram;
- false -> disc
- end,
- Inform("Clustering node ~p with ~p", [Node, ClusterNode]),
- rpc_call(Node, rabbit_mnesia, join_cluster, [ClusterNode, NodeType]);
-
-action(change_cluster_node_type, Node, ["ram"], _Opts, Inform) ->
- Inform("Turning ~p into a ram node", [Node]),
- rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [ram]);
-action(change_cluster_node_type, Node, [Type], _Opts, Inform)
- when Type =:= "disc" orelse Type =:= "disk" ->
- Inform("Turning ~p into a disc node", [Node]),
- rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [disc]);
-
-action(update_cluster_nodes, Node, [ClusterNodeS], _Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- Inform("Updating cluster nodes for ~p from ~p", [Node, ClusterNode]),
- rpc_call(Node, rabbit_mnesia, update_cluster_nodes, [ClusterNode]);
-
-action(forget_cluster_node, Node, [ClusterNodeS], Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- RemoveWhenOffline = proplists:get_bool(?OFFLINE_OPT, Opts),
- Inform("Removing node ~p from cluster", [ClusterNode]),
- rpc_call(Node, rabbit_mnesia, forget_cluster_node,
- [ClusterNode, RemoveWhenOffline]);
-
-action(sync_queue, Node, [Q], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
- Inform("Synchronising ~s", [rabbit_misc:rs(QName)]),
- rpc_call(Node, rabbit_control_main, sync_queue, [QName]);
-
-action(cancel_sync_queue, Node, [Q], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
- Inform("Stopping synchronising ~s", [rabbit_misc:rs(QName)]),
- rpc_call(Node, rabbit_control_main, cancel_sync_queue, [QName]);
-
-action(wait, Node, [PidFile], _Opts, Inform) ->
- Inform("Waiting for ~p", [Node]),
- wait_for_application(Node, PidFile, rabbit_and_plugins, Inform);
-action(wait, Node, [PidFile, App], _Opts, Inform) ->
- Inform("Waiting for ~p on ~p", [App, Node]),
- wait_for_application(Node, PidFile, list_to_atom(App), Inform);
-
-action(status, Node, [], _Opts, Inform) ->
- Inform("Status of node ~p", [Node]),
- display_call_result(Node, {rabbit, status, []});
-
-action(cluster_status, Node, [], _Opts, Inform) ->
- Inform("Cluster status of node ~p", [Node]),
- display_call_result(Node, {rabbit_mnesia, status, []});
-
-action(environment, Node, _App, _Opts, Inform) ->
- Inform("Application environment of node ~p", [Node]),
- display_call_result(Node, {rabbit, environment, []});
-
-action(rotate_logs, Node, [], _Opts, Inform) ->
- Inform("Reopening logs for node ~p", [Node]),
- call(Node, {rabbit, rotate_logs, [""]});
-action(rotate_logs, Node, Args = [Suffix], _Opts, Inform) ->
- Inform("Rotating logs to files with suffix \"~s\"", [Suffix]),
- call(Node, {rabbit, rotate_logs, Args});
-
-action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) ->
- Inform("Closing connection \"~s\"", [PidStr]),
- rpc_call(Node, rabbit_networking, close_connection,
- [rabbit_misc:string_to_pid(PidStr), Explanation]);
-
-action(add_user, Node, Args = [Username, _Password], _Opts, Inform) ->
- Inform("Creating user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, add_user, Args});
-
-action(delete_user, Node, Args = [_Username], _Opts, Inform) ->
- Inform("Deleting user \"~s\"", Args),
- call(Node, {rabbit_auth_backend_internal, delete_user, Args});
-
-action(change_password, Node, Args = [Username, _Newpassword], _Opts, Inform) ->
- Inform("Changing password for user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, change_password, Args});
-
-action(clear_password, Node, Args = [Username], _Opts, Inform) ->
- Inform("Clearing password for user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, clear_password, Args});
-
-action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) ->
- Tags = [list_to_atom(T) || T <- TagsStr],
- Inform("Setting tags for user \"~s\" to ~p", [Username, Tags]),
- rpc_call(Node, rabbit_auth_backend_internal, set_tags,
- [list_to_binary(Username), Tags]);
-
-action(list_users, Node, [], _Opts, Inform) ->
- Inform("Listing users", []),
- display_info_list(
- call(Node, {rabbit_auth_backend_internal, list_users, []}),
- rabbit_auth_backend_internal:user_info_keys());
-
-action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
- Inform("Creating vhost \"~s\"", Args),
- call(Node, {rabbit_vhost, add, Args});
-
-action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
- Inform("Deleting vhost \"~s\"", Args),
- call(Node, {rabbit_vhost, delete, Args});
-
-action(list_vhosts, Node, Args, _Opts, Inform) ->
- Inform("Listing vhosts", []),
- ArgAtoms = default_if_empty(Args, [name]),
- display_info_list(call(Node, {rabbit_vhost, info_all, []}), ArgAtoms);
-
-action(list_user_permissions, Node, Args = [_Username], _Opts, Inform) ->
- Inform("Listing permissions for user ~p", Args),
- display_info_list(call(Node, {rabbit_auth_backend_internal,
- list_user_permissions, Args}),
- rabbit_auth_backend_internal:user_perms_info_keys());
-
-action(list_queues, Node, Args, Opts, Inform) ->
- Inform("Listing queues", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [name, messages]),
- display_info_list(rpc_call(Node, rabbit_amqqueue, info_all,
- [VHostArg, ArgAtoms]),
- ArgAtoms);
-
-action(list_exchanges, Node, Args, Opts, Inform) ->
- Inform("Listing exchanges", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [name, type]),
- display_info_list(rpc_call(Node, rabbit_exchange, info_all,
- [VHostArg, ArgAtoms]),
- ArgAtoms);
-
-action(list_bindings, Node, Args, Opts, Inform) ->
- Inform("Listing bindings", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [source_name, source_kind,
- destination_name, destination_kind,
- routing_key, arguments]),
- display_info_list(rpc_call(Node, rabbit_binding, info_all,
- [VHostArg, ArgAtoms]),
- ArgAtoms);
-
-action(list_connections, Node, Args, _Opts, Inform) ->
- Inform("Listing connections", []),
- ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
- display_info_list(rpc_call(Node, rabbit_networking, connection_info_all,
- [ArgAtoms]),
- ArgAtoms);
-
-action(list_channels, Node, Args, _Opts, Inform) ->
- Inform("Listing channels", []),
- ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
- messages_unacknowledged]),
- display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms]),
- ArgAtoms);
-
-action(list_consumers, Node, _Args, Opts, Inform) ->
- Inform("Listing consumers", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg]),
- rabbit_amqqueue:consumer_info_keys());
-
-action(trace_on, Node, [], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Starting tracing for vhost \"~s\"", [VHost]),
- rpc_call(Node, rabbit_trace, start, [list_to_binary(VHost)]);
-
-action(trace_off, Node, [], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Stopping tracing for vhost \"~s\"", [VHost]),
- rpc_call(Node, rabbit_trace, stop, [list_to_binary(VHost)]);
-
-action(set_vm_memory_high_watermark, Node, [Arg], _Opts, Inform) ->
- Frac = list_to_float(case string:chr(Arg, $.) of
- 0 -> Arg ++ ".0";
- _ -> Arg
- end),
- Inform("Setting memory threshold on ~p to ~p", [Node, Frac]),
- rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]);
-
-action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Setting permissions for user \"~s\" in vhost \"~s\"",
- [Username, VHost]),
- call(Node, {rabbit_auth_backend_internal, set_permissions,
- [Username, VHost, CPerm, WPerm, RPerm]});
-
-action(clear_permissions, Node, [Username], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Clearing permissions for user \"~s\" in vhost \"~s\"",
- [Username, VHost]),
- call(Node, {rabbit_auth_backend_internal, clear_permissions,
- [Username, VHost]});
-
-action(list_permissions, Node, [], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Listing permissions in vhost \"~s\"", [VHost]),
- display_info_list(call(Node, {rabbit_auth_backend_internal,
- list_vhost_permissions, [VHost]}),
- rabbit_auth_backend_internal:vhost_perms_info_keys());
-
-action(set_parameter, Node, [Component, Key, Value], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Setting runtime parameter ~p for component ~p to ~p",
- [Key, Component, Value]),
- rpc_call(Node, rabbit_runtime_parameters, parse_set,
- [VHostArg, list_to_binary(Component), list_to_binary(Key), Value]);
-
-action(clear_parameter, Node, [Component, Key], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing runtime parameter ~p for component ~p", [Key, Component]),
- rpc_call(Node, rabbit_runtime_parameters, clear, [VHostArg,
- list_to_binary(Component),
- list_to_binary(Key)]);
-
-action(list_parameters, Node, [], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Listing runtime parameters", []),
- display_info_list(
- rpc_call(Node, rabbit_runtime_parameters, list_formatted, [VHostArg]),
- rabbit_runtime_parameters:info_keys());
-
-action(set_policy, Node, [Key, Pattern, Defn | Prio], Opts, Inform)
- when Prio == [] orelse length(Prio) == 1 ->
- Msg = "Setting policy ~p for pattern ~p to ~p",
- {InformMsg, Prio1} = case Prio of [] -> {Msg, undefined};
- [P] -> {Msg ++ " with priority ~s", P}
- end,
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform(InformMsg, [Key, Pattern, Defn] ++ Prio),
- rpc_call(Node, rabbit_policy, parse_set,
- [VHostArg, list_to_binary(Key), Pattern, Defn, Prio1]);
-
-action(clear_policy, Node, [Key], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing policy ~p", [Key]),
- rpc_call(Node, rabbit_policy, delete, [VHostArg, list_to_binary(Key)]);
-
-action(list_policies, Node, [], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Listing policies", []),
- display_info_list(rpc_call(Node, rabbit_policy, list_formatted, [VHostArg]),
- rabbit_policy:info_keys());
-
-action(report, Node, _Args, _Opts, Inform) ->
- Inform("Reporting server status on ~p~n~n", [erlang:universaltime()]),
- [begin ok = action(Action, N, [], [], Inform), io:nl() end ||
- N <- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]),
- Action <- [status, cluster_status, environment]],
- VHosts = unsafe_rpc(Node, rabbit_vhost, list, []),
- [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES],
- [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts],
- ok;
-
-action(eval, Node, [Expr], _Opts, _Inform) ->
- case erl_scan:string(Expr) of
- {ok, Scanned, _} ->
- case erl_parse:parse_exprs(Scanned) of
- {ok, Parsed} -> {value, Value, _} =
- unsafe_rpc(
- Node, erl_eval, exprs, [Parsed, []]),
- io:format("~p~n", [Value]),
- ok;
- {error, E} -> {error_string, format_parse_error(E)}
- end;
- {error, E, _} ->
- {error_string, format_parse_error(E)}
- end.
-
-format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
-
-sync_queue(Q) ->
- rabbit_amqqueue:with(
- Q, fun(#amqqueue{pid = QPid}) -> rabbit_amqqueue:sync_mirrors(QPid) end).
-
-cancel_sync_queue(Q) ->
- rabbit_amqqueue:with(
- Q, fun(#amqqueue{pid = QPid}) ->
- rabbit_amqqueue:cancel_sync_mirrors(QPid)
- end).
-
-%%----------------------------------------------------------------------------
-
-wait_for_application(Node, PidFile, Application, Inform) ->
- Pid = read_pid_file(PidFile, true),
- Inform("pid is ~s", [Pid]),
- wait_for_application(Node, Pid, Application).
-
-wait_for_application(Node, Pid, rabbit_and_plugins) ->
- wait_for_startup(Node, Pid);
-wait_for_application(Node, Pid, Application) ->
- while_process_is_alive(
- Node, Pid, fun() -> rabbit_nodes:is_running(Node, Application) end).
-
-wait_for_startup(Node, Pid) ->
- while_process_is_alive(
- Node, Pid, fun() -> rpc:call(Node, rabbit, await_startup, []) =:= ok end).
-
-while_process_is_alive(Node, Pid, Activity) ->
- case process_up(Pid) of
- true -> case Activity() of
- true -> ok;
- false -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- while_process_is_alive(Node, Pid, Activity)
- end;
- false -> {error, process_not_running}
- end.
-
-wait_for_process_death(Pid) ->
- case process_up(Pid) of
- true -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- wait_for_process_death(Pid);
- false -> ok
- end.
-
-read_pid_file(PidFile, Wait) ->
- case {file:read_file(PidFile), Wait} of
- {{ok, Bin}, _} ->
- S = binary_to_list(Bin),
- {match, [PidS]} = re:run(S, "[^\\s]+",
- [{capture, all, list}]),
- try list_to_integer(PidS)
- catch error:badarg ->
- exit({error, {garbage_in_pid_file, PidFile}})
- end,
- PidS;
- {{error, enoent}, true} ->
- timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- read_pid_file(PidFile, Wait);
- {{error, _} = E, _} ->
- exit({error, {could_not_read_pid, E}})
- end.
-
-% Test using some OS clunkiness since we shouldn't trust
-% rpc:call(os, getpid, []) at this point
-process_up(Pid) ->
- with_os([{unix, fun () ->
- run_ps(Pid) =:= 0
- end},
- {win32, fun () ->
- Cmd = "tasklist /nh /fi \"pid eq " ++ Pid ++ "\" ",
- Res = rabbit_misc:os_cmd(Cmd ++ "2>&1"),
- case re:run(Res, "erl\\.exe", [{capture, none}]) of
- match -> true;
- _ -> false
- end
- end}]).
-
-with_os(Handlers) ->
- {OsFamily, _} = os:type(),
- case proplists:get_value(OsFamily, Handlers) of
- undefined -> throw({unsupported_os, OsFamily});
- Handler -> Handler()
- end.
-
-run_ps(Pid) ->
- Port = erlang:open_port({spawn, "ps -p " ++ Pid},
- [exit_status, {line, 16384},
- use_stdio, stderr_to_stdout]),
- exit_loop(Port).
-
-exit_loop(Port) ->
- receive
- {Port, {exit_status, Rc}} -> Rc;
- {Port, _} -> exit_loop(Port)
- end.
-
-%%----------------------------------------------------------------------------
-
-default_if_empty(List, Default) when is_list(List) ->
- if List == [] -> Default;
- true -> [list_to_atom(X) || X <- List]
- end.
-
-display_info_list(Results, InfoItemKeys) when is_list(Results) ->
- lists:foreach(
- fun (Result) -> display_row(
- [format_info_item(proplists:get_value(X, Result)) ||
- X <- InfoItemKeys])
- end, lists:sort(Results)),
- ok;
-display_info_list(Other, _) ->
- Other.
-
-display_row(Row) ->
- io:fwrite(string:join(Row, "\t")),
- io:nl().
-
--define(IS_U8(X), (X >= 0 andalso X =< 255)).
--define(IS_U16(X), (X >= 0 andalso X =< 65535)).
-
-format_info_item(#resource{name = Name}) ->
- escape(Name);
-format_info_item({N1, N2, N3, N4} = Value) when
- ?IS_U8(N1), ?IS_U8(N2), ?IS_U8(N3), ?IS_U8(N4) ->
- rabbit_misc:ntoa(Value);
-format_info_item({K1, K2, K3, K4, K5, K6, K7, K8} = Value) when
- ?IS_U16(K1), ?IS_U16(K2), ?IS_U16(K3), ?IS_U16(K4),
- ?IS_U16(K5), ?IS_U16(K6), ?IS_U16(K7), ?IS_U16(K8) ->
- rabbit_misc:ntoa(Value);
-format_info_item(Value) when is_pid(Value) ->
- rabbit_misc:pid_to_string(Value);
-format_info_item(Value) when is_binary(Value) ->
- escape(Value);
-format_info_item(Value) when is_atom(Value) ->
- escape(atom_to_list(Value));
-format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] =
- Value) when is_binary(TableEntryKey) andalso
- is_atom(TableEntryType) ->
- io_lib:format("~1000000000000p", [prettify_amqp_table(Value)]);
-format_info_item([T | _] = Value)
- when is_tuple(T) orelse is_pid(T) orelse is_binary(T) orelse is_atom(T) orelse
- is_list(T) ->
- "[" ++
- lists:nthtail(2, lists:append(
- [", " ++ format_info_item(E) || E <- Value])) ++ "]";
-format_info_item(Value) ->
- io_lib:format("~w", [Value]).
-
-display_call_result(Node, MFA) ->
- case call(Node, MFA) of
- {badrpc, _} = Res -> throw(Res);
- Res -> io:format("~p~n", [Res]),
- ok
- end.
-
-unsafe_rpc(Node, Mod, Fun, Args) ->
- case rpc_call(Node, Mod, Fun, Args) of
- {badrpc, _} = Res -> throw(Res);
- Normal -> Normal
- end.
-
-call(Node, {Mod, Fun, Args}) ->
- rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary/1, Args)).
-
-rpc_call(Node, Mod, Fun, Args) ->
- rpc:call(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
-
-%% escape does C-style backslash escaping of non-printable ASCII
-%% characters. We don't escape characters above 127, since they may
-%% form part of UTF-8 strings.
-
-escape(Atom) when is_atom(Atom) -> escape(atom_to_list(Atom));
-escape(Bin) when is_binary(Bin) -> escape(binary_to_list(Bin));
-escape(L) when is_list(L) -> escape_char(lists:reverse(L), []).
-
-escape_char([$\\ | T], Acc) ->
- escape_char(T, [$\\, $\\ | Acc]);
-escape_char([X | T], Acc) when X >= 32, X /= 127 ->
- escape_char(T, [X | Acc]);
-escape_char([X | T], Acc) ->
- escape_char(T, [$\\, $0 + (X bsr 6), $0 + (X band 8#070 bsr 3),
- $0 + (X band 7) | Acc]);
-escape_char([], Acc) ->
- Acc.
-
-prettify_amqp_table(Table) ->
- [{escape(K), prettify_typed_amqp_value(T, V)} || {K, T, V} <- Table].
-
-prettify_typed_amqp_value(longstr, Value) -> escape(Value);
-prettify_typed_amqp_value(table, Value) -> prettify_amqp_table(Value);
-prettify_typed_amqp_value(array, Value) -> [prettify_typed_amqp_value(T, V) ||
- {T, V} <- Value];
-prettify_typed_amqp_value(_Type, Value) -> Value.
diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl
deleted file mode 100644
index 9002514f..00000000
--- a/src/rabbit_direct.erl
+++ /dev/null
@@ -1,108 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_direct).
-
--export([boot/0, force_event_refresh/0, list/0, connect/5,
- start_channel/9, disconnect/2]).
-%% Internal
--export([list_local/0]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(boot/0 :: () -> 'ok').
--spec(force_event_refresh/0 :: () -> 'ok').
--spec(list/0 :: () -> [pid()]).
--spec(list_local/0 :: () -> [pid()]).
--spec(connect/5 :: ((rabbit_types:username() | rabbit_types:user() |
- {rabbit_types:username(), rabbit_types:password()}),
- rabbit_types:vhost(), rabbit_types:protocol(), pid(),
- rabbit_event:event_props()) ->
- {'ok', {rabbit_types:user(),
- rabbit_framing:amqp_table()}}).
--spec(start_channel/9 ::
- (rabbit_channel:channel_number(), pid(), pid(), string(),
- rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
- rabbit_framing:amqp_table(), pid()) -> {'ok', pid()}).
--spec(disconnect/2 :: (pid(), rabbit_event:event_props()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-boot() -> rabbit_sup:start_supervisor_child(
- rabbit_direct_client_sup, rabbit_client_sup,
- [{local, rabbit_direct_client_sup},
- {rabbit_channel_sup, start_link, []}]).
-
-force_event_refresh() ->
- [Pid ! force_event_refresh || Pid<- list()],
- ok.
-
-list_local() ->
- pg_local:get_members(rabbit_direct).
-
-list() ->
- rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
- rabbit_direct, list_local, []).
-
-%%----------------------------------------------------------------------------
-
-connect(User = #user{}, VHost, Protocol, Pid, Infos) ->
- try rabbit_access_control:check_vhost_access(User, VHost) of
- ok -> ok = pg_local:join(rabbit_direct, Pid),
- rabbit_event:notify(connection_created, Infos),
- {ok, {User, rabbit_reader:server_properties(Protocol)}}
- catch
- exit:#amqp_error{name = access_refused} ->
- {error, access_refused}
- end;
-
-connect({Username, Password}, VHost, Protocol, Pid, Infos) ->
- connect0(check_user_pass_login, Username, Password, VHost, Protocol, Pid,
- Infos);
-
-connect(Username, VHost, Protocol, Pid, Infos) ->
- connect0(check_user_login, Username, [], VHost, Protocol, Pid, Infos).
-
-connect0(FunctionName, U, P, VHost, Protocol, Pid, Infos) ->
- case rabbit:is_running() of
- true ->
- case rabbit_access_control:FunctionName(U, P) of
- {ok, User} -> connect(User, VHost, Protocol, Pid, Infos);
- {refused, _M, _A} -> {error, auth_failure}
- end;
- false ->
- {error, broker_not_found_on_node}
- end.
-
-
-start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol, User,
- VHost, Capabilities, Collector) ->
- {ok, _, {ChannelPid, _}} =
- supervisor2:start_child(
- rabbit_direct_client_sup,
- [{direct, Number, ClientChannelPid, ConnPid, ConnName, Protocol,
- User, VHost, Capabilities, Collector}]),
- {ok, ChannelPid}.
-
-disconnect(Pid, Infos) ->
- pg_local:leave(rabbit_direct, Pid),
- rabbit_event:notify(connection_closed, Infos).
diff --git a/src/rabbit_disk_monitor.erl b/src/rabbit_disk_monitor.erl
deleted file mode 100644
index 5aaa1b2d..00000000
--- a/src/rabbit_disk_monitor.erl
+++ /dev/null
@@ -1,198 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_disk_monitor).
-
--behaviour(gen_server).
-
--export([start_link/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--export([get_disk_free_limit/0, set_disk_free_limit/1, get_check_interval/0,
- set_check_interval/1, get_disk_free/0]).
-
--define(SERVER, ?MODULE).
--define(DEFAULT_DISK_CHECK_INTERVAL, 10000).
-
--record(state, {dir,
- limit,
- actual,
- timeout,
- timer,
- alarmed
- }).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(disk_free_limit() :: (integer() | {'mem_relative', float()})).
--spec(start_link/1 :: (disk_free_limit()) -> rabbit_types:ok_pid_or_error()).
--spec(get_disk_free_limit/0 :: () -> integer()).
--spec(set_disk_free_limit/1 :: (disk_free_limit()) -> 'ok').
--spec(get_check_interval/0 :: () -> integer()).
--spec(set_check_interval/1 :: (integer()) -> 'ok').
--spec(get_disk_free/0 :: () -> (integer() | 'unknown')).
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-get_disk_free_limit() ->
- gen_server:call(?MODULE, get_disk_free_limit, infinity).
-
-set_disk_free_limit(Limit) ->
- gen_server:call(?MODULE, {set_disk_free_limit, Limit}, infinity).
-
-get_check_interval() ->
- gen_server:call(?MODULE, get_check_interval, infinity).
-
-set_check_interval(Interval) ->
- gen_server:call(?MODULE, {set_check_interval, Interval}, infinity).
-
-get_disk_free() ->
- gen_server:call(?MODULE, get_disk_free, infinity).
-
-%%----------------------------------------------------------------------------
-%% gen_server callbacks
-%%----------------------------------------------------------------------------
-
-start_link(Args) ->
- gen_server:start_link({local, ?SERVER}, ?MODULE, [Args], []).
-
-init([Limit]) ->
- TRef = start_timer(?DEFAULT_DISK_CHECK_INTERVAL),
- Dir = dir(),
- State = #state { dir = Dir,
- timeout = ?DEFAULT_DISK_CHECK_INTERVAL,
- timer = TRef,
- alarmed = false},
- case {catch get_disk_free(Dir),
- vm_memory_monitor:get_total_memory()} of
- {N1, N2} when is_integer(N1), is_integer(N2) ->
- {ok, set_disk_limits(State, Limit)};
- Err ->
- rabbit_log:info("Disabling disk free space monitoring "
- "on unsupported platform: ~p~n", [Err]),
- {stop, unsupported_platform}
- end.
-
-handle_call(get_disk_free_limit, _From, State) ->
- {reply, interpret_limit(State#state.limit), State};
-
-handle_call({set_disk_free_limit, Limit}, _From, State) ->
- {reply, ok, set_disk_limits(State, Limit)};
-
-handle_call(get_check_interval, _From, State) ->
- {reply, State#state.timeout, State};
-
-handle_call({set_check_interval, Timeout}, _From, State) ->
- {ok, cancel} = timer:cancel(State#state.timer),
- {reply, ok, State#state{timeout = Timeout, timer = start_timer(Timeout)}};
-
-handle_call(get_disk_free, _From, State = #state { actual = Actual }) ->
- {reply, Actual, State};
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Request, State) ->
- {noreply, State}.
-
-handle_info(update, State) ->
- {noreply, internal_update(State)};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-%% Server Internals
-%%----------------------------------------------------------------------------
-
-% the partition / drive containing this directory will be monitored
-dir() -> rabbit_mnesia:dir().
-
-set_disk_limits(State, Limit) ->
- State1 = State#state { limit = Limit },
- rabbit_log:info("Disk free limit set to ~pMB~n",
- [trunc(interpret_limit(Limit) / 1000000)]),
- internal_update(State1).
-
-internal_update(State = #state { limit = Limit,
- dir = Dir,
- alarmed = Alarmed}) ->
- CurrentFreeBytes = get_disk_free(Dir),
- LimitBytes = interpret_limit(Limit),
- NewAlarmed = CurrentFreeBytes < LimitBytes,
- case {Alarmed, NewAlarmed} of
- {false, true} ->
- emit_update_info("insufficient", CurrentFreeBytes, LimitBytes),
- rabbit_alarm:set_alarm({{resource_limit, disk, node()}, []});
- {true, false} ->
- emit_update_info("sufficient", CurrentFreeBytes, LimitBytes),
- rabbit_alarm:clear_alarm({resource_limit, disk, node()});
- _ ->
- ok
- end,
- State #state {alarmed = NewAlarmed, actual = CurrentFreeBytes}.
-
-get_disk_free(Dir) ->
- get_disk_free(Dir, os:type()).
-
-get_disk_free(Dir, {unix, Sun})
- when Sun =:= sunos; Sun =:= sunos4; Sun =:= solaris ->
- parse_free_unix(rabbit_misc:os_cmd("/usr/bin/df -k " ++ Dir));
-get_disk_free(Dir, {unix, _}) ->
- parse_free_unix(rabbit_misc:os_cmd("/bin/df -kP " ++ Dir));
-get_disk_free(Dir, {win32, _}) ->
- parse_free_win32(rabbit_misc:os_cmd("dir /-C /W \"" ++ Dir ++ [$"]));
-get_disk_free(_, Platform) ->
- {unknown, Platform}.
-
-parse_free_unix(CommandResult) ->
- [_, Stats | _] = string:tokens(CommandResult, "\n"),
- [_FS, _Total, _Used, Free | _] = string:tokens(Stats, " \t"),
- list_to_integer(Free) * 1024.
-
-parse_free_win32(CommandResult) ->
- LastLine = lists:last(string:tokens(CommandResult, "\r\n")),
- {match, [Free]} = re:run(lists:reverse(LastLine), "(\\d+)",
- [{capture, all_but_first, list}]),
- list_to_integer(lists:reverse(Free)).
-
-interpret_limit({mem_relative, R}) ->
- round(R * vm_memory_monitor:get_total_memory());
-interpret_limit(L) ->
- L.
-
-emit_update_info(StateStr, CurrentFree, Limit) ->
- rabbit_log:info(
- "Disk free space ~s. Free bytes:~p Limit:~p~n",
- [StateStr, CurrentFree, Limit]).
-
-start_timer(Timeout) ->
- {ok, TRef} = timer:send_interval(Timeout, update),
- TRef.
diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl
deleted file mode 100644
index 17ed8563..00000000
--- a/src/rabbit_error_logger.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_error_logger).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--define(LOG_EXCH_NAME, <<"amq.rabbitmq.log">>).
-
--behaviour(gen_event).
-
--export([start/0, stop/0]).
-
--export([init/1, terminate/2, code_change/3, handle_call/2, handle_event/2,
- handle_info/2]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/0 :: () -> 'ok').
--spec(stop/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start() ->
- {ok, DefaultVHost} = application:get_env(default_vhost),
- ok = error_logger:add_report_handler(?MODULE, [DefaultVHost]).
-
-stop() ->
- terminated_ok = error_logger:delete_report_handler(rabbit_error_logger),
- ok.
-
-%%----------------------------------------------------------------------------
-
-init([DefaultVHost]) ->
- #exchange{} = rabbit_exchange:declare(
- rabbit_misc:r(DefaultVHost, exchange, ?LOG_EXCH_NAME),
- topic, true, false, false, []),
- {ok, #resource{virtual_host = DefaultVHost,
- kind = exchange,
- name = ?LOG_EXCH_NAME}}.
-
-terminate(_Arg, _State) ->
- terminated_ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_call(_Request, State) ->
- {ok, not_understood, State}.
-
-handle_event({Kind, _Gleader, {_Pid, Format, Data}}, State) ->
- ok = publish(Kind, Format, Data, State),
- {ok, State};
-handle_event(_Event, State) ->
- {ok, State}.
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-publish(error, Format, Data, State) ->
- publish1(<<"error">>, Format, Data, State);
-publish(warning_msg, Format, Data, State) ->
- publish1(<<"warning">>, Format, Data, State);
-publish(info_msg, Format, Data, State) ->
- publish1(<<"info">>, Format, Data, State);
-publish(_Other, _Format, _Data, _State) ->
- ok.
-
-publish1(RoutingKey, Format, Data, LogExch) ->
- %% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
- %% second resolution, not millisecond.
- Timestamp = rabbit_misc:now_ms() div 1000,
- {ok, _RoutingRes, _DeliveredQPids} =
- rabbit_basic:publish(LogExch, RoutingKey,
- #'P_basic'{content_type = <<"text/plain">>,
- timestamp = Timestamp},
- list_to_binary(io_lib:format(Format, Data))),
- ok.
diff --git a/src/rabbit_error_logger_file_h.erl b/src/rabbit_error_logger_file_h.erl
deleted file mode 100644
index d59641b0..00000000
--- a/src/rabbit_error_logger_file_h.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_error_logger_file_h).
-
--behaviour(gen_event).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
- code_change/3]).
-
-%% rabbit_error_logger_file_h is a wrapper around the error_logger_file_h
-%% module because the original's init/1 does not match properly
-%% with the result of closing the old handler when swapping handlers.
-%% The first init/1 additionally allows for simple log rotation
-%% when the suffix is not the empty string.
-%% The original init/2 also opened the file in 'write' mode, thus
-%% overwriting old logs. To remedy this, init/2 from
-%% lib/stdlib/src/error_logger_file_h.erl from R14B3 was copied as
-%% init_file/2 and changed so that it opens the file in 'append' mode.
-
-%% Used only when swapping handlers in log rotation
-init({{File, Suffix}, []}) ->
- case rabbit_file:append_file(File, Suffix) of
- ok -> file:delete(File),
- ok;
- {error, Error} ->
- rabbit_log:error("Failed to append contents of "
- "log file '~s' to '~s':~n~p~n",
- [File, [File, Suffix], Error])
- end,
- init(File);
-%% Used only when swapping handlers and the original handler
-%% failed to terminate or was never installed
-init({{File, _}, error}) ->
- init(File);
-%% Used only when swapping handlers without performing
-%% log rotation
-init({File, []}) ->
- init(File);
-%% Used only when taking over from the tty handler
-init({{File, []}, _}) ->
- init(File);
-init({File, {error_logger, Buf}}) ->
- rabbit_file:ensure_parent_dirs_exist(File),
- init_file(File, {error_logger, Buf});
-init(File) ->
- rabbit_file:ensure_parent_dirs_exist(File),
- init_file(File, []).
-
-init_file(File, {error_logger, Buf}) ->
- case init_file(File, error_logger) of
- {ok, {Fd, File, PrevHandler}} ->
- [handle_event(Event, {Fd, File, PrevHandler}) ||
- {_, Event} <- lists:reverse(Buf)],
- {ok, {Fd, File, PrevHandler}};
- Error ->
- Error
- end;
-init_file(File, PrevHandler) ->
- process_flag(trap_exit, true),
- case file:open(File, [append]) of
- {ok,Fd} -> {ok, {Fd, File, PrevHandler}};
- Error -> Error
- end.
-
-%% filter out "application: foo; exited: stopped; type: temporary"
-handle_event({info_report, _, {_, std_info, _}}, State) ->
- {ok, State};
-handle_event(Event, State) ->
- error_logger_file_h:handle_event(Event, State).
-
-handle_info(Event, State) ->
- error_logger_file_h:handle_info(Event, State).
-
-handle_call(Event, State) ->
- error_logger_file_h:handle_call(Event, State).
-
-terminate(Reason, State) ->
- error_logger_file_h:terminate(Reason, State).
-
-code_change(OldVsn, State, Extra) ->
- error_logger_file_h:code_change(OldVsn, State, Extra).
diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl
deleted file mode 100644
index 4d3ddc79..00000000
--- a/src/rabbit_event.erl
+++ /dev/null
@@ -1,148 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_event).
-
--include("rabbit.hrl").
-
--export([start_link/0]).
--export([init_stats_timer/2, init_disabled_stats_timer/2,
- ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
--export([stats_level/2, if_enabled/3]).
--export([notify/2, notify_if/3]).
-
-%%----------------------------------------------------------------------------
-
--record(state, {level, interval, timer}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
-
--type(event_type() :: atom()).
--type(event_props() :: term()).
--type(event_timestamp() ::
- {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
-
--type(event() :: #event { type :: event_type(),
- props :: event_props(),
- timestamp :: event_timestamp() }).
-
--type(level() :: 'none' | 'coarse' | 'fine').
-
--type(timer_fun() :: fun (() -> 'ok')).
--type(container() :: tuple()).
--type(pos() :: non_neg_integer()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(init_stats_timer/2 :: (container(), pos()) -> container()).
--spec(init_disabled_stats_timer/2 :: (container(), pos()) -> container()).
--spec(ensure_stats_timer/3 :: (container(), pos(), term()) -> container()).
--spec(stop_stats_timer/2 :: (container(), pos()) -> container()).
--spec(reset_stats_timer/2 :: (container(), pos()) -> container()).
--spec(stats_level/2 :: (container(), pos()) -> level()).
--spec(if_enabled/3 :: (container(), pos(), timer_fun()) -> 'ok').
--spec(notify/2 :: (event_type(), event_props()) -> 'ok').
--spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_event:start_link({local, ?MODULE}).
-
-%% The idea is, for each stat-emitting object:
-%%
-%% On startup:
-%% init_stats_timer(State)
-%% notify(created event)
-%% if_enabled(internal_emit_stats) - so we immediately send something
-%%
-%% On wakeup:
-%% ensure_stats_timer(State, emit_stats)
-%% (Note we can't emit stats immediately, the timer may have fired 1ms ago.)
-%%
-%% emit_stats:
-%% if_enabled(internal_emit_stats)
-%% reset_stats_timer(State) - just bookkeeping
-%%
-%% Pre-hibernation:
-%% if_enabled(internal_emit_stats)
-%% stop_stats_timer(State)
-%%
-%% internal_emit_stats:
-%% notify(stats)
-
-init_stats_timer(C, P) ->
- {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
- {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
- setelement(P, C, #state{level = StatsLevel, interval = Interval,
- timer = undefined}).
-
-init_disabled_stats_timer(C, P) ->
- setelement(P, C, #state{level = none, interval = 0, timer = undefined}).
-
-ensure_stats_timer(C, P, Msg) ->
- case element(P, C) of
- #state{level = Level, interval = Interval, timer = undefined} = State
- when Level =/= none ->
- TRef = erlang:send_after(Interval, self(), Msg),
- setelement(P, C, State#state{timer = TRef});
- #state{} ->
- C
- end.
-
-stop_stats_timer(C, P) ->
- case element(P, C) of
- #state{timer = TRef} = State when TRef =/= undefined ->
- case erlang:cancel_timer(TRef) of
- false -> C;
- _ -> setelement(P, C, State#state{timer = undefined})
- end;
- #state{} ->
- C
- end.
-
-reset_stats_timer(C, P) ->
- case element(P, C) of
- #state{timer = TRef} = State when TRef =/= undefined ->
- setelement(P, C, State#state{timer = undefined});
- #state{} ->
- C
- end.
-
-stats_level(C, P) ->
- #state{level = Level} = element(P, C),
- Level.
-
-if_enabled(C, P, Fun) ->
- case element(P, C) of
- #state{level = none} -> ok;
- #state{} -> Fun(), ok
- end.
-
-notify_if(true, Type, Props) -> notify(Type, Props);
-notify_if(false, _Type, _Props) -> ok.
-
-notify(Type, Props) ->
- %% TODO: switch to os:timestamp() when we drop support for
- %% Erlang/OTP < R13B01
- gen_event:notify(?MODULE, #event{type = Type,
- props = Props,
- timestamp = now()}).
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl
deleted file mode 100644
index 49952a4d..00000000
--- a/src/rabbit_exchange.erl
+++ /dev/null
@@ -1,475 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([recover/0, policy_changed/2, callback/4, declare/6,
- assert_equivalence/6, assert_args_equivalence/2, check_type/1,
- lookup/1, lookup_or_die/1, list/1, lookup_scratch/2, update_scratch/3,
- info_keys/0, info/1, info/2, info_all/1, info_all/2,
- route/2, delete/2, validate_binding/2]).
-%% these must be run inside a mnesia tx
--export([maybe_auto_delete/1, serial/1, peek_serial/1, update/2]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([name/0, type/0]).
-
--type(name() :: rabbit_types:r('exchange')).
--type(type() :: atom()).
--type(fun_name() :: atom()).
-
--spec(recover/0 :: () -> [name()]).
--spec(callback/4::
- (rabbit_types:exchange(), fun_name(),
- fun((boolean()) -> non_neg_integer()) | atom(), [any()]) -> 'ok').
--spec(policy_changed/2 ::
- (rabbit_types:exchange(), rabbit_types:exchange()) -> 'ok').
--spec(declare/6 ::
- (name(), type(), boolean(), boolean(), boolean(),
- rabbit_framing:amqp_table())
- -> rabbit_types:exchange()).
--spec(check_type/1 ::
- (binary()) -> atom() | rabbit_types:connection_exit()).
--spec(assert_equivalence/6 ::
- (rabbit_types:exchange(), atom(), boolean(), boolean(), boolean(),
- rabbit_framing:amqp_table())
- -> 'ok' | rabbit_types:connection_exit()).
--spec(assert_args_equivalence/2 ::
- (rabbit_types:exchange(), rabbit_framing:amqp_table())
- -> 'ok' | rabbit_types:connection_exit()).
--spec(lookup/1 ::
- (name()) -> rabbit_types:ok(rabbit_types:exchange()) |
- rabbit_types:error('not_found')).
--spec(lookup_or_die/1 ::
- (name()) -> rabbit_types:exchange() |
- rabbit_types:channel_exit()).
--spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:exchange()]).
--spec(lookup_scratch/2 :: (name(), atom()) ->
- rabbit_types:ok(term()) |
- rabbit_types:error('not_found')).
--spec(update_scratch/3 :: (name(), atom(), fun((any()) -> any())) -> 'ok').
--spec(update/2 ::
- (name(),
- fun((rabbit_types:exchange()) -> rabbit_types:exchange()))
- -> not_found | rabbit_types:exchange()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:exchange()) -> rabbit_types:infos()).
--spec(info/2 ::
- (rabbit_types:exchange(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
- -> [rabbit_types:infos()]).
--spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
- -> [rabbit_amqqueue:name()]).
--spec(delete/2 ::
- (name(), boolean())-> 'ok' |
- rabbit_types:error('not_found') |
- rabbit_types:error('in_use')).
--spec(validate_binding/2 ::
- (rabbit_types:exchange(), rabbit_types:binding())
- -> rabbit_types:ok_or_error({'binding_invalid', string(), [any()]})).
--spec(maybe_auto_delete/1::
- (rabbit_types:exchange())
- -> 'not_deleted' | {'deleted', rabbit_binding:deletions()}).
--spec(serial/1 :: (rabbit_types:exchange()) ->
- fun((boolean()) -> 'none' | pos_integer())).
--spec(peek_serial/1 :: (name()) -> pos_integer() | 'undefined').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(INFO_KEYS, [name, type, durable, auto_delete, internal, arguments,
- policy]).
-
-recover() ->
- Xs = rabbit_misc:table_filter(
- fun (#exchange{name = XName}) ->
- mnesia:read({rabbit_exchange, XName}) =:= []
- end,
- fun (X, Tx) ->
- case Tx of
- true -> store(X);
- false -> ok
- end,
- callback(X, create, map_create_tx(Tx), [X])
- end,
- rabbit_durable_exchange),
- report_missing_decorators(Xs),
- [XName || #exchange{name = XName} <- Xs].
-
-report_missing_decorators(Xs) ->
- Mods = lists:usort(lists:append([rabbit_exchange_decorator:select(raw, D) ||
- #exchange{decorators = D} <- Xs])),
- case [M || M <- Mods, code:which(M) =:= non_existing] of
- [] -> ok;
- M -> rabbit_log:warning("Missing exchange decorators: ~p~n", [M])
- end.
-
-callback(X = #exchange{type = XType,
- decorators = Decorators}, Fun, Serial0, Args) ->
- Serial = if is_function(Serial0) -> Serial0;
- is_atom(Serial0) -> fun (_Bool) -> Serial0 end
- end,
- [ok = apply(M, Fun, [Serial(M:serialise_events(X)) | Args]) ||
- M <- rabbit_exchange_decorator:select(all, Decorators)],
- Module = type_to_module(XType),
- apply(Module, Fun, [Serial(Module:serialise_events()) | Args]).
-
-policy_changed(X = #exchange{type = XType,
- decorators = Decorators},
- X1 = #exchange{decorators = Decorators1}) ->
- D = rabbit_exchange_decorator:select(all, Decorators),
- D1 = rabbit_exchange_decorator:select(all, Decorators1),
- DAll = lists:usort(D ++ D1),
- [ok = M:policy_changed(X, X1) || M <- [type_to_module(XType) | DAll]],
- ok.
-
-serialise_events(X = #exchange{type = Type, decorators = Decorators}) ->
- lists:any(fun (M) -> M:serialise_events(X) end,
- rabbit_exchange_decorator:select(all, Decorators))
- orelse (type_to_module(Type)):serialise_events().
-
-serial(#exchange{name = XName} = X) ->
- Serial = case serialise_events(X) of
- true -> next_serial(XName);
- false -> none
- end,
- fun (true) -> Serial;
- (false) -> none
- end.
-
-declare(XName, Type, Durable, AutoDelete, Internal, Args) ->
- X = rabbit_policy:set(#exchange{name = XName,
- type = Type,
- durable = Durable,
- auto_delete = AutoDelete,
- internal = Internal,
- arguments = Args}),
- XT = type_to_module(Type),
- %% We want to upset things if it isn't ok
- ok = XT:validate(X),
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:wread({rabbit_exchange, XName}) of
- [] ->
- store(X),
- ok = case Durable of
- true -> mnesia:write(rabbit_durable_exchange,
- X, write);
- false -> ok
- end,
- {new, X};
- [ExistingX] ->
- {existing, ExistingX}
- end
- end,
- fun ({new, Exchange}, Tx) ->
- ok = callback(X, create, map_create_tx(Tx), [Exchange]),
- rabbit_event:notify_if(not Tx, exchange_created, info(Exchange)),
- Exchange;
- ({existing, Exchange}, _Tx) ->
- Exchange;
- (Err, _Tx) ->
- Err
- end).
-
-map_create_tx(true) -> transaction;
-map_create_tx(false) -> none.
-
-store(X) -> ok = mnesia:write(rabbit_exchange, X, write).
-
-%% Used with binaries sent over the wire; the type may not exist.
-check_type(TypeBin) ->
- case rabbit_registry:binary_to_type(TypeBin) of
- {error, not_found} ->
- rabbit_misc:protocol_error(
- command_invalid, "unknown exchange type '~s'", [TypeBin]);
- T ->
- case rabbit_registry:lookup_module(exchange, T) of
- {error, not_found} -> rabbit_misc:protocol_error(
- command_invalid,
- "invalid exchange type '~s'", [T]);
- {ok, _Module} -> T
- end
- end.
-
-assert_equivalence(X = #exchange{ durable = Durable,
- auto_delete = AutoDelete,
- internal = Internal,
- type = Type},
- Type, Durable, AutoDelete, Internal, RequiredArgs) ->
- (type_to_module(Type)):assert_args_equivalence(X, RequiredArgs);
-assert_equivalence(#exchange{ name = Name },
- _Type, _Durable, _Internal, _AutoDelete, _Args) ->
- rabbit_misc:protocol_error(
- precondition_failed,
- "cannot redeclare ~s with different type, durable, "
- "internal or autodelete value",
- [rabbit_misc:rs(Name)]).
-
-assert_args_equivalence(#exchange{ name = Name, arguments = Args },
- RequiredArgs) ->
- %% The spec says "Arguments are compared for semantic
- %% equivalence". The only arg we care about is
- %% "alternate-exchange".
- rabbit_misc:assert_args_equivalence(Args, RequiredArgs, Name,
- [<<"alternate-exchange">>]).
-
-lookup(Name) ->
- rabbit_misc:dirty_read({rabbit_exchange, Name}).
-
-lookup_or_die(Name) ->
- case lookup(Name) of
- {ok, X} -> X;
- {error, not_found} -> rabbit_misc:not_found(Name)
- end.
-
-list(VHostPath) ->
- mnesia:dirty_match_object(
- rabbit_exchange,
- #exchange{name = rabbit_misc:r(VHostPath, exchange), _ = '_'}).
-
-lookup_scratch(Name, App) ->
- case lookup(Name) of
- {ok, #exchange{scratches = undefined}} ->
- {error, not_found};
- {ok, #exchange{scratches = Scratches}} ->
- case orddict:find(App, Scratches) of
- {ok, Value} -> {ok, Value};
- error -> {error, not_found}
- end;
- {error, not_found} ->
- {error, not_found}
- end.
-
-update_scratch(Name, App, Fun) ->
- rabbit_misc:execute_mnesia_transaction(
- fun() ->
- update(Name,
- fun(X = #exchange{scratches = Scratches0}) ->
- Scratches1 = case Scratches0 of
- undefined -> orddict:new();
- _ -> Scratches0
- end,
- Scratch = case orddict:find(App, Scratches1) of
- {ok, S} -> S;
- error -> undefined
- end,
- Scratches2 = orddict:store(
- App, Fun(Scratch), Scratches1),
- X#exchange{scratches = Scratches2}
- end),
- ok
- end).
-
-update(Name, Fun) ->
- case mnesia:wread({rabbit_exchange, Name}) of
- [X = #exchange{durable = Durable}] ->
- X1 = Fun(X),
- ok = mnesia:write(rabbit_exchange, X1, write),
- case Durable of
- true -> ok = mnesia:write(rabbit_durable_exchange, X1, write);
- _ -> ok
- end,
- X1;
- [] ->
- not_found
- end.
-
-info_keys() -> ?INFO_KEYS.
-
-map(VHostPath, F) ->
- %% TODO: there is scope for optimisation here, e.g. using a
- %% cursor, parallelising the function invocation
- lists:map(F, list(VHostPath)).
-
-infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
-
-i(name, #exchange{name = Name}) -> Name;
-i(type, #exchange{type = Type}) -> Type;
-i(durable, #exchange{durable = Durable}) -> Durable;
-i(auto_delete, #exchange{auto_delete = AutoDelete}) -> AutoDelete;
-i(internal, #exchange{internal = Internal}) -> Internal;
-i(arguments, #exchange{arguments = Arguments}) -> Arguments;
-i(policy, X) -> case rabbit_policy:name(X) of
- none -> '';
- Policy -> Policy
- end;
-i(Item, _) -> throw({bad_argument, Item}).
-
-info(X = #exchange{}) -> infos(?INFO_KEYS, X).
-
-info(X = #exchange{}, Items) -> infos(Items, X).
-
-info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end).
-
-info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
-
-route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
- decorators = Decorators} = X,
- #delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
- case {RName, rabbit_exchange_decorator:select(route, Decorators)} of
- {<<"">>, []} ->
- %% Optimisation
- [rabbit_misc:r(VHost, queue, RK) || RK <- lists:usort(RKs)];
- {_, SelectedDecorators} ->
- lists:usort(route1(Delivery, SelectedDecorators, {[X], XName, []}))
- end.
-
-route1(_, _, {[], _, QNames}) ->
- QNames;
-route1(Delivery, Decorators,
- {[X = #exchange{type = Type} | WorkList], SeenXs, QNames}) ->
- ExchangeDests = (type_to_module(Type)):route(X, Delivery),
- DecorateDests = process_decorators(X, Decorators, Delivery),
- AlternateDests = process_alternate(X, ExchangeDests),
- route1(Delivery, Decorators,
- lists:foldl(fun process_route/2, {WorkList, SeenXs, QNames},
- AlternateDests ++ DecorateDests ++ ExchangeDests)).
-
-process_alternate(#exchange{arguments = []}, _Results) -> %% optimisation
- [];
-process_alternate(#exchange{name = XName, arguments = Args}, []) ->
- case rabbit_misc:r_arg(XName, exchange, Args, <<"alternate-exchange">>) of
- undefined -> [];
- AName -> [AName]
- end;
-process_alternate(_X, _Results) ->
- [].
-
-process_decorators(_, [], _) -> %% optimisation
- [];
-process_decorators(X, Decorators, Delivery) ->
- lists:append([Decorator:route(X, Delivery) || Decorator <- Decorators]).
-
-process_route(#resource{kind = exchange} = XName,
- {_WorkList, XName, _QNames} = Acc) ->
- Acc;
-process_route(#resource{kind = exchange} = XName,
- {WorkList, #resource{kind = exchange} = SeenX, QNames}) ->
- {cons_if_present(XName, WorkList),
- gb_sets:from_list([SeenX, XName]), QNames};
-process_route(#resource{kind = exchange} = XName,
- {WorkList, SeenXs, QNames} = Acc) ->
- case gb_sets:is_element(XName, SeenXs) of
- true -> Acc;
- false -> {cons_if_present(XName, WorkList),
- gb_sets:add_element(XName, SeenXs), QNames}
- end;
-process_route(#resource{kind = queue} = QName,
- {WorkList, SeenXs, QNames}) ->
- {WorkList, SeenXs, [QName | QNames]}.
-
-cons_if_present(XName, L) ->
- case lookup(XName) of
- {ok, X} -> [X | L];
- {error, not_found} -> L
- end.
-
-call_with_exchange(XName, Fun) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () -> case mnesia:read({rabbit_exchange, XName}) of
- [] -> rabbit_misc:const({error, not_found});
- [X] -> Fun(X)
- end
- end).
-
-delete(XName, IfUnused) ->
- Fun = case IfUnused of
- true -> fun conditional_delete/1;
- false -> fun unconditional_delete/1
- end,
- call_with_exchange(
- XName,
- fun (X) ->
- case Fun(X) of
- {deleted, X, Bs, Deletions} ->
- rabbit_binding:process_deletions(
- rabbit_binding:add_deletion(
- XName, {X, deleted, Bs}, Deletions));
- {error, _InUseOrNotFound} = E ->
- rabbit_misc:const(E)
- end
- end).
-
-validate_binding(X = #exchange{type = XType}, Binding) ->
- Module = type_to_module(XType),
- Module:validate_binding(X, Binding).
-
-maybe_auto_delete(#exchange{auto_delete = false}) ->
- not_deleted;
-maybe_auto_delete(#exchange{auto_delete = true} = X) ->
- case conditional_delete(X) of
- {error, in_use} -> not_deleted;
- {deleted, X, [], Deletions} -> {deleted, Deletions}
- end.
-
-conditional_delete(X = #exchange{name = XName}) ->
- case rabbit_binding:has_for_source(XName) of
- false -> unconditional_delete(X);
- true -> {error, in_use}
- end.
-
-unconditional_delete(X = #exchange{name = XName}) ->
- %% this 'guarded' delete prevents unnecessary writes to the mnesia
- %% disk log
- case mnesia:wread({rabbit_durable_exchange, XName}) of
- [] -> ok;
- [_] -> ok = mnesia:delete({rabbit_durable_exchange, XName})
- end,
- ok = mnesia:delete({rabbit_exchange, XName}),
- ok = mnesia:delete({rabbit_exchange_serial, XName}),
- Bindings = rabbit_binding:remove_for_source(XName),
- {deleted, X, Bindings, rabbit_binding:remove_for_destination(XName)}.
-
-next_serial(XName) ->
- Serial = peek_serial(XName, write),
- ok = mnesia:write(rabbit_exchange_serial,
- #exchange_serial{name = XName, next = Serial + 1}, write),
- Serial.
-
-peek_serial(XName) -> peek_serial(XName, read).
-
-peek_serial(XName, LockType) ->
- case mnesia:read(rabbit_exchange_serial, XName, LockType) of
- [#exchange_serial{next = Serial}] -> Serial;
- _ -> 1
- end.
-
-invalid_module(T) ->
- rabbit_log:warning("Could not find exchange type ~s.~n", [T]),
- put({xtype_to_module, T}, rabbit_exchange_type_invalid),
- rabbit_exchange_type_invalid.
-
-%% Used with atoms from records; e.g., the type is expected to exist.
-type_to_module(T) ->
- case get({xtype_to_module, T}) of
- undefined ->
- case rabbit_registry:lookup_module(exchange, T) of
- {ok, Module} -> put({xtype_to_module, T}, Module),
- Module;
- {error, not_found} -> invalid_module(T)
- end;
- Module ->
- Module
- end.
diff --git a/src/rabbit_exchange_decorator.erl b/src/rabbit_exchange_decorator.erl
deleted file mode 100644
index 505998b9..00000000
--- a/src/rabbit_exchange_decorator.erl
+++ /dev/null
@@ -1,106 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_decorator).
-
--include("rabbit.hrl").
-
--export([select/2, set/1]).
-
-%% This is like an exchange type except that:
-%%
-%% 1) It applies to all exchanges as soon as it is installed, therefore
-%% 2) It is not allowed to affect validation, so no validate/1 or
-%% assert_args_equivalence/2
-%%
-%% It's possible in the future we might make decorators
-%% able to manipulate messages as they are published.
-
--ifdef(use_specs).
-
--type(tx() :: 'transaction' | 'none').
--type(serial() :: pos_integer() | tx()).
-
--callback description() -> [proplists:property()].
-
-%% Should Rabbit ensure that all binding events that are
-%% delivered to an individual exchange can be serialised? (they
-%% might still be delivered out of order, but there'll be a
-%% serial number).
--callback serialise_events(rabbit_types:exchange()) -> boolean().
-
-%% called after declaration and recovery
--callback create(tx(), rabbit_types:exchange()) -> 'ok'.
-
-%% called after exchange (auto)deletion.
--callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
- 'ok'.
-
-%% called when the policy attached to this exchange changes.
--callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
- 'ok'.
-
-%% called after a binding has been added or recovered
--callback add_binding(serial(), rabbit_types:exchange(),
- rabbit_types:binding()) -> 'ok'.
-
-%% called after bindings have been deleted.
--callback remove_bindings(serial(), rabbit_types:exchange(),
- [rabbit_types:binding()]) -> 'ok'.
-
-%% Allows additional destinations to be added to the routing decision.
--callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
- [rabbit_amqqueue:name() | rabbit_exchange:name()].
-
-%% Whether the decorator wishes to receive callbacks for the exchange
-%% none:no callbacks, noroute:all callbacks except route, all:all callbacks
--callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {serialise_events, 1}, {create, 2}, {delete, 3},
- {policy_changed, 2}, {add_binding, 3}, {remove_bindings, 3},
- {route, 2}, {active_for, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% select a subset of active decorators
-select(all, {Route, NoRoute}) -> filter(Route ++ NoRoute);
-select(route, {Route, _NoRoute}) -> filter(Route);
-select(raw, {Route, NoRoute}) -> Route ++ NoRoute.
-
-filter(Modules) ->
- [M || M <- Modules, code:which(M) =/= non_existing].
-
-set(X) ->
- Decs = lists:foldl(fun (D, {Route, NoRoute}) ->
- ActiveFor = D:active_for(X),
- {cons_if_eq(all, ActiveFor, D, Route),
- cons_if_eq(noroute, ActiveFor, D, NoRoute)}
- end, {[], []}, list()),
- X#exchange{decorators = Decs}.
-
-list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
-
-cons_if_eq(Select, Select, Item, List) -> [Item | List];
-cons_if_eq(_Select, _Other, _Item, List) -> List.
diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl
deleted file mode 100644
index ce7a436b..00000000
--- a/src/rabbit_exchange_type.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type).
-
--ifdef(use_specs).
-
--type(tx() :: 'transaction' | 'none').
--type(serial() :: pos_integer() | tx()).
-
--callback description() -> [proplists:property()].
-
-%% Should Rabbit ensure that all binding events that are
-%% delivered to an individual exchange can be serialised? (they
-%% might still be delivered out of order, but there'll be a
-%% serial number).
--callback serialise_events() -> boolean().
-
-%% The no_return is there so that we can have an "invalid" exchange
-%% type (see rabbit_exchange_type_invalid).
--callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
- rabbit_router:match_result().
-
-%% called BEFORE declaration, to check args etc; may exit with #amqp_error{}
--callback validate(rabbit_types:exchange()) -> 'ok'.
-
-%% called BEFORE declaration, to check args etc
--callback validate_binding(rabbit_types:exchange(), rabbit_types:binding()) ->
- rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
-
-%% called after declaration and recovery
--callback create(tx(), rabbit_types:exchange()) -> 'ok'.
-
-%% called after exchange (auto)deletion.
--callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
- 'ok'.
-
-%% called when the policy attached to this exchange changes.
--callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
- 'ok'.
-
-%% called after a binding has been added or recovered
--callback add_binding(serial(), rabbit_types:exchange(),
- rabbit_types:binding()) -> 'ok'.
-
-%% called after bindings have been deleted.
--callback remove_bindings(serial(), rabbit_types:exchange(),
- [rabbit_types:binding()]) -> 'ok'.
-
-%% called when comparing exchanges for equivalence - should return ok or
-%% exit with #amqp_error{}
--callback assert_args_equivalence(rabbit_types:exchange(),
- rabbit_framing:amqp_table()) ->
- 'ok' | rabbit_types:connection_exit().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {serialise_events, 0}, {route, 2},
- {validate, 1}, {validate_binding, 2}, {policy_changed, 2},
- {create, 2}, {delete, 3}, {add_binding, 3}, {remove_bindings, 3},
- {assert_args_equivalence, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_exchange_type_direct.erl b/src/rabbit_exchange_type_direct.erl
deleted file mode 100644
index 52704ab6..00000000
--- a/src/rabbit_exchange_type_direct.erl
+++ /dev/null
@@ -1,51 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type_direct).
--include("rabbit.hrl").
-
--behaviour(rabbit_exchange_type).
-
--export([description/0, serialise_events/0, route/2]).
--export([validate/1, validate_binding/2,
- create/2, delete/3, policy_changed/2, add_binding/3,
- remove_bindings/3, assert_args_equivalence/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "exchange type direct"},
- {mfa, {rabbit_registry, register,
- [exchange, <<"direct">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-description() ->
- [{description, <<"AMQP direct exchange, as per the AMQP specification">>}].
-
-serialise_events() -> false.
-
-route(#exchange{name = Name},
- #delivery{message = #basic_message{routing_keys = Routes}}) ->
- rabbit_router:match_routing_key(Name, Routes).
-
-validate(_X) -> ok.
-validate_binding(_X, _B) -> ok.
-create(_Tx, _X) -> ok.
-delete(_Tx, _X, _Bs) -> ok.
-policy_changed(_X1, _X2) -> ok.
-add_binding(_Tx, _X, _B) -> ok.
-remove_bindings(_Tx, _X, _Bs) -> ok.
-assert_args_equivalence(X, Args) ->
- rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/src/rabbit_exchange_type_fanout.erl b/src/rabbit_exchange_type_fanout.erl
deleted file mode 100644
index 068472bb..00000000
--- a/src/rabbit_exchange_type_fanout.erl
+++ /dev/null
@@ -1,50 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type_fanout).
--include("rabbit.hrl").
-
--behaviour(rabbit_exchange_type).
-
--export([description/0, serialise_events/0, route/2]).
--export([validate/1, validate_binding/2,
- create/2, delete/3, policy_changed/2, add_binding/3,
- remove_bindings/3, assert_args_equivalence/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "exchange type fanout"},
- {mfa, {rabbit_registry, register,
- [exchange, <<"fanout">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-description() ->
- [{description, <<"AMQP fanout exchange, as per the AMQP specification">>}].
-
-serialise_events() -> false.
-
-route(#exchange{name = Name}, _Delivery) ->
- rabbit_router:match_routing_key(Name, ['_']).
-
-validate(_X) -> ok.
-validate_binding(_X, _B) -> ok.
-create(_Tx, _X) -> ok.
-delete(_Tx, _X, _Bs) -> ok.
-policy_changed(_X1, _X2) -> ok.
-add_binding(_Tx, _X, _B) -> ok.
-remove_bindings(_Tx, _X, _Bs) -> ok.
-assert_args_equivalence(X, Args) ->
- rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/src/rabbit_exchange_type_headers.erl b/src/rabbit_exchange_type_headers.erl
deleted file mode 100644
index baec9c29..00000000
--- a/src/rabbit_exchange_type_headers.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type_headers).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--behaviour(rabbit_exchange_type).
-
--export([description/0, serialise_events/0, route/2]).
--export([validate/1, validate_binding/2,
- create/2, delete/3, policy_changed/2, add_binding/3,
- remove_bindings/3, assert_args_equivalence/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "exchange type headers"},
- {mfa, {rabbit_registry, register,
- [exchange, <<"headers">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
--ifdef(use_specs).
--spec(headers_match/2 :: (rabbit_framing:amqp_table(),
- rabbit_framing:amqp_table()) -> boolean()).
--endif.
-
-description() ->
- [{description, <<"AMQP headers exchange, as per the AMQP specification">>}].
-
-serialise_events() -> false.
-
-route(#exchange{name = Name},
- #delivery{message = #basic_message{content = Content}}) ->
- Headers = case (Content#content.properties)#'P_basic'.headers of
- undefined -> [];
- H -> rabbit_misc:sort_field_table(H)
- end,
- rabbit_router:match_bindings(
- Name, fun (#binding{args = Spec}) -> headers_match(Spec, Headers) end).
-
-validate_binding(_X, #binding{args = Args}) ->
- case rabbit_misc:table_lookup(Args, <<"x-match">>) of
- {longstr, <<"all">>} -> ok;
- {longstr, <<"any">>} -> ok;
- {longstr, Other} -> {error,
- {binding_invalid,
- "Invalid x-match field value ~p; "
- "expected all or any", [Other]}};
- {Type, Other} -> {error,
- {binding_invalid,
- "Invalid x-match field type ~p (value ~p); "
- "expected longstr", [Type, Other]}};
- undefined -> ok %% [0]
- end.
-%% [0] spec is vague on whether it can be omitted but in practice it's
-%% useful to allow people to do this
-
-parse_x_match({longstr, <<"all">>}) -> all;
-parse_x_match({longstr, <<"any">>}) -> any;
-parse_x_match(_) -> all. %% legacy; we didn't validate
-
-%% Horrendous matching algorithm. Depends for its merge-like
-%% (linear-time) behaviour on the lists:keysort
-%% (rabbit_misc:sort_field_table) that route/1 and
-%% rabbit_binding:{add,remove}/2 do.
-%%
-%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-%% In other words: REQUIRES BOTH PATTERN AND DATA TO BE SORTED ASCENDING BY KEY.
-%% !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-%%
-headers_match(Args, Data) ->
- MK = parse_x_match(rabbit_misc:table_lookup(Args, <<"x-match">>)),
- headers_match(Args, Data, true, false, MK).
-
-headers_match([], _Data, AllMatch, _AnyMatch, all) ->
- AllMatch;
-headers_match([], _Data, _AllMatch, AnyMatch, any) ->
- AnyMatch;
-headers_match([{<<"x-", _/binary>>, _PT, _PV} | PRest], Data,
- AllMatch, AnyMatch, MatchKind) ->
- headers_match(PRest, Data, AllMatch, AnyMatch, MatchKind);
-headers_match(_Pattern, [], _AllMatch, AnyMatch, MatchKind) ->
- headers_match([], [], false, AnyMatch, MatchKind);
-headers_match(Pattern = [{PK, _PT, _PV} | _], [{DK, _DT, _DV} | DRest],
- AllMatch, AnyMatch, MatchKind) when PK > DK ->
- headers_match(Pattern, DRest, AllMatch, AnyMatch, MatchKind);
-headers_match([{PK, _PT, _PV} | PRest], Data = [{DK, _DT, _DV} | _],
- _AllMatch, AnyMatch, MatchKind) when PK < DK ->
- headers_match(PRest, Data, false, AnyMatch, MatchKind);
-headers_match([{PK, PT, PV} | PRest], [{DK, DT, DV} | DRest],
- AllMatch, AnyMatch, MatchKind) when PK == DK ->
- {AllMatch1, AnyMatch1} =
- if
- %% It's not properly specified, but a "no value" in a
- %% pattern field is supposed to mean simple presence of
- %% the corresponding data field. I've interpreted that to
- %% mean a type of "void" for the pattern field.
- PT == void -> {AllMatch, true};
- %% Similarly, it's not specified, but I assume that a
- %% mismatched type causes a mismatched value.
- PT =/= DT -> {false, AnyMatch};
- PV == DV -> {AllMatch, true};
- true -> {false, AnyMatch}
- end,
- headers_match(PRest, DRest, AllMatch1, AnyMatch1, MatchKind).
-
-validate(_X) -> ok.
-create(_Tx, _X) -> ok.
-delete(_Tx, _X, _Bs) -> ok.
-policy_changed(_X1, _X2) -> ok.
-add_binding(_Tx, _X, _B) -> ok.
-remove_bindings(_Tx, _X, _Bs) -> ok.
-assert_args_equivalence(X, Args) ->
- rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/src/rabbit_exchange_type_invalid.erl b/src/rabbit_exchange_type_invalid.erl
deleted file mode 100644
index 84bb2182..00000000
--- a/src/rabbit_exchange_type_invalid.erl
+++ /dev/null
@@ -1,52 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type_invalid).
--include("rabbit.hrl").
-
--behaviour(rabbit_exchange_type).
-
--export([description/0, serialise_events/0, route/2]).
--export([validate/1, validate_binding/2,
- create/2, delete/3, policy_changed/2, add_binding/3,
- remove_bindings/3, assert_args_equivalence/2]).
-
-description() ->
- [{description,
- <<"Dummy exchange type, to be used when the intended one is not found.">>
- }].
-
-serialise_events() -> false.
-
--ifdef(use_specs).
--spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
- -> no_return()).
--endif.
-route(#exchange{name = Name, type = Type}, _) ->
- rabbit_misc:protocol_error(
- precondition_failed,
- "Cannot route message through ~s: exchange type ~s not found",
- [rabbit_misc:rs(Name), Type]).
-
-validate(_X) -> ok.
-validate_binding(_X, _B) -> ok.
-create(_Tx, _X) -> ok.
-delete(_Tx, _X, _Bs) -> ok.
-policy_changed(_X1, _X2) -> ok.
-add_binding(_Tx, _X, _B) -> ok.
-remove_bindings(_Tx, _X, _Bs) -> ok.
-assert_args_equivalence(X, Args) ->
- rabbit_exchange:assert_args_equivalence(X, Args).
diff --git a/src/rabbit_exchange_type_topic.erl b/src/rabbit_exchange_type_topic.erl
deleted file mode 100644
index 8ba29deb..00000000
--- a/src/rabbit_exchange_type_topic.erl
+++ /dev/null
@@ -1,267 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type_topic).
-
--include("rabbit.hrl").
-
--behaviour(rabbit_exchange_type).
-
--export([description/0, serialise_events/0, route/2]).
--export([validate/1, validate_binding/2,
- create/2, delete/3, policy_changed/2, add_binding/3,
- remove_bindings/3, assert_args_equivalence/2]).
-
--rabbit_boot_step({?MODULE,
- [{description, "exchange type topic"},
- {mfa, {rabbit_registry, register,
- [exchange, <<"topic">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-%%----------------------------------------------------------------------------
-
-description() ->
- [{description, <<"AMQP topic exchange, as per the AMQP specification">>}].
-
-serialise_events() -> false.
-
-%% NB: This may return duplicate results in some situations (that's ok)
-route(#exchange{name = X},
- #delivery{message = #basic_message{routing_keys = Routes}}) ->
- lists:append([begin
- Words = split_topic_key(RKey),
- mnesia:async_dirty(fun trie_match/2, [X, Words])
- end || RKey <- Routes]).
-
-validate(_X) -> ok.
-validate_binding(_X, _B) -> ok.
-create(_Tx, _X) -> ok.
-
-delete(transaction, #exchange{name = X}, _Bs) ->
- trie_remove_all_nodes(X),
- trie_remove_all_edges(X),
- trie_remove_all_bindings(X),
- ok;
-delete(none, _Exchange, _Bs) ->
- ok.
-
-policy_changed(_X1, _X2) -> ok.
-
-add_binding(transaction, _Exchange, Binding) ->
- internal_add_binding(Binding);
-add_binding(none, _Exchange, _Binding) ->
- ok.
-
-remove_bindings(transaction, _X, Bs) ->
- %% See rabbit_binding:lock_route_tables for the rationale for
- %% taking table locks.
- case Bs of
- [_] -> ok;
- _ -> [mnesia:lock({table, T}, write) ||
- T <- [rabbit_topic_trie_node,
- rabbit_topic_trie_edge,
- rabbit_topic_trie_binding]]
- end,
- [begin
- Path = [{FinalNode, _} | _] =
- follow_down_get_path(X, split_topic_key(K)),
- trie_remove_binding(X, FinalNode, D),
- remove_path_if_empty(X, Path)
- end || #binding{source = X, key = K, destination = D} <- Bs],
- ok;
-remove_bindings(none, _X, _Bs) ->
- ok.
-
-assert_args_equivalence(X, Args) ->
- rabbit_exchange:assert_args_equivalence(X, Args).
-
-%%----------------------------------------------------------------------------
-
-internal_add_binding(#binding{source = X, key = K, destination = D}) ->
- FinalNode = follow_down_create(X, split_topic_key(K)),
- trie_add_binding(X, FinalNode, D),
- ok.
-
-trie_match(X, Words) ->
- trie_match(X, root, Words, []).
-
-trie_match(X, Node, [], ResAcc) ->
- trie_match_part(X, Node, "#", fun trie_match_skip_any/4, [],
- trie_bindings(X, Node) ++ ResAcc);
-trie_match(X, Node, [W | RestW] = Words, ResAcc) ->
- lists:foldl(fun ({WArg, MatchFun, RestWArg}, Acc) ->
- trie_match_part(X, Node, WArg, MatchFun, RestWArg, Acc)
- end, ResAcc, [{W, fun trie_match/4, RestW},
- {"*", fun trie_match/4, RestW},
- {"#", fun trie_match_skip_any/4, Words}]).
-
-trie_match_part(X, Node, Search, MatchFun, RestW, ResAcc) ->
- case trie_child(X, Node, Search) of
- {ok, NextNode} -> MatchFun(X, NextNode, RestW, ResAcc);
- error -> ResAcc
- end.
-
-trie_match_skip_any(X, Node, [], ResAcc) ->
- trie_match(X, Node, [], ResAcc);
-trie_match_skip_any(X, Node, [_ | RestW] = Words, ResAcc) ->
- trie_match_skip_any(X, Node, RestW,
- trie_match(X, Node, Words, ResAcc)).
-
-follow_down_create(X, Words) ->
- case follow_down_last_node(X, Words) of
- {ok, FinalNode} -> FinalNode;
- {error, Node, RestW} -> lists:foldl(
- fun (W, CurNode) ->
- NewNode = new_node_id(),
- trie_add_edge(X, CurNode, NewNode, W),
- NewNode
- end, Node, RestW)
- end.
-
-follow_down_last_node(X, Words) ->
- follow_down(X, fun (_, Node, _) -> Node end, root, Words).
-
-follow_down_get_path(X, Words) ->
- {ok, Path} =
- follow_down(X, fun (W, Node, PathAcc) -> [{Node, W} | PathAcc] end,
- [{root, none}], Words),
- Path.
-
-follow_down(X, AccFun, Acc0, Words) ->
- follow_down(X, root, AccFun, Acc0, Words).
-
-follow_down(_X, _CurNode, _AccFun, Acc, []) ->
- {ok, Acc};
-follow_down(X, CurNode, AccFun, Acc, Words = [W | RestW]) ->
- case trie_child(X, CurNode, W) of
- {ok, NextNode} -> follow_down(X, NextNode, AccFun,
- AccFun(W, NextNode, Acc), RestW);
- error -> {error, Acc, Words}
- end.
-
-remove_path_if_empty(_, [{root, none}]) ->
- ok;
-remove_path_if_empty(X, [{Node, W} | [{Parent, _} | _] = RestPath]) ->
- case mnesia:read(rabbit_topic_trie_node,
- #trie_node{exchange_name = X, node_id = Node}, write) of
- [] -> trie_remove_edge(X, Parent, Node, W),
- remove_path_if_empty(X, RestPath);
- _ -> ok
- end.
-
-trie_child(X, Node, Word) ->
- case mnesia:read({rabbit_topic_trie_edge,
- #trie_edge{exchange_name = X,
- node_id = Node,
- word = Word}}) of
- [#topic_trie_edge{node_id = NextNode}] -> {ok, NextNode};
- [] -> error
- end.
-
-trie_bindings(X, Node) ->
- MatchHead = #topic_trie_binding{
- trie_binding = #trie_binding{exchange_name = X,
- node_id = Node,
- destination = '$1'}},
- mnesia:select(rabbit_topic_trie_binding, [{MatchHead, [], ['$1']}]).
-
-trie_update_node_counts(X, Node, Field, Delta) ->
- E = case mnesia:read(rabbit_topic_trie_node,
- #trie_node{exchange_name = X,
- node_id = Node}, write) of
- [] -> #topic_trie_node{trie_node = #trie_node{
- exchange_name = X,
- node_id = Node},
- edge_count = 0,
- binding_count = 0};
- [E0] -> E0
- end,
- case setelement(Field, E, element(Field, E) + Delta) of
- #topic_trie_node{edge_count = 0, binding_count = 0} ->
- ok = mnesia:delete_object(rabbit_topic_trie_node, E, write);
- EN ->
- ok = mnesia:write(rabbit_topic_trie_node, EN, write)
- end.
-
-trie_add_edge(X, FromNode, ToNode, W) ->
- trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, +1),
- trie_edge_op(X, FromNode, ToNode, W, fun mnesia:write/3).
-
-trie_remove_edge(X, FromNode, ToNode, W) ->
- trie_update_node_counts(X, FromNode, #topic_trie_node.edge_count, -1),
- trie_edge_op(X, FromNode, ToNode, W, fun mnesia:delete_object/3).
-
-trie_edge_op(X, FromNode, ToNode, W, Op) ->
- ok = Op(rabbit_topic_trie_edge,
- #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
- node_id = FromNode,
- word = W},
- node_id = ToNode},
- write).
-
-trie_add_binding(X, Node, D) ->
- trie_update_node_counts(X, Node, #topic_trie_node.binding_count, +1),
- trie_binding_op(X, Node, D, fun mnesia:write/3).
-
-trie_remove_binding(X, Node, D) ->
- trie_update_node_counts(X, Node, #topic_trie_node.binding_count, -1),
- trie_binding_op(X, Node, D, fun mnesia:delete_object/3).
-
-trie_binding_op(X, Node, D, Op) ->
- ok = Op(rabbit_topic_trie_binding,
- #topic_trie_binding{
- trie_binding = #trie_binding{exchange_name = X,
- node_id = Node,
- destination = D}},
- write).
-
-trie_remove_all_nodes(X) ->
- remove_all(rabbit_topic_trie_node,
- #topic_trie_node{trie_node = #trie_node{exchange_name = X,
- _ = '_'},
- _ = '_'}).
-
-trie_remove_all_edges(X) ->
- remove_all(rabbit_topic_trie_edge,
- #topic_trie_edge{trie_edge = #trie_edge{exchange_name = X,
- _ = '_'},
- _ = '_'}).
-
-trie_remove_all_bindings(X) ->
- remove_all(rabbit_topic_trie_binding,
- #topic_trie_binding{
- trie_binding = #trie_binding{exchange_name = X, _ = '_'},
- _ = '_'}).
-
-remove_all(Table, Pattern) ->
- lists:foreach(fun (R) -> mnesia:delete_object(Table, R, write) end,
- mnesia:match_object(Table, Pattern, write)).
-
-new_node_id() ->
- rabbit_guid:gen().
-
-split_topic_key(Key) ->
- split_topic_key(Key, [], []).
-
-split_topic_key(<<>>, [], []) ->
- [];
-split_topic_key(<<>>, RevWordAcc, RevResAcc) ->
- lists:reverse([lists:reverse(RevWordAcc) | RevResAcc]);
-split_topic_key(<<$., Rest/binary>>, RevWordAcc, RevResAcc) ->
- split_topic_key(Rest, [], [lists:reverse(RevWordAcc) | RevResAcc]);
-split_topic_key(<<C:8, Rest/binary>>, RevWordAcc, RevResAcc) ->
- split_topic_key(Rest, [C | RevWordAcc], RevResAcc).
diff --git a/src/rabbit_file.erl b/src/rabbit_file.erl
deleted file mode 100644
index 4cf314ca..00000000
--- a/src/rabbit_file.erl
+++ /dev/null
@@ -1,311 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_file).
-
--include_lib("kernel/include/file.hrl").
-
--export([is_file/1, is_dir/1, file_size/1, ensure_dir/1, wildcard/2, list_dir/1]).
--export([read_term_file/1, write_term_file/2, write_file/2, write_file/3]).
--export([append_file/2, ensure_parent_dirs_exist/1]).
--export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
--export([lock_file/1]).
-
--define(TMP_EXT, ".tmp").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(ok_or_error() :: rabbit_types:ok_or_error(any())).
-
--spec(is_file/1 :: ((file:filename())) -> boolean()).
--spec(is_dir/1 :: ((file:filename())) -> boolean()).
--spec(file_size/1 :: ((file:filename())) -> non_neg_integer()).
--spec(ensure_dir/1 :: ((file:filename())) -> ok_or_error()).
--spec(wildcard/2 :: (string(), file:filename()) -> [file:filename()]).
--spec(list_dir/1 :: (file:filename()) -> rabbit_types:ok_or_error2(
- [file:filename()], any())).
--spec(read_term_file/1 ::
- (file:filename()) -> {'ok', [any()]} | rabbit_types:error(any())).
--spec(write_term_file/2 :: (file:filename(), [any()]) -> ok_or_error()).
--spec(write_file/2 :: (file:filename(), iodata()) -> ok_or_error()).
--spec(write_file/3 :: (file:filename(), iodata(), [any()]) -> ok_or_error()).
--spec(append_file/2 :: (file:filename(), string()) -> ok_or_error()).
--spec(ensure_parent_dirs_exist/1 :: (string()) -> 'ok').
--spec(rename/2 ::
- (file:filename(), file:filename()) -> ok_or_error()).
--spec(delete/1 :: ([file:filename()]) -> ok_or_error()).
--spec(recursive_delete/1 ::
- ([file:filename()])
- -> rabbit_types:ok_or_error({file:filename(), any()})).
--spec(recursive_copy/2 ::
- (file:filename(), file:filename())
- -> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
--spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-is_file(File) ->
- case read_file_info(File) of
- {ok, #file_info{type=regular}} -> true;
- {ok, #file_info{type=directory}} -> true;
- _ -> false
- end.
-
-is_dir(Dir) -> is_dir_internal(read_file_info(Dir)).
-
-is_dir_no_handle(Dir) -> is_dir_internal(prim_file:read_file_info(Dir)).
-
-is_dir_internal({ok, #file_info{type=directory}}) -> true;
-is_dir_internal(_) -> false.
-
-file_size(File) ->
- case read_file_info(File) of
- {ok, #file_info{size=Size}} -> Size;
- _ -> 0
- end.
-
-ensure_dir(File) -> with_fhc_handle(fun () -> ensure_dir_internal(File) end).
-
-ensure_dir_internal("/") ->
- ok;
-ensure_dir_internal(File) ->
- Dir = filename:dirname(File),
- case is_dir_no_handle(Dir) of
- true -> ok;
- false -> ensure_dir_internal(Dir),
- prim_file:make_dir(Dir)
- end.
-
-wildcard(Pattern, Dir) ->
- {ok, Files} = list_dir(Dir),
- {ok, RE} = re:compile(Pattern, [anchored]),
- [File || File <- Files, match =:= re:run(File, RE, [{capture, none}])].
-
-list_dir(Dir) -> with_fhc_handle(fun () -> prim_file:list_dir(Dir) end).
-
-read_file_info(File) ->
- with_fhc_handle(fun () -> prim_file:read_file_info(File) end).
-
-with_fhc_handle(Fun) ->
- with_fhc_handle(1, Fun).
-
-with_fhc_handle(N, Fun) ->
- ok = file_handle_cache:obtain(N),
- try Fun()
- after ok = file_handle_cache:release(N)
- end.
-
-read_term_file(File) ->
- try
- {ok, Data} = with_fhc_handle(fun () -> prim_file:read_file(File) end),
- {ok, Tokens, _} = erl_scan:string(binary_to_list(Data)),
- TokenGroups = group_tokens(Tokens),
- {ok, [begin
- {ok, Term} = erl_parse:parse_term(Tokens1),
- Term
- end || Tokens1 <- TokenGroups]}
- catch
- error:{badmatch, Error} -> Error
- end.
-
-group_tokens(Ts) -> [lists:reverse(G) || G <- group_tokens([], Ts)].
-
-group_tokens([], []) -> [];
-group_tokens(Cur, []) -> [Cur];
-group_tokens(Cur, [T = {dot, _} | Ts]) -> [[T | Cur] | group_tokens([], Ts)];
-group_tokens(Cur, [T | Ts]) -> group_tokens([T | Cur], Ts).
-
-write_term_file(File, Terms) ->
- write_file(File, list_to_binary([io_lib:format("~w.~n", [Term]) ||
- Term <- Terms])).
-
-write_file(Path, Data) -> write_file(Path, Data, []).
-
-write_file(Path, Data, Modes) ->
- Modes1 = [binary, write | (Modes -- [binary, write])],
- case make_binary(Data) of
- Bin when is_binary(Bin) -> write_file1(Path, Bin, Modes1);
- {error, _} = E -> E
- end.
-
-%% make_binary/1 is based on the corresponding function in the
-%% kernel/file.erl module of the Erlang R14B02 release, which is
-%% licensed under the EPL.
-
-make_binary(Bin) when is_binary(Bin) ->
- Bin;
-make_binary(List) ->
- try
- iolist_to_binary(List)
- catch error:Reason ->
- {error, Reason}
- end.
-
-write_file1(Path, Bin, Modes) ->
- try
- with_synced_copy(Path, Modes,
- fun (Hdl) ->
- ok = prim_file:write(Hdl, Bin)
- end)
- catch
- error:{badmatch, Error} -> Error;
- _:{error, Error} -> {error, Error}
- end.
-
-with_synced_copy(Path, Modes, Fun) ->
- case lists:member(append, Modes) of
- true ->
- {error, append_not_supported, Path};
- false ->
- with_fhc_handle(
- fun () ->
- Bak = Path ++ ?TMP_EXT,
- case prim_file:open(Bak, Modes) of
- {ok, Hdl} ->
- try
- Result = Fun(Hdl),
- ok = prim_file:rename(Bak, Path),
- ok = prim_file:sync(Hdl),
- Result
- after
- prim_file:close(Hdl)
- end;
- {error, _} = E -> E
- end
- end)
- end.
-
-%% TODO the semantics of this function are rather odd. But see bug 25021.
-append_file(File, Suffix) ->
- case read_file_info(File) of
- {ok, FInfo} -> append_file(File, FInfo#file_info.size, Suffix);
- {error, enoent} -> append_file(File, 0, Suffix);
- Error -> Error
- end.
-
-append_file(_, _, "") ->
- ok;
-append_file(File, 0, Suffix) ->
- with_fhc_handle(fun () ->
- case prim_file:open([File, Suffix], [append]) of
- {ok, Fd} -> prim_file:close(Fd);
- Error -> Error
- end
- end);
-append_file(File, _, Suffix) ->
- case with_fhc_handle(2, fun () ->
- file:copy(File, {[File, Suffix], [append]})
- end) of
- {ok, _BytesCopied} -> ok;
- Error -> Error
- end.
-
-ensure_parent_dirs_exist(Filename) ->
- case ensure_dir(Filename) of
- ok -> ok;
- {error, Reason} ->
- throw({error, {cannot_create_parent_dirs, Filename, Reason}})
- end.
-
-rename(Old, New) -> with_fhc_handle(fun () -> prim_file:rename(Old, New) end).
-
-delete(File) -> with_fhc_handle(fun () -> prim_file:delete(File) end).
-
-recursive_delete(Files) ->
- with_fhc_handle(
- fun () -> lists:foldl(fun (Path, ok) -> recursive_delete1(Path);
- (_Path, {error, _Err} = Error) -> Error
- end, ok, Files)
- end).
-
-recursive_delete1(Path) ->
- case is_dir_no_handle(Path) and not(is_symlink_no_handle(Path)) of
- false -> case prim_file:delete(Path) of
- ok -> ok;
- {error, enoent} -> ok; %% Path doesn't exist anyway
- {error, Err} -> {error, {Path, Err}}
- end;
- true -> case prim_file:list_dir(Path) of
- {ok, FileNames} ->
- case lists:foldl(
- fun (FileName, ok) ->
- recursive_delete1(
- filename:join(Path, FileName));
- (_FileName, Error) ->
- Error
- end, ok, FileNames) of
- ok ->
- case prim_file:del_dir(Path) of
- ok -> ok;
- {error, Err} -> {error, {Path, Err}}
- end;
- {error, _Err} = Error ->
- Error
- end;
- {error, Err} ->
- {error, {Path, Err}}
- end
- end.
-
-is_symlink_no_handle(File) ->
- case prim_file:read_link(File) of
- {ok, _} -> true;
- _ -> false
- end.
-
-recursive_copy(Src, Dest) ->
- %% Note that this uses the 'file' module and, hence, shouldn't be
- %% run on many processes at once.
- case is_dir(Src) of
- false -> case file:copy(Src, Dest) of
- {ok, _Bytes} -> ok;
- {error, enoent} -> ok; %% Path doesn't exist anyway
- {error, Err} -> {error, {Src, Dest, Err}}
- end;
- true -> case file:list_dir(Src) of
- {ok, FileNames} ->
- case file:make_dir(Dest) of
- ok ->
- lists:foldl(
- fun (FileName, ok) ->
- recursive_copy(
- filename:join(Src, FileName),
- filename:join(Dest, FileName));
- (_FileName, Error) ->
- Error
- end, ok, FileNames);
- {error, Err} ->
- {error, {Src, Dest, Err}}
- end;
- {error, Err} ->
- {error, {Src, Dest, Err}}
- end
- end.
-
-%% TODO: When we stop supporting Erlang prior to R14, this should be
-%% replaced with file:open [write, exclusive]
-lock_file(Path) ->
- case is_file(Path) of
- true -> {error, eexist};
- false -> with_fhc_handle(
- fun () -> {ok, Lock} = prim_file:open(Path, [write]),
- ok = prim_file:close(Lock)
- end)
- end.
diff --git a/src/rabbit_framing.erl b/src/rabbit_framing.erl
deleted file mode 100644
index 51aaa999..00000000
--- a/src/rabbit_framing.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% TODO auto-generate
-
--module(rabbit_framing).
-
--ifdef(use_specs).
-
--export_type([protocol/0,
- amqp_field_type/0, amqp_property_type/0,
- amqp_table/0, amqp_array/0, amqp_value/0,
- amqp_method_name/0, amqp_method/0, amqp_method_record/0,
- amqp_method_field_name/0, amqp_property_record/0,
- amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
-
--type(protocol() :: 'rabbit_framing_amqp_0_8' | 'rabbit_framing_amqp_0_9_1').
-
--define(protocol_type(T), type(T :: rabbit_framing_amqp_0_8:T |
- rabbit_framing_amqp_0_9_1:T)).
-
--?protocol_type(amqp_field_type()).
--?protocol_type(amqp_property_type()).
--?protocol_type(amqp_table()).
--?protocol_type(amqp_array()).
--?protocol_type(amqp_value()).
--?protocol_type(amqp_method_name()).
--?protocol_type(amqp_method()).
--?protocol_type(amqp_method_record()).
--?protocol_type(amqp_method_field_name()).
--?protocol_type(amqp_property_record()).
--?protocol_type(amqp_exception()).
--?protocol_type(amqp_exception_code()).
--?protocol_type(amqp_class_id()).
-
--endif.
diff --git a/src/rabbit_guid.erl b/src/rabbit_guid.erl
deleted file mode 100644
index 70d1f0c1..00000000
--- a/src/rabbit_guid.erl
+++ /dev/null
@@ -1,177 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_guid).
-
--behaviour(gen_server).
-
--export([start_link/0]).
--export([filename/0]).
--export([gen/0, gen_secure/0, string/2, binary/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--define(SERVER, ?MODULE).
--define(SERIAL_FILENAME, "rabbit_serial").
-
--record(state, {serial}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([guid/0]).
-
--type(guid() :: binary()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(filename/0 :: () -> string()).
--spec(gen/0 :: () -> guid()).
--spec(gen_secure/0 :: () -> guid()).
--spec(string/2 :: (guid(), any()) -> string()).
--spec(binary/2 :: (guid(), any()) -> binary()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server:start_link({local, ?SERVER}, ?MODULE,
- [update_disk_serial()], []).
-
-%% We use this to detect a (possibly rather old) Mnesia directory,
-%% since it has existed since at least 1.7.0 (as far back as I cared
-%% to go).
-filename() ->
- filename:join(rabbit_mnesia:dir(), ?SERIAL_FILENAME).
-
-update_disk_serial() ->
- Filename = filename(),
- Serial = case rabbit_file:read_term_file(Filename) of
- {ok, [Num]} -> Num;
- {ok, []} -> 0; %% [1]
- {error, enoent} -> 0;
- {error, Reason} ->
- throw({error, {cannot_read_serial_file, Filename, Reason}})
- end,
- case rabbit_file:write_term_file(Filename, [Serial + 1]) of
- ok -> ok;
- {error, Reason1} ->
- throw({error, {cannot_write_serial_file, Filename, Reason1}})
- end,
- Serial.
-%% [1] a couple of users have reported startup failures due to the
-%% file being empty, presumably as a result of filesystem
-%% corruption. While rabbit doesn't cope with that in general, in this
-%% specific case we can be more accommodating.
-
-%% Generate an un-hashed guid.
-fresh() ->
- %% We don't use erlang:now() here because a) it may return
- %% duplicates when the system clock has been rewound prior to a
- %% restart, or ids were generated at a high rate (which causes
- %% now() to move ahead of the system time), and b) it is really
- %% slow since it takes a global lock and makes a system call.
- %%
- %% A persisted serial number, the node, and a unique reference
- %% (per node incarnation) uniquely identifies a process in space
- %% and time.
- Serial = gen_server:call(?SERVER, serial, infinity),
- {Serial, node(), make_ref()}.
-
-advance_blocks({B1, B2, B3, B4}, I) ->
- %% To produce a new set of blocks, we create a new 32bit block
- %% hashing {B5, I}. The new hash is used as last block, and the
- %% other three blocks are XORed with it.
- %%
- %% Doing this is convenient because it avoids cascading conflits,
- %% while being very fast. The conflicts are avoided by propagating
- %% the changes through all the blocks at each round by XORing, so
- %% the only occasion in which a collision will take place is when
- %% all 4 blocks are the same and the counter is the same.
- %%
- %% The range (2^32) is provided explicitly since phash uses 2^27
- %% by default.
- B5 = erlang:phash2({B1, I}, 4294967296),
- {{(B2 bxor B5), (B3 bxor B5), (B4 bxor B5), B5}, I+1}.
-
-%% generate a GUID. This function should be used when performance is a
-%% priority and predictability is not an issue. Otherwise use
-%% gen_secure/0.
-gen() ->
- %% We hash a fresh GUID with md5, split it in 4 blocks, and each
- %% time we need a new guid we rotate them producing a new hash
- %% with the aid of the counter. Look at the comments in
- %% advance_blocks/2 for details.
- case get(guid) of
- undefined -> <<B1:32, B2:32, B3:32, B4:32>> = Res =
- erlang:md5(term_to_binary(fresh())),
- put(guid, {{B1, B2, B3, B4}, 0}),
- Res;
- {BS, I} -> {{B1, B2, B3, B4}, _} = S = advance_blocks(BS, I),
- put(guid, S),
- <<B1:32, B2:32, B3:32, B4:32>>
- end.
-
-%% generate a non-predictable GUID.
-%%
-%% The id is only unique within a single cluster and as long as the
-%% serial store hasn't been deleted.
-%%
-%% If you are not concerned with predictability, gen/0 is faster.
-gen_secure() ->
- %% Here instead of hashing once we hash the GUID and the counter
- %% each time, so that the GUID is not predictable.
- G = case get(guid_secure) of
- undefined -> {fresh(), 0};
- {S, I} -> {S, I+1}
- end,
- put(guid_secure, G),
- erlang:md5(term_to_binary(G)).
-
-%% generate a readable string representation of a GUID.
-%%
-%% employs base64url encoding, which is safer in more contexts than
-%% plain base64.
-string(G, Prefix) ->
- Prefix ++ "-" ++ rabbit_misc:base64url(G).
-
-binary(G, Prefix) ->
- list_to_binary(string(G, Prefix)).
-
-%%----------------------------------------------------------------------------
-
-init([Serial]) ->
- {ok, #state{serial = Serial}}.
-
-handle_call(serial, _From, State = #state{serial = Serial}) ->
- {reply, Serial, State};
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/rabbit_heartbeat.erl b/src/rabbit_heartbeat.erl
deleted file mode 100644
index df9baed9..00000000
--- a/src/rabbit_heartbeat.erl
+++ /dev/null
@@ -1,132 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_heartbeat).
-
--export([start_heartbeat_sender/3, start_heartbeat_receiver/3,
- start_heartbeat_fun/1, pause_monitor/1, resume_monitor/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([heartbeaters/0]).
--export_type([start_heartbeat_fun/0]).
-
--type(heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}).
-
--type(heartbeat_callback() :: fun (() -> any())).
-
--type(start_heartbeat_fun() ::
- fun((rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
- non_neg_integer(), heartbeat_callback()) ->
- no_return())).
-
--spec(start_heartbeat_sender/3 ::
- (rabbit_net:socket(), non_neg_integer(), heartbeat_callback()) ->
- rabbit_types:ok(pid())).
--spec(start_heartbeat_receiver/3 ::
- (rabbit_net:socket(), non_neg_integer(), heartbeat_callback()) ->
- rabbit_types:ok(pid())).
-
--spec(start_heartbeat_fun/1 ::
- (pid()) -> start_heartbeat_fun()).
-
-
--spec(pause_monitor/1 :: (heartbeaters()) -> 'ok').
--spec(resume_monitor/1 :: (heartbeaters()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_heartbeat_sender(Sock, TimeoutSec, SendFun) ->
- %% the 'div 2' is there so that we don't end up waiting for nearly
- %% 2 * TimeoutSec before sending a heartbeat in the boundary case
- %% where the last message was sent just after a heartbeat.
- heartbeater({Sock, TimeoutSec * 1000 div 2, send_oct, 0,
- fun () -> SendFun(), continue end}).
-
-start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun) ->
- %% we check for incoming data every interval, and time out after
- %% two checks with no change. As a result we will time out between
- %% 2 and 3 intervals after the last data has been received.
- heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1,
- fun () -> ReceiveFun(), stop end}).
-
-start_heartbeat_fun(SupPid) ->
- fun (Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
- {ok, Sender} =
- start_heartbeater(SendTimeoutSec, SupPid, Sock,
- SendFun, heartbeat_sender,
- start_heartbeat_sender),
- {ok, Receiver} =
- start_heartbeater(ReceiveTimeoutSec, SupPid, Sock,
- ReceiveFun, heartbeat_receiver,
- start_heartbeat_receiver),
- {Sender, Receiver}
- end.
-
-pause_monitor({_Sender, none}) -> ok;
-pause_monitor({_Sender, Receiver}) -> Receiver ! pause, ok.
-
-resume_monitor({_Sender, none}) -> ok;
-resume_monitor({_Sender, Receiver}) -> Receiver ! resume, ok.
-
-%%----------------------------------------------------------------------------
-start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback) ->
- {ok, none};
-start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback) ->
- supervisor2:start_child(
- SupPid, {Name,
- {rabbit_heartbeat, Callback, [Sock, TimeoutSec, TimeoutFun]},
- transient, ?MAX_WAIT, worker, [rabbit_heartbeat]}).
-
-heartbeater(Params) ->
- {ok, proc_lib:spawn_link(fun () -> heartbeater(Params, {0, 0}) end)}.
-
-heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params,
- {StatVal, SameCount}) ->
- Recurse = fun (V) -> heartbeater(Params, V) end,
- receive
- pause -> receive
- resume -> Recurse({0, 0});
- Other -> exit({unexpected_message, Other})
- end;
- Other -> exit({unexpected_message, Other})
- after TimeoutMillisec ->
- case rabbit_net:getstat(Sock, [StatName]) of
- {ok, [{StatName, NewStatVal}]} ->
- if NewStatVal =/= StatVal ->
- Recurse({NewStatVal, 0});
- SameCount < Threshold ->
- Recurse({NewStatVal, SameCount + 1});
- true ->
- case Handler() of
- stop -> ok;
- continue -> Recurse({NewStatVal, 0})
- end
- end;
- {error, einval} ->
- %% the socket is dead, most likely because the
- %% connection is being shut down -> terminate
- ok;
- {error, Reason} ->
- exit({cannot_get_socket_stats, Reason})
- end
- end.
diff --git a/src/rabbit_intermediate_sup.erl b/src/rabbit_intermediate_sup.erl
deleted file mode 100644
index a9381f20..00000000
--- a/src/rabbit_intermediate_sup.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2013-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_intermediate_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- supervisor2:start_link(?MODULE, []).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, {{one_for_one, 10, 10}, []}}.
diff --git a/src/rabbit_limiter.erl b/src/rabbit_limiter.erl
deleted file mode 100644
index 12a13c00..00000000
--- a/src/rabbit_limiter.erl
+++ /dev/null
@@ -1,435 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% The purpose of the limiter is to stem the flow of messages from
-%% queues to channels, in order to act upon various protocol-level
-%% flow control mechanisms, specifically AMQP 0-9-1's basic.qos
-%% prefetch_count and channel.flow, and AMQP 1.0's link (aka consumer)
-%% credit mechanism.
-%%
-%% Each channel has an associated limiter process, created with
-%% start_link/1, which it passes to queues on consumer creation with
-%% rabbit_amqqueue:basic_consume/9, and rabbit_amqqueue:basic_get/4.
-%% The latter isn't strictly necessary, since basic.get is not
-%% subject to limiting, but it means that whenever a queue knows about
-%% a channel, it also knows about its limiter, which is less fiddly.
-%%
-%% The limiter process holds state that is, in effect, shared between
-%% the channel and all queues from which the channel is
-%% consuming. Essentially all these queues are competing for access to
-%% a single, limited resource - the ability to deliver messages via
-%% the channel - and it is the job of the limiter process to mediate
-%% that access.
-%%
-%% The limiter process is separate from the channel process for two
-%% reasons: separation of concerns, and efficiency. Channels can get
-%% very busy, particularly if they are also dealing with publishes.
-%% With a separate limiter process all the aforementioned access
-%% mediation can take place without touching the channel.
-%%
-%% For efficiency, both the channel and the queues keep some local
-%% state, initialised from the limiter pid with new/1 and client/1,
-%% respectively. In particular this allows them to avoid any
-%% interaction with the limiter process when it is 'inactive', i.e. no
-%% protocol-level flow control is taking place.
-%%
-%% This optimisation does come at the cost of some complexity though:
-%% when a limiter becomes active, the channel needs to inform all its
-%% consumer queues of this change in status. It does this by invoking
-%% rabbit_amqqueue:activate_limit_all/2. Note that there is no inverse
-%% transition, i.e. once a queue has been told about an active
-%% limiter, it is not subsequently told when that limiter becomes
-%% inactive. In practice it is rare for that to happen, though we
-%% could optimise this case in the future.
-%%
-%% In addition, the consumer credit bookkeeping is local to queues, so
-%% it is not necessary to store information about it in the limiter
-%% process. But for abstraction we hide it from the queue behind the
-%% limiter API, and it therefore becomes part of the queue local
-%% state.
-%%
-%% The interactions with the limiter are as follows:
-%%
-%% 1. Channels tell the limiter about basic.qos prefetch counts -
-%% that's what the limit_prefetch/3, unlimit_prefetch/1,
-%% is_prefetch_limited/1, get_prefetch_limit/1 API functions are
-%% about - and channel.flow blocking - that's what block/1,
-%% unblock/1 and is_blocked/1 are for. They also tell the limiter
-%% queue state (via the queue) about consumer credit changes -
-%% that's what credit/4 is for.
-%%
-%% 2. Queues also tell the limiter queue state about the queue
-%% becoming empty (via drained/1) and consumers leaving (via
-%% forget_consumer/2).
-%%
-%% 3. Queues register with the limiter - this happens as part of
-%% activate/1.
-%%
-%% 4. The limiter process maintains an internal counter of 'messages
-%% sent but not yet acknowledged', called the 'volume'.
-%%
-%% 5. Queues ask the limiter for permission (with can_send/3) whenever
-%% they want to deliver a message to a channel. The limiter checks
-%% whether a) the channel isn't blocked by channel.flow, b) the
-%% volume has not yet reached the prefetch limit, and c) whether
-%% the consumer has enough credit. If so it increments the volume
-%% and tells the queue to proceed. Otherwise it marks the queue as
-%% requiring notification (see below) and tells the queue not to
-%% proceed.
-%%
-%% 6. A queue that has been told to proceed (by the return value of
-%% can_send/3) sends the message to the channel. Conversely, a
-%% queue that has been told not to proceed, will not attempt to
-%% deliver that message, or any future messages, to the
-%% channel. This is accomplished by can_send/3 capturing the
-%% outcome in the local state, where it can be accessed with
-%% is_suspended/1.
-%%
-%% 7. When a channel receives an ack it tells the limiter (via ack/2)
-%% how many messages were ack'ed. The limiter process decrements
-%% the volume and if it falls below the prefetch_count then it
-%% notifies (through rabbit_amqqueue:resume/2) all the queues
-%% requiring notification, i.e. all those that had a can_send/3
-%% request denied.
-%%
-%% 8. Upon receipt of such a notification, queues resume delivery to
-%% the channel, i.e. they will once again start asking limiter, as
-%% described in (5).
-%%
-%% 9. When a queue has no more consumers associated with a particular
-%% channel, it deactivates use of the limiter with deactivate/1,
-%% which alters the local state such that no further interactions
-%% with the limiter process take place until a subsequent
-%% activate/1.
-
--module(rabbit_limiter).
-
--behaviour(gen_server2).
-
--export([start_link/0]).
-%% channel API
--export([new/1, limit_prefetch/3, unlimit_prefetch/1, block/1, unblock/1,
- is_prefetch_limited/1, is_blocked/1, is_active/1,
- get_prefetch_limit/1, ack/2, pid/1]).
-%% queue API
--export([client/1, activate/1, can_send/3, resume/1, deactivate/1,
- is_suspended/1, is_consumer_blocked/2, credit/4, drained/1,
- forget_consumer/2]).
-%% callbacks
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
- handle_info/2, prioritise_call/4]).
-
-%%----------------------------------------------------------------------------
-
--record(lstate, {pid, prefetch_limited, blocked}).
--record(qstate, {pid, state, credits}).
-
--ifdef(use_specs).
-
--type(lstate() :: #lstate{pid :: pid(),
- prefetch_limited :: boolean(),
- blocked :: boolean()}).
--type(qstate() :: #qstate{pid :: pid(),
- state :: 'dormant' | 'active' | 'suspended'}).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(new/1 :: (pid()) -> lstate()).
-
--spec(limit_prefetch/3 :: (lstate(), non_neg_integer(), non_neg_integer())
- -> lstate()).
--spec(unlimit_prefetch/1 :: (lstate()) -> lstate()).
--spec(block/1 :: (lstate()) -> lstate()).
--spec(unblock/1 :: (lstate()) -> lstate()).
--spec(is_prefetch_limited/1 :: (lstate()) -> boolean()).
--spec(is_blocked/1 :: (lstate()) -> boolean()).
--spec(is_active/1 :: (lstate()) -> boolean()).
--spec(get_prefetch_limit/1 :: (lstate()) -> non_neg_integer()).
--spec(ack/2 :: (lstate(), non_neg_integer()) -> 'ok').
--spec(pid/1 :: (lstate()) -> pid()).
-
--spec(client/1 :: (pid()) -> qstate()).
--spec(activate/1 :: (qstate()) -> qstate()).
--spec(can_send/3 :: (qstate(), boolean(), rabbit_types:ctag()) ->
- {'continue' | 'suspend', qstate()}).
--spec(resume/1 :: (qstate()) -> qstate()).
--spec(deactivate/1 :: (qstate()) -> qstate()).
--spec(is_suspended/1 :: (qstate()) -> boolean()).
--spec(is_consumer_blocked/2 :: (qstate(), rabbit_types:ctag()) -> boolean()).
--spec(credit/4 :: (qstate(), rabbit_types:ctag(), non_neg_integer(), boolean())
- -> qstate()).
--spec(drained/1 :: (qstate())
- -> {[{rabbit_types:ctag(), non_neg_integer()}], qstate()}).
--spec(forget_consumer/2 :: (qstate(), rabbit_types:ctag()) -> qstate()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--record(lim, {prefetch_count = 0,
- ch_pid,
- blocked = false,
- queues = orddict:new(), % QPid -> {MonitorRef, Notify}
- volume = 0}).
-%% 'Notify' is a boolean that indicates whether a queue should be
-%% notified of a change in the limit or volume that may allow it to
-%% deliver more messages via the limiter's channel.
-
--record(credit, {credit = 0, drain = false}).
-
-%%----------------------------------------------------------------------------
-%% API
-%%----------------------------------------------------------------------------
-
-start_link() -> gen_server2:start_link(?MODULE, [], []).
-
-new(Pid) ->
- %% this a 'call' to ensure that it is invoked at most once.
- ok = gen_server:call(Pid, {new, self()}),
- #lstate{pid = Pid, prefetch_limited = false, blocked = false}.
-
-limit_prefetch(L, PrefetchCount, UnackedCount) when PrefetchCount > 0 ->
- ok = gen_server:call(L#lstate.pid,
- {limit_prefetch, PrefetchCount, UnackedCount}),
- L#lstate{prefetch_limited = true}.
-
-unlimit_prefetch(L) ->
- ok = gen_server:call(L#lstate.pid, unlimit_prefetch),
- L#lstate{prefetch_limited = false}.
-
-block(L) ->
- ok = gen_server:call(L#lstate.pid, block),
- L#lstate{blocked = true}.
-
-unblock(L) ->
- ok = gen_server:call(L#lstate.pid, unblock),
- L#lstate{blocked = false}.
-
-is_prefetch_limited(#lstate{prefetch_limited = Limited}) -> Limited.
-
-is_blocked(#lstate{blocked = Blocked}) -> Blocked.
-
-is_active(L) -> is_prefetch_limited(L) orelse is_blocked(L).
-
-get_prefetch_limit(#lstate{prefetch_limited = false}) -> 0;
-get_prefetch_limit(L) -> gen_server:call(L#lstate.pid, get_prefetch_limit).
-
-ack(#lstate{prefetch_limited = false}, _AckCount) -> ok;
-ack(L, AckCount) -> gen_server:cast(L#lstate.pid, {ack, AckCount}).
-
-pid(#lstate{pid = Pid}) -> Pid.
-
-client(Pid) -> #qstate{pid = Pid, state = dormant, credits = gb_trees:empty()}.
-
-activate(L = #qstate{state = dormant}) ->
- ok = gen_server:cast(L#qstate.pid, {register, self()}),
- L#qstate{state = active};
-activate(L) -> L.
-
-can_send(L = #qstate{pid = Pid, state = State, credits = Credits},
- AckRequired, CTag) ->
- case is_consumer_blocked(L, CTag) of
- false -> case (State =/= active orelse
- safe_call(Pid, {can_send, self(), AckRequired}, true)) of
- true -> {continue, L#qstate{
- credits = record_send_q(CTag, Credits)}};
- false -> {suspend, L#qstate{state = suspended}}
- end;
- true -> {suspend, L}
- end.
-
-safe_call(Pid, Msg, ExitValue) ->
- rabbit_misc:with_exit_handler(
- fun () -> ExitValue end,
- fun () -> gen_server2:call(Pid, Msg, infinity) end).
-
-resume(L = #qstate{state = suspended}) ->
- L#qstate{state = active};
-resume(L) -> L.
-
-deactivate(L = #qstate{state = dormant}) -> L;
-deactivate(L) ->
- ok = gen_server:cast(L#qstate.pid, {unregister, self()}),
- L#qstate{state = dormant}.
-
-is_suspended(#qstate{state = suspended}) -> true;
-is_suspended(#qstate{}) -> false.
-
-is_consumer_blocked(#qstate{credits = Credits}, CTag) ->
- case gb_trees:lookup(CTag, Credits) of
- {value, #credit{credit = C}} when C > 0 -> false;
- {value, #credit{}} -> true;
- none -> false
- end.
-
-credit(Limiter = #qstate{credits = Credits}, CTag, Credit, Drain) ->
- Limiter#qstate{credits = update_credit(CTag, Credit, Drain, Credits)}.
-
-drained(Limiter = #qstate{credits = Credits}) ->
- {CTagCredits, Credits2} =
- rabbit_misc:gb_trees_fold(
- fun (CTag, #credit{credit = C, drain = true}, {Acc, Creds0}) ->
- {[{CTag, C} | Acc], update_credit(CTag, 0, false, Creds0)};
- (_CTag, #credit{credit = _C, drain = false}, {Acc, Creds0}) ->
- {Acc, Creds0}
- end, {[], Credits}, Credits),
- {CTagCredits, Limiter#qstate{credits = Credits2}}.
-
-forget_consumer(Limiter = #qstate{credits = Credits}, CTag) ->
- Limiter#qstate{credits = gb_trees:delete_any(CTag, Credits)}.
-
-%%----------------------------------------------------------------------------
-%% Queue-local code
-%%----------------------------------------------------------------------------
-
-%% We want to do all the AMQP 1.0-ish link level credit calculations
-%% in the queue (to do them elsewhere introduces a ton of
-%% races). However, it's a big chunk of code that is conceptually very
-%% linked to the limiter concept. So we get the queue to hold a bit of
-%% state for us (#qstate.credits), and maintain a fiction that the
-%% limiter is making the decisions...
-
-record_send_q(CTag, Credits) ->
- case gb_trees:lookup(CTag, Credits) of
- {value, #credit{credit = Credit, drain = Drain}} ->
- update_credit(CTag, Credit - 1, Drain, Credits);
- none ->
- Credits
- end.
-
-update_credit(CTag, Credit, Drain, Credits) ->
- %% Using up all credit implies no need to send a 'drained' event
- Drain1 = Drain andalso Credit > 0,
- gb_trees:enter(CTag, #credit{credit = Credit, drain = Drain1}, Credits).
-
-%%----------------------------------------------------------------------------
-%% gen_server callbacks
-%%----------------------------------------------------------------------------
-
-init([]) -> {ok, #lim{}}.
-
-prioritise_call(get_prefetch_limit, _From, _Len, _State) -> 9;
-prioritise_call(_Msg, _From, _Len, _State) -> 0.
-
-handle_call({new, ChPid}, _From, State = #lim{ch_pid = undefined}) ->
- {reply, ok, State#lim{ch_pid = ChPid}};
-
-handle_call({limit_prefetch, PrefetchCount, UnackedCount}, _From,
- State = #lim{prefetch_count = 0}) ->
- {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount,
- volume = UnackedCount})};
-handle_call({limit_prefetch, PrefetchCount, _UnackedCount}, _From, State) ->
- {reply, ok, maybe_notify(State, State#lim{prefetch_count = PrefetchCount})};
-
-handle_call(unlimit_prefetch, _From, State) ->
- {reply, ok, maybe_notify(State, State#lim{prefetch_count = 0,
- volume = 0})};
-
-handle_call(block, _From, State) ->
- {reply, ok, State#lim{blocked = true}};
-
-handle_call(unblock, _From, State) ->
- {reply, ok, maybe_notify(State, State#lim{blocked = false})};
-
-handle_call(get_prefetch_limit, _From,
- State = #lim{prefetch_count = PrefetchCount}) ->
- {reply, PrefetchCount, State};
-
-handle_call({can_send, QPid, _AckRequired}, _From,
- State = #lim{blocked = true}) ->
- {reply, false, limit_queue(QPid, State)};
-handle_call({can_send, QPid, AckRequired}, _From,
- State = #lim{volume = Volume}) ->
- case prefetch_limit_reached(State) of
- true -> {reply, false, limit_queue(QPid, State)};
- false -> {reply, true, State#lim{volume = if AckRequired -> Volume + 1;
- true -> Volume
- end}}
- end.
-
-handle_cast({ack, Count}, State = #lim{volume = Volume}) ->
- NewVolume = if Volume == 0 -> 0;
- true -> Volume - Count
- end,
- {noreply, maybe_notify(State, State#lim{volume = NewVolume})};
-
-handle_cast({register, QPid}, State) ->
- {noreply, remember_queue(QPid, State)};
-
-handle_cast({unregister, QPid}, State) ->
- {noreply, forget_queue(QPid, State)}.
-
-handle_info({'DOWN', _MonitorRef, _Type, QPid, _Info}, State) ->
- {noreply, forget_queue(QPid, State)}.
-
-terminate(_, _) ->
- ok.
-
-code_change(_, State, _) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-%% Internal plumbing
-%%----------------------------------------------------------------------------
-
-maybe_notify(OldState, NewState) ->
- case (prefetch_limit_reached(OldState) orelse blocked(OldState)) andalso
- not (prefetch_limit_reached(NewState) orelse blocked(NewState)) of
- true -> notify_queues(NewState);
- false -> NewState
- end.
-
-prefetch_limit_reached(#lim{prefetch_count = Limit, volume = Volume}) ->
- Limit =/= 0 andalso Volume >= Limit.
-
-blocked(#lim{blocked = Blocked}) -> Blocked.
-
-remember_queue(QPid, State = #lim{queues = Queues}) ->
- case orddict:is_key(QPid, Queues) of
- false -> MRef = erlang:monitor(process, QPid),
- State#lim{queues = orddict:store(QPid, {MRef, false}, Queues)};
- true -> State
- end.
-
-forget_queue(QPid, State = #lim{queues = Queues}) ->
- case orddict:find(QPid, Queues) of
- {ok, {MRef, _}} -> true = erlang:demonitor(MRef),
- State#lim{queues = orddict:erase(QPid, Queues)};
- error -> State
- end.
-
-limit_queue(QPid, State = #lim{queues = Queues}) ->
- UpdateFun = fun ({MRef, _}) -> {MRef, true} end,
- State#lim{queues = orddict:update(QPid, UpdateFun, Queues)}.
-
-notify_queues(State = #lim{ch_pid = ChPid, queues = Queues}) ->
- {QList, NewQueues} =
- orddict:fold(fun (_QPid, {_, false}, Acc) -> Acc;
- (QPid, {MRef, true}, {L, D}) ->
- {[QPid | L], orddict:store(QPid, {MRef, false}, D)}
- end, {[], Queues}, Queues),
- case length(QList) of
- 0 -> ok;
- 1 -> ok = rabbit_amqqueue:resume(hd(QList), ChPid); %% common case
- L ->
- %% We randomly vary the position of queues in the list,
- %% thus ensuring that each queue has an equal chance of
- %% being notified first.
- {L1, L2} = lists:split(random:uniform(L), QList),
- [[ok = rabbit_amqqueue:resume(Q, ChPid) || Q <- L3]
- || L3 <- [L2, L1]],
- ok
- end,
- State#lim{queues = NewQueues}.
diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl
deleted file mode 100644
index 2e3a1bbb..00000000
--- a/src/rabbit_log.erl
+++ /dev/null
@@ -1,110 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_log).
-
--behaviour(gen_server).
-
--export([start_link/0]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--export([log/3, log/4, info/1, info/2, warning/1, warning/2, error/1, error/2]).
-
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([level/0]).
-
--type(category() :: atom()).
--type(level() :: 'info' | 'warning' | 'error').
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-
--spec(log/3 :: (category(), level(), string()) -> 'ok').
--spec(log/4 :: (category(), level(), string(), [any()]) -> 'ok').
-
--spec(info/1 :: (string()) -> 'ok').
--spec(info/2 :: (string(), [any()]) -> 'ok').
--spec(warning/1 :: (string()) -> 'ok').
--spec(warning/2 :: (string(), [any()]) -> 'ok').
--spec(error/1 :: (string()) -> 'ok').
--spec(error/2 :: (string(), [any()]) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-start_link() ->
- gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
-
-log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
-
-log(Category, Level, Fmt, Args) when is_list(Args) ->
- gen_server:cast(?SERVER, {log, Category, Level, Fmt, Args}).
-
-info(Fmt) -> log(default, info, Fmt).
-info(Fmt, Args) -> log(default, info, Fmt, Args).
-warning(Fmt) -> log(default, warning, Fmt).
-warning(Fmt, Args) -> log(default, warning, Fmt, Args).
-error(Fmt) -> log(default, error, Fmt).
-error(Fmt, Args) -> log(default, error, Fmt, Args).
-
-%%--------------------------------------------------------------------
-
-init([]) ->
- {ok, CatLevelList} = application:get_env(log_levels),
- CatLevels = [{Cat, level(Level)} || {Cat, Level} <- CatLevelList],
- {ok, orddict:from_list(CatLevels)}.
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast({log, Category, Level, Fmt, Args}, CatLevels) ->
- CatLevel = case orddict:find(Category, CatLevels) of
- {ok, L} -> L;
- error -> level(info)
- end,
- case level(Level) =< CatLevel of
- false -> ok;
- true -> (case Level of
- info -> fun error_logger:info_msg/2;
- warning -> fun error_logger:warning_msg/2;
- error -> fun error_logger:error_msg/2
- end)(Fmt, Args)
- end,
- {noreply, CatLevels};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%--------------------------------------------------------------------
-
-level(info) -> 3;
-level(warning) -> 2;
-level(error) -> 1;
-level(none) -> 0.
diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl
deleted file mode 100644
index 495f6fdd..00000000
--- a/src/rabbit_memory_monitor.erl
+++ /dev/null
@@ -1,257 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-
-%% This module handles the node-wide memory statistics.
-%% It receives statistics from all queues, counts the desired
-%% queue length (in seconds), and sends this information back to
-%% queues.
-
--module(rabbit_memory_monitor).
-
--behaviour(gen_server2).
-
--export([start_link/0, register/2, deregister/1,
- report_ram_duration/2, stop/0]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(process, {pid, reported, sent, callback, monitor}).
-
--record(state, {timer, %% 'internal_update' timer
- queue_durations, %% ets #process
- queue_duration_sum, %% sum of all queue_durations
- queue_duration_count, %% number of elements in sum
- desired_duration %% the desired queue duration
- }).
-
--define(SERVER, ?MODULE).
--define(DEFAULT_UPDATE_INTERVAL, 2500).
--define(TABLE_NAME, ?MODULE).
-
-%% Because we have a feedback loop here, we need to ensure that we
-%% have some space for when the queues don't quite respond as fast as
-%% we would like, or when there is buffering going on in other parts
-%% of the system. In short, we aim to stay some distance away from
-%% when the memory alarms will go off, which cause backpressure (of
-%% some sort) on producers. Note that all other Thresholds are
-%% relative to this scaling.
--define(MEMORY_LIMIT_SCALING, 0.4).
-
--define(LIMIT_THRESHOLD, 0.5). %% don't limit queues when mem use is < this
-
-%% If all queues are pushed to disk (duration 0), then the sum of
-%% their reported lengths will be 0. If memory then becomes available,
-%% unless we manually intervene, the sum will remain 0, and the queues
-%% will never get a non-zero duration. Thus when the mem use is <
-%% SUM_INC_THRESHOLD, increase the sum artificially by SUM_INC_AMOUNT.
--define(SUM_INC_THRESHOLD, 0.95).
--define(SUM_INC_AMOUNT, 1.0).
-
--define(EPSILON, 0.000001). %% less than this and we clamp to 0
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(register/2 :: (pid(), {atom(),atom(),[any()]}) -> 'ok').
--spec(deregister/1 :: (pid()) -> 'ok').
--spec(report_ram_duration/2 ::
- (pid(), float() | 'infinity') -> number() | 'infinity').
--spec(stop/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server2:start_link({local, ?SERVER}, ?MODULE, [], []).
-
-register(Pid, MFA = {_M, _F, _A}) ->
- gen_server2:call(?SERVER, {register, Pid, MFA}, infinity).
-
-deregister(Pid) ->
- gen_server2:cast(?SERVER, {deregister, Pid}).
-
-report_ram_duration(Pid, QueueDuration) ->
- gen_server2:call(?SERVER,
- {report_ram_duration, Pid, QueueDuration}, infinity).
-
-stop() ->
- gen_server2:cast(?SERVER, stop).
-
-%%----------------------------------------------------------------------------
-%% Gen_server callbacks
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, TRef} = timer:send_interval(?DEFAULT_UPDATE_INTERVAL, update),
-
- Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]),
-
- {ok, internal_update(
- #state { timer = TRef,
- queue_durations = Ets,
- queue_duration_sum = 0.0,
- queue_duration_count = 0,
- desired_duration = infinity })}.
-
-handle_call({report_ram_duration, Pid, QueueDuration}, From,
- State = #state { queue_duration_sum = Sum,
- queue_duration_count = Count,
- queue_durations = Durations,
- desired_duration = SendDuration }) ->
-
- [Proc = #process { reported = PrevQueueDuration }] =
- ets:lookup(Durations, Pid),
-
- gen_server2:reply(From, SendDuration),
-
- {Sum1, Count1} =
- case {PrevQueueDuration, QueueDuration} of
- {infinity, infinity} -> {Sum, Count};
- {infinity, _} -> {Sum + QueueDuration, Count + 1};
- {_, infinity} -> {Sum - PrevQueueDuration, Count - 1};
- {_, _} -> {Sum - PrevQueueDuration + QueueDuration,
- Count}
- end,
- true = ets:insert(Durations, Proc #process { reported = QueueDuration,
- sent = SendDuration }),
- {noreply, State #state { queue_duration_sum = zero_clamp(Sum1),
- queue_duration_count = Count1 }};
-
-handle_call({register, Pid, MFA}, _From,
- State = #state { queue_durations = Durations }) ->
- MRef = erlang:monitor(process, Pid),
- true = ets:insert(Durations, #process { pid = Pid, reported = infinity,
- sent = infinity, callback = MFA,
- monitor = MRef }),
- {reply, ok, State};
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast({deregister, Pid}, State) ->
- {noreply, internal_deregister(Pid, true, State)};
-
-handle_cast(stop, State) ->
- {stop, normal, State};
-
-handle_cast(_Request, State) ->
- {noreply, State}.
-
-handle_info(update, State) ->
- {noreply, internal_update(State)};
-
-handle_info({'DOWN', _MRef, process, Pid, _Reason}, State) ->
- {noreply, internal_deregister(Pid, false, State)};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, #state { timer = TRef }) ->
- timer:cancel(TRef),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-%%----------------------------------------------------------------------------
-%% Internal functions
-%%----------------------------------------------------------------------------
-
-zero_clamp(Sum) when Sum < ?EPSILON -> 0.0;
-zero_clamp(Sum) -> Sum.
-
-internal_deregister(Pid, Demonitor,
- State = #state { queue_duration_sum = Sum,
- queue_duration_count = Count,
- queue_durations = Durations }) ->
- case ets:lookup(Durations, Pid) of
- [] -> State;
- [#process { reported = PrevQueueDuration, monitor = MRef }] ->
- true = case Demonitor of
- true -> erlang:demonitor(MRef);
- false -> true
- end,
- {Sum1, Count1} =
- case PrevQueueDuration of
- infinity -> {Sum, Count};
- _ -> {zero_clamp(Sum - PrevQueueDuration),
- Count - 1}
- end,
- true = ets:delete(Durations, Pid),
- State #state { queue_duration_sum = Sum1,
- queue_duration_count = Count1 }
- end.
-
-internal_update(State = #state { queue_durations = Durations,
- desired_duration = DesiredDurationAvg,
- queue_duration_sum = Sum,
- queue_duration_count = Count }) ->
- MemoryLimit = ?MEMORY_LIMIT_SCALING * vm_memory_monitor:get_memory_limit(),
- MemoryRatio = case MemoryLimit > 0.0 of
- true -> erlang:memory(total) / MemoryLimit;
- false -> infinity
- end,
- DesiredDurationAvg1 =
- if MemoryRatio =:= infinity ->
- 0.0;
- MemoryRatio < ?LIMIT_THRESHOLD orelse Count == 0 ->
- infinity;
- MemoryRatio < ?SUM_INC_THRESHOLD ->
- ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio;
- true ->
- (Sum / Count) / MemoryRatio
- end,
- State1 = State #state { desired_duration = DesiredDurationAvg1 },
-
- %% only inform queues immediately if the desired duration has
- %% decreased
- case DesiredDurationAvg1 == infinity orelse
- (DesiredDurationAvg /= infinity andalso
- DesiredDurationAvg1 >= DesiredDurationAvg) of
- true ->
- ok;
- false ->
- true =
- ets:foldl(
- fun (Proc = #process { reported = QueueDuration,
- sent = PrevSendDuration,
- callback = {M, F, A} }, true) ->
- case should_send(QueueDuration, PrevSendDuration,
- DesiredDurationAvg1) of
- true -> ok = erlang:apply(
- M, F, A ++ [DesiredDurationAvg1]),
- ets:insert(
- Durations,
- Proc #process {
- sent = DesiredDurationAvg1});
- false -> true
- end
- end, true, Durations)
- end,
- State1.
-
-should_send(infinity, infinity, _) -> true;
-should_send(infinity, D, DD) -> DD < D;
-should_send(D, infinity, DD) -> DD < D;
-should_send(D1, D2, DD) -> DD < lists:min([D1, D2]).
diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl
deleted file mode 100644
index ab466c3c..00000000
--- a/src/rabbit_mirror_queue_coordinator.erl
+++ /dev/null
@@ -1,427 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_coordinator).
-
--export([start_link/4, get_gm/1, ensure_monitoring/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([joined/2, members_changed/4, handle_msg/3]).
-
--behaviour(gen_server2).
--behaviour(gm).
-
--include("rabbit.hrl").
--include("gm_specs.hrl").
-
--record(state, { q,
- gm,
- monitors,
- death_fun,
- depth_fun
- }).
-
--ifdef(use_specs).
-
--spec(start_link/4 :: (rabbit_types:amqqueue(), pid() | 'undefined',
- rabbit_mirror_queue_master:death_fun(),
- rabbit_mirror_queue_master:depth_fun()) ->
- rabbit_types:ok_pid_or_error()).
--spec(get_gm/1 :: (pid()) -> pid()).
--spec(ensure_monitoring/2 :: (pid(), [pid()]) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%%
-%% Mirror Queues
-%%
-%% A queue with mirrors consists of the following:
-%%
-%% #amqqueue{ pid, slave_pids }
-%% | |
-%% +----------+ +-------+--------------+-----------...etc...
-%% | | |
-%% V V V
-%% amqqueue_process---+ slave-----+ slave-----+ ...etc...
-%% | BQ = master----+ | | BQ = vq | | BQ = vq |
-%% | | BQ = vq | | +-+-------+ +-+-------+
-%% | +-+-------+ | | |
-%% +-++-----|---------+ | | (some details elided)
-%% || | | |
-%% || coordinator-+ | |
-%% || +-+---------+ | |
-%% || | | |
-%% || gm-+ -- -- -- -- gm-+- -- -- -- gm-+- -- --...etc...
-%% || +--+ +--+ +--+
-%% ||
-%% consumers
-%%
-%% The master is merely an implementation of bq, and thus is invoked
-%% through the normal bq interface by the amqqueue_process. The slaves
-%% meanwhile are processes in their own right (as is the
-%% coordinator). The coordinator and all slaves belong to the same gm
-%% group. Every member of a gm group receives messages sent to the gm
-%% group. Because the master is the bq of amqqueue_process, it doesn't
-%% have sole control over its mailbox, and as a result, the master
-%% itself cannot be passed messages directly (well, it could by via
-%% the amqqueue:run_backing_queue callback but that would induce
-%% additional unnecessary loading on the master queue process), yet it
-%% needs to react to gm events, such as the death of slaves. Thus the
-%% master creates the coordinator, and it is the coordinator that is
-%% the gm callback module and event handler for the master.
-%%
-%% Consumers are only attached to the master. Thus the master is
-%% responsible for informing all slaves when messages are fetched from
-%% the bq, when they're acked, and when they're requeued.
-%%
-%% The basic goal is to ensure that all slaves performs actions on
-%% their bqs in the same order as the master. Thus the master
-%% intercepts all events going to its bq, and suitably broadcasts
-%% these events on the gm. The slaves thus receive two streams of
-%% events: one stream is via the gm, and one stream is from channels
-%% directly. Whilst the stream via gm is guaranteed to be consistently
-%% seen by all slaves, the same is not true of the stream via
-%% channels. For example, in the event of an unexpected death of a
-%% channel during a publish, only some of the mirrors may receive that
-%% publish. As a result of this problem, the messages broadcast over
-%% the gm contain published content, and thus slaves can operate
-%% successfully on messages that they only receive via the gm.
-%%
-%% The key purpose of also sending messages directly from the channels
-%% to the slaves is that without this, in the event of the death of
-%% the master, messages could be lost until a suitable slave is
-%% promoted. However, that is not the only reason. A slave cannot send
-%% confirms for a message until it has seen it from the
-%% channel. Otherwise, it might send a confirm to a channel for a
-%% message that it might *never* receive from that channel. This can
-%% happen because new slaves join the gm ring (and thus receive
-%% messages from the master) before inserting themselves in the
-%% queue's mnesia record (which is what channels look at for routing).
-%% As it turns out, channels will simply ignore such bogus confirms,
-%% but relying on that would introduce a dangerously tight coupling.
-%%
-%% Hence the slaves have to wait until they've seen both the publish
-%% via gm, and the publish via the channel before they issue the
-%% confirm. Either form of publish can arrive first, and a slave can
-%% be upgraded to the master at any point during this
-%% process. Confirms continue to be issued correctly, however.
-%%
-%% Because the slave is a full process, it impersonates parts of the
-%% amqqueue API. However, it does not need to implement all parts: for
-%% example, no ack or consumer-related message can arrive directly at
-%% a slave from a channel: it is only publishes that pass both
-%% directly to the slaves and go via gm.
-%%
-%% Slaves can be added dynamically. When this occurs, there is no
-%% attempt made to sync the current contents of the master with the
-%% new slave, thus the slave will start empty, regardless of the state
-%% of the master. Thus the slave needs to be able to detect and ignore
-%% operations which are for messages it has not received: because of
-%% the strict FIFO nature of queues in general, this is
-%% straightforward - all new publishes that the new slave receives via
-%% gm should be processed as normal, but fetches which are for
-%% messages the slave has never seen should be ignored. Similarly,
-%% acks for messages the slave never fetched should be
-%% ignored. Similarly, we don't republish rejected messages that we
-%% haven't seen. Eventually, as the master is consumed from, the
-%% messages at the head of the queue which were there before the slave
-%% joined will disappear, and the slave will become fully synced with
-%% the state of the master.
-%%
-%% The detection of the sync-status is based on the depth of the BQs,
-%% where the depth is defined as the sum of the length of the BQ (as
-%% per BQ:len) and the messages pending an acknowledgement. When the
-%% depth of the slave is equal to the master's, then the slave is
-%% synchronised. We only store the difference between the two for
-%% simplicity. Comparing the length is not enough since we need to
-%% take into account rejected messages which will make it back into
-%% the master queue but can't go back in the slave, since we don't
-%% want "holes" in the slave queue. Note that the depth, and the
-%% length likewise, must always be shorter on the slave - we assert
-%% that in various places. In case slaves are joined to an empty queue
-%% which only goes on to receive publishes, they start by asking the
-%% master to broadcast its depth. This is enough for slaves to always
-%% be able to work out when their head does not differ from the master
-%% (and is much simpler and cheaper than getting the master to hang on
-%% to the guid of the msg at the head of its queue). When a slave is
-%% promoted to a master, it unilaterally broadcasts its depth, in
-%% order to solve the problem of depth requests from new slaves being
-%% unanswered by a dead master.
-%%
-%% Obviously, due to the async nature of communication across gm, the
-%% slaves can fall behind. This does not matter from a sync pov: if
-%% they fall behind and the master dies then a) no publishes are lost
-%% because all publishes go to all mirrors anyway; b) the worst that
-%% happens is that acks get lost and so messages come back to
-%% life. This is no worse than normal given you never get confirmation
-%% that an ack has been received (not quite true with QoS-prefetch,
-%% but close enough for jazz).
-%%
-%% Because acktags are issued by the bq independently, and because
-%% there is no requirement for the master and all slaves to use the
-%% same bq, all references to msgs going over gm is by msg_id. Thus
-%% upon acking, the master must convert the acktags back to msg_ids
-%% (which happens to be what bq:ack returns), then sends the msg_ids
-%% over gm, the slaves must convert the msg_ids to acktags (a mapping
-%% the slaves themselves must maintain).
-%%
-%% When the master dies, a slave gets promoted. This will be the
-%% eldest slave, and thus the hope is that that slave is most likely
-%% to be sync'd with the master. The design of gm is that the
-%% notification of the death of the master will only appear once all
-%% messages in-flight from the master have been fully delivered to all
-%% members of the gm group. Thus at this point, the slave that gets
-%% promoted cannot broadcast different events in a different order
-%% than the master for the same msgs: there is no possibility for the
-%% same msg to be processed by the old master and the new master - if
-%% it was processed by the old master then it will have been processed
-%% by the slave before the slave was promoted, and vice versa.
-%%
-%% Upon promotion, all msgs pending acks are requeued as normal, the
-%% slave constructs state suitable for use in the master module, and
-%% then dynamically changes into an amqqueue_process with the master
-%% as the bq, and the slave's bq as the master's bq. Thus the very
-%% same process that was the slave is now a full amqqueue_process.
-%%
-%% It is important that we avoid memory leaks due to the death of
-%% senders (i.e. channels) and partial publications. A sender
-%% publishing a message may fail mid way through the publish and thus
-%% only some of the mirrors will receive the message. We need the
-%% mirrors to be able to detect this and tidy up as necessary to avoid
-%% leaks. If we just had the master monitoring all senders then we
-%% would have the possibility that a sender appears and only sends the
-%% message to a few of the slaves before dying. Those slaves would
-%% then hold on to the message, assuming they'll receive some
-%% instruction eventually from the master. Thus we have both slaves
-%% and the master monitor all senders they become aware of. But there
-%% is a race: if the slave receives a DOWN of a sender, how does it
-%% know whether or not the master is going to send it instructions
-%% regarding those messages?
-%%
-%% Whilst the master monitors senders, it can't access its mailbox
-%% directly, so it delegates monitoring to the coordinator. When the
-%% coordinator receives a DOWN message from a sender, it informs the
-%% master via a callback. This allows the master to do any tidying
-%% necessary, but more importantly allows the master to broadcast a
-%% sender_death message to all the slaves, saying the sender has
-%% died. Once the slaves receive the sender_death message, they know
-%% that they're not going to receive any more instructions from the gm
-%% regarding that sender, thus they throw away any publications from
-%% the sender pending publication instructions. However, it is
-%% possible that the coordinator receives the DOWN and communicates
-%% that to the master before the master has finished receiving and
-%% processing publishes from the sender. This turns out not to be a
-%% problem: the sender has actually died, and so will not need to
-%% receive confirms or other feedback, and should further messages be
-%% "received" from the sender, the master will ask the coordinator to
-%% set up a new monitor, and will continue to process the messages
-%% normally. Slaves may thus receive publishes via gm from previously
-%% declared "dead" senders, but again, this is fine: should the slave
-%% have just thrown out the message it had received directly from the
-%% sender (due to receiving a sender_death message via gm), it will be
-%% able to cope with the publication purely from the master via gm.
-%%
-%% When a slave receives a DOWN message for a sender, if it has not
-%% received the sender_death message from the master via gm already,
-%% then it will wait 20 seconds before broadcasting a request for
-%% confirmation from the master that the sender really has died.
-%% Should a sender have only sent a publish to slaves, this allows
-%% slaves to inform the master of the previous existence of the
-%% sender. The master will thus monitor the sender, receive the DOWN,
-%% and subsequently broadcast the sender_death message, allowing the
-%% slaves to tidy up. This process can repeat for the same sender:
-%% consider one slave receives the publication, then the DOWN, then
-%% asks for confirmation of death, then the master broadcasts the
-%% sender_death message. Only then does another slave receive the
-%% publication and thus set up its monitoring. Eventually that slave
-%% too will receive the DOWN, ask for confirmation and the master will
-%% monitor the sender again, receive another DOWN, and send out
-%% another sender_death message. Given the 20 second delay before
-%% requesting death confirmation, this is highly unlikely, but it is a
-%% possibility.
-%%
-%% When the 20 second timer expires, the slave first checks to see
-%% whether it still needs confirmation of the death before requesting
-%% it. This prevents unnecessary traffic on gm as it allows one
-%% broadcast of the sender_death message to satisfy many slaves.
-%%
-%% If we consider the promotion of a slave at this point, we have two
-%% possibilities: that of the slave that has received the DOWN and is
-%% thus waiting for confirmation from the master that the sender
-%% really is down; and that of the slave that has not received the
-%% DOWN. In the first case, in the act of promotion to master, the new
-%% master will monitor again the dead sender, and after it has
-%% finished promoting itself, it should find another DOWN waiting,
-%% which it will then broadcast. This will allow slaves to tidy up as
-%% normal. In the second case, we have the possibility that
-%% confirmation-of-sender-death request has been broadcast, but that
-%% it was broadcast before the master failed, and that the slave being
-%% promoted does not know anything about that sender, and so will not
-%% monitor it on promotion. Thus a slave that broadcasts such a
-%% request, at the point of broadcasting it, recurses, setting another
-%% 20 second timer. As before, on expiry of the timer, the slaves
-%% checks to see whether it still has not received a sender_death
-%% message for the dead sender, and if not, broadcasts a death
-%% confirmation request. Thus this ensures that even when a master
-%% dies and the new slave has no knowledge of the dead sender, it will
-%% eventually receive a death confirmation request, shall monitor the
-%% dead sender, receive the DOWN and broadcast the sender_death
-%% message.
-%%
-%% The preceding commentary deals with the possibility of slaves
-%% receiving publications from senders which the master does not, and
-%% the need to prevent memory leaks in such scenarios. The inverse is
-%% also possible: a partial publication may cause only the master to
-%% receive a publication. It will then publish the message via gm. The
-%% slaves will receive it via gm, will publish it to their BQ and will
-%% set up monitoring on the sender. They will then receive the DOWN
-%% message and the master will eventually publish the corresponding
-%% sender_death message. The slave will then be able to tidy up its
-%% state as normal.
-%%
-%% Recovery of mirrored queues is straightforward: as nodes die, the
-%% remaining nodes record this, and eventually a situation is reached
-%% in which only one node is alive, which is the master. This is the
-%% only node which, upon recovery, will resurrect a mirrored queue:
-%% nodes which die and then rejoin as a slave will start off empty as
-%% if they have no mirrored content at all. This is not surprising: to
-%% achieve anything more sophisticated would require the master and
-%% recovering slave to be able to check to see whether they agree on
-%% the last seen state of the queue: checking depth alone is not
-%% sufficient in this case.
-%%
-%% For more documentation see the comments in bug 23554.
-%%
-%%----------------------------------------------------------------------------
-
-start_link(Queue, GM, DeathFun, DepthFun) ->
- gen_server2:start_link(?MODULE, [Queue, GM, DeathFun, DepthFun], []).
-
-get_gm(CPid) ->
- gen_server2:call(CPid, get_gm, infinity).
-
-ensure_monitoring(CPid, Pids) ->
- gen_server2:cast(CPid, {ensure_monitoring, Pids}).
-
-%% ---------------------------------------------------------------------------
-%% gen_server
-%% ---------------------------------------------------------------------------
-
-init([#amqqueue { name = QueueName } = Q, GM, DeathFun, DepthFun]) ->
- GM1 = case GM of
- undefined ->
- {ok, GM2} = gm:start_link(
- QueueName, ?MODULE, [self()],
- fun rabbit_misc:execute_mnesia_transaction/1),
- receive {joined, GM2, _Members} ->
- ok
- end,
- GM2;
- _ ->
- true = link(GM),
- GM
- end,
- {ok, #state { q = Q,
- gm = GM1,
- monitors = pmon:new(),
- death_fun = DeathFun,
- depth_fun = DepthFun },
- hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-handle_call(get_gm, _From, State = #state { gm = GM }) ->
- reply(GM, State).
-
-handle_cast({gm_deaths, LiveGMPids},
- State = #state { q = #amqqueue { name = QueueName, pid = MPid } })
- when node(MPid) =:= node() ->
- case rabbit_mirror_queue_misc:remove_from_queue(
- QueueName, MPid, LiveGMPids) of
- {ok, MPid, DeadPids} ->
- rabbit_mirror_queue_misc:report_deaths(MPid, true, QueueName,
- DeadPids),
- noreply(State);
- {error, not_found} ->
- {stop, normal, State}
- end;
-
-handle_cast(request_depth, State = #state { depth_fun = DepthFun }) ->
- ok = DepthFun(),
- noreply(State);
-
-handle_cast({ensure_monitoring, Pids}, State = #state { monitors = Mons }) ->
- noreply(State #state { monitors = pmon:monitor_all(Pids, Mons) });
-
-handle_cast({delete_and_terminate, Reason}, State) ->
- {stop, Reason, State}.
-
-handle_info({'DOWN', _MonitorRef, process, Pid, _Reason},
- State = #state { monitors = Mons,
- death_fun = DeathFun }) ->
- noreply(case pmon:is_monitored(Pid, Mons) of
- false -> State;
- true -> ok = DeathFun(Pid),
- State #state { monitors = pmon:erase(Pid, Mons) }
- end);
-
-handle_info(Msg, State) ->
- {stop, {unexpected_info, Msg}, State}.
-
-terminate(_Reason, #state{}) ->
- %% gen_server case
- ok;
-terminate([_CPid], _Reason) ->
- %% gm case
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%% ---------------------------------------------------------------------------
-%% GM
-%% ---------------------------------------------------------------------------
-
-joined([CPid], Members) ->
- CPid ! {joined, self(), Members},
- ok.
-
-members_changed([_CPid], _Births, [], _Live) ->
- ok;
-members_changed([CPid], _Births, _Deaths, Live) ->
- ok = gen_server2:cast(CPid, {gm_deaths, Live}).
-
-handle_msg([CPid], _From, request_depth = Msg) ->
- ok = gen_server2:cast(CPid, Msg);
-handle_msg([CPid], _From, {ensure_monitoring, _Pids} = Msg) ->
- ok = gen_server2:cast(CPid, Msg);
-handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
- ok = gen_server2:cast(CPid, Msg),
- {stop, {shutdown, ring_shutdown}};
-handle_msg([_CPid], _From, _Msg) ->
- ok.
-
-%% ---------------------------------------------------------------------------
-%% Others
-%% ---------------------------------------------------------------------------
-
-noreply(State) ->
- {noreply, State, hibernate}.
-
-reply(Reply, State) ->
- {reply, Reply, State, hibernate}.
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl
deleted file mode 100644
index 3abd81f5..00000000
--- a/src/rabbit_mirror_queue_master.erl
+++ /dev/null
@@ -1,475 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_master).
-
--export([init/3, terminate/2, delete_and_terminate/2,
- purge/1, purge_acks/1, publish/5, publish_delivered/4,
- discard/3, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
- len/1, is_empty/1, depth/1, drain_confirmed/1,
- dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
- needs_timeout/1, timeout/1, handle_pre_hibernate/1,
- status/1, invoke/3, is_duplicate/2]).
-
--export([start/1, stop/0]).
-
--export([promote_backing_queue_state/8, sender_death_fun/0, depth_fun/0]).
-
--export([init_with_existing_bq/3, stop_mirroring/1, sync_mirrors/3]).
-
--behaviour(rabbit_backing_queue).
-
--include("rabbit.hrl").
-
--record(state, { name,
- gm,
- coordinator,
- backing_queue,
- backing_queue_state,
- seen_status,
- confirmed,
- known_senders
- }).
-
--ifdef(use_specs).
-
--export_type([death_fun/0, depth_fun/0, stats_fun/0]).
-
--type(death_fun() :: fun ((pid()) -> 'ok')).
--type(depth_fun() :: fun (() -> 'ok')).
--type(stats_fun() :: fun ((any()) -> 'ok')).
--type(master_state() :: #state { name :: rabbit_amqqueue:name(),
- gm :: pid(),
- coordinator :: pid(),
- backing_queue :: atom(),
- backing_queue_state :: any(),
- seen_status :: dict(),
- confirmed :: [rabbit_guid:guid()],
- known_senders :: set()
- }).
-
--spec(promote_backing_queue_state/8 ::
- (rabbit_amqqueue:name(), pid(), atom(), any(), pid(), [any()], dict(),
- [pid()]) -> master_state()).
--spec(sender_death_fun/0 :: () -> death_fun()).
--spec(depth_fun/0 :: () -> depth_fun()).
--spec(init_with_existing_bq/3 :: (rabbit_types:amqqueue(), atom(), any()) ->
- master_state()).
--spec(stop_mirroring/1 :: (master_state()) -> {atom(), any()}).
--spec(sync_mirrors/3 :: (stats_fun(), stats_fun(), master_state()) ->
- {'ok', master_state()} | {stop, any(), master_state()}).
-
--endif.
-
-%% For general documentation of HA design, see
-%% rabbit_mirror_queue_coordinator
-
-%% ---------------------------------------------------------------------------
-%% Backing queue
-%% ---------------------------------------------------------------------------
-
-start(_DurableQueues) ->
- %% This will never get called as this module will never be
- %% installed as the default BQ implementation.
- exit({not_valid_for_generic_backing_queue, ?MODULE}).
-
-stop() ->
- %% Same as start/1.
- exit({not_valid_for_generic_backing_queue, ?MODULE}).
-
-init(Q, Recover, AsyncCallback) ->
- {ok, BQ} = application:get_env(backing_queue_module),
- BQS = BQ:init(Q, Recover, AsyncCallback),
- State = #state{gm = GM} = init_with_existing_bq(Q, BQ, BQS),
- ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
- State.
-
-init_with_existing_bq(Q = #amqqueue{name = QName}, BQ, BQS) ->
- {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
- Q, undefined, sender_death_fun(), depth_fun()),
- GM = rabbit_mirror_queue_coordinator:get_gm(CPid),
- Self = self(),
- ok = rabbit_misc:execute_mnesia_transaction(
- fun () ->
- [Q1 = #amqqueue{gm_pids = GMPids}]
- = mnesia:read({rabbit_queue, QName}),
- ok = rabbit_amqqueue:store_queue(
- Q1#amqqueue{gm_pids = [{GM, Self} | GMPids]})
- end),
- {_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q),
- rabbit_mirror_queue_misc:add_mirrors(QName, SNodes),
- #state { name = QName,
- gm = GM,
- coordinator = CPid,
- backing_queue = BQ,
- backing_queue_state = BQS,
- seen_status = dict:new(),
- confirmed = [],
- known_senders = sets:new() }.
-
-stop_mirroring(State = #state { coordinator = CPid,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- unlink(CPid),
- stop_all_slaves(shutdown, State),
- {BQ, BQS}.
-
-sync_mirrors(HandleInfo, EmitStats,
- State = #state { name = QName,
- gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- Log = fun (Fmt, Params) ->
- rabbit_log:info("Synchronising ~s: " ++ Fmt ++ "~n",
- [rabbit_misc:rs(QName) | Params])
- end,
- Log("~p messages to synchronise", [BQ:len(BQS)]),
- {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
- Ref = make_ref(),
- Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, Log, SPids),
- gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
- S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
- case rabbit_mirror_queue_sync:master_go(
- Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) of
- {shutdown, R, BQS1} -> {stop, R, S(BQS1)};
- {sync_died, R, BQS1} -> Log("~p", [R]),
- {ok, S(BQS1)};
- {already_synced, BQS1} -> {ok, S(BQS1)};
- {ok, BQS1} -> Log("complete", []),
- {ok, S(BQS1)}
- end.
-
-terminate({shutdown, dropped} = Reason,
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- %% Backing queue termination - this node has been explicitly
- %% dropped. Normally, non-durable queues would be tidied up on
- %% startup, but there's a possibility that we will be added back
- %% in without this node being restarted. Thus we must do the full
- %% blown delete_and_terminate now, but only locally: we do not
- %% broadcast delete_and_terminate.
- State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)};
-
-terminate(Reason,
- State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
- %% Backing queue termination. The queue is going down but
- %% shouldn't be deleted. Most likely safe shutdown of this
- %% node. Thus just let some other slave take over.
- State #state { backing_queue_state = BQ:terminate(Reason, BQS) }.
-
-delete_and_terminate(Reason, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- stop_all_slaves(Reason, State),
- State#state{backing_queue_state = BQ:delete_and_terminate(Reason, BQS)}.
-
-stop_all_slaves(Reason, #state{name = QName, gm = GM}) ->
- {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
- MRefs = [erlang:monitor(process, SPid) || SPid <- SPids],
- ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
- [receive {'DOWN', MRef, process, _Pid, _Info} -> ok end || MRef <- MRefs],
- %% Normally when we remove a slave another slave or master will
- %% notice and update Mnesia. But we just removed them all, and
- %% have stopped listening ourselves. So manually clean up.
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- [Q] = mnesia:read({rabbit_queue, QName}),
- rabbit_mirror_queue_misc:store_updated_slaves(
- Q #amqqueue { gm_pids = [], slave_pids = [] })
- end),
- ok = gm:forget_group(QName).
-
-purge(State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- ok = gm:broadcast(GM, {drop, 0, BQ:len(BQS), false}),
- {Count, BQS1} = BQ:purge(BQS),
- {Count, State #state { backing_queue_state = BQS1 }}.
-
-purge_acks(_State) -> exit({not_implemented, {?MODULE, purge_acks}}).
-
-publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid,
- State = #state { gm = GM,
- seen_status = SS,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- false = dict:is_key(MsgId, SS), %% ASSERTION
- ok = gm:broadcast(GM, {publish, ChPid, MsgProps, Msg}),
- BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, BQS),
- ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
-
-publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
- ChPid, State = #state { gm = GM,
- seen_status = SS,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- false = dict:is_key(MsgId, SS), %% ASSERTION
- ok = gm:broadcast(GM, {publish_delivered, ChPid, MsgProps, Msg}),
- {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
- State1 = State #state { backing_queue_state = BQS1 },
- {AckTag, ensure_monitoring(ChPid, State1)}.
-
-discard(MsgId, ChPid, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS,
- seen_status = SS }) ->
- false = dict:is_key(MsgId, SS), %% ASSERTION
- ok = gm:broadcast(GM, {discard, ChPid, MsgId}),
- ensure_monitoring(ChPid, State #state { backing_queue_state =
- BQ:discard(MsgId, ChPid, BQS) }).
-
-dropwhile(Pred, State = #state{backing_queue = BQ,
- backing_queue_state = BQS }) ->
- Len = BQ:len(BQS),
- {Next, BQS1} = BQ:dropwhile(Pred, BQS),
- {Next, drop(Len, false, State #state { backing_queue_state = BQS1 })}.
-
-fetchwhile(Pred, Fun, Acc, State = #state{backing_queue = BQ,
- backing_queue_state = BQS }) ->
- Len = BQ:len(BQS),
- {Next, Acc1, BQS1} = BQ:fetchwhile(Pred, Fun, Acc, BQS),
- {Next, Acc1, drop(Len, true, State #state { backing_queue_state = BQS1 })}.
-
-drain_confirmed(State = #state { backing_queue = BQ,
- backing_queue_state = BQS,
- seen_status = SS,
- confirmed = Confirmed }) ->
- {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
- {MsgIds1, SS1} =
- lists:foldl(
- fun (MsgId, {MsgIdsN, SSN}) ->
- %% We will never see 'discarded' here
- case dict:find(MsgId, SSN) of
- error ->
- {[MsgId | MsgIdsN], SSN};
- {ok, published} ->
- %% It was published when we were a slave,
- %% and we were promoted before we saw the
- %% publish from the channel. We still
- %% haven't seen the channel publish, and
- %% consequently we need to filter out the
- %% confirm here. We will issue the confirm
- %% when we see the publish from the channel.
- {MsgIdsN, dict:store(MsgId, confirmed, SSN)};
- {ok, confirmed} ->
- %% Well, confirms are racy by definition.
- {[MsgId | MsgIdsN], SSN}
- end
- end, {[], SS}, MsgIds),
- {Confirmed ++ MsgIds1, State #state { backing_queue_state = BQS1,
- seen_status = SS1,
- confirmed = [] }}.
-
-fetch(AckRequired, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {Result, BQS1} = BQ:fetch(AckRequired, BQS),
- State1 = State #state { backing_queue_state = BQS1 },
- {Result, case Result of
- empty -> State1;
- {_MsgId, _IsDelivered, AckTag} -> drop_one(AckTag, State1)
- end}.
-
-drop(AckRequired, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {Result, BQS1} = BQ:drop(AckRequired, BQS),
- State1 = State #state { backing_queue_state = BQS1 },
- {Result, case Result of
- empty -> State1;
- {_MsgId, AckTag} -> drop_one(AckTag, State1)
- end}.
-
-ack(AckTags, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {MsgIds, BQS1} = BQ:ack(AckTags, BQS),
- case MsgIds of
- [] -> ok;
- _ -> ok = gm:broadcast(GM, {ack, MsgIds})
- end,
- {MsgIds, State #state { backing_queue_state = BQS1 }}.
-
-requeue(AckTags, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
- ok = gm:broadcast(GM, {requeue, MsgIds}),
- {MsgIds, State #state { backing_queue_state = BQS1 }}.
-
-ackfold(MsgFun, Acc, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }, AckTags) ->
- {Acc1, BQS1} = BQ:ackfold(MsgFun, Acc, BQS, AckTags),
- {Acc1, State #state { backing_queue_state = BQS1 }}.
-
-fold(Fun, Acc, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {Result, BQS1} = BQ:fold(Fun, Acc, BQS),
- {Result, State #state { backing_queue_state = BQS1 }}.
-
-len(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
- BQ:len(BQS).
-
-is_empty(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
- BQ:is_empty(BQS).
-
-depth(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
- BQ:depth(BQS).
-
-set_ram_duration_target(Target, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- State #state { backing_queue_state =
- BQ:set_ram_duration_target(Target, BQS) }.
-
-ram_duration(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
- {Result, BQS1} = BQ:ram_duration(BQS),
- {Result, State #state { backing_queue_state = BQS1 }}.
-
-needs_timeout(#state { backing_queue = BQ, backing_queue_state = BQS }) ->
- BQ:needs_timeout(BQS).
-
-timeout(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
- State #state { backing_queue_state = BQ:timeout(BQS) }.
-
-handle_pre_hibernate(State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- State #state { backing_queue_state = BQ:handle_pre_hibernate(BQS) }.
-
-status(State = #state { backing_queue = BQ, backing_queue_state = BQS }) ->
- BQ:status(BQS) ++
- [ {mirror_seen, dict:size(State #state.seen_status)},
- {mirror_senders, sets:size(State #state.known_senders)} ].
-
-invoke(?MODULE, Fun, State) ->
- Fun(?MODULE, State);
-invoke(Mod, Fun, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
-
-is_duplicate(Message = #basic_message { id = MsgId },
- State = #state { seen_status = SS,
- backing_queue = BQ,
- backing_queue_state = BQS,
- confirmed = Confirmed }) ->
- %% Here, we need to deal with the possibility that we're about to
- %% receive a message that we've already seen when we were a slave
- %% (we received it via gm). Thus if we do receive such message now
- %% via the channel, there may be a confirm waiting to issue for
- %% it.
-
- %% We will never see {published, ChPid, MsgSeqNo} here.
- case dict:find(MsgId, SS) of
- error ->
- %% We permit the underlying BQ to have a peek at it, but
- %% only if we ourselves are not filtering out the msg.
- {Result, BQS1} = BQ:is_duplicate(Message, BQS),
- {Result, State #state { backing_queue_state = BQS1 }};
- {ok, published} ->
- %% It already got published when we were a slave and no
- %% confirmation is waiting. amqqueue_process will have, in
- %% its msg_id_to_channel mapping, the entry for dealing
- %% with the confirm when that comes back in (it's added
- %% immediately after calling is_duplicate). The msg is
- %% invalid. We will not see this again, nor will we be
- %% further involved in confirming this message, so erase.
- {true, State #state { seen_status = dict:erase(MsgId, SS) }};
- {ok, Disposition}
- when Disposition =:= confirmed
- %% It got published when we were a slave via gm, and
- %% confirmed some time after that (maybe even after
- %% promotion), but before we received the publish from the
- %% channel, so couldn't previously know what the
- %% msg_seq_no was (and thus confirm as a slave). So we
- %% need to confirm now. As above, amqqueue_process will
- %% have the entry for the msg_id_to_channel mapping added
- %% immediately after calling is_duplicate/2.
- orelse Disposition =:= discarded ->
- %% Message was discarded while we were a slave. Confirm now.
- %% As above, amqqueue_process will have the entry for the
- %% msg_id_to_channel mapping.
- {true, State #state { seen_status = dict:erase(MsgId, SS),
- confirmed = [MsgId | Confirmed] }}
- end.
-
-%% ---------------------------------------------------------------------------
-%% Other exported functions
-%% ---------------------------------------------------------------------------
-
-promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
- {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
- Len = BQ:len(BQS1),
- Depth = BQ:depth(BQS1),
- true = Len == Depth, %% ASSERTION: everything must have been requeued
- ok = gm:broadcast(GM, {depth, Depth}),
- #state { name = QName,
- gm = GM,
- coordinator = CPid,
- backing_queue = BQ,
- backing_queue_state = BQS1,
- seen_status = Seen,
- confirmed = [],
- known_senders = sets:from_list(KS) }.
-
-sender_death_fun() ->
- Self = self(),
- fun (DeadPid) ->
- rabbit_amqqueue:run_backing_queue(
- Self, ?MODULE,
- fun (?MODULE, State = #state { gm = GM, known_senders = KS }) ->
- ok = gm:broadcast(GM, {sender_death, DeadPid}),
- KS1 = sets:del_element(DeadPid, KS),
- State #state { known_senders = KS1 }
- end)
- end.
-
-depth_fun() ->
- Self = self(),
- fun () ->
- rabbit_amqqueue:run_backing_queue(
- Self, ?MODULE,
- fun (?MODULE, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- ok = gm:broadcast(GM, {depth, BQ:depth(BQS)}),
- State
- end)
- end.
-
-%% ---------------------------------------------------------------------------
-%% Helpers
-%% ---------------------------------------------------------------------------
-
-drop_one(AckTag, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckTag =/= undefined}),
- State.
-
-drop(PrevLen, AckRequired, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- Len = BQ:len(BQS),
- case PrevLen - Len of
- 0 -> State;
- Dropped -> ok = gm:broadcast(GM, {drop, Len, Dropped, AckRequired}),
- State
- end.
-
-ensure_monitoring(ChPid, State = #state { coordinator = CPid,
- known_senders = KS }) ->
- case sets:is_element(ChPid, KS) of
- true -> State;
- false -> ok = rabbit_mirror_queue_coordinator:ensure_monitoring(
- CPid, [ChPid]),
- State #state { known_senders = sets:add_element(ChPid, KS) }
- end.
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl
deleted file mode 100644
index 4ea1d984..00000000
--- a/src/rabbit_mirror_queue_misc.erl
+++ /dev/null
@@ -1,347 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_misc).
--behaviour(rabbit_policy_validator).
-
--export([remove_from_queue/3, on_node_up/0, add_mirrors/2, add_mirror/2,
- report_deaths/4, store_updated_slaves/1, suggested_queue_nodes/1,
- is_mirrored/1, update_mirrors/2, validate_policy/1]).
-
-%% for testing only
--export([module/1]).
-
--include("rabbit.hrl").
-
--rabbit_boot_step({?MODULE,
- [{description, "HA policy validation"},
- {mfa, {rabbit_registry, register,
- [policy_validator, <<"ha-mode">>, ?MODULE]}},
- {mfa, {rabbit_registry, register,
- [policy_validator, <<"ha-params">>, ?MODULE]}},
- {mfa, {rabbit_registry, register,
- [policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, recovery}]}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(remove_from_queue/3 ::
- (rabbit_amqqueue:name(), pid(), [pid()])
- -> {'ok', pid(), [pid()]} | {'error', 'not_found'}).
--spec(on_node_up/0 :: () -> 'ok').
--spec(add_mirrors/2 :: (rabbit_amqqueue:name(), [node()]) -> 'ok').
--spec(add_mirror/2 ::
- (rabbit_amqqueue:name(), node()) ->
- {'ok', atom()} | rabbit_types:error(any())).
--spec(store_updated_slaves/1 :: (rabbit_types:amqqueue()) ->
- rabbit_types:amqqueue()).
--spec(suggested_queue_nodes/1 :: (rabbit_types:amqqueue()) ->
- {node(), [node()]}).
--spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()).
--spec(update_mirrors/2 ::
- (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% If the dead pids include the queue pid (i.e. the master has died)
-%% then only remove that if we are about to be promoted. Otherwise we
-%% can have the situation where a slave updates the mnesia record for
-%% a queue, promoting another slave before that slave realises it has
-%% become the new master, which is bad because it could then mean the
-%% slave (now master) receives messages it's not ready for (for
-%% example, new consumers).
-%% Returns {ok, NewMPid, DeadPids}
-remove_from_queue(QueueName, Self, LiveGMPids) ->
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- %% Someone else could have deleted the queue before we
- %% get here.
- case mnesia:read({rabbit_queue, QueueName}) of
- [] -> {error, not_found};
- [Q = #amqqueue { pid = QPid,
- slave_pids = SPids,
- gm_pids = GMPids }] ->
- {GMPids1, Dead} = lists:partition(
- fun ({GM, _}) ->
- lists:member(GM, LiveGMPids)
- end, GMPids),
- DeadPids = [Pid || {_GM, Pid} <- Dead],
- Alive = [QPid | SPids] -- DeadPids,
- {QPid1, SPids1} = promote_slave(Alive),
- case {{QPid, SPids}, {QPid1, SPids1}} of
- {Same, Same} ->
- GMPids = GMPids1, %% ASSERTION
- {ok, QPid1, []};
- _ when QPid =:= QPid1 orelse QPid1 =:= Self ->
- %% Either master hasn't changed, so
- %% we're ok to update mnesia; or we have
- %% become the master.
- store_updated_slaves(
- Q #amqqueue { pid = QPid1,
- slave_pids = SPids1,
- gm_pids = GMPids1 }),
- {ok, QPid1, [QPid | SPids] -- Alive};
- _ ->
- %% Master has changed, and we're not it,
- %% so leave alone to allow the promoted
- %% slave to find it and make its
- %% promotion atomic.
- {ok, QPid1, []}
- end
- end
- end).
-
-on_node_up() ->
- QNames =
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- mnesia:foldl(
- fun (Q = #amqqueue{name = QName,
- pid = Pid,
- slave_pids = SPids}, QNames0) ->
- %% We don't want to pass in the whole
- %% cluster - we don't want a situation
- %% where starting one node causes us to
- %% decide to start a mirror on another
- PossibleNodes0 = [node(P) || P <- [Pid | SPids]],
- PossibleNodes =
- case lists:member(node(), PossibleNodes0) of
- true -> PossibleNodes0;
- false -> [node() | PossibleNodes0]
- end,
- {_MNode, SNodes} = suggested_queue_nodes(
- Q, PossibleNodes),
- case lists:member(node(), SNodes) of
- true -> [QName | QNames0];
- false -> QNames0
- end
- end, [], rabbit_queue)
- end),
- [add_mirror(QName, node()) || QName <- QNames],
- ok.
-
-drop_mirrors(QName, Nodes) ->
- [drop_mirror(QName, Node) || Node <- Nodes],
- ok.
-
-drop_mirror(QName, MirrorNode) ->
- rabbit_amqqueue:with(
- QName,
- fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids }) ->
- case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
- [] ->
- {error, {queue_not_mirrored_on_node, MirrorNode}};
- [QPid] when SPids =:= [] ->
- {error, cannot_drop_only_mirror};
- [Pid] ->
- rabbit_log:info(
- "Dropping queue mirror on node ~p for ~s~n",
- [MirrorNode, rabbit_misc:rs(Name)]),
- exit(Pid, {shutdown, dropped}),
- {ok, dropped}
- end
- end).
-
-add_mirrors(QName, Nodes) ->
- [add_mirror(QName, Node) || Node <- Nodes],
- ok.
-
-add_mirror(QName, MirrorNode) ->
- rabbit_amqqueue:with(
- QName,
- fun (#amqqueue { name = Name, pid = QPid, slave_pids = SPids } = Q) ->
- case [Pid || Pid <- [QPid | SPids], node(Pid) =:= MirrorNode] of
- [] ->
- start_child(Name, MirrorNode, Q);
- [SPid] ->
- case rabbit_misc:is_process_alive(SPid) of
- true -> {ok, already_mirrored};
- false -> start_child(Name, MirrorNode, Q)
- end
- end
- end).
-
-start_child(Name, MirrorNode, Q) ->
- case rabbit_misc:with_exit_handler(
- rabbit_misc:const({ok, down}),
- fun () ->
- rabbit_mirror_queue_slave_sup:start_child(MirrorNode, [Q])
- end) of
- {ok, SPid} when is_pid(SPid) ->
- maybe_auto_sync(Q),
- rabbit_log:info("Adding mirror of ~s on node ~p: ~p~n",
- [rabbit_misc:rs(Name), MirrorNode, SPid]),
- {ok, started};
- {error, {{stale_master_pid, StalePid}, _}} ->
- rabbit_log:warning("Detected stale HA master while adding "
- "mirror of ~s on node ~p: ~p~n",
- [rabbit_misc:rs(Name), MirrorNode, StalePid]),
- {ok, stale_master};
- {error, {{duplicate_live_master, _}=Err, _}} ->
- Err;
- Other ->
- Other
- end.
-
-report_deaths(_MirrorPid, _IsMaster, _QueueName, []) ->
- ok;
-report_deaths(MirrorPid, IsMaster, QueueName, DeadPids) ->
- rabbit_event:notify(queue_mirror_deaths, [{name, QueueName},
- {pids, DeadPids}]),
- rabbit_log:info("Mirrored-queue (~s): ~s ~s saw deaths of mirrors ~s~n",
- [rabbit_misc:rs(QueueName),
- case IsMaster of
- true -> "Master";
- false -> "Slave"
- end,
- rabbit_misc:pid_to_string(MirrorPid),
- [[rabbit_misc:pid_to_string(P), $ ] || P <- DeadPids]]).
-
-store_updated_slaves(Q = #amqqueue{slave_pids = SPids,
- sync_slave_pids = SSPids}) ->
- %% TODO now that we clear sync_slave_pids in rabbit_durable_queue,
- %% do we still need this filtering?
- SSPids1 = [SSPid || SSPid <- SSPids, lists:member(SSPid, SPids)],
- Q1 = Q#amqqueue{sync_slave_pids = SSPids1},
- ok = rabbit_amqqueue:store_queue(Q1),
- %% Wake it up so that we emit a stats event
- rabbit_amqqueue:wake_up(Q1),
- Q1.
-
-%%----------------------------------------------------------------------------
-
-promote_slave([SPid | SPids]) ->
- %% The slave pids are maintained in descending order of age, so
- %% the one to promote is the oldest.
- {SPid, SPids}.
-
-suggested_queue_nodes(Q) ->
- suggested_queue_nodes(Q, rabbit_mnesia:cluster_nodes(running)).
-
-%% This variant exists so we can pull a call to
-%% rabbit_mnesia:cluster_nodes(running) out of a loop or
-%% transaction or both.
-suggested_queue_nodes(Q, All) ->
- {MNode0, SNodes, SSNodes} = actual_queue_nodes(Q),
- MNode = case MNode0 of
- none -> node();
- _ -> MNode0
- end,
- Params = policy(<<"ha-params">>, Q),
- case module(Q) of
- {ok, M} -> M:suggested_queue_nodes(Params, MNode, SNodes, SSNodes, All);
- _ -> {MNode, []}
- end.
-
-policy(Policy, Q) ->
- case rabbit_policy:get(Policy, Q) of
- {ok, P} -> P;
- _ -> none
- end.
-
-module(#amqqueue{} = Q) ->
- case rabbit_policy:get(<<"ha-mode">>, Q) of
- {ok, Mode} -> module(Mode);
- _ -> not_mirrored
- end;
-
-module(Mode) when is_binary(Mode) ->
- case rabbit_registry:binary_to_type(Mode) of
- {error, not_found} -> not_mirrored;
- T -> case rabbit_registry:lookup_module(ha_mode, T) of
- {ok, Module} -> {ok, Module};
- _ -> not_mirrored
- end
- end.
-
-is_mirrored(Q) ->
- case module(Q) of
- {ok, _} -> true;
- _ -> false
- end.
-
-actual_queue_nodes(#amqqueue{pid = MPid,
- slave_pids = SPids,
- sync_slave_pids = SSPids}) ->
- Nodes = fun (L) -> [node(Pid) || Pid <- L] end,
- {case MPid of
- none -> none;
- _ -> node(MPid)
- end, Nodes(SPids), Nodes(SSPids)}.
-
-maybe_auto_sync(Q = #amqqueue{pid = QPid}) ->
- case policy(<<"ha-sync-mode">>, Q) of
- <<"automatic">> ->
- spawn(fun() -> rabbit_amqqueue:sync_mirrors(QPid) end);
- _ ->
- ok
- end.
-
-update_mirrors(OldQ = #amqqueue{pid = QPid},
- NewQ = #amqqueue{pid = QPid}) ->
- case {is_mirrored(OldQ), is_mirrored(NewQ)} of
- {false, false} -> ok;
- {true, false} -> rabbit_amqqueue:stop_mirroring(QPid);
- {false, true} -> rabbit_amqqueue:start_mirroring(QPid);
- {true, true} -> update_mirrors0(OldQ, NewQ)
- end.
-
-update_mirrors0(OldQ = #amqqueue{name = QName},
- NewQ = #amqqueue{name = QName}) ->
- {OldMNode, OldSNodes, _} = actual_queue_nodes(OldQ),
- {NewMNode, NewSNodes} = suggested_queue_nodes(NewQ),
- OldNodes = [OldMNode | OldSNodes],
- NewNodes = [NewMNode | NewSNodes],
- add_mirrors (QName, NewNodes -- OldNodes),
- drop_mirrors(QName, OldNodes -- NewNodes),
- maybe_auto_sync(NewQ),
- ok.
-
-%%----------------------------------------------------------------------------
-
-validate_policy(KeyList) ->
- Mode = proplists:get_value(<<"ha-mode">>, KeyList, none),
- Params = proplists:get_value(<<"ha-params">>, KeyList, none),
- SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
- case {Mode, Params, SyncMode} of
- {none, none, none} ->
- ok;
- {none, _, _} ->
- {error, "ha-mode must be specified to specify ha-params or "
- "ha-sync-mode", []};
- _ ->
- case module(Mode) of
- {ok, M} -> case M:validate_policy(Params) of
- ok -> validate_sync_mode(SyncMode);
- E -> E
- end;
- _ -> {error, "~p is not a valid ha-mode value", [Mode]}
- end
- end.
-
-validate_sync_mode(SyncMode) ->
- case SyncMode of
- <<"automatic">> -> ok;
- <<"manual">> -> ok;
- none -> ok;
- Mode -> {error, "ha-sync-mode must be \"manual\" "
- "or \"automatic\", got ~p", [Mode]}
- end.
diff --git a/src/rabbit_mirror_queue_mode.erl b/src/rabbit_mirror_queue_mode.erl
deleted file mode 100644
index 9e2015d9..00000000
--- a/src/rabbit_mirror_queue_mode.erl
+++ /dev/null
@@ -1,57 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_mode).
-
--ifdef(use_specs).
-
--type(master() :: node()).
--type(slave() :: node()).
--type(params() :: any()).
-
--callback description() -> [proplists:property()].
-
-%% Called whenever we think we might need to change nodes for a
-%% mirrored queue. Note that this is called from a variety of
-%% contexts, both inside and outside Mnesia transactions. Ideally it
-%% will be pure-functional.
-%%
-%% Takes: parameters set in the policy,
-%% current master,
-%% current slaves,
-%% current synchronised slaves,
-%% all nodes to consider
-%%
-%% Returns: tuple of new master, new slaves
-%%
--callback suggested_queue_nodes(
- params(), master(), [slave()], [slave()], [node()]) ->
- {master(), [slave()]}.
-
-%% Are the parameters valid for this mode?
--callback validate_policy(params()) ->
- rabbit_policy_validator:validate_results().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {suggested_queue_nodes, 5}, {validate_policy, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_mirror_queue_mode_all.erl b/src/rabbit_mirror_queue_mode_all.erl
deleted file mode 100644
index 3b5163a3..00000000
--- a/src/rabbit_mirror_queue_mode_all.erl
+++ /dev/null
@@ -1,41 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_mode_all).
-
--include("rabbit.hrl").
-
--behaviour(rabbit_mirror_queue_mode).
-
--export([description/0, suggested_queue_nodes/5, validate_policy/1]).
-
--rabbit_boot_step({?MODULE,
- [{description, "mirror mode all"},
- {mfa, {rabbit_registry, register,
- [ha_mode, <<"all">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-description() ->
- [{description, <<"Mirror queue to all nodes">>}].
-
-suggested_queue_nodes(_Params, MNode, _SNodes, _SSNodes, Poss) ->
- {MNode, Poss -- [MNode]}.
-
-validate_policy(none) ->
- ok;
-validate_policy(_Params) ->
- {error, "ha-mode=\"all\" does not take parameters", []}.
diff --git a/src/rabbit_mirror_queue_mode_exactly.erl b/src/rabbit_mirror_queue_mode_exactly.erl
deleted file mode 100644
index 2841f87e..00000000
--- a/src/rabbit_mirror_queue_mode_exactly.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_mode_exactly).
-
--include("rabbit.hrl").
-
--behaviour(rabbit_mirror_queue_mode).
-
--export([description/0, suggested_queue_nodes/5, validate_policy/1]).
-
--rabbit_boot_step({?MODULE,
- [{description, "mirror mode exactly"},
- {mfa, {rabbit_registry, register,
- [ha_mode, <<"exactly">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-description() ->
- [{description, <<"Mirror queue to a specified number of nodes">>}].
-
-%% When we need to add nodes, we randomise our candidate list as a
-%% crude form of load-balancing. TODO it would also be nice to
-%% randomise the list of ones to remove when we have too many - we
-%% would have to take account of synchronisation though.
-suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
- SCount = Count - 1,
- {MNode, case SCount > length(SNodes) of
- true -> Cand = shuffle((Poss -- [MNode]) -- SNodes),
- SNodes ++ lists:sublist(Cand, SCount - length(SNodes));
- false -> lists:sublist(SNodes, SCount)
- end}.
-
-shuffle(L) ->
- {A1,A2,A3} = now(),
- random:seed(A1, A2, A3),
- {_, L1} = lists:unzip(lists:keysort(1, [{random:uniform(), N} || N <- L])),
- L1.
-
-validate_policy(N) when is_integer(N) andalso N > 0 ->
- ok;
-validate_policy(Params) ->
- {error, "ha-mode=\"exactly\" takes an integer, ~p given", [Params]}.
diff --git a/src/rabbit_mirror_queue_mode_nodes.erl b/src/rabbit_mirror_queue_mode_nodes.erl
deleted file mode 100644
index 779b439d..00000000
--- a/src/rabbit_mirror_queue_mode_nodes.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_mode_nodes).
-
--include("rabbit.hrl").
-
--behaviour(rabbit_mirror_queue_mode).
-
--export([description/0, suggested_queue_nodes/5, validate_policy/1]).
-
--rabbit_boot_step({?MODULE,
- [{description, "mirror mode nodes"},
- {mfa, {rabbit_registry, register,
- [ha_mode, <<"nodes">>, ?MODULE]}},
- {requires, rabbit_registry},
- {enables, kernel_ready}]}).
-
-description() ->
- [{description, <<"Mirror queue to specified nodes">>}].
-
-suggested_queue_nodes(Nodes0, MNode, _SNodes, SSNodes, Poss) ->
- Nodes1 = [list_to_atom(binary_to_list(Node)) || Node <- Nodes0],
- %% If the current master is not in the nodes specified, then what we want
- %% to do depends on whether there are any synchronised slaves. If there
- %% are then we can just kill the current master - the admin has asked for
- %% a migration and we should give it to them. If there are not however
- %% then we must keep the master around so as not to lose messages.
- Nodes = case SSNodes of
- [] -> lists:usort([MNode | Nodes1]);
- _ -> Nodes1
- end,
- Unavailable = Nodes -- Poss,
- Available = Nodes -- Unavailable,
- case Available of
- [] -> %% We have never heard of anything? Not much we can do but
- %% keep the master alive.
- {MNode, []};
- _ -> case lists:member(MNode, Available) of
- true -> {MNode, Available -- [MNode]};
- false -> %% Make sure the new master is synced! In order to
- %% get here SSNodes must not be empty.
- [NewMNode | _] = SSNodes,
- {NewMNode, Available -- [NewMNode]}
- end
- end.
-
-validate_policy([]) ->
- {error, "ha-mode=\"nodes\" list must be non-empty", []};
-validate_policy(Nodes) when is_list(Nodes) ->
- case [I || I <- Nodes, not is_binary(I)] of
- [] -> ok;
- Invalid -> {error, "ha-mode=\"nodes\" takes a list of strings, "
- "~p was not a string", [Invalid]}
- end;
-validate_policy(Params) ->
- {error, "ha-mode=\"nodes\" takes a list, ~p given", [Params]}.
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl
deleted file mode 100644
index 313d2be4..00000000
--- a/src/rabbit_mirror_queue_slave.erl
+++ /dev/null
@@ -1,865 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_slave).
-
-%% For general documentation of HA design, see
-%% rabbit_mirror_queue_coordinator
-%%
-%% We receive messages from GM and from publishers, and the gm
-%% messages can arrive either before or after the 'actual' message.
-%% All instructions from the GM group must be processed in the order
-%% in which they're received.
-
--export([start_link/1, set_maximum_since_use/2, info/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3, handle_pre_hibernate/1, prioritise_call/4,
- prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
-
--export([joined/2, members_changed/4, handle_msg/3]).
-
--behaviour(gen_server2).
--behaviour(gm).
-
--include("rabbit.hrl").
-
--include("gm_specs.hrl").
-
-%%----------------------------------------------------------------------------
-
--define(CREATION_EVENT_KEYS,
- [pid,
- name,
- master_pid,
- is_synchronised
- ]).
-
--define(INFO_KEYS, ?CREATION_EVENT_KEYS).
-
--define(SYNC_INTERVAL, 25). %% milliseconds
--define(RAM_DURATION_UPDATE_INTERVAL, 5000).
--define(DEATH_TIMEOUT, 20000). %% 20 seconds
-
--record(state, { q,
- gm,
- backing_queue,
- backing_queue_state,
- sync_timer_ref,
- rate_timer_ref,
-
- sender_queues, %% :: Pid -> {Q Msg, Set MsgId}
- msg_id_ack, %% :: MsgId -> AckTag
-
- msg_id_status,
- known_senders,
-
- %% Master depth - local depth
- depth_delta
- }).
-
-%%----------------------------------------------------------------------------
-
-start_link(Q) -> gen_server2:start_link(?MODULE, Q, []).
-
-set_maximum_since_use(QPid, Age) ->
- gen_server2:cast(QPid, {set_maximum_since_use, Age}).
-
-info(QPid) -> gen_server2:call(QPid, info, infinity).
-
-init(Q = #amqqueue { name = QName }) ->
- %% We join the GM group before we add ourselves to the amqqueue
- %% record. As a result:
- %% 1. We can receive msgs from GM that correspond to messages we will
- %% never receive from publishers.
- %% 2. When we receive a message from publishers, we must receive a
- %% message from the GM group for it.
- %% 3. However, that instruction from the GM group can arrive either
- %% before or after the actual message. We need to be able to
- %% distinguish between GM instructions arriving early, and case (1)
- %% above.
- %%
- process_flag(trap_exit, true), %% amqqueue_process traps exits too.
- {ok, GM} = gm:start_link(QName, ?MODULE, [self()],
- fun rabbit_misc:execute_mnesia_transaction/1),
- receive {joined, GM} -> ok end,
- Self = self(),
- Node = node(),
- case rabbit_misc:execute_mnesia_transaction(
- fun() -> init_it(Self, GM, Node, QName) end) of
- {new, QPid, GMPids} ->
- erlang:monitor(process, QPid),
- ok = file_handle_cache:register_callback(
- rabbit_amqqueue, set_maximum_since_use, [Self]),
- ok = rabbit_memory_monitor:register(
- Self, {rabbit_amqqueue, set_ram_duration_target, [Self]}),
- {ok, BQ} = application:get_env(backing_queue_module),
- Q1 = Q #amqqueue { pid = QPid },
- BQS = bq_init(BQ, Q1, false),
- State = #state { q = Q1,
- gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS,
- rate_timer_ref = undefined,
- sync_timer_ref = undefined,
-
- sender_queues = dict:new(),
- msg_id_ack = dict:new(),
-
- msg_id_status = dict:new(),
- known_senders = pmon:new(),
-
- depth_delta = undefined
- },
- rabbit_event:notify(queue_slave_created,
- infos(?CREATION_EVENT_KEYS, State)),
- ok = gm:broadcast(GM, request_depth),
- ok = gm:validate_members(GM, [GM | [G || {G, _} <- GMPids]]),
- {ok, State, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN,
- ?DESIRED_HIBERNATE}};
- {stale, StalePid} ->
- {stop, {stale_master_pid, StalePid}};
- duplicate_live_master ->
- {stop, {duplicate_live_master, Node}};
- existing ->
- gm:leave(GM),
- ignore
- end.
-
-init_it(Self, GM, Node, QName) ->
- [Q = #amqqueue { pid = QPid, slave_pids = SPids, gm_pids = GMPids }] =
- mnesia:read({rabbit_queue, QName}),
- case [Pid || Pid <- [QPid | SPids], node(Pid) =:= Node] of
- [] -> add_slave(Q, Self, GM),
- {new, QPid, GMPids};
- [QPid] -> case rabbit_misc:is_process_alive(QPid) of
- true -> duplicate_live_master;
- false -> {stale, QPid}
- end;
- [SPid] -> case rabbit_misc:is_process_alive(SPid) of
- true -> existing;
- false -> Q1 = Q#amqqueue {
- slave_pids = SPids -- [SPid],
- gm_pids = [T || T = {_, S} <- GMPids,
- S =/= SPid] },
- add_slave(Q1, Self, GM),
- {new, QPid, GMPids}
- end
- end.
-
-%% Add to the end, so they are in descending order of age, see
-%% rabbit_mirror_queue_misc:promote_slave/1
-add_slave(Q = #amqqueue { slave_pids = SPids, gm_pids = GMPids }, New, GM) ->
- rabbit_mirror_queue_misc:store_updated_slaves(
- Q#amqqueue{slave_pids = SPids ++ [New], gm_pids = [{GM, New} | GMPids]}).
-
-handle_call({deliver, Delivery, true}, From, State) ->
- %% Synchronous, "mandatory" deliver mode.
- gen_server2:reply(From, ok),
- noreply(maybe_enqueue_message(Delivery, State));
-
-handle_call({gm_deaths, LiveGMPids}, From,
- State = #state { q = Q = #amqqueue { name = QName, pid = MPid }}) ->
- Self = self(),
- case rabbit_mirror_queue_misc:remove_from_queue(QName, Self, LiveGMPids) of
- {error, not_found} ->
- gen_server2:reply(From, ok),
- {stop, normal, State};
- {ok, Pid, DeadPids} ->
- rabbit_mirror_queue_misc:report_deaths(Self, false, QName,
- DeadPids),
- case Pid of
- MPid ->
- %% master hasn't changed
- gen_server2:reply(From, ok),
- noreply(State);
- Self ->
- %% we've become master
- QueueState = promote_me(From, State),
- {become, rabbit_amqqueue_process, QueueState, hibernate};
- _ ->
- %% master has changed to not us
- gen_server2:reply(From, ok),
- erlang:monitor(process, Pid),
- noreply(State #state { q = Q #amqqueue { pid = Pid } })
- end
- end;
-
-handle_call(info, _From, State) ->
- reply(infos(?INFO_KEYS, State), State).
-
-handle_cast({run_backing_queue, Mod, Fun}, State) ->
- noreply(run_backing_queue(Mod, Fun, State));
-
-handle_cast({gm, Instruction}, State) ->
- handle_process_result(process_instruction(Instruction, State));
-
-handle_cast({deliver, Delivery = #delivery{sender = Sender}, true, Flow},
- State) ->
- %% Asynchronous, non-"mandatory", deliver mode.
- case Flow of
- flow -> credit_flow:ack(Sender);
- noflow -> ok
- end,
- noreply(maybe_enqueue_message(Delivery, State));
-
-handle_cast({sync_start, Ref, Syncer},
- State = #state { depth_delta = DD,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- State1 = #state{rate_timer_ref = TRef} = ensure_rate_timer(State),
- S = fun({MA, TRefN, BQSN}) ->
- State1#state{depth_delta = undefined,
- msg_id_ack = dict:from_list(MA),
- rate_timer_ref = TRefN,
- backing_queue_state = BQSN}
- end,
- case rabbit_mirror_queue_sync:slave(
- DD, Ref, TRef, Syncer, BQ, BQS,
- fun (BQN, BQSN) ->
- BQSN1 = update_ram_duration(BQN, BQSN),
- TRefN = erlang:send_after(?RAM_DURATION_UPDATE_INTERVAL,
- self(), update_ram_duration),
- {TRefN, BQSN1}
- end) of
- denied -> noreply(State1);
- {ok, Res} -> noreply(set_delta(0, S(Res)));
- {failed, Res} -> noreply(S(Res));
- {stop, Reason, Res} -> {stop, Reason, S(Res)}
- end;
-
-handle_cast({set_maximum_since_use, Age}, State) ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- noreply(State);
-
-handle_cast({set_ram_duration_target, Duration},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- BQS1 = BQ:set_ram_duration_target(Duration, BQS),
- noreply(State #state { backing_queue_state = BQS1 }).
-
-handle_info(update_ram_duration, State = #state{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- BQS1 = update_ram_duration(BQ, BQS),
- %% Don't call noreply/1, we don't want to set timers
- {State1, Timeout} = next_state(State #state {
- rate_timer_ref = undefined,
- backing_queue_state = BQS1 }),
- {noreply, State1, Timeout};
-
-handle_info(sync_timeout, State) ->
- noreply(backing_queue_timeout(
- State #state { sync_timer_ref = undefined }));
-
-handle_info(timeout, State) ->
- noreply(backing_queue_timeout(State));
-
-handle_info({'DOWN', _MonitorRef, process, MPid, _Reason},
- State = #state { gm = GM, q = #amqqueue { pid = MPid } }) ->
- ok = gm:broadcast(GM, process_death),
- noreply(State);
-
-handle_info({'DOWN', _MonitorRef, process, ChPid, _Reason}, State) ->
- noreply(local_sender_death(ChPid, State));
-
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State};
-
-handle_info({bump_credit, Msg}, State) ->
- credit_flow:handle_bump_msg(Msg),
- noreply(State);
-
-handle_info(Msg, State) ->
- {stop, {unexpected_info, Msg}, State}.
-
-%% If the Reason is shutdown, or {shutdown, _}, it is not the queue
-%% being deleted: it's just the node going down. Even though we're a
-%% slave, we have no idea whether or not we'll be the only copy coming
-%% back up. Thus we must assume we will be, and preserve anything we
-%% have on disk.
-terminate(_Reason, #state { backing_queue_state = undefined }) ->
- %% We've received a delete_and_terminate from gm, thus nothing to
- %% do here.
- ok;
-terminate({shutdown, dropped} = R, #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- %% See rabbit_mirror_queue_master:terminate/2
- BQ:delete_and_terminate(R, BQS);
-terminate(Reason, #state { q = Q,
- gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS,
- rate_timer_ref = RateTRef }) ->
- ok = gm:leave(GM),
- QueueState = rabbit_amqqueue_process:init_with_backing_queue_state(
- Q, BQ, BQS, RateTRef, [], pmon:new(), dict:new()),
- rabbit_amqqueue_process:terminate(Reason, QueueState);
-terminate([_SPid], _Reason) ->
- %% gm case
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-handle_pre_hibernate(State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {RamDuration, BQS1} = BQ:ram_duration(BQS),
- DesiredDuration =
- rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
- BQS2 = BQ:set_ram_duration_target(DesiredDuration, BQS1),
- BQS3 = BQ:handle_pre_hibernate(BQS2),
- {hibernate, stop_rate_timer(State #state { backing_queue_state = BQS3 })}.
-
-prioritise_call(Msg, _From, _Len, _State) ->
- case Msg of
- info -> 9;
- {gm_deaths, _Live} -> 5;
- _ -> 0
- end.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- {set_ram_duration_target, _Duration} -> 8;
- {set_maximum_since_use, _Age} -> 8;
- {run_backing_queue, _Mod, _Fun} -> 6;
- {gm, _Msg} -> 5;
- _ -> 0
- end.
-
-prioritise_info(Msg, _Len, _State) ->
- case Msg of
- update_ram_duration -> 8;
- sync_timeout -> 6;
- _ -> 0
- end.
-
-format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
-
-%% ---------------------------------------------------------------------------
-%% GM
-%% ---------------------------------------------------------------------------
-
-joined([SPid], _Members) -> SPid ! {joined, self()}, ok.
-
-members_changed([_SPid], _Births, [], _Live) ->
- ok;
-members_changed([ SPid], _Births, _Deaths, Live) ->
- inform_deaths(SPid, Live).
-
-handle_msg([_SPid], _From, request_depth) ->
- %% This is only of value to the master
- ok;
-handle_msg([_SPid], _From, {ensure_monitoring, _Pid}) ->
- %% This is only of value to the master
- ok;
-handle_msg([_SPid], _From, process_death) ->
- %% Since GM is by nature lazy we need to make sure there is some
- %% traffic when a master dies, to make sure we get informed of the
- %% death. That's all process_death does, create some traffic. We
- %% must not take any notice of the master death here since it
- %% comes without ordering guarantees - there could still be
- %% messages from the master we have yet to receive. When we get
- %% members_changed, then there will be no more messages.
- ok;
-handle_msg([CPid], _From, {delete_and_terminate, _Reason} = Msg) ->
- ok = gen_server2:cast(CPid, {gm, Msg}),
- {stop, {shutdown, ring_shutdown}};
-handle_msg([SPid], _From, {sync_start, Ref, Syncer, SPids}) ->
- case lists:member(SPid, SPids) of
- true -> gen_server2:cast(SPid, {sync_start, Ref, Syncer});
- false -> ok
- end;
-handle_msg([SPid], _From, Msg) ->
- ok = gen_server2:cast(SPid, {gm, Msg}).
-
-inform_deaths(SPid, Live) ->
- case gen_server2:call(SPid, {gm_deaths, Live}, infinity) of
- ok -> ok;
- {promote, CPid} -> {become, rabbit_mirror_queue_coordinator, [CPid]}
- end.
-
-%% ---------------------------------------------------------------------------
-%% Others
-%% ---------------------------------------------------------------------------
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(pid, _State) -> self();
-i(name, #state { q = #amqqueue { name = Name } }) -> Name;
-i(master_pid, #state { q = #amqqueue { pid = MPid } }) -> MPid;
-i(is_synchronised, #state { depth_delta = DD }) -> DD =:= 0;
-i(Item, _State) -> throw({bad_argument, Item}).
-
-bq_init(BQ, Q, Recover) ->
- Self = self(),
- BQ:init(Q, Recover,
- fun (Mod, Fun) ->
- rabbit_amqqueue:run_backing_queue(Self, Mod, Fun)
- end).
-
-run_backing_queue(rabbit_mirror_queue_master, Fun, State) ->
- %% Yes, this might look a little crazy, but see comments in
- %% confirm_sender_death/1
- Fun(?MODULE, State);
-run_backing_queue(Mod, Fun, State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- State #state { backing_queue_state = BQ:invoke(Mod, Fun, BQS) }.
-
-send_or_record_confirm(_, #delivery{ msg_seq_no = undefined }, MS, _State) ->
- MS;
-send_or_record_confirm(published, #delivery { sender = ChPid,
- msg_seq_no = MsgSeqNo,
- message = #basic_message {
- id = MsgId,
- is_persistent = true } },
- MS, #state { q = #amqqueue { durable = true } }) ->
- dict:store(MsgId, {published, ChPid, MsgSeqNo} , MS);
-send_or_record_confirm(_Status, #delivery { sender = ChPid,
- msg_seq_no = MsgSeqNo },
- MS, _State) ->
- ok = rabbit_misc:confirm_to_sender(ChPid, [MsgSeqNo]),
- MS.
-
-confirm_messages(MsgIds, State = #state { msg_id_status = MS }) ->
- {CMs, MS1} =
- lists:foldl(
- fun (MsgId, {CMsN, MSN} = Acc) ->
- %% We will never see 'discarded' here
- case dict:find(MsgId, MSN) of
- error ->
- %% If it needed confirming, it'll have
- %% already been done.
- Acc;
- {ok, published} ->
- %% Still not seen it from the channel, just
- %% record that it's been confirmed.
- {CMsN, dict:store(MsgId, confirmed, MSN)};
- {ok, {published, ChPid, MsgSeqNo}} ->
- %% Seen from both GM and Channel. Can now
- %% confirm.
- {rabbit_misc:gb_trees_cons(ChPid, MsgSeqNo, CMsN),
- dict:erase(MsgId, MSN)};
- {ok, confirmed} ->
- %% It's already been confirmed. This is
- %% probably it's been both sync'd to disk
- %% and then delivered and ack'd before we've
- %% seen the publish from the
- %% channel. Nothing to do here.
- Acc
- end
- end, {gb_trees:empty(), MS}, MsgIds),
- rabbit_misc:gb_trees_foreach(fun rabbit_misc:confirm_to_sender/2, CMs),
- State #state { msg_id_status = MS1 }.
-
-handle_process_result({ok, State}) -> noreply(State);
-handle_process_result({stop, State}) -> {stop, normal, State}.
-
--ifdef(use_specs).
--spec(promote_me/2 :: ({pid(), term()}, #state{}) -> no_return()).
--endif.
-promote_me(From, #state { q = Q = #amqqueue { name = QName },
- gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS,
- rate_timer_ref = RateTRef,
- sender_queues = SQ,
- msg_id_ack = MA,
- msg_id_status = MS,
- known_senders = KS }) ->
- rabbit_log:info("Mirrored-queue (~s): Promoting slave ~s to master~n",
- [rabbit_misc:rs(QName), rabbit_misc:pid_to_string(self())]),
- Q1 = Q #amqqueue { pid = self() },
- {ok, CPid} = rabbit_mirror_queue_coordinator:start_link(
- Q1, GM, rabbit_mirror_queue_master:sender_death_fun(),
- rabbit_mirror_queue_master:depth_fun()),
- true = unlink(GM),
- gen_server2:reply(From, {promote, CPid}),
-
- %% Everything that we're monitoring, we need to ensure our new
- %% coordinator is monitoring.
- MPids = pmon:monitored(KS),
- ok = rabbit_mirror_queue_coordinator:ensure_monitoring(CPid, MPids),
-
- %% We find all the messages that we've received from channels but
- %% not from gm, and pass them to the
- %% queue_process:init_with_backing_queue_state to be enqueued.
- %%
- %% We also have to requeue messages which are pending acks: the
- %% consumers from the master queue have been lost and so these
- %% messages need requeuing. They might also be pending
- %% confirmation, and indeed they might also be pending arrival of
- %% the publication from the channel itself, if we received both
- %% the publication and the fetch via gm first! Requeuing doesn't
- %% affect confirmations: if the message was previously pending a
- %% confirmation then it still will be, under the same msg_id. So
- %% as a master, we need to be prepared to filter out the
- %% publication of said messages from the channel (is_duplicate
- %% (thus such requeued messages must remain in the msg_id_status
- %% (MS) which becomes seen_status (SS) in the master)).
- %%
- %% Then there are messages we already have in the queue, which are
- %% not currently pending acknowledgement:
- %% 1. Messages we've only received via gm:
- %% Filter out subsequent publication from channel through
- %% validate_message. Might have to issue confirms then or
- %% later, thus queue_process state will have to know that
- %% there's a pending confirm.
- %% 2. Messages received via both gm and channel:
- %% Queue will have to deal with issuing confirms if necessary.
- %%
- %% MS contains the following three entry types:
- %%
- %% a) published:
- %% published via gm only; pending arrival of publication from
- %% channel, maybe pending confirm.
- %%
- %% b) {published, ChPid, MsgSeqNo}:
- %% published via gm and channel; pending confirm.
- %%
- %% c) confirmed:
- %% published via gm only, and confirmed; pending publication
- %% from channel.
- %%
- %% d) discarded:
- %% seen via gm only as discarded. Pending publication from
- %% channel
- %%
- %% The forms a, c and d only, need to go to the master state
- %% seen_status (SS).
- %%
- %% The form b only, needs to go through to the queue_process
- %% state to form the msg_id_to_channel mapping (MTC).
- %%
- %% No messages that are enqueued from SQ at this point will have
- %% entries in MS.
- %%
- %% Messages that are extracted from MA may have entries in MS, and
- %% those messages are then requeued. However, as discussed above,
- %% this does not affect MS, nor which bits go through to SS in
- %% Master, or MTC in queue_process.
-
- St = [published, confirmed, discarded],
- SS = dict:filter(fun (_MsgId, Status) -> lists:member(Status, St) end, MS),
- AckTags = [AckTag || {_MsgId, AckTag} <- dict:to_list(MA)],
-
- MasterState = rabbit_mirror_queue_master:promote_backing_queue_state(
- QName, CPid, BQ, BQS, GM, AckTags, SS, MPids),
-
- MTC = dict:fold(fun (MsgId, {published, ChPid, MsgSeqNo}, MTC0) ->
- gb_trees:insert(MsgId, {ChPid, MsgSeqNo}, MTC0);
- (_Msgid, _Status, MTC0) ->
- MTC0
- end, gb_trees:empty(), MS),
- Deliveries = [Delivery || {_ChPid, {PubQ, _PendCh}} <- dict:to_list(SQ),
- Delivery <- queue:to_list(PubQ)],
- rabbit_amqqueue_process:init_with_backing_queue_state(
- Q1, rabbit_mirror_queue_master, MasterState, RateTRef, Deliveries, KS,
- MTC).
-
-noreply(State) ->
- {NewState, Timeout} = next_state(State),
- {noreply, ensure_rate_timer(NewState), Timeout}.
-
-reply(Reply, State) ->
- {NewState, Timeout} = next_state(State),
- {reply, Reply, ensure_rate_timer(NewState), Timeout}.
-
-next_state(State = #state{backing_queue = BQ, backing_queue_state = BQS}) ->
- {MsgIds, BQS1} = BQ:drain_confirmed(BQS),
- State1 = confirm_messages(MsgIds,
- State #state { backing_queue_state = BQS1 }),
- case BQ:needs_timeout(BQS1) of
- false -> {stop_sync_timer(State1), hibernate };
- idle -> {stop_sync_timer(State1), ?SYNC_INTERVAL};
- timed -> {ensure_sync_timer(State1), 0 }
- end.
-
-backing_queue_timeout(State = #state { backing_queue = BQ }) ->
- run_backing_queue(BQ, fun (M, BQS) -> M:timeout(BQS) end, State).
-
-ensure_sync_timer(State) ->
- rabbit_misc:ensure_timer(State, #state.sync_timer_ref,
- ?SYNC_INTERVAL, sync_timeout).
-
-stop_sync_timer(State) -> rabbit_misc:stop_timer(State, #state.sync_timer_ref).
-
-ensure_rate_timer(State) ->
- rabbit_misc:ensure_timer(State, #state.rate_timer_ref,
- ?RAM_DURATION_UPDATE_INTERVAL,
- update_ram_duration).
-
-stop_rate_timer(State) -> rabbit_misc:stop_timer(State, #state.rate_timer_ref).
-
-ensure_monitoring(ChPid, State = #state { known_senders = KS }) ->
- State #state { known_senders = pmon:monitor(ChPid, KS) }.
-
-local_sender_death(ChPid, State = #state { known_senders = KS }) ->
- %% The channel will be monitored iff we have received a delivery
- %% from it but not heard about its death from the master. So if it
- %% is monitored we need to point the death out to the master (see
- %% essay).
- ok = case pmon:is_monitored(ChPid, KS) of
- false -> ok;
- true -> confirm_sender_death(ChPid)
- end,
- State.
-
-confirm_sender_death(Pid) ->
- %% We have to deal with the possibility that we'll be promoted to
- %% master before this thing gets run. Consequently we set the
- %% module to rabbit_mirror_queue_master so that if we do become a
- %% rabbit_amqqueue_process before then, sane things will happen.
- Fun =
- fun (?MODULE, State = #state { known_senders = KS,
- gm = GM }) ->
- %% We're running still as a slave
- %%
- %% See comment in local_sender_death/2; we might have
- %% received a sender_death in the meanwhile so check
- %% again.
- ok = case pmon:is_monitored(Pid, KS) of
- false -> ok;
- true -> gm:broadcast(GM, {ensure_monitoring, [Pid]}),
- confirm_sender_death(Pid)
- end,
- State;
- (rabbit_mirror_queue_master, State) ->
- %% We've become a master. State is now opaque to
- %% us. When we became master, if Pid was still known
- %% to us then we'd have set up monitoring of it then,
- %% so this is now a noop.
- State
- end,
- %% Note that we do not remove our knowledge of this ChPid until we
- %% get the sender_death from GM.
- {ok, _TRef} = timer:apply_after(
- ?DEATH_TIMEOUT, rabbit_amqqueue, run_backing_queue,
- [self(), rabbit_mirror_queue_master, Fun]),
- ok.
-
-maybe_enqueue_message(
- Delivery = #delivery { message = #basic_message { id = MsgId },
- sender = ChPid },
- State = #state { sender_queues = SQ, msg_id_status = MS }) ->
- State1 = ensure_monitoring(ChPid, State),
- %% We will never see {published, ChPid, MsgSeqNo} here.
- case dict:find(MsgId, MS) of
- error ->
- {MQ, PendingCh} = get_sender_queue(ChPid, SQ),
- MQ1 = queue:in(Delivery, MQ),
- SQ1 = dict:store(ChPid, {MQ1, PendingCh}, SQ),
- State1 #state { sender_queues = SQ1 };
- {ok, Status} ->
- MS1 = send_or_record_confirm(
- Status, Delivery, dict:erase(MsgId, MS), State1),
- SQ1 = remove_from_pending_ch(MsgId, ChPid, SQ),
- State1 #state { msg_id_status = MS1,
- sender_queues = SQ1 }
- end.
-
-get_sender_queue(ChPid, SQ) ->
- case dict:find(ChPid, SQ) of
- error -> {queue:new(), sets:new()};
- {ok, Val} -> Val
- end.
-
-remove_from_pending_ch(MsgId, ChPid, SQ) ->
- case dict:find(ChPid, SQ) of
- error ->
- SQ;
- {ok, {MQ, PendingCh}} ->
- dict:store(ChPid, {MQ, sets:del_element(MsgId, PendingCh)}, SQ)
- end.
-
-publish_or_discard(Status, ChPid, MsgId,
- State = #state { sender_queues = SQ, msg_id_status = MS }) ->
- %% We really are going to do the publish/discard right now, even
- %% though we may not have seen it directly from the channel. But
- %% we cannot issues confirms until the latter has happened. So we
- %% need to keep track of the MsgId and its confirmation status in
- %% the meantime.
- State1 = ensure_monitoring(ChPid, State),
- {MQ, PendingCh} = get_sender_queue(ChPid, SQ),
- {MQ1, PendingCh1, MS1} =
- case queue:out(MQ) of
- {empty, _MQ2} ->
- {MQ, sets:add_element(MsgId, PendingCh),
- dict:store(MsgId, Status, MS)};
- {{value, Delivery = #delivery {
- message = #basic_message { id = MsgId } }}, MQ2} ->
- {MQ2, PendingCh,
- %% We received the msg from the channel first. Thus
- %% we need to deal with confirms here.
- send_or_record_confirm(Status, Delivery, MS, State1)};
- {{value, #delivery {}}, _MQ2} ->
- %% The instruction was sent to us before we were
- %% within the slave_pids within the #amqqueue{}
- %% record. We'll never receive the message directly
- %% from the channel. And the channel will not be
- %% expecting any confirms from us.
- {MQ, PendingCh, MS}
- end,
- SQ1 = dict:store(ChPid, {MQ1, PendingCh1}, SQ),
- State1 #state { sender_queues = SQ1, msg_id_status = MS1 }.
-
-
-process_instruction({publish, ChPid, MsgProps,
- Msg = #basic_message { id = MsgId }}, State) ->
- State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
- publish_or_discard(published, ChPid, MsgId, State),
- BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, BQS),
- {ok, State1 #state { backing_queue_state = BQS1 }};
-process_instruction({publish_delivered, ChPid, MsgProps,
- Msg = #basic_message { id = MsgId }}, State) ->
- State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
- publish_or_discard(published, ChPid, MsgId, State),
- true = BQ:is_empty(BQS),
- {AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, BQS),
- {ok, maybe_store_ack(true, MsgId, AckTag,
- State1 #state { backing_queue_state = BQS1 })};
-process_instruction({discard, ChPid, MsgId}, State) ->
- State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
- publish_or_discard(discarded, ChPid, MsgId, State),
- BQS1 = BQ:discard(MsgId, ChPid, BQS),
- {ok, State1 #state { backing_queue_state = BQS1 }};
-process_instruction({drop, Length, Dropped, AckRequired},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- QLen = BQ:len(BQS),
- ToDrop = case QLen - Length of
- N when N > 0 -> N;
- _ -> 0
- end,
- State1 = lists:foldl(
- fun (const, StateN = #state{backing_queue_state = BQSN}) ->
- {{MsgId, AckTag}, BQSN1} = BQ:drop(AckRequired, BQSN),
- maybe_store_ack(
- AckRequired, MsgId, AckTag,
- StateN #state { backing_queue_state = BQSN1 })
- end, State, lists:duplicate(ToDrop, const)),
- {ok, case AckRequired of
- true -> State1;
- false -> update_delta(ToDrop - Dropped, State1)
- end};
-process_instruction({ack, MsgIds},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS,
- msg_id_ack = MA }) ->
- {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
- {MsgIds1, BQS1} = BQ:ack(AckTags, BQS),
- [] = MsgIds1 -- MsgIds, %% ASSERTION
- {ok, update_delta(length(MsgIds1) - length(MsgIds),
- State #state { msg_id_ack = MA1,
- backing_queue_state = BQS1 })};
-process_instruction({requeue, MsgIds},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS,
- msg_id_ack = MA }) ->
- {AckTags, MA1} = msg_ids_to_acktags(MsgIds, MA),
- {_MsgIds, BQS1} = BQ:requeue(AckTags, BQS),
- {ok, State #state { msg_id_ack = MA1,
- backing_queue_state = BQS1 }};
-process_instruction({sender_death, ChPid},
- State = #state { sender_queues = SQ,
- msg_id_status = MS,
- known_senders = KS }) ->
- %% The channel will be monitored iff we have received a message
- %% from it. In this case we just want to avoid doing work if we
- %% never got any messages.
- {ok, case pmon:is_monitored(ChPid, KS) of
- false -> State;
- true -> MS1 = case dict:find(ChPid, SQ) of
- error ->
- MS;
- {ok, {_MQ, PendingCh}} ->
- lists:foldl(fun dict:erase/2, MS,
- sets:to_list(PendingCh))
- end,
- credit_flow:peer_down(ChPid),
- State #state { sender_queues = dict:erase(ChPid, SQ),
- msg_id_status = MS1,
- known_senders = pmon:demonitor(ChPid, KS) }
- end};
-process_instruction({depth, Depth},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- {ok, set_delta(Depth - BQ:depth(BQS), State)};
-
-process_instruction({delete_and_terminate, Reason},
- State = #state { backing_queue = BQ,
- backing_queue_state = BQS }) ->
- BQ:delete_and_terminate(Reason, BQS),
- {stop, State #state { backing_queue_state = undefined }}.
-
-msg_ids_to_acktags(MsgIds, MA) ->
- {AckTags, MA1} =
- lists:foldl(
- fun (MsgId, {Acc, MAN}) ->
- case dict:find(MsgId, MA) of
- error -> {Acc, MAN};
- {ok, AckTag} -> {[AckTag | Acc], dict:erase(MsgId, MAN)}
- end
- end, {[], MA}, MsgIds),
- {lists:reverse(AckTags), MA1}.
-
-maybe_store_ack(false, _MsgId, _AckTag, State) ->
- State;
-maybe_store_ack(true, MsgId, AckTag, State = #state { msg_id_ack = MA }) ->
- State #state { msg_id_ack = dict:store(MsgId, AckTag, MA) }.
-
-set_delta(0, State = #state { depth_delta = undefined }) ->
- ok = record_synchronised(State#state.q),
- State #state { depth_delta = 0 };
-set_delta(NewDelta, State = #state { depth_delta = undefined }) ->
- true = NewDelta > 0, %% assertion
- State #state { depth_delta = NewDelta };
-set_delta(NewDelta, State = #state { depth_delta = Delta }) ->
- update_delta(NewDelta - Delta, State).
-
-update_delta(_DeltaChange, State = #state { depth_delta = undefined }) ->
- State;
-update_delta( DeltaChange, State = #state { depth_delta = 0 }) ->
- 0 = DeltaChange, %% assertion: we cannot become unsync'ed
- State;
-update_delta( DeltaChange, State = #state { depth_delta = Delta }) ->
- true = DeltaChange =< 0, %% assertion: we cannot become 'less' sync'ed
- set_delta(Delta + DeltaChange, State #state { depth_delta = undefined }).
-
-update_ram_duration(BQ, BQS) ->
- {RamDuration, BQS1} = BQ:ram_duration(BQS),
- DesiredDuration =
- rabbit_memory_monitor:report_ram_duration(self(), RamDuration),
- BQ:set_ram_duration_target(DesiredDuration, BQS1).
-
-%% [1] - the arrival of this newly synced slave may cause the master to die if
-%% the admin has requested a migration-type change to policy.
-record_synchronised(#amqqueue { name = QName }) ->
- Self = self(),
- case rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:read({rabbit_queue, QName}) of
- [] ->
- ok;
- [Q1 = #amqqueue { sync_slave_pids = SSPids }] ->
- Q2 = Q1#amqqueue{sync_slave_pids = [Self | SSPids]},
- rabbit_mirror_queue_misc:store_updated_slaves(Q2),
- {ok, Q1, Q2}
- end
- end) of
- ok -> ok;
- {ok, Q1, Q2} -> rabbit_mirror_queue_misc:update_mirrors(Q1, Q2) %% [1]
- end.
diff --git a/src/rabbit_mirror_queue_slave_sup.erl b/src/rabbit_mirror_queue_slave_sup.erl
deleted file mode 100644
index 43dbb4e9..00000000
--- a/src/rabbit_mirror_queue_slave_sup.erl
+++ /dev/null
@@ -1,37 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_slave_sup).
-
--behaviour(supervisor2).
-
--export([start_link/0, start_child/2]).
-
--export([init/1]).
-
--include_lib("rabbit.hrl").
-
--define(SERVER, ?MODULE).
-
-start_link() -> supervisor2:start_link({local, ?SERVER}, ?MODULE, []).
-
-start_child(Node, Args) -> supervisor2:start_child({?SERVER, Node}, Args).
-
-init([]) ->
- {ok, {{simple_one_for_one_terminate, 10, 10},
- [{rabbit_mirror_queue_slave,
- {rabbit_mirror_queue_slave, start_link, []},
- temporary, ?MAX_WAIT, worker, [rabbit_mirror_queue_slave]}]}}.
diff --git a/src/rabbit_mirror_queue_sync.erl b/src/rabbit_mirror_queue_sync.erl
deleted file mode 100644
index 61e90105..00000000
--- a/src/rabbit_mirror_queue_sync.erl
+++ /dev/null
@@ -1,260 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2010-2012 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mirror_queue_sync).
-
--include("rabbit.hrl").
-
--export([master_prepare/3, master_go/7, slave/7]).
-
--define(SYNC_PROGRESS_INTERVAL, 1000000).
-
-%% There are three processes around, the master, the syncer and the
-%% slave(s). The syncer is an intermediary, linked to the master in
-%% order to make sure we do not mess with the master's credit flow or
-%% set of monitors.
-%%
-%% Interactions
-%% ------------
-%%
-%% '*' indicates repeating messages. All are standard Erlang messages
-%% except sync_start which is sent over GM to flush out any other
-%% messages that we might have sent that way already. (credit) is the
-%% usual credit_flow bump message every so often.
-%%
-%% Master Syncer Slave(s)
-%% sync_mirrors -> || ||
-%% (from channel) || -- (spawns) --> || ||
-%% || --------- sync_start (over GM) -------> ||
-%% || || <--- sync_ready ---- ||
-%% || || (or) ||
-%% || || <--- sync_deny ----- ||
-%% || <--- ready ---- || ||
-%% || <--- next* ---- || || }
-%% || ---- msg* ----> || || } loop
-%% || || ---- sync_msg* ----> || }
-%% || || <--- (credit)* ----- || }
-%% || <--- next ---- || ||
-%% || ---- done ----> || ||
-%% || || -- sync_complete --> ||
-%% || (Dies) ||
-
--ifdef(use_specs).
-
--type(log_fun() :: fun ((string(), [any()]) -> 'ok')).
--type(bq() :: atom()).
--type(bqs() :: any()).
--type(ack() :: any()).
--type(slave_sync_state() :: {[{rabbit_types:msg_id(), ack()}], timer:tref(),
- bqs()}).
-
--spec(master_prepare/3 :: (reference(), log_fun(), [pid()]) -> pid()).
--spec(master_go/7 :: (pid(), reference(), log_fun(),
- rabbit_mirror_queue_master:stats_fun(),
- rabbit_mirror_queue_master:stats_fun(),
- bq(), bqs()) ->
- {'already_synced', bqs()} | {'ok', bqs()} |
- {'shutdown', any(), bqs()} |
- {'sync_died', any(), bqs()}).
--spec(slave/7 :: (non_neg_integer(), reference(), timer:tref(), pid(),
- bq(), bqs(), fun((bq(), bqs()) -> {timer:tref(), bqs()})) ->
- 'denied' |
- {'ok' | 'failed', slave_sync_state()} |
- {'stop', any(), slave_sync_state()}).
-
--endif.
-
-%% ---------------------------------------------------------------------------
-%% Master
-
-master_prepare(Ref, Log, SPids) ->
- MPid = self(),
- spawn_link(fun () -> syncer(Ref, Log, MPid, SPids) end).
-
-master_go(Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) ->
- Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
- receive
- {'EXIT', Syncer, normal} -> {already_synced, BQS};
- {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
- {ready, Syncer} -> EmitStats({syncing, 0}),
- master_go0(Args, BQ, BQS)
- end.
-
-master_go0(Args, BQ, BQS) ->
- case BQ:fold(fun (Msg, MsgProps, Unacked, Acc) ->
- master_send(Msg, MsgProps, Unacked, Args, Acc)
- end, {0, erlang:now()}, BQS) of
- {{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
- {{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
- {_, BQS1} -> master_done(Args, BQS1)
- end.
-
-master_send(Msg, MsgProps, Unacked,
- {Syncer, Ref, Log, HandleInfo, EmitStats, Parent}, {I, Last}) ->
- T = case timer:now_diff(erlang:now(), Last) > ?SYNC_PROGRESS_INTERVAL of
- true -> EmitStats({syncing, I}),
- Log("~p messages", [I]),
- erlang:now();
- false -> Last
- end,
- HandleInfo({syncing, I}),
- receive
- {'$gen_cast', {set_maximum_since_use, Age}} ->
- ok = file_handle_cache:set_maximum_since_use(Age)
- after 0 ->
- ok
- end,
- receive
- {'$gen_call', From,
- cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
- gen_server2:reply(From, ok),
- {stop, cancelled};
- {next, Ref} -> Syncer ! {msg, Ref, Msg, MsgProps, Unacked},
- {cont, {I + 1, T}};
- {'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
- {'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
- end.
-
-master_done({Syncer, Ref, _Log, _HandleInfo, _EmitStats, Parent}, BQS) ->
- receive
- {next, Ref} -> stop_syncer(Syncer, {done, Ref}),
- {ok, BQS};
- {'EXIT', Parent, Reason} -> {shutdown, Reason, BQS};
- {'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS}
- end.
-
-stop_syncer(Syncer, Msg) ->
- unlink(Syncer),
- Syncer ! Msg,
- receive {'EXIT', Syncer, _} -> ok
- after 0 -> ok
- end.
-
-%% Master
-%% ---------------------------------------------------------------------------
-%% Syncer
-
-syncer(Ref, Log, MPid, SPids) ->
- [erlang:monitor(process, SPid) || SPid <- SPids],
- %% We wait for a reply from the slaves so that we know they are in
- %% a receive block and will thus receive messages we send to them
- %% *without* those messages ending up in their gen_server2 pqueue.
- case [SPid || SPid <- SPids,
- receive
- {sync_ready, Ref, SPid} -> true;
- {sync_deny, Ref, SPid} -> false;
- {'DOWN', _, process, SPid, _} -> false
- end] of
- [] -> Log("all slaves already synced", []);
- SPids1 -> MPid ! {ready, self()},
- Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
- syncer_loop(Ref, MPid, SPids1)
- end.
-
-syncer_loop(Ref, MPid, SPids) ->
- MPid ! {next, Ref},
- receive
- {msg, Ref, Msg, MsgProps, Unacked} ->
- SPids1 = wait_for_credit(SPids),
- [begin
- credit_flow:send(SPid),
- SPid ! {sync_msg, Ref, Msg, MsgProps, Unacked}
- end || SPid <- SPids1],
- syncer_loop(Ref, MPid, SPids1);
- {cancel, Ref} ->
- %% We don't tell the slaves we will die - so when we do
- %% they interpret that as a failure, which is what we
- %% want.
- ok;
- {done, Ref} ->
- [SPid ! {sync_complete, Ref} || SPid <- SPids]
- end.
-
-wait_for_credit(SPids) ->
- case credit_flow:blocked() of
- true -> receive
- {bump_credit, Msg} ->
- credit_flow:handle_bump_msg(Msg),
- wait_for_credit(SPids);
- {'DOWN', _, process, SPid, _} ->
- credit_flow:peer_down(SPid),
- wait_for_credit(lists:delete(SPid, SPids))
- end;
- false -> SPids
- end.
-
-%% Syncer
-%% ---------------------------------------------------------------------------
-%% Slave
-
-slave(0, Ref, _TRef, Syncer, _BQ, _BQS, _UpdateRamDuration) ->
- Syncer ! {sync_deny, Ref, self()},
- denied;
-
-slave(_DD, Ref, TRef, Syncer, BQ, BQS, UpdateRamDuration) ->
- MRef = erlang:monitor(process, Syncer),
- Syncer ! {sync_ready, Ref, self()},
- {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
- slave_sync_loop({Ref, MRef, Syncer, BQ, UpdateRamDuration,
- rabbit_misc:get_parent()}, {[], TRef, BQS1}).
-
-slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
- State = {MA, TRef, BQS}) ->
- receive
- {'DOWN', MRef, process, Syncer, _Reason} ->
- %% If the master dies half way we are not in the usual
- %% half-synced state (with messages nearer the tail of the
- %% queue); instead we have ones nearer the head. If we then
- %% sync with a newly promoted master, or even just receive
- %% messages from it, we have a hole in the middle. So the
- %% only thing to do here is purge.
- {_MsgCount, BQS1} = BQ:purge(BQ:purge_acks(BQS)),
- credit_flow:peer_down(Syncer),
- {failed, {[], TRef, BQS1}};
- {bump_credit, Msg} ->
- credit_flow:handle_bump_msg(Msg),
- slave_sync_loop(Args, State);
- {sync_complete, Ref} ->
- erlang:demonitor(MRef, [flush]),
- credit_flow:peer_down(Syncer),
- {ok, State};
- {'$gen_cast', {set_maximum_since_use, Age}} ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- slave_sync_loop(Args, State);
- {'$gen_cast', {set_ram_duration_target, Duration}} ->
- BQS1 = BQ:set_ram_duration_target(Duration, BQS),
- slave_sync_loop(Args, {MA, TRef, BQS1});
- update_ram_duration ->
- {TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
- slave_sync_loop(Args, {MA, TRef1, BQS1});
- {sync_msg, Ref, Msg, Props, Unacked} ->
- credit_flow:ack(Syncer),
- Props1 = Props#message_properties{needs_confirming = false},
- {MA1, BQS1} =
- case Unacked of
- false -> {MA, BQ:publish(Msg, Props1, true, none, BQS)};
- true -> {AckTag, BQS2} = BQ:publish_delivered(
- Msg, Props1, none, BQS),
- {[{Msg#basic_message.id, AckTag} | MA], BQS2}
- end,
- slave_sync_loop(Args, {MA1, TRef, BQS1});
- {'EXIT', Parent, Reason} ->
- {stop, Reason, State};
- %% If the master throws an exception
- {'$gen_cast', {gm, {delete_and_terminate, Reason}}} ->
- BQ:delete_and_terminate(Reason, BQS),
- {stop, Reason, {[], TRef, undefined}}
- end.
diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl
deleted file mode 100644
index bca9d5ce..00000000
--- a/src/rabbit_misc.erl
+++ /dev/null
@@ -1,1118 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_misc).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([method_record_type/1, polite_pause/0, polite_pause/1]).
--export([die/1, frame_error/2, amqp_error/4, quit/1,
- protocol_error/3, protocol_error/4, protocol_error/1]).
--export([not_found/1, absent/1, assert_args_equivalence/4]).
--export([dirty_read/1]).
--export([table_lookup/2, set_table_value/4]).
--export([r/3, r/2, r_arg/4, rs/1]).
--export([enable_cover/0, report_cover/0]).
--export([enable_cover/1, report_cover/1]).
--export([start_cover/1]).
--export([confirm_to_sender/2]).
--export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1,
- filter_exit_map/2]).
--export([with_user/2, with_user_and_vhost/3]).
--export([execute_mnesia_transaction/1]).
--export([execute_mnesia_transaction/2]).
--export([execute_mnesia_tx_with_tail/1]).
--export([ensure_ok/2]).
--export([tcp_name/3, format_inet_error/1]).
--export([upmap/2, map_in_order/2]).
--export([table_filter/3]).
--export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
--export([format/2, format_many/1, format_stderr/2]).
--export([with_local_io/1, local_info_msg/2]).
--export([unfold/2, ceil/1, queue_fold/3]).
--export([sort_field_table/1]).
--export([pid_to_string/1, string_to_pid/1]).
--export([version_compare/2, version_compare/3]).
--export([version_minor_equivalent/2]).
--export([dict_cons/3, orddict_cons/3, gb_trees_cons/3]).
--export([gb_trees_fold/3, gb_trees_foreach/2]).
--export([parse_arguments/3]).
--export([all_module_attributes/1, build_acyclic_graph/3]).
--export([now_ms/0]).
--export([const_ok/0, const/1]).
--export([ntoa/1, ntoab/1]).
--export([is_process_alive/1]).
--export([pget/2, pget/3, pget_or_die/2, pset/3]).
--export([format_message_queue/2]).
--export([append_rpc_all_nodes/4]).
--export([multi_call/2]).
--export([os_cmd/1]).
--export([gb_sets_difference/2]).
--export([version/0, which_applications/0]).
--export([sequence_error/1]).
--export([json_encode/1, json_decode/1, json_to_term/1, term_to_json/1]).
--export([check_expiry/1]).
--export([base64url/1]).
--export([interval_operation/4]).
--export([ensure_timer/4, stop_timer/2]).
--export([get_parent/0]).
-
-%% Horrible macro to use in guards
--define(IS_BENIGN_EXIT(R),
- R =:= noproc; R =:= noconnection; R =:= nodedown; R =:= normal;
- R =:= shutdown).
-
-%% This is dictated by `erlang:send_after' on which we depend to implement TTL.
--define(MAX_EXPIRY_TIMER, 4294967295).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([resource_name/0, thunk/1]).
-
--type(ok_or_error() :: rabbit_types:ok_or_error(any())).
--type(thunk(T) :: fun(() -> T)).
--type(resource_name() :: binary()).
--type(optdef() :: flag | {option, string()}).
--type(channel_or_connection_exit()
- :: rabbit_types:channel_exit() | rabbit_types:connection_exit()).
--type(digraph_label() :: term()).
--type(graph_vertex_fun() ::
- fun ((atom(), [term()]) -> [{digraph:vertex(), digraph_label()}])).
--type(graph_edge_fun() ::
- fun ((atom(), [term()]) -> [{digraph:vertex(), digraph:vertex()}])).
-
--spec(method_record_type/1 :: (rabbit_framing:amqp_method_record())
- -> rabbit_framing:amqp_method_name()).
--spec(polite_pause/0 :: () -> 'done').
--spec(polite_pause/1 :: (non_neg_integer()) -> 'done').
--spec(die/1 ::
- (rabbit_framing:amqp_exception()) -> channel_or_connection_exit()).
-
--spec(quit/1 :: (integer()) -> no_return()).
-
--spec(frame_error/2 :: (rabbit_framing:amqp_method_name(), binary())
- -> rabbit_types:connection_exit()).
--spec(amqp_error/4 ::
- (rabbit_framing:amqp_exception(), string(), [any()],
- rabbit_framing:amqp_method_name())
- -> rabbit_types:amqp_error()).
--spec(protocol_error/3 :: (rabbit_framing:amqp_exception(), string(), [any()])
- -> channel_or_connection_exit()).
--spec(protocol_error/4 ::
- (rabbit_framing:amqp_exception(), string(), [any()],
- rabbit_framing:amqp_method_name()) -> channel_or_connection_exit()).
--spec(protocol_error/1 ::
- (rabbit_types:amqp_error()) -> channel_or_connection_exit()).
--spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()).
--spec(absent/1 :: (rabbit_types:amqqueue()) -> rabbit_types:channel_exit()).
--spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(),
- rabbit_framing:amqp_table(),
- rabbit_types:r(any()), [binary()]) ->
- 'ok' | rabbit_types:connection_exit()).
--spec(dirty_read/1 ::
- ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')).
--spec(table_lookup/2 ::
- (rabbit_framing:amqp_table(), binary())
- -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}).
--spec(set_table_value/4 ::
- (rabbit_framing:amqp_table(), binary(),
- rabbit_framing:amqp_field_type(), rabbit_framing:amqp_value())
- -> rabbit_framing:amqp_table()).
--spec(r/2 :: (rabbit_types:vhost(), K)
- -> rabbit_types:r3(rabbit_types:vhost(), K, '_')
- when is_subtype(K, atom())).
--spec(r/3 ::
- (rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name())
- -> rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
- when is_subtype(K, atom())).
--spec(r_arg/4 ::
- (rabbit_types:vhost() | rabbit_types:r(atom()), K,
- rabbit_framing:amqp_table(), binary()) ->
- undefined |
- rabbit_types:error(
- {invalid_type, rabbit_framing:amqp_field_type()}) |
- rabbit_types:r(K) when is_subtype(K, atom())).
--spec(rs/1 :: (rabbit_types:r(atom())) -> string()).
--spec(enable_cover/0 :: () -> ok_or_error()).
--spec(start_cover/1 :: ([{string(), string()} | string()]) -> 'ok').
--spec(report_cover/0 :: () -> 'ok').
--spec(enable_cover/1 :: ([file:filename() | atom()]) -> ok_or_error()).
--spec(report_cover/1 :: ([file:filename() | atom()]) -> 'ok').
--spec(throw_on_error/2 ::
- (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A).
--spec(with_exit_handler/2 :: (thunk(A), thunk(A)) -> A).
--spec(is_abnormal_exit/1 :: (any()) -> boolean()).
--spec(filter_exit_map/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(with_user/2 :: (rabbit_types:username(), thunk(A)) -> A).
--spec(with_user_and_vhost/3 ::
- (rabbit_types:username(), rabbit_types:vhost(), thunk(A))
- -> A).
--spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A).
--spec(execute_mnesia_transaction/2 ::
- (thunk(A), fun ((A, boolean()) -> B)) -> B).
--spec(execute_mnesia_tx_with_tail/1 ::
- (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))).
--spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok').
--spec(tcp_name/3 ::
- (atom(), inet:ip_address(), rabbit_networking:ip_port())
- -> atom()).
--spec(format_inet_error/1 :: (atom()) -> string()).
--spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'),
- atom()) -> [A]).
--spec(dirty_read_all/1 :: (atom()) -> [any()]).
--spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom())
- -> 'ok' | 'aborted').
--spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()).
--spec(format/2 :: (string(), [any()]) -> string()).
--spec(format_many/1 :: ([{string(), [any()]}]) -> string()).
--spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
--spec(with_local_io/1 :: (fun (() -> A)) -> A).
--spec(local_info_msg/2 :: (string(), [any()]) -> 'ok').
--spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
--spec(ceil/1 :: (number()) -> integer()).
--spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue()) -> B).
--spec(sort_field_table/1 ::
- (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()).
--spec(pid_to_string/1 :: (pid()) -> string()).
--spec(string_to_pid/1 :: (string()) -> pid()).
--spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt').
--spec(version_compare/3 ::
- (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
- -> boolean()).
--spec(version_minor_equivalent/2 :: (string(), string()) -> boolean()).
--spec(dict_cons/3 :: (any(), any(), dict()) -> dict()).
--spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
--spec(gb_trees_cons/3 :: (any(), any(), gb_tree()) -> gb_tree()).
--spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_tree()) -> A).
--spec(gb_trees_foreach/2 ::
- (fun ((any(), any()) -> any()), gb_tree()) -> 'ok').
--spec(parse_arguments/3 ::
- ([{atom(), [{string(), optdef()}]} | atom()],
- [{string(), optdef()}],
- [string()])
- -> {'ok', {atom(), [{string(), string()}], [string()]}} |
- 'no_command').
--spec(all_module_attributes/1 :: (atom()) -> [{atom(), [term()]}]).
--spec(build_acyclic_graph/3 ::
- (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}])
- -> rabbit_types:ok_or_error2(digraph(),
- {'vertex', 'duplicate', digraph:vertex()} |
- {'edge', ({bad_vertex, digraph:vertex()} |
- {bad_edge, [digraph:vertex()]}),
- digraph:vertex(), digraph:vertex()})).
--spec(now_ms/0 :: () -> non_neg_integer()).
--spec(const_ok/0 :: () -> 'ok').
--spec(const/1 :: (A) -> thunk(A)).
--spec(ntoa/1 :: (inet:ip_address()) -> string()).
--spec(ntoab/1 :: (inet:ip_address()) -> string()).
--spec(is_process_alive/1 :: (pid()) -> boolean()).
--spec(pget/2 :: (term(), [term()]) -> term()).
--spec(pget/3 :: (term(), [term()], term()) -> term()).
--spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()).
--spec(pset/3 :: (term(), term(), [term()]) -> term()).
--spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
--spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
--spec(multi_call/2 ::
- ([pid()], any()) -> {[{pid(), any()}], [{pid(), any()}]}).
--spec(os_cmd/1 :: (string()) -> string()).
--spec(gb_sets_difference/2 :: (gb_set(), gb_set()) -> gb_set()).
--spec(version/0 :: () -> string()).
--spec(which_applications/0 :: () -> [{atom(), string(), string()}]).
--spec(sequence_error/1 :: ([({'error', any()} | any())])
- -> {'error', any()} | any()).
--spec(json_encode/1 :: (any()) -> {'ok', string()} | {'error', any()}).
--spec(json_decode/1 :: (string()) -> {'ok', any()} | 'error').
--spec(json_to_term/1 :: (any()) -> any()).
--spec(term_to_json/1 :: (any()) -> any()).
--spec(check_expiry/1 :: (integer()) -> rabbit_types:ok_or_error(any())).
--spec(base64url/1 :: (binary()) -> string()).
--spec(interval_operation/4 ::
- ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer())
- -> {any(), non_neg_integer()}).
--spec(ensure_timer/4 :: (A, non_neg_integer(), non_neg_integer(), any()) -> A).
--spec(stop_timer/2 :: (A, non_neg_integer()) -> A).
--spec(get_parent/0 :: () -> pid()).
--endif.
-
-%%----------------------------------------------------------------------------
-
-method_record_type(Record) ->
- element(1, Record).
-
-polite_pause() ->
- polite_pause(3000).
-
-polite_pause(N) ->
- receive
- after N -> done
- end.
-
-die(Error) ->
- protocol_error(Error, "~w", [Error]).
-
-frame_error(MethodName, BinaryFields) ->
- protocol_error(frame_error, "cannot decode ~w", [BinaryFields], MethodName).
-
-amqp_error(Name, ExplanationFormat, Params, Method) ->
- Explanation = format(ExplanationFormat, Params),
- #amqp_error{name = Name, explanation = Explanation, method = Method}.
-
-protocol_error(Name, ExplanationFormat, Params) ->
- protocol_error(Name, ExplanationFormat, Params, none).
-
-protocol_error(Name, ExplanationFormat, Params, Method) ->
- protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)).
-
-protocol_error(#amqp_error{} = Error) ->
- exit(Error).
-
-not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]).
-
-absent(#amqqueue{name = QueueName, pid = QPid, durable = true}) ->
- %% The assertion of durability is mainly there because we mention
- %% durability in the error message. That way we will hopefully
- %% notice if at some future point our logic changes s.t. we get
- %% here with non-durable queues.
- protocol_error(not_found,
- "home node '~s' of durable ~s is down or inaccessible",
- [node(QPid), rs(QueueName)]).
-
-type_class(byte) -> int;
-type_class(short) -> int;
-type_class(signedint) -> int;
-type_class(long) -> int;
-type_class(decimal) -> int;
-type_class(float) -> float;
-type_class(double) -> float;
-type_class(Other) -> Other.
-
-assert_args_equivalence(Orig, New, Name, Keys) ->
- [assert_args_equivalence1(Orig, New, Name, Key) || Key <- Keys],
- ok.
-
-assert_args_equivalence1(Orig, New, Name, Key) ->
- {Orig1, New1} = {table_lookup(Orig, Key), table_lookup(New, Key)},
- FailureFun = fun () ->
- protocol_error(precondition_failed, "inequivalent arg '~s'"
- "for ~s: received ~s but current is ~s",
- [Key, rs(Name), val(New1), val(Orig1)])
- end,
- case {Orig1, New1} of
- {Same, Same} ->
- ok;
- {{OrigType, OrigVal}, {NewType, NewVal}} ->
- case type_class(OrigType) == type_class(NewType) andalso
- OrigVal == NewVal of
- true -> ok;
- false -> FailureFun()
- end;
- {_, _} ->
- FailureFun()
- end.
-
-val(undefined) ->
- "none";
-val({Type, Value}) ->
- ValFmt = case is_binary(Value) of
- true -> "~s";
- false -> "~w"
- end,
- format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]).
-
-%% Normally we'd call mnesia:dirty_read/1 here, but that is quite
-%% expensive due to general mnesia overheads (figuring out table types
-%% and locations, etc). We get away with bypassing these because we
-%% know that the tables we are looking at here
-%% - are not the schema table
-%% - have a local ram copy
-%% - do not have any indices
-dirty_read({Table, Key}) ->
- case ets:lookup(Table, Key) of
- [Result] -> {ok, Result};
- [] -> {error, not_found}
- end.
-
-table_lookup(Table, Key) ->
- case lists:keysearch(Key, 1, Table) of
- {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin};
- false -> undefined
- end.
-
-set_table_value(Table, Key, Type, Value) ->
- sort_field_table(
- lists:keystore(Key, 1, Table, {Key, Type, Value})).
-
-r(#resource{virtual_host = VHostPath}, Kind, Name) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = Name};
-r(VHostPath, Kind, Name) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = Name}.
-
-r(VHostPath, Kind) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = '_'}.
-
-r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) ->
- r_arg(VHostPath, Kind, Table, Key);
-r_arg(VHostPath, Kind, Table, Key) ->
- case table_lookup(Table, Key) of
- {longstr, NameBin} -> r(VHostPath, Kind, NameBin);
- undefined -> undefined;
- {Type, _} -> {error, {invalid_type, Type}}
- end.
-
-rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) ->
- format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath]).
-
-enable_cover() -> enable_cover(["."]).
-
-enable_cover(Dirs) ->
- lists:foldl(fun (Dir, ok) ->
- case cover:compile_beam_directory(
- filename:join(lists:concat([Dir]),"ebin")) of
- {error, _} = Err -> Err;
- _ -> ok
- end;
- (_Dir, Err) ->
- Err
- end, ok, Dirs).
-
-start_cover(NodesS) ->
- {ok, _} = cover:start([rabbit_nodes:make(N) || N <- NodesS]),
- ok.
-
-report_cover() -> report_cover(["."]).
-
-report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok.
-
-report_cover1(Root) ->
- Dir = filename:join(Root, "cover"),
- ok = filelib:ensure_dir(filename:join(Dir, "junk")),
- lists:foreach(fun (F) -> file:delete(F) end,
- filelib:wildcard(filename:join(Dir, "*.html"))),
- {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]),
- {CT, NCT} =
- lists:foldl(
- fun (M,{CovTot, NotCovTot}) ->
- {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module),
- ok = report_coverage_percentage(SummaryFile,
- Cov, NotCov, M),
- {ok,_} = cover:analyze_to_file(
- M,
- filename:join(Dir, atom_to_list(M) ++ ".html"),
- [html]),
- {CovTot+Cov, NotCovTot+NotCov}
- end,
- {0, 0},
- lists:sort(cover:modules())),
- ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'),
- ok = file:close(SummaryFile),
- ok.
-
-report_coverage_percentage(File, Cov, NotCov, Mod) ->
- io:fwrite(File, "~6.2f ~p~n",
- [if
- Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov);
- true -> 100.0
- end,
- Mod]).
-
-confirm_to_sender(Pid, MsgSeqNos) ->
- gen_server2:cast(Pid, {confirm, MsgSeqNos, self()}).
-
-%% @doc Halts the emulator returning the given status code to the os.
-%% On Windows this function will block indefinitely so as to give the io
-%% subsystem time to flush stdout completely.
-quit(Status) ->
- case os:type() of
- {unix, _} -> halt(Status);
- {win32, _} -> init:stop(Status),
- receive
- after infinity -> ok
- end
- end.
-
-throw_on_error(E, Thunk) ->
- case Thunk() of
- {error, Reason} -> throw({E, Reason});
- {ok, Res} -> Res;
- Res -> Res
- end.
-
-with_exit_handler(Handler, Thunk) ->
- try
- Thunk()
- catch
- exit:{R, _} when ?IS_BENIGN_EXIT(R) -> Handler();
- exit:{{R, _}, _} when ?IS_BENIGN_EXIT(R) -> Handler()
- end.
-
-is_abnormal_exit(R) when ?IS_BENIGN_EXIT(R) -> false;
-is_abnormal_exit({R, _}) when ?IS_BENIGN_EXIT(R) -> false;
-is_abnormal_exit(_) -> true.
-
-filter_exit_map(F, L) ->
- Ref = make_ref(),
- lists:filter(fun (R) -> R =/= Ref end,
- [with_exit_handler(
- fun () -> Ref end,
- fun () -> F(I) end) || I <- L]).
-
-
-with_user(Username, Thunk) ->
- fun () ->
- case mnesia:read({rabbit_user, Username}) of
- [] ->
- mnesia:abort({no_such_user, Username});
- [_U] ->
- Thunk()
- end
- end.
-
-with_user_and_vhost(Username, VHostPath, Thunk) ->
- with_user(Username, rabbit_vhost:with(VHostPath, Thunk)).
-
-execute_mnesia_transaction(TxFun) ->
- %% Making this a sync_transaction allows us to use dirty_read
- %% elsewhere and get a consistent result even when that read
- %% executes on a different node.
- case worker_pool:submit(
- fun () ->
- case mnesia:is_transaction() of
- false -> DiskLogBefore = mnesia_dumper:get_log_writes(),
- Res = mnesia:sync_transaction(TxFun),
- DiskLogAfter = mnesia_dumper:get_log_writes(),
- case DiskLogAfter == DiskLogBefore of
- true -> Res;
- false -> {sync, Res}
- end;
- true -> mnesia:sync_transaction(TxFun)
- end
- end) of
- {sync, {atomic, Result}} -> mnesia_sync:sync(), Result;
- {sync, {aborted, Reason}} -> throw({error, Reason});
- {atomic, Result} -> Result;
- {aborted, Reason} -> throw({error, Reason})
- end.
-
-%% Like execute_mnesia_transaction/1 with additional Pre- and Post-
-%% commit function
-execute_mnesia_transaction(TxFun, PrePostCommitFun) ->
- case mnesia:is_transaction() of
- true -> throw(unexpected_transaction);
- false -> ok
- end,
- PrePostCommitFun(execute_mnesia_transaction(
- fun () ->
- Result = TxFun(),
- PrePostCommitFun(Result, true),
- Result
- end), false).
-
-%% Like execute_mnesia_transaction/2, but TxFun is expected to return a
-%% TailFun which gets called (only) immediately after the tx commit
-execute_mnesia_tx_with_tail(TxFun) ->
- case mnesia:is_transaction() of
- true -> execute_mnesia_transaction(TxFun);
- false -> TailFun = execute_mnesia_transaction(TxFun),
- TailFun()
- end.
-
-ensure_ok(ok, _) -> ok;
-ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}}).
-
-tcp_name(Prefix, IPAddress, Port)
- when is_atom(Prefix) andalso is_number(Port) ->
- list_to_atom(
- format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port])).
-
-format_inet_error(address) -> "cannot connect to host/port";
-format_inet_error(timeout) -> "timed out";
-format_inet_error(Error) -> inet:format_error(Error).
-
-%% This is a modified version of Luke Gorrie's pmap -
-%% http://lukego.livejournal.com/6753.html - that doesn't care about
-%% the order in which results are received.
-%%
-%% WARNING: This is is deliberately lightweight rather than robust -- if F
-%% throws, upmap will hang forever, so make sure F doesn't throw!
-upmap(F, L) ->
- Parent = self(),
- Ref = make_ref(),
- [receive {Ref, Result} -> Result end
- || _ <- [spawn(fun () -> Parent ! {Ref, F(X)} end) || X <- L]].
-
-map_in_order(F, L) ->
- lists:reverse(
- lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)).
-
-%% Apply a pre-post-commit function to all entries in a table that
-%% satisfy a predicate, and return those entries.
-%%
-%% We ignore entries that have been modified or removed.
-table_filter(Pred, PrePostCommitFun, TableName) ->
- lists:foldl(
- fun (E, Acc) ->
- case execute_mnesia_transaction(
- fun () -> mnesia:match_object(TableName, E, read) =/= []
- andalso Pred(E) end,
- fun (false, _Tx) -> false;
- (true, Tx) -> PrePostCommitFun(E, Tx), true
- end) of
- false -> Acc;
- true -> [E | Acc]
- end
- end, [], dirty_read_all(TableName)).
-
-dirty_read_all(TableName) ->
- mnesia:dirty_select(TableName, [{'$1',[],['$1']}]).
-
-dirty_foreach_key(F, TableName) ->
- dirty_foreach_key1(F, TableName, mnesia:dirty_first(TableName)).
-
-dirty_foreach_key1(_F, _TableName, '$end_of_table') ->
- ok;
-dirty_foreach_key1(F, TableName, K) ->
- case catch mnesia:dirty_next(TableName, K) of
- {'EXIT', _} ->
- aborted;
- NextKey ->
- F(K),
- dirty_foreach_key1(F, TableName, NextKey)
- end.
-
-dirty_dump_log(FileName) ->
- {ok, LH} = disk_log:open([{name, dirty_dump_log},
- {mode, read_only},
- {file, FileName}]),
- dirty_dump_log1(LH, disk_log:chunk(LH, start)),
- disk_log:close(LH).
-
-dirty_dump_log1(_LH, eof) ->
- io:format("Done.~n");
-dirty_dump_log1(LH, {K, Terms}) ->
- io:format("Chunk: ~p~n", [Terms]),
- dirty_dump_log1(LH, disk_log:chunk(LH, K));
-dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
- io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
- dirty_dump_log1(LH, disk_log:chunk(LH, K)).
-
-format(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)).
-
-format_many(List) ->
- lists:flatten([io_lib:format(F ++ "~n", A) || {F, A} <- List]).
-
-format_stderr(Fmt, Args) ->
- case os:type() of
- {unix, _} ->
- Port = open_port({fd, 0, 2}, [out]),
- port_command(Port, io_lib:format(Fmt, Args)),
- port_close(Port);
- {win32, _} ->
- %% stderr on Windows is buffered and I can't figure out a
- %% way to trigger a fflush(stderr) in Erlang. So rather
- %% than risk losing output we write to stdout instead,
- %% which appears to be unbuffered.
- io:format(Fmt, Args)
- end,
- ok.
-
-%% Execute Fun using the IO system of the local node (i.e. the node on
-%% which the code is executing).
-with_local_io(Fun) ->
- GL = group_leader(),
- group_leader(whereis(user), self()),
- try
- Fun()
- after
- group_leader(GL, self())
- end.
-
-%% Log an info message on the local node using the standard logger.
-%% Use this if rabbit isn't running and the call didn't originate on
-%% the local node (e.g. rabbitmqctl calls).
-local_info_msg(Format, Args) ->
- with_local_io(fun () -> error_logger:info_msg(Format, Args) end).
-
-unfold(Fun, Init) ->
- unfold(Fun, [], Init).
-
-unfold(Fun, Acc, Init) ->
- case Fun(Init) of
- {true, E, I} -> unfold(Fun, [E|Acc], I);
- false -> {Acc, Init}
- end.
-
-ceil(N) ->
- T = trunc(N),
- case N == T of
- true -> T;
- false -> 1 + T
- end.
-
-queue_fold(Fun, Init, Q) ->
- case queue:out(Q) of
- {empty, _Q} -> Init;
- {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
- end.
-
-%% Sorts a list of AMQP table fields as per the AMQP spec
-sort_field_table(Arguments) ->
- lists:keysort(1, Arguments).
-
-%% This provides a string representation of a pid that is the same
-%% regardless of what node we are running on. The representation also
-%% permits easy identification of the pid's node.
-pid_to_string(Pid) when is_pid(Pid) ->
- %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
- %% 8.7)
- <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>>
- = term_to_binary(Pid),
- Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>),
- format("<~w.~B.~B.~B>", [Node, Cre, Id, Ser]).
-
-%% inverse of above
-string_to_pid(Str) ->
- Err = {error, {invalid_pid_syntax, Str}},
- %% The \ before the trailing $ is only there to keep emacs
- %% font-lock from getting confused.
- case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
- [{capture,all_but_first,list}]) of
- {match, [NodeStr, CreStr, IdStr, SerStr]} ->
- %% the NodeStr atom might be quoted, so we have to parse
- %% it rather than doing a simple list_to_atom
- NodeAtom = case erl_scan:string(NodeStr) of
- {ok, [{atom, _, X}], _} -> X;
- {error, _, _} -> throw(Err)
- end,
- <<131,NodeEnc/binary>> = term_to_binary(NodeAtom),
- [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
- [CreStr, IdStr, SerStr]),
- binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>);
- nomatch ->
- throw(Err)
- end.
-
-version_compare(A, B, lte) ->
- case version_compare(A, B) of
- eq -> true;
- lt -> true;
- gt -> false
- end;
-version_compare(A, B, gte) ->
- case version_compare(A, B) of
- eq -> true;
- gt -> true;
- lt -> false
- end;
-version_compare(A, B, Result) ->
- Result =:= version_compare(A, B).
-
-version_compare(A, A) ->
- eq;
-version_compare([], [$0 | B]) ->
- version_compare([], dropdot(B));
-version_compare([], _) ->
- lt; %% 2.3 < 2.3.1
-version_compare([$0 | A], []) ->
- version_compare(dropdot(A), []);
-version_compare(_, []) ->
- gt; %% 2.3.1 > 2.3
-version_compare(A, B) ->
- {AStr, ATl} = lists:splitwith(fun (X) -> X =/= $. end, A),
- {BStr, BTl} = lists:splitwith(fun (X) -> X =/= $. end, B),
- ANum = list_to_integer(AStr),
- BNum = list_to_integer(BStr),
- if ANum =:= BNum -> version_compare(dropdot(ATl), dropdot(BTl));
- ANum < BNum -> lt;
- ANum > BNum -> gt
- end.
-
-%% a.b.c and a.b.d match, but a.b.c and a.d.e don't. If
-%% versions do not match that pattern, just compare them.
-version_minor_equivalent(A, B) ->
- {ok, RE} = re:compile("^(\\d+\\.\\d+)(\\.\\d+)\$"),
- Opts = [{capture, all_but_first, list}],
- case {re:run(A, RE, Opts), re:run(B, RE, Opts)} of
- {{match, [A1|_]}, {match, [B1|_]}} -> A1 =:= B1;
- _ -> A =:= B
- end.
-
-dropdot(A) -> lists:dropwhile(fun (X) -> X =:= $. end, A).
-
-dict_cons(Key, Value, Dict) ->
- dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
-
-orddict_cons(Key, Value, Dict) ->
- orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
-
-gb_trees_cons(Key, Value, Tree) ->
- case gb_trees:lookup(Key, Tree) of
- {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
- none -> gb_trees:insert(Key, [Value], Tree)
- end.
-
-gb_trees_fold(Fun, Acc, Tree) ->
- gb_trees_fold1(Fun, Acc, gb_trees:next(gb_trees:iterator(Tree))).
-
-gb_trees_fold1(_Fun, Acc, none) ->
- Acc;
-gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
- gb_trees_fold1(Fun, Fun(Key, Val, Acc), gb_trees:next(It)).
-
-gb_trees_foreach(Fun, Tree) ->
- gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).
-
-%% Takes:
-%% * A list of [{atom(), [{string(), optdef()]} | atom()], where the atom()s
-%% are the accepted commands and the optional [string()] is the list of
-%% accepted options for that command
-%% * A list [{string(), optdef()}] of options valid for all commands
-%% * The list of arguments given by the user
-%%
-%% Returns either {ok, {atom(), [{string(), string()}], [string()]} which are
-%% respectively the command, the key-value pairs of the options and the leftover
-%% arguments; or no_command if no command could be parsed.
-parse_arguments(Commands, GlobalDefs, As) ->
- lists:foldl(maybe_process_opts(GlobalDefs, As), no_command, Commands).
-
-maybe_process_opts(GDefs, As) ->
- fun({C, Os}, no_command) ->
- process_opts(atom_to_list(C), dict:from_list(GDefs ++ Os), As);
- (C, no_command) ->
- (maybe_process_opts(GDefs, As))({C, []}, no_command);
- (_, {ok, Res}) ->
- {ok, Res}
- end.
-
-process_opts(C, Defs, As0) ->
- KVs0 = dict:map(fun (_, flag) -> false;
- (_, {option, V}) -> V
- end, Defs),
- process_opts(Defs, C, As0, not_found, KVs0, []).
-
-%% Consume flags/options until you find the correct command. If there are no
-%% arguments or the first argument is not the command we're expecting, fail.
-%% Arguments to this are: definitions, cmd we're looking for, args we
-%% haven't parsed, whether we have found the cmd, options we've found,
-%% plain args we've found.
-process_opts(_Defs, C, [], found, KVs, Outs) ->
- {ok, {list_to_atom(C), dict:to_list(KVs), lists:reverse(Outs)}};
-process_opts(_Defs, _C, [], not_found, _, _) ->
- no_command;
-process_opts(Defs, C, [A | As], Found, KVs, Outs) ->
- OptType = case dict:find(A, Defs) of
- error -> none;
- {ok, flag} -> flag;
- {ok, {option, _}} -> option
- end,
- case {OptType, C, Found} of
- {flag, _, _} -> process_opts(
- Defs, C, As, Found, dict:store(A, true, KVs),
- Outs);
- {option, _, _} -> case As of
- [] -> no_command;
- [V | As1] -> process_opts(
- Defs, C, As1, Found,
- dict:store(A, V, KVs), Outs)
- end;
- {none, A, _} -> process_opts(Defs, C, As, found, KVs, Outs);
- {none, _, found} -> process_opts(Defs, C, As, found, KVs, [A | Outs]);
- {none, _, _} -> no_command
- end.
-
-now_ms() ->
- timer:now_diff(now(), {0,0,0}) div 1000.
-
-module_attributes(Module) ->
- case catch Module:module_info(attributes) of
- {'EXIT', {undef, [{Module, module_info, _} | _]}} ->
- io:format("WARNING: module ~p not found, so not scanned for boot steps.~n",
- [Module]),
- [];
- {'EXIT', Reason} ->
- exit(Reason);
- V ->
- V
- end.
-
-all_module_attributes(Name) ->
- Modules =
- lists:usort(
- lists:append(
- [Modules || {App, _, _} <- application:loaded_applications(),
- {ok, Modules} <- [application:get_key(App, modules)]])),
- lists:foldl(
- fun (Module, Acc) ->
- case lists:append([Atts || {N, Atts} <- module_attributes(Module),
- N =:= Name]) of
- [] -> Acc;
- Atts -> [{Module, Atts} | Acc]
- end
- end, [], Modules).
-
-
-build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
- G = digraph:new([acyclic]),
- try
- [case digraph:vertex(G, Vertex) of
- false -> digraph:add_vertex(G, Vertex, Label);
- _ -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
- end || {Module, Atts} <- Graph,
- {Vertex, Label} <- VertexFun(Module, Atts)],
- [case digraph:add_edge(G, From, To) of
- {error, E} -> throw({graph_error, {edge, E, From, To}});
- _ -> ok
- end || {Module, Atts} <- Graph,
- {From, To} <- EdgeFun(Module, Atts)],
- {ok, G}
- catch {graph_error, Reason} ->
- true = digraph:delete(G),
- {error, Reason}
- end.
-
-const_ok() -> ok.
-const(X) -> fun () -> X end.
-
-%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see
-%% when IPv6 is enabled but not used (i.e. 99% of the time).
-ntoa({0,0,0,0,0,16#ffff,AB,CD}) ->
- inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256});
-ntoa(IP) ->
- inet_parse:ntoa(IP).
-
-ntoab(IP) ->
- Str = ntoa(IP),
- case string:str(Str, ":") of
- 0 -> Str;
- _ -> "[" ++ Str ++ "]"
- end.
-
-is_process_alive(Pid) ->
- rpc:call(node(Pid), erlang, is_process_alive, [Pid]) =:= true.
-
-pget(K, P) -> proplists:get_value(K, P).
-pget(K, P, D) -> proplists:get_value(K, P, D).
-
-pget_or_die(K, P) ->
- case proplists:get_value(K, P) of
- undefined -> exit({error, key_missing, K});
- V -> V
- end.
-
-pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
-
-format_message_queue(_Opt, MQ) ->
- Len = priority_queue:len(MQ),
- {Len,
- case Len > 100 of
- false -> priority_queue:to_list(MQ);
- true -> {summary,
- orddict:to_list(
- lists:foldl(
- fun ({P, V}, Counts) ->
- orddict:update_counter(
- {P, format_message_queue_entry(V)}, 1, Counts)
- end, orddict:new(), priority_queue:to_list(MQ)))}
- end}.
-
-format_message_queue_entry(V) when is_atom(V) ->
- V;
-format_message_queue_entry(V) when is_tuple(V) ->
- list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
-format_message_queue_entry(_V) ->
- '_'.
-
-append_rpc_all_nodes(Nodes, M, F, A) ->
- {ResL, _} = rpc:multicall(Nodes, M, F, A),
- lists:append([case Res of
- {badrpc, _} -> [];
- _ -> Res
- end || Res <- ResL]).
-
-%% A simplified version of gen_server:multi_call/2 with a sane
-%% API. This is not in gen_server2 as there is no useful
-%% infrastructure there to share.
-multi_call(Pids, Req) ->
- MonitorPids = [start_multi_call(Pid, Req) || Pid <- Pids],
- receive_multi_call(MonitorPids, [], []).
-
-start_multi_call(Pid, Req) when is_pid(Pid) ->
- Mref = erlang:monitor(process, Pid),
- Pid ! {'$gen_call', {self(), Mref}, Req},
- {Mref, Pid}.
-
-receive_multi_call([], Good, Bad) ->
- {lists:reverse(Good), lists:reverse(Bad)};
-receive_multi_call([{Mref, Pid} | MonitorPids], Good, Bad) ->
- receive
- {Mref, Reply} ->
- erlang:demonitor(Mref, [flush]),
- receive_multi_call(MonitorPids, [{Pid, Reply} | Good], Bad);
- {'DOWN', Mref, _, _, noconnection} ->
- receive_multi_call(MonitorPids, Good, [{Pid, nodedown} | Bad]);
- {'DOWN', Mref, _, _, Reason} ->
- receive_multi_call(MonitorPids, Good, [{Pid, Reason} | Bad])
- end.
-
-os_cmd(Command) ->
- case os:type() of
- {win32, _} ->
- %% Clink workaround; see
- %% http://code.google.com/p/clink/issues/detail?id=141
- os:cmd(" " ++ Command);
- _ ->
- %% Don't just return "/bin/sh: <cmd>: not found" if not found
- Exec = hd(string:tokens(Command, " ")),
- case os:find_executable(Exec) of
- false -> throw({command_not_found, Exec});
- _ -> os:cmd(Command)
- end
- end.
-
-gb_sets_difference(S1, S2) ->
- gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
-
-version() ->
- {ok, VSN} = application:get_key(rabbit, vsn),
- VSN.
-
-%% application:which_applications(infinity) is dangerous, since it can
-%% cause deadlocks on shutdown. So we have to use a timeout variant,
-%% but w/o creating spurious timeout errors.
-which_applications() ->
- try
- application:which_applications()
- catch
- exit:{timeout, _} -> []
- end.
-
-sequence_error([T]) -> T;
-sequence_error([{error, _} = Error | _]) -> Error;
-sequence_error([_ | Rest]) -> sequence_error(Rest).
-
-json_encode(Term) ->
- try
- {ok, mochijson2:encode(Term)}
- catch
- exit:{json_encode, E} ->
- {error, E}
- end.
-
-json_decode(Term) ->
- try
- {ok, mochijson2:decode(Term)}
- catch
- %% Sadly `mochijson2:decode/1' does not offer a nice way to catch
- %% decoding errors...
- error:_ -> error
- end.
-
-json_to_term({struct, L}) ->
- [{K, json_to_term(V)} || {K, V} <- L];
-json_to_term(L) when is_list(L) ->
- [json_to_term(I) || I <- L];
-json_to_term(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
- V =:= true orelse V =:= false ->
- V.
-
-%% This has the flaw that empty lists will never be JSON objects, so use with
-%% care.
-term_to_json([{_, _}|_] = L) ->
- {struct, [{K, term_to_json(V)} || {K, V} <- L]};
-term_to_json(L) when is_list(L) ->
- [term_to_json(I) || I <- L];
-term_to_json(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
- V =:= true orelse V =:= false ->
- V.
-
-check_expiry(N) when N > ?MAX_EXPIRY_TIMER -> {error, {value_too_big, N}};
-check_expiry(N) when N < 0 -> {error, {value_negative, N}};
-check_expiry(_N) -> ok.
-
-base64url(In) ->
- lists:reverse(lists:foldl(fun ($\+, Acc) -> [$\- | Acc];
- ($\/, Acc) -> [$\_ | Acc];
- ($\=, Acc) -> Acc;
- (Chr, Acc) -> [Chr | Acc]
- end, [], base64:encode_to_string(In))).
-
-%% Ideally, you'd want Fun to run every IdealInterval. but you don't
-%% want it to take more than MaxRatio of IdealInterval. So if it takes
-%% more then you want to run it less often. So we time how long it
-%% takes to run, and then suggest how long you should wait before
-%% running it again. Times are in millis.
-interval_operation({M, F, A}, MaxRatio, IdealInterval, LastInterval) ->
- {Micros, Res} = timer:tc(M, F, A),
- {Res, case {Micros > 1000 * (MaxRatio * IdealInterval),
- Micros > 1000 * (MaxRatio * LastInterval)} of
- {true, true} -> round(LastInterval * 1.5);
- {true, false} -> LastInterval;
- {false, false} -> lists:max([IdealInterval,
- round(LastInterval / 1.5)])
- end}.
-
-ensure_timer(State, Idx, After, Msg) ->
- case element(Idx, State) of
- undefined -> TRef = erlang:send_after(After, self(), Msg),
- setelement(Idx, State, TRef);
- _ -> State
- end.
-
-stop_timer(State, Idx) ->
- case element(Idx, State) of
- undefined -> State;
- TRef -> case erlang:cancel_timer(TRef) of
- false -> State;
- _ -> setelement(Idx, State, undefined)
- end
- end.
-
-%% -------------------------------------------------------------------------
-%% Begin copypasta from gen_server2.erl
-
-get_parent() ->
- case get('$ancestors') of
- [Parent | _] when is_pid (Parent) -> Parent;
- [Parent | _] when is_atom(Parent) -> name_to_pid(Parent);
- _ -> exit(process_was_not_started_by_proc_lib)
- end.
-
-name_to_pid(Name) ->
- case whereis(Name) of
- undefined -> case whereis_name(Name) of
- undefined -> exit(could_not_find_registerd_name);
- Pid -> Pid
- end;
- Pid -> Pid
- end.
-
-whereis_name(Name) ->
- case ets:lookup(global_names, Name) of
- [{_Name, Pid, _Method, _RPid, _Ref}] ->
- if node(Pid) == node() -> case erlang:is_process_alive(Pid) of
- true -> Pid;
- false -> undefined
- end;
- true -> Pid
- end;
- [] -> undefined
- end.
-
-%% End copypasta from gen_server2.erl
-%% -------------------------------------------------------------------------
diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl
deleted file mode 100644
index 5fa29b7e..00000000
--- a/src/rabbit_mnesia.erl
+++ /dev/null
@@ -1,889 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_mnesia).
-
--export([init/0,
- join_cluster/2,
- reset/0,
- force_reset/0,
- update_cluster_nodes/1,
- change_cluster_node_type/1,
- forget_cluster_node/2,
-
- status/0,
- is_clustered/0,
- cluster_nodes/1,
- node_type/0,
- dir/0,
- cluster_status_from_mnesia/0,
-
- init_db_unchecked/2,
- copy_db/1,
- check_cluster_consistency/0,
- ensure_mnesia_dir/0,
-
- on_node_up/1,
- on_node_down/1
- ]).
-
-%% Used internally in rpc calls
--export([node_info/0, remove_node_if_mnesia_running/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([node_type/0, cluster_status/0]).
-
--type(node_type() :: disc | ram).
--type(cluster_status() :: {[node()], [node()], [node()]}).
-
-%% Main interface
--spec(init/0 :: () -> 'ok').
--spec(join_cluster/2 :: (node(), node_type())
- -> 'ok' | {'ok', 'already_member'}).
--spec(reset/0 :: () -> 'ok').
--spec(force_reset/0 :: () -> 'ok').
--spec(update_cluster_nodes/1 :: (node()) -> 'ok').
--spec(change_cluster_node_type/1 :: (node_type()) -> 'ok').
--spec(forget_cluster_node/2 :: (node(), boolean()) -> 'ok').
-
-%% Various queries to get the status of the db
--spec(status/0 :: () -> [{'nodes', [{node_type(), [node()]}]} |
- {'running_nodes', [node()]} |
- {'partitions', [{node(), [node()]}]}]).
--spec(is_clustered/0 :: () -> boolean()).
--spec(cluster_nodes/1 :: ('all' | 'disc' | 'ram' | 'running') -> [node()]).
--spec(node_type/0 :: () -> node_type()).
--spec(dir/0 :: () -> file:filename()).
--spec(cluster_status_from_mnesia/0 :: () -> rabbit_types:ok_or_error2(
- cluster_status(), any())).
-
-%% Operations on the db and utils, mainly used in `rabbit_upgrade' and `rabbit'
--spec(init_db_unchecked/2 :: ([node()], node_type()) -> 'ok').
--spec(copy_db/1 :: (file:filename()) -> rabbit_types:ok_or_error(any())).
--spec(check_cluster_consistency/0 :: () -> 'ok').
--spec(ensure_mnesia_dir/0 :: () -> 'ok').
-
-%% Hooks used in `rabbit_node_monitor'
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Main interface
-%%----------------------------------------------------------------------------
-
-init() ->
- ensure_mnesia_running(),
- ensure_mnesia_dir(),
- case is_virgin_node() of
- true -> init_from_config();
- false -> NodeType = node_type(),
- init_db_and_upgrade(cluster_nodes(all), NodeType,
- NodeType =:= ram)
- end,
- %% We intuitively expect the global name server to be synced when
- %% Mnesia is up. In fact that's not guaranteed to be the case -
- %% let's make it so.
- ok = global:sync(),
- ok.
-
-init_from_config() ->
- {TryNodes, NodeType} =
- case application:get_env(rabbit, cluster_nodes) of
- {ok, Nodes} when is_list(Nodes) ->
- Config = {Nodes -- [node()], case lists:member(node(), Nodes) of
- true -> disc;
- false -> ram
- end},
- error_logger:warning_msg(
- "Converting legacy 'cluster_nodes' configuration~n ~w~n"
- "to~n ~w.~n~n"
- "Please update the configuration to the new format "
- "{Nodes, NodeType}, where Nodes contains the nodes that the "
- "node will try to cluster with, and NodeType is either "
- "'disc' or 'ram'~n", [Nodes, Config]),
- Config;
- {ok, Config} ->
- Config
- end,
- case find_good_node(nodes_excl_me(TryNodes)) of
- {ok, Node} ->
- rabbit_log:info("Node '~p' selected for clustering from "
- "configuration~n", [Node]),
- {ok, {_, DiscNodes, _}} = discover_cluster(Node),
- init_db_and_upgrade(DiscNodes, NodeType, true),
- rabbit_node_monitor:notify_joined_cluster();
- none ->
- rabbit_log:warning("Could not find any suitable node amongst the "
- "ones provided in the configuration: ~p~n",
- [TryNodes]),
- init_db_and_upgrade([node()], disc, false)
- end.
-
-%% Make the node join a cluster. The node will be reset automatically
-%% before we actually cluster it. The nodes provided will be used to
-%% find out about the nodes in the cluster.
-%%
-%% This function will fail if:
-%%
-%% * The node is currently the only disc node of its cluster
-%% * We can't connect to any of the nodes provided
-%% * The node is currently already clustered with the cluster of the nodes
-%% provided
-%%
-%% Note that we make no attempt to verify that the nodes provided are
-%% all in the same cluster, we simply pick the first online node and
-%% we cluster to its cluster.
-join_cluster(DiscoveryNode, NodeType) ->
- ensure_mnesia_not_running(),
- ensure_mnesia_dir(),
- case is_only_clustered_disc_node() of
- true -> e(clustering_only_disc_node);
- false -> ok
- end,
- {ClusterNodes, _, _} = case discover_cluster(DiscoveryNode) of
- {ok, Res} -> Res;
- {error, _} = E -> throw(E)
- end,
- case me_in_nodes(ClusterNodes) of
- false ->
- %% reset the node. this simplifies things and it will be needed in
- %% this case - we're joining a new cluster with new nodes which
- %% are not in synch with the current node. I also lifts the burden
- %% of reseting the node from the user.
- reset_gracefully(),
-
- %% Join the cluster
- rabbit_misc:local_info_msg("Clustering with ~p as ~p node~n",
- [ClusterNodes, NodeType]),
- ok = init_db_with_mnesia(ClusterNodes, NodeType, true, true),
- rabbit_node_monitor:notify_joined_cluster(),
- ok;
- true ->
- rabbit_misc:local_info_msg("Already member of cluster: ~p~n",
- [ClusterNodes]),
- {ok, already_member}
- end.
-
-%% return node to its virgin state, where it is not member of any
-%% cluster, has no cluster configuration, no local database, and no
-%% persisted messages
-reset() ->
- ensure_mnesia_not_running(),
- rabbit_misc:local_info_msg("Resetting Rabbit~n", []),
- reset_gracefully().
-
-force_reset() ->
- ensure_mnesia_not_running(),
- rabbit_misc:local_info_msg("Resetting Rabbit forcefully~n", []),
- wipe().
-
-reset_gracefully() ->
- AllNodes = cluster_nodes(all),
- %% Reconnecting so that we will get an up to date nodes. We don't
- %% need to check for consistency because we are resetting.
- %% Force=true here so that reset still works when clustered with a
- %% node which is down.
- init_db_with_mnesia(AllNodes, node_type(), false, false),
- case is_only_clustered_disc_node() of
- true -> e(resetting_only_disc_node);
- false -> ok
- end,
- leave_cluster(),
- rabbit_misc:ensure_ok(mnesia:delete_schema([node()]), cannot_delete_schema),
- wipe().
-
-wipe() ->
- %% We need to make sure that we don't end up in a distributed
- %% Erlang system with nodes while not being in an Mnesia cluster
- %% with them. We don't handle that well.
- [erlang:disconnect_node(N) || N <- cluster_nodes(all)],
- %% remove persisted messages and any other garbage we find
- ok = rabbit_file:recursive_delete(filelib:wildcard(dir() ++ "/*")),
- ok = rabbit_node_monitor:reset_cluster_status(),
- ok.
-
-change_cluster_node_type(Type) ->
- ensure_mnesia_not_running(),
- ensure_mnesia_dir(),
- case is_clustered() of
- false -> e(not_clustered);
- true -> ok
- end,
- {_, _, RunningNodes} = case discover_cluster(cluster_nodes(all)) of
- {ok, Status} -> Status;
- {error, _Reason} -> e(cannot_connect_to_cluster)
- end,
- %% We might still be marked as running by a remote node since the
- %% information of us going down might not have propagated yet.
- Node = case RunningNodes -- [node()] of
- [] -> e(no_online_cluster_nodes);
- [Node0|_] -> Node0
- end,
- ok = reset(),
- ok = join_cluster(Node, Type).
-
-update_cluster_nodes(DiscoveryNode) ->
- ensure_mnesia_not_running(),
- ensure_mnesia_dir(),
- Status = {AllNodes, _, _} =
- case discover_cluster(DiscoveryNode) of
- {ok, Status0} -> Status0;
- {error, _Reason} -> e(cannot_connect_to_node)
- end,
- case me_in_nodes(AllNodes) of
- true ->
- %% As in `check_consistency/0', we can safely delete the
- %% schema here, since it'll be replicated from the other
- %% nodes
- mnesia:delete_schema([node()]),
- rabbit_node_monitor:write_cluster_status(Status),
- rabbit_misc:local_info_msg("Updating cluster nodes from ~p~n",
- [DiscoveryNode]),
- init_db_with_mnesia(AllNodes, node_type(), true, true);
- false ->
- e(inconsistent_cluster)
- end,
- ok.
-
-%% We proceed like this: try to remove the node locally. If the node
-%% is offline, we remove the node if:
-%% * This node is a disc node
-%% * All other nodes are offline
-%% * This node was, at the best of our knowledge (see comment below)
-%% the last or second to last after the node we're removing to go
-%% down
-forget_cluster_node(Node, RemoveWhenOffline) ->
- case lists:member(Node, cluster_nodes(all)) of
- true -> ok;
- false -> e(not_a_cluster_node)
- end,
- case {RemoveWhenOffline, is_running()} of
- {true, false} -> remove_node_offline_node(Node);
- {true, true} -> e(online_node_offline_flag);
- {false, false} -> e(offline_node_no_offline_flag);
- {false, true} -> rabbit_misc:local_info_msg(
- "Removing node ~p from cluster~n", [Node]),
- case remove_node_if_mnesia_running(Node) of
- ok -> ok;
- {error, _} = Err -> throw(Err)
- end
- end.
-
-remove_node_offline_node(Node) ->
- %% Here `mnesia:system_info(running_db_nodes)' will RPC, but that's what we
- %% want - we need to know the running nodes *now*. If the current node is a
- %% RAM node it will return bogus results, but we don't care since we only do
- %% this operation from disc nodes.
- case {mnesia:system_info(running_db_nodes) -- [Node], node_type()} of
- {[], disc} ->
- start_mnesia(),
- try
- %% What we want to do here is replace the last node to
- %% go down with the current node. The way we do this
- %% is by force loading the table, and making sure that
- %% they are loaded.
- rabbit_table:force_load(),
- rabbit_table:wait_for_replicated(),
- forget_cluster_node(Node, false),
- force_load_next_boot()
- after
- stop_mnesia()
- end;
- {_, _} ->
- e(removing_node_from_offline_node)
- end.
-
-
-%%----------------------------------------------------------------------------
-%% Queries
-%%----------------------------------------------------------------------------
-
-status() ->
- IfNonEmpty = fun (_, []) -> [];
- (Type, Nodes) -> [{Type, Nodes}]
- end,
- [{nodes, (IfNonEmpty(disc, cluster_nodes(disc)) ++
- IfNonEmpty(ram, cluster_nodes(ram)))}] ++
- case is_running() of
- true -> RunningNodes = cluster_nodes(running),
- [{running_nodes, RunningNodes},
- {partitions, mnesia_partitions(RunningNodes)}];
- false -> []
- end.
-
-mnesia_partitions(Nodes) ->
- Replies = rabbit_node_monitor:partitions(Nodes),
- [Reply || Reply = {_, R} <- Replies, R =/= []].
-
-is_running() -> mnesia:system_info(is_running) =:= yes.
-
-is_clustered() -> AllNodes = cluster_nodes(all),
- AllNodes =/= [] andalso AllNodes =/= [node()].
-
-cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
-
-%% This function is the actual source of information, since it gets
-%% the data from mnesia. Obviously it'll work only when mnesia is
-%% running.
-cluster_status_from_mnesia() ->
- case is_running() of
- false ->
- {error, mnesia_not_running};
- true ->
- %% If the tables are not present, it means that
- %% `init_db/3' hasn't been run yet. In other words, either
- %% we are a virgin node or a restarted RAM node. In both
- %% cases we're not interested in what mnesia has to say.
- NodeType = case mnesia:system_info(use_dir) of
- true -> disc;
- false -> ram
- end,
- case rabbit_table:is_present() of
- true -> AllNodes = mnesia:system_info(db_nodes),
- DiscCopies = mnesia:table_info(schema, disc_copies),
- DiscNodes = case NodeType of
- disc -> nodes_incl_me(DiscCopies);
- ram -> DiscCopies
- end,
- %% `mnesia:system_info(running_db_nodes)' is safe since
- %% we know that mnesia is running
- RunningNodes = mnesia:system_info(running_db_nodes),
- {ok, {AllNodes, DiscNodes, RunningNodes}};
- false -> {error, tables_not_present}
- end
- end.
-
-cluster_status(WhichNodes) ->
- {AllNodes, DiscNodes, RunningNodes} = Nodes =
- case cluster_status_from_mnesia() of
- {ok, Nodes0} ->
- Nodes0;
- {error, _Reason} ->
- {AllNodes0, DiscNodes0, RunningNodes0} =
- rabbit_node_monitor:read_cluster_status(),
- %% The cluster status file records the status when the node is
- %% online, but we know for sure that the node is offline now, so
- %% we can remove it from the list of running nodes.
- {AllNodes0, DiscNodes0, nodes_excl_me(RunningNodes0)}
- end,
- case WhichNodes of
- status -> Nodes;
- all -> AllNodes;
- disc -> DiscNodes;
- ram -> AllNodes -- DiscNodes;
- running -> RunningNodes
- end.
-
-node_info() ->
- {erlang:system_info(otp_release), rabbit_misc:version(),
- delegate_beam_hash(), cluster_status_from_mnesia()}.
-
-node_type() ->
- DiscNodes = cluster_nodes(disc),
- case DiscNodes =:= [] orelse me_in_nodes(DiscNodes) of
- true -> disc;
- false -> ram
- end.
-
-dir() -> mnesia:system_info(directory).
-
-%%----------------------------------------------------------------------------
-%% Operations on the db
-%%----------------------------------------------------------------------------
-
-%% Adds the provided nodes to the mnesia cluster, creating a new
-%% schema if there is the need to and catching up if there are other
-%% nodes in the cluster already. It also updates the cluster status
-%% file.
-init_db(ClusterNodes, NodeType, CheckOtherNodes) ->
- Nodes = change_extra_db_nodes(ClusterNodes, CheckOtherNodes),
- %% Note that we use `system_info' here and not the cluster status
- %% since when we start rabbit for the first time the cluster
- %% status will say we are a disc node but the tables won't be
- %% present yet.
- WasDiscNode = mnesia:system_info(use_dir),
- case {Nodes, WasDiscNode, NodeType} of
- {[], _, ram} ->
- %% Standalone ram node, we don't want that
- throw({error, cannot_create_standalone_ram_node});
- {[], false, disc} ->
- %% RAM -> disc, starting from scratch
- ok = create_schema();
- {[], true, disc} ->
- %% First disc node up
- maybe_force_load(),
- ok;
- {[AnotherNode | _], _, _} ->
- %% Subsequent node in cluster, catch up
- ensure_version_ok(
- rpc:call(AnotherNode, rabbit_version, recorded, [])),
- maybe_force_load(),
- ok = rabbit_table:wait_for_replicated(),
- ok = rabbit_table:create_local_copy(NodeType)
- end,
- ensure_schema_integrity(),
- rabbit_node_monitor:update_cluster_status(),
- ok.
-
-init_db_unchecked(ClusterNodes, NodeType) ->
- init_db(ClusterNodes, NodeType, false).
-
-init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes) ->
- ok = init_db(ClusterNodes, NodeType, CheckOtherNodes),
- ok = case rabbit_upgrade:maybe_upgrade_local() of
- ok -> ok;
- starting_from_scratch -> rabbit_version:record_desired();
- version_not_available -> schema_ok_or_move()
- end,
- %% `maybe_upgrade_local' restarts mnesia, so ram nodes will forget
- %% about the cluster
- case NodeType of
- ram -> start_mnesia(),
- change_extra_db_nodes(ClusterNodes, false),
- rabbit_table:wait_for_replicated();
- disc -> ok
- end,
- ok.
-
-init_db_with_mnesia(ClusterNodes, NodeType,
- CheckOtherNodes, CheckConsistency) ->
- start_mnesia(CheckConsistency),
- try
- init_db_and_upgrade(ClusterNodes, NodeType, CheckOtherNodes)
- after
- stop_mnesia()
- end.
-
-ensure_mnesia_dir() ->
- MnesiaDir = dir() ++ "/",
- case filelib:ensure_dir(MnesiaDir) of
- {error, Reason} ->
- throw({error, {cannot_create_mnesia_dir, MnesiaDir, Reason}});
- ok ->
- ok
- end.
-
-ensure_mnesia_running() ->
- case mnesia:system_info(is_running) of
- yes ->
- ok;
- starting ->
- wait_for(mnesia_running),
- ensure_mnesia_running();
- Reason when Reason =:= no; Reason =:= stopping ->
- throw({error, mnesia_not_running})
- end.
-
-ensure_mnesia_not_running() ->
- case mnesia:system_info(is_running) of
- no ->
- ok;
- stopping ->
- wait_for(mnesia_not_running),
- ensure_mnesia_not_running();
- Reason when Reason =:= yes; Reason =:= starting ->
- throw({error, mnesia_unexpectedly_running})
- end.
-
-ensure_schema_integrity() ->
- case rabbit_table:check_schema_integrity() of
- ok ->
- ok;
- {error, Reason} ->
- throw({error, {schema_integrity_check_failed, Reason}})
- end.
-
-copy_db(Destination) ->
- ok = ensure_mnesia_not_running(),
- rabbit_file:recursive_copy(dir(), Destination).
-
-force_load_filename() ->
- filename:join(rabbit_mnesia:dir(), "force_load").
-
-force_load_next_boot() ->
- rabbit_file:write_file(force_load_filename(), <<"">>).
-
-maybe_force_load() ->
- case rabbit_file:is_file(force_load_filename()) of
- true -> rabbit_table:force_load(),
- rabbit_file:delete(force_load_filename());
- false -> ok
- end.
-
-%% This does not guarantee us much, but it avoids some situations that
-%% will definitely end up badly
-check_cluster_consistency() ->
- %% We want to find 0 or 1 consistent nodes.
- case lists:foldl(
- fun (Node, {error, _}) -> check_cluster_consistency(Node);
- (_Node, {ok, Status}) -> {ok, Status}
- end, {error, not_found}, nodes_excl_me(cluster_nodes(all)))
- of
- {ok, Status = {RemoteAllNodes, _, _}} ->
- case ordsets:is_subset(ordsets:from_list(cluster_nodes(all)),
- ordsets:from_list(RemoteAllNodes)) of
- true ->
- ok;
- false ->
- %% We delete the schema here since we think we are
- %% clustered with nodes that are no longer in the
- %% cluster and there is no other way to remove
- %% them from our schema. On the other hand, we are
- %% sure that there is another online node that we
- %% can use to sync the tables with. There is a
- %% race here: if between this check and the
- %% `init_db' invocation the cluster gets
- %% disbanded, we're left with a node with no
- %% mnesia data that will try to connect to offline
- %% nodes.
- mnesia:delete_schema([node()])
- end,
- rabbit_node_monitor:write_cluster_status(Status);
- {error, not_found} ->
- ok;
- {error, _} = E ->
- throw(E)
- end.
-
-check_cluster_consistency(Node) ->
- case rpc:call(Node, rabbit_mnesia, node_info, []) of
- {badrpc, _Reason} ->
- {error, not_found};
- {_OTP, _Rabbit, _Hash, {error, _}} ->
- {error, not_found};
- {_OTP, Rabbit, _Status} ->
- %% pre-2013/04 format implies version mismatch
- version_error("Rabbit", rabbit_misc:version(), Rabbit);
- {OTP, Rabbit, Hash, {ok, Status}} ->
- case check_consistency(OTP, Rabbit, Hash, Node, Status) of
- {error, _} = E -> E;
- {ok, Res} -> {ok, Res}
- end
- end.
-
-%%--------------------------------------------------------------------
-%% Hooks for `rabbit_node_monitor'
-%%--------------------------------------------------------------------
-
-on_node_up(Node) ->
- case running_disc_nodes() of
- [Node] -> rabbit_log:info("cluster contains disc nodes again~n");
- _ -> ok
- end.
-
-on_node_down(_Node) ->
- case running_disc_nodes() of
- [] -> rabbit_log:info("only running disc node went down~n");
- _ -> ok
- end.
-
-running_disc_nodes() ->
- {_AllNodes, DiscNodes, RunningNodes} = cluster_status(status),
- ordsets:to_list(ordsets:intersection(ordsets:from_list(DiscNodes),
- ordsets:from_list(RunningNodes))).
-
-%%--------------------------------------------------------------------
-%% Internal helpers
-%%--------------------------------------------------------------------
-
-discover_cluster(Nodes) when is_list(Nodes) ->
- lists:foldl(fun (_, {ok, Res}) -> {ok, Res};
- (Node, {error, _}) -> discover_cluster(Node)
- end, {error, no_nodes_provided}, Nodes);
-discover_cluster(Node) when Node == node() ->
- {error, {cannot_discover_cluster, "Cannot cluster node with itself"}};
-discover_cluster(Node) ->
- OfflineError =
- {error, {cannot_discover_cluster,
- "The nodes provided are either offline or not running"}},
- case rpc:call(Node, rabbit_mnesia, cluster_status_from_mnesia, []) of
- {badrpc, _Reason} -> OfflineError;
- {error, mnesia_not_running} -> OfflineError;
- {ok, Res} -> {ok, Res}
- end.
-
-schema_ok_or_move() ->
- case rabbit_table:check_schema_integrity() of
- ok ->
- ok;
- {error, Reason} ->
- %% NB: we cannot use rabbit_log here since it may not have been
- %% started yet
- error_logger:warning_msg("schema integrity check failed: ~p~n"
- "moving database to backup location "
- "and recreating schema from scratch~n",
- [Reason]),
- ok = move_db(),
- ok = create_schema()
- end.
-
-ensure_version_ok({ok, DiscVersion}) ->
- DesiredVersion = rabbit_version:desired(),
- case rabbit_version:matches(DesiredVersion, DiscVersion) of
- true -> ok;
- false -> throw({error, {version_mismatch, DesiredVersion, DiscVersion}})
- end;
-ensure_version_ok({error, _}) ->
- ok = rabbit_version:record_desired().
-
-%% We only care about disc nodes since ram nodes are supposed to catch
-%% up only
-create_schema() ->
- stop_mnesia(),
- rabbit_misc:ensure_ok(mnesia:create_schema([node()]), cannot_create_schema),
- start_mnesia(),
- ok = rabbit_table:create(),
- ensure_schema_integrity(),
- ok = rabbit_version:record_desired().
-
-move_db() ->
- stop_mnesia(),
- MnesiaDir = filename:dirname(dir() ++ "/"),
- {{Year, Month, Day}, {Hour, Minute, Second}} = erlang:universaltime(),
- BackupDir = rabbit_misc:format(
- "~s_~w~2..0w~2..0w~2..0w~2..0w~2..0w",
- [MnesiaDir, Year, Month, Day, Hour, Minute, Second]),
- case file:rename(MnesiaDir, BackupDir) of
- ok ->
- %% NB: we cannot use rabbit_log here since it may not have
- %% been started yet
- error_logger:warning_msg("moved database from ~s to ~s~n",
- [MnesiaDir, BackupDir]),
- ok;
- {error, Reason} -> throw({error, {cannot_backup_mnesia,
- MnesiaDir, BackupDir, Reason}})
- end,
- ensure_mnesia_dir(),
- start_mnesia(),
- ok.
-
-remove_node_if_mnesia_running(Node) ->
- case is_running() of
- false ->
- {error, mnesia_not_running};
- true ->
- %% Deleting the the schema copy of the node will result in
- %% the node being removed from the cluster, with that
- %% change being propagated to all nodes
- case mnesia:del_table_copy(schema, Node) of
- {atomic, ok} ->
- rabbit_amqqueue:forget_all_durable(Node),
- rabbit_node_monitor:notify_left_cluster(Node),
- ok;
- {aborted, Reason} ->
- {error, {failed_to_remove_node, Node, Reason}}
- end
- end.
-
-leave_cluster() ->
- case nodes_excl_me(cluster_nodes(all)) of
- [] -> ok;
- AllNodes -> case lists:any(fun leave_cluster/1, AllNodes) of
- true -> ok;
- false -> e(no_running_cluster_nodes)
- end
- end.
-
-leave_cluster(Node) ->
- case rpc:call(Node,
- rabbit_mnesia, remove_node_if_mnesia_running, [node()]) of
- ok -> true;
- {error, mnesia_not_running} -> false;
- {error, Reason} -> throw({error, Reason});
- {badrpc, nodedown} -> false
- end.
-
-wait_for(Condition) ->
- error_logger:info_msg("Waiting for ~p...~n", [Condition]),
- timer:sleep(1000).
-
-start_mnesia(CheckConsistency) ->
- case CheckConsistency of
- true -> check_cluster_consistency();
- false -> ok
- end,
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
- ensure_mnesia_running().
-
-start_mnesia() ->
- start_mnesia(true).
-
-stop_mnesia() ->
- stopped = mnesia:stop(),
- ensure_mnesia_not_running().
-
-change_extra_db_nodes(ClusterNodes0, CheckOtherNodes) ->
- ClusterNodes = nodes_excl_me(ClusterNodes0),
- case {mnesia:change_config(extra_db_nodes, ClusterNodes), ClusterNodes} of
- {{ok, []}, [_|_]} when CheckOtherNodes ->
- throw({error, {failed_to_cluster_with, ClusterNodes,
- "Mnesia could not connect to any nodes."}});
- {{ok, Nodes}, _} ->
- Nodes
- end.
-
-check_consistency(OTP, Rabbit, Hash) ->
- rabbit_misc:sequence_error(
- [check_otp_consistency(OTP),
- check_rabbit_consistency(Rabbit),
- check_beam_compatibility(Hash)]).
-
-check_consistency(OTP, Rabbit, Hash, Node, Status) ->
- rabbit_misc:sequence_error(
- [check_otp_consistency(OTP),
- check_rabbit_consistency(Rabbit),
- check_beam_compatibility(Hash),
- check_nodes_consistency(Node, Status)]).
-
-check_nodes_consistency(Node, RemoteStatus = {RemoteAllNodes, _, _}) ->
- case me_in_nodes(RemoteAllNodes) of
- true ->
- {ok, RemoteStatus};
- false ->
- {error, {inconsistent_cluster,
- rabbit_misc:format("Node ~p thinks it's clustered "
- "with node ~p, but ~p disagrees",
- [node(), Node, Node])}}
- end.
-
-check_version_consistency(This, Remote, Name) ->
- check_version_consistency(This, Remote, Name, fun (A, B) -> A =:= B end).
-
-check_version_consistency(This, Remote, Name, Comp) ->
- case Comp(This, Remote) of
- true -> ok;
- false -> version_error(Name, This, Remote)
- end.
-
-version_error(Name, This, Remote) ->
- {error, {inconsistent_cluster,
- rabbit_misc:format("~s version mismatch: local node is ~s, "
- "remote node ~s", [Name, This, Remote])}}.
-
-check_otp_consistency(Remote) ->
- check_version_consistency(erlang:system_info(otp_release), Remote, "OTP").
-
-%% Unlike the rest of 3.0.x, 3.0.0 is not compatible. This can be
-%% removed after 3.1.0 is released.
-check_rabbit_consistency("3.0.0") ->
- version_error("Rabbit", rabbit_misc:version(), "3.0.0");
-
-check_rabbit_consistency(Remote) ->
- check_version_consistency(
- rabbit_misc:version(), Remote, "Rabbit",
- fun rabbit_misc:version_minor_equivalent/2).
-
-check_beam_compatibility(RemoteHash) ->
- case RemoteHash == delegate_beam_hash() of
- true -> ok;
- false -> {error, {incompatible_bytecode,
- "Incompatible Erlang bytecode found on nodes"}}
- end.
-
-%% The delegate module sends functions across the cluster; if it is
-%% out of sync (say due to mixed compilers), we will get badfun
-%% exceptions when trying to do so. Let's detect that at startup.
-delegate_beam_hash() ->
- {delegate, Obj, _} = code:get_object_code(delegate),
- {ok, {delegate, Hash}} = beam_lib:md5(Obj),
- Hash.
-
-%% This is fairly tricky. We want to know if the node is in the state
-%% that a `reset' would leave it in. We cannot simply check if the
-%% mnesia tables aren't there because restarted RAM nodes won't have
-%% tables while still being non-virgin. What we do instead is to
-%% check if the mnesia directory is non existant or empty, with the
-%% exception of the cluster status files, which will be there thanks to
-%% `rabbit_node_monitor:prepare_cluster_status_file/0'.
-is_virgin_node() ->
- case rabbit_file:list_dir(dir()) of
- {error, enoent} ->
- true;
- {ok, []} ->
- true;
- {ok, [File1, File2]} ->
- lists:usort([dir() ++ "/" ++ File1, dir() ++ "/" ++ File2]) =:=
- lists:usort([rabbit_node_monitor:cluster_status_filename(),
- rabbit_node_monitor:running_nodes_filename()]);
- {ok, _} ->
- false
- end.
-
-find_good_node([]) ->
- none;
-find_good_node([Node | Nodes]) ->
- case rpc:call(Node, rabbit_mnesia, node_info, []) of
- {badrpc, _Reason} -> find_good_node(Nodes);
- {_OTP, _Rabbit, _} -> find_good_node(Nodes);
- {OTP, Rabbit, Hash, _} -> case check_consistency(OTP, Rabbit, Hash) of
- {error, _} -> find_good_node(Nodes);
- ok -> {ok, Node}
- end
- end.
-
-is_only_clustered_disc_node() ->
- node_type() =:= disc andalso is_clustered() andalso
- cluster_nodes(disc) =:= [node()].
-
-me_in_nodes(Nodes) -> lists:member(node(), Nodes).
-
-nodes_incl_me(Nodes) -> lists:usort([node()|Nodes]).
-
-nodes_excl_me(Nodes) -> Nodes -- [node()].
-
-e(Tag) -> throw({error, {Tag, error_description(Tag)}}).
-
-error_description(clustering_only_disc_node) ->
- "You cannot cluster a node if it is the only disc node in its existing "
- " cluster. If new nodes joined while this node was offline, use "
- "'update_cluster_nodes' to add them manually.";
-error_description(resetting_only_disc_node) ->
- "You cannot reset a node when it is the only disc node in a cluster. "
- "Please convert another node of the cluster to a disc node first.";
-error_description(not_clustered) ->
- "Non-clustered nodes can only be disc nodes.";
-error_description(cannot_connect_to_cluster) ->
- "Could not connect to the cluster nodes present in this node's "
- "status file. If the cluster has changed, you can use the "
- "'update_cluster_nodes' command to point to the new cluster nodes.";
-error_description(no_online_cluster_nodes) ->
- "Could not find any online cluster nodes. If the cluster has changed, "
- "you can use the 'update_cluster_nodes' command.";
-error_description(cannot_connect_to_node) ->
- "Could not connect to the cluster node provided.";
-error_description(inconsistent_cluster) ->
- "The nodes provided do not have this node as part of the cluster.";
-error_description(not_a_cluster_node) ->
- "The node selected is not in the cluster.";
-error_description(online_node_offline_flag) ->
- "You set the --offline flag, which is used to remove nodes remotely from "
- "offline nodes, but this node is online.";
-error_description(offline_node_no_offline_flag) ->
- "You are trying to remove a node from an offline node. That is dangerous, "
- "but can be done with the --offline flag. Please consult the manual "
- "for rabbitmqctl for more information.";
-error_description(removing_node_from_offline_node) ->
- "To remove a node remotely from an offline node, the node you are removing "
- "from must be a disc node and all the other nodes must be offline.";
-error_description(no_running_cluster_nodes) ->
- "You cannot leave a cluster if no online nodes are present.".
diff --git a/src/rabbit_msg_file.erl b/src/rabbit_msg_file.erl
deleted file mode 100644
index a37106d6..00000000
--- a/src/rabbit_msg_file.erl
+++ /dev/null
@@ -1,125 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_file).
-
--export([append/3, read/2, scan/4]).
-
-%%----------------------------------------------------------------------------
-
--include("rabbit_msg_store.hrl").
-
--define(INTEGER_SIZE_BYTES, 8).
--define(INTEGER_SIZE_BITS, (8 * ?INTEGER_SIZE_BYTES)).
--define(WRITE_OK_SIZE_BITS, 8).
--define(WRITE_OK_MARKER, 255).
--define(FILE_PACKING_ADJUSTMENT, (1 + ?INTEGER_SIZE_BYTES)).
--define(MSG_ID_SIZE_BYTES, 16).
--define(MSG_ID_SIZE_BITS, (8 * ?MSG_ID_SIZE_BYTES)).
--define(SCAN_BLOCK_SIZE, 4194304). %% 4MB
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(io_device() :: any()).
--type(position() :: non_neg_integer()).
--type(msg_size() :: non_neg_integer()).
--type(file_size() :: non_neg_integer()).
--type(message_accumulator(A) ::
- fun (({rabbit_types:msg_id(), msg_size(), position(), binary()}, A) ->
- A)).
-
--spec(append/3 :: (io_device(), rabbit_types:msg_id(), msg()) ->
- rabbit_types:ok_or_error2(msg_size(), any())).
--spec(read/2 :: (io_device(), msg_size()) ->
- rabbit_types:ok_or_error2({rabbit_types:msg_id(), msg()},
- any())).
--spec(scan/4 :: (io_device(), file_size(), message_accumulator(A), A) ->
- {'ok', A, position()}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-append(FileHdl, MsgId, MsgBody)
- when is_binary(MsgId) andalso size(MsgId) =:= ?MSG_ID_SIZE_BYTES ->
- MsgBodyBin = term_to_binary(MsgBody),
- MsgBodyBinSize = size(MsgBodyBin),
- Size = MsgBodyBinSize + ?MSG_ID_SIZE_BYTES,
- case file_handle_cache:append(FileHdl,
- <<Size:?INTEGER_SIZE_BITS,
- MsgId:?MSG_ID_SIZE_BYTES/binary,
- MsgBodyBin:MsgBodyBinSize/binary,
- ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>) of
- ok -> {ok, Size + ?FILE_PACKING_ADJUSTMENT};
- KO -> KO
- end.
-
-read(FileHdl, TotalSize) ->
- Size = TotalSize - ?FILE_PACKING_ADJUSTMENT,
- BodyBinSize = Size - ?MSG_ID_SIZE_BYTES,
- case file_handle_cache:read(FileHdl, TotalSize) of
- {ok, <<Size:?INTEGER_SIZE_BITS,
- MsgId:?MSG_ID_SIZE_BYTES/binary,
- MsgBodyBin:BodyBinSize/binary,
- ?WRITE_OK_MARKER:?WRITE_OK_SIZE_BITS>>} ->
- {ok, {MsgId, binary_to_term(MsgBodyBin)}};
- KO -> KO
- end.
-
-scan(FileHdl, FileSize, Fun, Acc) when FileSize >= 0 ->
- scan(FileHdl, FileSize, <<>>, 0, 0, Fun, Acc).
-
-scan(_FileHdl, FileSize, _Data, FileSize, ScanOffset, _Fun, Acc) ->
- {ok, Acc, ScanOffset};
-scan(FileHdl, FileSize, Data, ReadOffset, ScanOffset, Fun, Acc) ->
- Read = lists:min([?SCAN_BLOCK_SIZE, (FileSize - ReadOffset)]),
- case file_handle_cache:read(FileHdl, Read) of
- {ok, Data1} ->
- {Data2, Acc1, ScanOffset1} =
- scanner(<<Data/binary, Data1/binary>>, ScanOffset, Fun, Acc),
- ReadOffset1 = ReadOffset + size(Data1),
- scan(FileHdl, FileSize, Data2, ReadOffset1, ScanOffset1, Fun, Acc1);
- _KO ->
- {ok, Acc, ScanOffset}
- end.
-
-scanner(<<>>, Offset, _Fun, Acc) ->
- {<<>>, Acc, Offset};
-scanner(<<0:?INTEGER_SIZE_BITS, _Rest/binary>>, Offset, _Fun, Acc) ->
- {<<>>, Acc, Offset}; %% Nothing to do other than stop.
-scanner(<<Size:?INTEGER_SIZE_BITS, MsgIdAndMsg:Size/binary,
- WriteMarker:?WRITE_OK_SIZE_BITS, Rest/binary>>, Offset, Fun, Acc) ->
- TotalSize = Size + ?FILE_PACKING_ADJUSTMENT,
- case WriteMarker of
- ?WRITE_OK_MARKER ->
- %% Here we take option 5 from
- %% http://www.erlang.org/cgi-bin/ezmlm-cgi?2:mss:1569 in
- %% which we read the MsgId as a number, and then convert it
- %% back to a binary in order to work around bugs in
- %% Erlang's GC.
- <<MsgIdNum:?MSG_ID_SIZE_BITS, Msg/binary>> =
- <<MsgIdAndMsg:Size/binary>>,
- <<MsgId:?MSG_ID_SIZE_BYTES/binary>> =
- <<MsgIdNum:?MSG_ID_SIZE_BITS>>,
- scanner(Rest, Offset + TotalSize, Fun,
- Fun({MsgId, TotalSize, Offset, Msg}, Acc));
- _ ->
- scanner(Rest, Offset + TotalSize, Fun, Acc)
- end;
-scanner(Data, Offset, _Fun, Acc) ->
- {Data, Acc, Offset}.
diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl
deleted file mode 100644
index 9a4439a7..00000000
--- a/src/rabbit_msg_store.erl
+++ /dev/null
@@ -1,2066 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_store).
-
--behaviour(gen_server2).
-
--export([start_link/4, successfully_recovered_state/1,
- client_init/4, client_terminate/1, client_delete_and_terminate/1,
- client_ref/1, close_all_indicated/1,
- write/3, write_flow/3, read/2, contains/2, remove/2]).
-
--export([set_maximum_since_use/2, has_readers/2, combine_files/3,
- delete_file/2]). %% internal
-
--export([transform_dir/3, force_recovery/2]). %% upgrade
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3, prioritise_call/4, prioritise_cast/3,
- prioritise_info/3, format_message_queue/2]).
-
-%%----------------------------------------------------------------------------
-
--include("rabbit_msg_store.hrl").
-
--define(SYNC_INTERVAL, 25). %% milliseconds
--define(CLEAN_FILENAME, "clean.dot").
--define(FILE_SUMMARY_FILENAME, "file_summary.ets").
--define(TRANSFORM_TMP, "transform_tmp").
-
--define(BINARY_MODE, [raw, binary]).
--define(READ_MODE, [read]).
--define(READ_AHEAD_MODE, [read_ahead | ?READ_MODE]).
--define(WRITE_MODE, [write]).
-
--define(FILE_EXTENSION, ".rdq").
--define(FILE_EXTENSION_TMP, ".rdt").
-
--define(HANDLE_CACHE_BUFFER_SIZE, 1048576). %% 1MB
-
- %% i.e. two pairs, so GC does not go idle when busy
--define(MAXIMUM_SIMULTANEOUS_GC_FILES, 4).
-
-%%----------------------------------------------------------------------------
-
--record(msstate,
- { dir, %% store directory
- index_module, %% the module for index ops
- index_state, %% where are messages?
- current_file, %% current file name as number
- current_file_handle, %% current file handle since the last fsync?
- file_handle_cache, %% file handle cache
- sync_timer_ref, %% TRef for our interval timer
- sum_valid_data, %% sum of valid data in all files
- sum_file_size, %% sum of file sizes
- pending_gc_completion, %% things to do once GC completes
- gc_pid, %% pid of our GC
- file_handles_ets, %% tid of the shared file handles table
- file_summary_ets, %% tid of the file summary table
- cur_file_cache_ets, %% tid of current file cache table
- flying_ets, %% tid of writes/removes in flight
- dying_clients, %% set of dying clients
- clients, %% map of references of all registered clients
- %% to callbacks
- successfully_recovered, %% boolean: did we recover state?
- file_size_limit, %% how big are our files allowed to get?
- cref_to_msg_ids %% client ref to synced messages mapping
- }).
-
--record(client_msstate,
- { server,
- client_ref,
- file_handle_cache,
- index_state,
- index_module,
- dir,
- gc_pid,
- file_handles_ets,
- file_summary_ets,
- cur_file_cache_ets,
- flying_ets
- }).
-
--record(file_summary,
- {file, valid_total_size, left, right, file_size, locked, readers}).
-
--record(gc_state,
- { dir,
- index_module,
- index_state,
- file_summary_ets,
- file_handles_ets,
- msg_store
- }).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([gc_state/0, file_num/0]).
-
--type(gc_state() :: #gc_state { dir :: file:filename(),
- index_module :: atom(),
- index_state :: any(),
- file_summary_ets :: ets:tid(),
- file_handles_ets :: ets:tid(),
- msg_store :: server()
- }).
-
--type(server() :: pid() | atom()).
--type(client_ref() :: binary()).
--type(file_num() :: non_neg_integer()).
--type(client_msstate() :: #client_msstate {
- server :: server(),
- client_ref :: client_ref(),
- file_handle_cache :: dict(),
- index_state :: any(),
- index_module :: atom(),
- dir :: file:filename(),
- gc_pid :: pid(),
- file_handles_ets :: ets:tid(),
- file_summary_ets :: ets:tid(),
- cur_file_cache_ets :: ets:tid(),
- flying_ets :: ets:tid()}).
--type(msg_ref_delta_gen(A) ::
- fun ((A) -> 'finished' |
- {rabbit_types:msg_id(), non_neg_integer(), A})).
--type(maybe_msg_id_fun() ::
- 'undefined' | fun ((gb_set(), 'written' | 'ignored') -> any())).
--type(maybe_close_fds_fun() :: 'undefined' | fun (() -> 'ok')).
--type(deletion_thunk() :: fun (() -> boolean())).
-
--spec(start_link/4 ::
- (atom(), file:filename(), [binary()] | 'undefined',
- {msg_ref_delta_gen(A), A}) -> rabbit_types:ok_pid_or_error()).
--spec(successfully_recovered_state/1 :: (server()) -> boolean()).
--spec(client_init/4 :: (server(), client_ref(), maybe_msg_id_fun(),
- maybe_close_fds_fun()) -> client_msstate()).
--spec(client_terminate/1 :: (client_msstate()) -> 'ok').
--spec(client_delete_and_terminate/1 :: (client_msstate()) -> 'ok').
--spec(client_ref/1 :: (client_msstate()) -> client_ref()).
--spec(close_all_indicated/1 ::
- (client_msstate()) -> rabbit_types:ok(client_msstate())).
--spec(write/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
--spec(write_flow/3 :: (rabbit_types:msg_id(), msg(), client_msstate()) -> 'ok').
--spec(read/2 :: (rabbit_types:msg_id(), client_msstate()) ->
- {rabbit_types:ok(msg()) | 'not_found', client_msstate()}).
--spec(contains/2 :: (rabbit_types:msg_id(), client_msstate()) -> boolean()).
--spec(remove/2 :: ([rabbit_types:msg_id()], client_msstate()) -> 'ok').
-
--spec(set_maximum_since_use/2 :: (server(), non_neg_integer()) -> 'ok').
--spec(has_readers/2 :: (non_neg_integer(), gc_state()) -> boolean()).
--spec(combine_files/3 :: (non_neg_integer(), non_neg_integer(), gc_state()) ->
- deletion_thunk()).
--spec(delete_file/2 :: (non_neg_integer(), gc_state()) -> deletion_thunk()).
--spec(force_recovery/2 :: (file:filename(), server()) -> 'ok').
--spec(transform_dir/3 :: (file:filename(), server(),
- fun ((any()) -> (rabbit_types:ok_or_error2(msg(), any())))) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% We run GC whenever (garbage / sum_file_size) > ?GARBAGE_FRACTION
-%% It is not recommended to set this to < 0.5
--define(GARBAGE_FRACTION, 0.5).
-
-%% The components:
-%%
-%% Index: this is a mapping from MsgId to #msg_location{}:
-%% {MsgId, RefCount, File, Offset, TotalSize}
-%% By default, it's in ets, but it's also pluggable.
-%% FileSummary: this is an ets table which maps File to #file_summary{}:
-%% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers}
-%%
-%% The basic idea is that messages are appended to the current file up
-%% until that file becomes too big (> file_size_limit). At that point,
-%% the file is closed and a new file is created on the _right_ of the
-%% old file which is used for new messages. Files are named
-%% numerically ascending, thus the file with the lowest name is the
-%% eldest file.
-%%
-%% We need to keep track of which messages are in which files (this is
-%% the Index); how much useful data is in each file and which files
-%% are on the left and right of each other. This is the purpose of the
-%% FileSummary ets table.
-%%
-%% As messages are removed from files, holes appear in these
-%% files. The field ValidTotalSize contains the total amount of useful
-%% data left in the file. This is needed for garbage collection.
-%%
-%% When we discover that a file is now empty, we delete it. When we
-%% discover that it can be combined with the useful data in either its
-%% left or right neighbour, and overall, across all the files, we have
-%% ((the amount of garbage) / (the sum of all file sizes)) >
-%% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
-%% which will compact the two files together. This keeps disk
-%% utilisation high and aids performance. We deliberately do this
-%% lazily in order to prevent doing GC on files which are soon to be
-%% emptied (and hence deleted) soon.
-%%
-%% Given the compaction between two files, the left file (i.e. elder
-%% file) is considered the ultimate destination for the good data in
-%% the right file. If necessary, the good data in the left file which
-%% is fragmented throughout the file is written out to a temporary
-%% file, then read back in to form a contiguous chunk of good data at
-%% the start of the left file. Thus the left file is garbage collected
-%% and compacted. Then the good data from the right file is copied
-%% onto the end of the left file. Index and FileSummary tables are
-%% updated.
-%%
-%% On non-clean startup, we scan the files we discover, dealing with
-%% the possibilites of a crash having occured during a compaction
-%% (this consists of tidyup - the compaction is deliberately designed
-%% such that data is duplicated on disk rather than risking it being
-%% lost), and rebuild the FileSummary ets table and Index.
-%%
-%% So, with this design, messages move to the left. Eventually, they
-%% should end up in a contiguous block on the left and are then never
-%% rewritten. But this isn't quite the case. If in a file there is one
-%% message that is being ignored, for some reason, and messages in the
-%% file to the right and in the current block are being read all the
-%% time then it will repeatedly be the case that the good data from
-%% both files can be combined and will be written out to a new
-%% file. Whenever this happens, our shunned message will be rewritten.
-%%
-%% So, provided that we combine messages in the right order,
-%% (i.e. left file, bottom to top, right file, bottom to top),
-%% eventually our shunned message will end up at the bottom of the
-%% left file. The compaction/combining algorithm is smart enough to
-%% read in good data from the left file that is scattered throughout
-%% (i.e. C and D in the below diagram), then truncate the file to just
-%% above B (i.e. truncate to the limit of the good contiguous region
-%% at the start of the file), then write C and D on top and then write
-%% E, F and G from the right file on top. Thus contiguous blocks of
-%% good data at the bottom of files are not rewritten.
-%%
-%% +-------+ +-------+ +-------+
-%% | X | | G | | G |
-%% +-------+ +-------+ +-------+
-%% | D | | X | | F |
-%% +-------+ +-------+ +-------+
-%% | X | | X | | E |
-%% +-------+ +-------+ +-------+
-%% | C | | F | ===> | D |
-%% +-------+ +-------+ +-------+
-%% | X | | X | | C |
-%% +-------+ +-------+ +-------+
-%% | B | | X | | B |
-%% +-------+ +-------+ +-------+
-%% | A | | E | | A |
-%% +-------+ +-------+ +-------+
-%% left right left
-%%
-%% From this reasoning, we do have a bound on the number of times the
-%% message is rewritten. From when it is inserted, there can be no
-%% files inserted between it and the head of the queue, and the worst
-%% case is that everytime it is rewritten, it moves one position lower
-%% in the file (for it to stay at the same position requires that
-%% there are no holes beneath it, which means truncate would be used
-%% and so it would not be rewritten at all). Thus this seems to
-%% suggest the limit is the number of messages ahead of it in the
-%% queue, though it's likely that that's pessimistic, given the
-%% requirements for compaction/combination of files.
-%%
-%% The other property is that we have is the bound on the lowest
-%% utilisation, which should be 50% - worst case is that all files are
-%% fractionally over half full and can't be combined (equivalent is
-%% alternating full files and files with only one tiny message in
-%% them).
-%%
-%% Messages are reference-counted. When a message with the same msg id
-%% is written several times we only store it once, and only remove it
-%% from the store when it has been removed the same number of times.
-%%
-%% The reference counts do not persist. Therefore the initialisation
-%% function must be provided with a generator that produces ref count
-%% deltas for all recovered messages. This is only used on startup
-%% when the shutdown was non-clean.
-%%
-%% Read messages with a reference count greater than one are entered
-%% into a message cache. The purpose of the cache is not especially
-%% performance, though it can help there too, but prevention of memory
-%% explosion. It ensures that as messages with a high reference count
-%% are read from several processes they are read back as the same
-%% binary object rather than multiples of identical binary
-%% objects.
-%%
-%% Reads can be performed directly by clients without calling to the
-%% server. This is safe because multiple file handles can be used to
-%% read files. However, locking is used by the concurrent GC to make
-%% sure that reads are not attempted from files which are in the
-%% process of being garbage collected.
-%%
-%% When a message is removed, its reference count is decremented. Even
-%% if the reference count becomes 0, its entry is not removed. This is
-%% because in the event of the same message being sent to several
-%% different queues, there is the possibility of one queue writing and
-%% removing the message before other queues write it at all. Thus
-%% accomodating 0-reference counts allows us to avoid unnecessary
-%% writes here. Of course, there are complications: the file to which
-%% the message has already been written could be locked pending
-%% deletion or GC, which means we have to rewrite the message as the
-%% original copy will now be lost.
-%%
-%% The server automatically defers reads, removes and contains calls
-%% that occur which refer to files which are currently being
-%% GC'd. Contains calls are only deferred in order to ensure they do
-%% not overtake removes.
-%%
-%% The current file to which messages are being written has a
-%% write-back cache. This is written to immediately by clients and can
-%% be read from by clients too. This means that there are only ever
-%% writes made to the current file, thus eliminating delays due to
-%% flushing write buffers in order to be able to safely read from the
-%% current file. The one exception to this is that on start up, the
-%% cache is not populated with msgs found in the current file, and
-%% thus in this case only, reads may have to come from the file
-%% itself. The effect of this is that even if the msg_store process is
-%% heavily overloaded, clients can still write and read messages with
-%% very low latency and not block at all.
-%%
-%% Clients of the msg_store are required to register before using the
-%% msg_store. This provides them with the necessary client-side state
-%% to allow them to directly access the various caches and files. When
-%% they terminate, they should deregister. They can do this by calling
-%% either client_terminate/1 or client_delete_and_terminate/1. The
-%% differences are: (a) client_terminate is synchronous. As a result,
-%% if the msg_store is badly overloaded and has lots of in-flight
-%% writes and removes to process, this will take some time to
-%% return. However, once it does return, you can be sure that all the
-%% actions you've issued to the msg_store have been processed. (b) Not
-%% only is client_delete_and_terminate/1 asynchronous, but it also
-%% permits writes and subsequent removes from the current
-%% (terminating) client which are still in flight to be safely
-%% ignored. Thus from the point of view of the msg_store itself, and
-%% all from the same client:
-%%
-%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
-%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
-%%
-%% The client obviously sent T after all the other messages (up to
-%% W4), but because the msg_store prioritises messages, the T can be
-%% promoted and thus received early.
-%%
-%% Thus at the point of the msg_store receiving T, we have messages 1
-%% and 2 with a refcount of 1. After T, W3 will be ignored because
-%% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
-%% ignored because the messages that they refer to were already known
-%% to the msg_store prior to T. However, it can be a little more
-%% complex: after the first R2, the refcount of msg 2 is 0. At that
-%% point, if a GC occurs or file deletion, msg 2 could vanish, which
-%% would then mean that the subsequent W2 and R2 are then ignored.
-%%
-%% The use case then for client_delete_and_terminate/1 is if the
-%% client wishes to remove everything it's written to the msg_store:
-%% it issues removes for all messages it's written and not removed,
-%% and then calls client_delete_and_terminate/1. At that point, any
-%% in-flight writes (and subsequent removes) can be ignored, but
-%% removes and writes for messages the msg_store already knows about
-%% will continue to be processed normally (which will normally just
-%% involve modifying the reference count, which is fast). Thus we save
-%% disk bandwidth for writes which are going to be immediately removed
-%% again by the the terminating client.
-%%
-%% We use a separate set to keep track of the dying clients in order
-%% to keep that set, which is inspected on every write and remove, as
-%% small as possible. Inspecting the set of all clients would degrade
-%% performance with many healthy clients and few, if any, dying
-%% clients, which is the typical case.
-%%
-%% When the msg_store has a backlog (i.e. it has unprocessed messages
-%% in its mailbox / gen_server priority queue), a further optimisation
-%% opportunity arises: we can eliminate pairs of 'write' and 'remove'
-%% from the same client for the same message. A typical occurrence of
-%% these is when an empty durable queue delivers persistent messages
-%% to ack'ing consumers. The queue will asynchronously ask the
-%% msg_store to 'write' such messages, and when they are acknowledged
-%% it will issue a 'remove'. That 'remove' may be issued before the
-%% msg_store has processed the 'write'. There is then no point going
-%% ahead with the processing of that 'write'.
-%%
-%% To detect this situation a 'flying_ets' table is shared between the
-%% clients and the server. The table is keyed on the combination of
-%% client (reference) and msg id, and the value represents an
-%% integration of all the writes and removes currently "in flight" for
-%% that message between the client and server - '+1' means all the
-%% writes/removes add up to a single 'write', '-1' to a 'remove', and
-%% '0' to nothing. (NB: the integration can never add up to more than
-%% one 'write' or 'read' since clients must not write/remove a message
-%% more than once without first removing/writing it).
-%%
-%% Maintaining this table poses two challenges: 1) both the clients
-%% and the server access and update the table, which causes
-%% concurrency issues, 2) we must ensure that entries do not stay in
-%% the table forever, since that would constitute a memory leak. We
-%% address the former by carefully modelling all operations as
-%% sequences of atomic actions that produce valid results in all
-%% possible interleavings. We address the latter by deleting table
-%% entries whenever the server finds a 0-valued entry during the
-%% processing of a write/remove. 0 is essentially equivalent to "no
-%% entry". If, OTOH, the value is non-zero we know there is at least
-%% one other 'write' or 'remove' in flight, so we get an opportunity
-%% later to delete the table entry when processing these.
-%%
-%% There are two further complications. We need to ensure that 1)
-%% eliminated writes still get confirmed, and 2) the write-back cache
-%% doesn't grow unbounded. These are quite straightforward to
-%% address. See the comments in the code.
-%%
-%% For notes on Clean Shutdown and startup, see documentation in
-%% variable_queue.
-
-%%----------------------------------------------------------------------------
-%% public API
-%%----------------------------------------------------------------------------
-
-start_link(Server, Dir, ClientRefs, StartupFunState) ->
- gen_server2:start_link({local, Server}, ?MODULE,
- [Server, Dir, ClientRefs, StartupFunState],
- [{timeout, infinity}]).
-
-successfully_recovered_state(Server) ->
- gen_server2:call(Server, successfully_recovered_state, infinity).
-
-client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) ->
- {IState, IModule, Dir, GCPid,
- FileHandlesEts, FileSummaryEts, CurFileCacheEts, FlyingEts} =
- gen_server2:call(
- Server, {new_client_state, Ref, self(), MsgOnDiskFun, CloseFDsFun},
- infinity),
- #client_msstate { server = Server,
- client_ref = Ref,
- file_handle_cache = dict:new(),
- index_state = IState,
- index_module = IModule,
- dir = Dir,
- gc_pid = GCPid,
- file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts,
- flying_ets = FlyingEts }.
-
-client_terminate(CState = #client_msstate { client_ref = Ref }) ->
- close_all_handles(CState),
- ok = server_call(CState, {client_terminate, Ref}).
-
-client_delete_and_terminate(CState = #client_msstate { client_ref = Ref }) ->
- close_all_handles(CState),
- ok = server_cast(CState, {client_dying, Ref}),
- ok = server_cast(CState, {client_delete, Ref}).
-
-client_ref(#client_msstate { client_ref = Ref }) -> Ref.
-
-write_flow(MsgId, Msg, CState = #client_msstate { server = Server }) ->
- credit_flow:send(whereis(Server), ?CREDIT_DISC_BOUND),
- client_write(MsgId, Msg, flow, CState).
-
-write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
-
-read(MsgId,
- CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts }) ->
- %% Check the cur file cache
- case ets:lookup(CurFileCacheEts, MsgId) of
- [] ->
- Defer = fun() -> {server_call(CState, {read, MsgId}), CState} end,
- case index_lookup_positive_ref_count(MsgId, CState) of
- not_found -> Defer();
- MsgLocation -> client_read1(MsgLocation, Defer, CState)
- end;
- [{MsgId, Msg, _CacheRefCount}] ->
- {{ok, Msg}, CState}
- end.
-
-contains(MsgId, CState) -> server_call(CState, {contains, MsgId}).
-remove([], _CState) -> ok;
-remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
- [client_update_flying(-1, MsgId, CState) || MsgId <- MsgIds],
- server_cast(CState, {remove, CRef, MsgIds}).
-
-set_maximum_since_use(Server, Age) ->
- gen_server2:cast(Server, {set_maximum_since_use, Age}).
-
-%%----------------------------------------------------------------------------
-%% Client-side-only helpers
-%%----------------------------------------------------------------------------
-
-server_call(#client_msstate { server = Server }, Msg) ->
- gen_server2:call(Server, Msg, infinity).
-
-server_cast(#client_msstate { server = Server }, Msg) ->
- gen_server2:cast(Server, Msg).
-
-client_write(MsgId, Msg, Flow,
- CState = #client_msstate { cur_file_cache_ets = CurFileCacheEts,
- client_ref = CRef }) ->
- ok = client_update_flying(+1, MsgId, CState),
- ok = update_msg_cache(CurFileCacheEts, MsgId, Msg),
- ok = server_cast(CState, {write, CRef, MsgId, Flow}).
-
-client_read1(#msg_location { msg_id = MsgId, file = File } = MsgLocation, Defer,
- CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
- case ets:lookup(FileSummaryEts, File) of
- [] -> %% File has been GC'd and no longer exists. Go around again.
- read(MsgId, CState);
- [#file_summary { locked = Locked, right = Right }] ->
- client_read2(Locked, Right, MsgLocation, Defer, CState)
- end.
-
-client_read2(false, undefined, _MsgLocation, Defer, _CState) ->
- %% Although we've already checked both caches and not found the
- %% message there, the message is apparently in the
- %% current_file. We can only arrive here if we are trying to read
- %% a message which we have not written, which is very odd, so just
- %% defer.
- %%
- %% OR, on startup, the cur_file_cache is not populated with the
- %% contents of the current file, thus reads from the current file
- %% will end up here and will need to be deferred.
- Defer();
-client_read2(true, _Right, _MsgLocation, Defer, _CState) ->
- %% Of course, in the mean time, the GC could have run and our msg
- %% is actually in a different file, unlocked. However, defering is
- %% the safest and simplest thing to do.
- Defer();
-client_read2(false, _Right,
- MsgLocation = #msg_location { msg_id = MsgId, file = File },
- Defer,
- CState = #client_msstate { file_summary_ets = FileSummaryEts }) ->
- %% It's entirely possible that everything we're doing from here on
- %% is for the wrong file, or a non-existent file, as a GC may have
- %% finished.
- safe_ets_update_counter(
- FileSummaryEts, File, {#file_summary.readers, +1},
- fun (_) -> client_read3(MsgLocation, Defer, CState) end,
- fun () -> read(MsgId, CState) end).
-
-client_read3(#msg_location { msg_id = MsgId, file = File }, Defer,
- CState = #client_msstate { file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- gc_pid = GCPid,
- client_ref = Ref }) ->
- Release =
- fun() -> ok = case ets:update_counter(FileSummaryEts, File,
- {#file_summary.readers, -1}) of
- 0 -> case ets:lookup(FileSummaryEts, File) of
- [#file_summary { locked = true }] ->
- rabbit_msg_store_gc:no_readers(
- GCPid, File);
- _ -> ok
- end;
- _ -> ok
- end
- end,
- %% If a GC involving the file hasn't already started, it won't
- %% start now. Need to check again to see if we've been locked in
- %% the meantime, between lookup and update_counter (thus GC
- %% started before our +1. In fact, it could have finished by now
- %% too).
- case ets:lookup(FileSummaryEts, File) of
- [] -> %% GC has deleted our file, just go round again.
- read(MsgId, CState);
- [#file_summary { locked = true }] ->
- %% If we get a badarg here, then the GC has finished and
- %% deleted our file. Try going around again. Otherwise,
- %% just defer.
- %%
- %% badarg scenario: we lookup, msg_store locks, GC starts,
- %% GC ends, we +1 readers, msg_store ets:deletes (and
- %% unlocks the dest)
- try Release(),
- Defer()
- catch error:badarg -> read(MsgId, CState)
- end;
- [#file_summary { locked = false }] ->
- %% Ok, we're definitely safe to continue - a GC involving
- %% the file cannot start up now, and isn't running, so
- %% nothing will tell us from now on to close the handle if
- %% it's already open.
- %%
- %% Finally, we need to recheck that the msg is still at
- %% the same place - it's possible an entire GC ran between
- %% us doing the lookup and the +1 on the readers. (Same as
- %% badarg scenario above, but we don't have a missing file
- %% - we just have the /wrong/ file).
- case index_lookup(MsgId, CState) of
- #msg_location { file = File } = MsgLocation ->
- %% Still the same file.
- {ok, CState1} = close_all_indicated(CState),
- %% We are now guaranteed that the mark_handle_open
- %% call will either insert_new correctly, or will
- %% fail, but find the value is open, not close.
- mark_handle_open(FileHandlesEts, File, Ref),
- %% Could the msg_store now mark the file to be
- %% closed? No: marks for closing are issued only
- %% when the msg_store has locked the file.
- %% This will never be the current file
- {Msg, CState2} = read_from_disk(MsgLocation, CState1),
- Release(), %% this MUST NOT fail with badarg
- {{ok, Msg}, CState2};
- #msg_location {} = MsgLocation -> %% different file!
- Release(), %% this MUST NOT fail with badarg
- client_read1(MsgLocation, Defer, CState);
- not_found -> %% it seems not to exist. Defer, just to be sure.
- try Release() %% this can badarg, same as locked case, above
- catch error:badarg -> ok
- end,
- Defer()
- end
- end.
-
-client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
- client_ref = CRef }) ->
- Key = {MsgId, CRef},
- case ets:insert_new(FlyingEts, {Key, Diff}) of
- true -> ok;
- false -> try ets:update_counter(FlyingEts, Key, {2, Diff}) of
- 0 -> ok;
- Diff -> ok;
- Err -> throw({bad_flying_ets_update, Diff, Err, Key})
- catch error:badarg ->
- %% this is guaranteed to succeed since the
- %% server only removes and updates flying_ets
- %% entries; it never inserts them
- true = ets:insert_new(FlyingEts, {Key, Diff})
- end,
- ok
- end.
-
-clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
- dying_clients = DyingClients }) ->
- State #msstate { cref_to_msg_ids = dict:erase(CRef, CTM),
- dying_clients = sets:del_element(CRef, DyingClients) }.
-
-
-%%----------------------------------------------------------------------------
-%% gen_server callbacks
-%%----------------------------------------------------------------------------
-
-init([Server, BaseDir, ClientRefs, StartupFunState]) ->
- process_flag(trap_exit, true),
-
- ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
- [self()]),
-
- Dir = filename:join(BaseDir, atom_to_list(Server)),
-
- {ok, IndexModule} = application:get_env(msg_store_index_module),
- rabbit_log:info("~w: using ~p to provide index~n", [Server, IndexModule]),
-
- AttemptFileSummaryRecovery =
- case ClientRefs of
- undefined -> ok = rabbit_file:recursive_delete([Dir]),
- ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
- false;
- _ -> ok = filelib:ensure_dir(filename:join(Dir, "nothing")),
- recover_crashed_compactions(Dir)
- end,
-
- %% if we found crashed compactions we trust neither the
- %% file_summary nor the location index. Note the file_summary is
- %% left empty here if it can't be recovered.
- {FileSummaryRecovered, FileSummaryEts} =
- recover_file_summary(AttemptFileSummaryRecovery, Dir),
-
- {CleanShutdown, IndexState, ClientRefs1} =
- recover_index_and_client_refs(IndexModule, FileSummaryRecovered,
- ClientRefs, Dir, Server),
- Clients = dict:from_list(
- [{CRef, {undefined, undefined, undefined}} ||
- CRef <- ClientRefs1]),
- %% CleanShutdown => msg location index and file_summary both
- %% recovered correctly.
- true = case {FileSummaryRecovered, CleanShutdown} of
- {true, false} -> ets:delete_all_objects(FileSummaryEts);
- _ -> true
- end,
- %% CleanShutdown <=> msg location index and file_summary both
- %% recovered correctly.
-
- FileHandlesEts = ets:new(rabbit_msg_store_shared_file_handles,
- [ordered_set, public]),
- CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
- FlyingEts = ets:new(rabbit_msg_store_flying, [set, public]),
-
- {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit),
-
- {ok, GCPid} = rabbit_msg_store_gc:start_link(
- #gc_state { dir = Dir,
- index_module = IndexModule,
- index_state = IndexState,
- file_summary_ets = FileSummaryEts,
- file_handles_ets = FileHandlesEts,
- msg_store = self()
- }),
-
- State = #msstate { dir = Dir,
- index_module = IndexModule,
- index_state = IndexState,
- current_file = 0,
- current_file_handle = undefined,
- file_handle_cache = dict:new(),
- sync_timer_ref = undefined,
- sum_valid_data = 0,
- sum_file_size = 0,
- pending_gc_completion = orddict:new(),
- gc_pid = GCPid,
- file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts,
- flying_ets = FlyingEts,
- dying_clients = sets:new(),
- clients = Clients,
- successfully_recovered = CleanShutdown,
- file_size_limit = FileSizeLimit,
- cref_to_msg_ids = dict:new()
- },
-
- %% If we didn't recover the msg location index then we need to
- %% rebuild it now.
- {Offset, State1 = #msstate { current_file = CurFile }} =
- build_index(CleanShutdown, StartupFunState, State),
-
- %% read is only needed so that we can seek
- {ok, CurHdl} = open_file(Dir, filenum_to_name(CurFile),
- [read | ?WRITE_MODE]),
- {ok, Offset} = file_handle_cache:position(CurHdl, Offset),
- ok = file_handle_cache:truncate(CurHdl),
-
- {ok, maybe_compact(State1 #msstate { current_file_handle = CurHdl }),
- hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-prioritise_call(Msg, _From, _Len, _State) ->
- case Msg of
- successfully_recovered_state -> 7;
- {new_client_state, _Ref, _Pid, _MODC, _CloseFDsFun} -> 7;
- {read, _MsgId} -> 2;
- _ -> 0
- end.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- {combine_files, _Source, _Destination, _Reclaimed} -> 8;
- {delete_file, _File, _Reclaimed} -> 8;
- {set_maximum_since_use, _Age} -> 8;
- {client_dying, _Pid} -> 7;
- _ -> 0
- end.
-
-prioritise_info(Msg, _Len, _State) ->
- case Msg of
- sync -> 8;
- _ -> 0
- end.
-
-handle_call(successfully_recovered_state, _From, State) ->
- reply(State #msstate.successfully_recovered, State);
-
-handle_call({new_client_state, CRef, CPid, MsgOnDiskFun, CloseFDsFun}, _From,
- State = #msstate { dir = Dir,
- index_state = IndexState,
- index_module = IndexModule,
- file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts,
- flying_ets = FlyingEts,
- clients = Clients,
- gc_pid = GCPid }) ->
- Clients1 = dict:store(CRef, {CPid, MsgOnDiskFun, CloseFDsFun}, Clients),
- reply({IndexState, IndexModule, Dir, GCPid, FileHandlesEts, FileSummaryEts,
- CurFileCacheEts, FlyingEts},
- State #msstate { clients = Clients1 });
-
-handle_call({client_terminate, CRef}, _From, State) ->
- reply(ok, clear_client(CRef, State));
-
-handle_call({read, MsgId}, From, State) ->
- State1 = read_message(MsgId, From, State),
- noreply(State1);
-
-handle_call({contains, MsgId}, From, State) ->
- State1 = contains_message(MsgId, From, State),
- noreply(State1).
-
-handle_cast({client_dying, CRef},
- State = #msstate { dying_clients = DyingClients }) ->
- DyingClients1 = sets:add_element(CRef, DyingClients),
- noreply(write_message(CRef, <<>>,
- State #msstate { dying_clients = DyingClients1 }));
-
-handle_cast({client_delete, CRef},
- State = #msstate { clients = Clients }) ->
- {CPid, _, _} = dict:fetch(CRef, Clients),
- credit_flow:peer_down(CPid),
- State1 = State #msstate { clients = dict:erase(CRef, Clients) },
- noreply(remove_message(CRef, CRef, clear_client(CRef, State1)));
-
-handle_cast({write, CRef, MsgId, Flow},
- State = #msstate { cur_file_cache_ets = CurFileCacheEts,
- clients = Clients }) ->
- case Flow of
- flow -> {CPid, _, _} = dict:fetch(CRef, Clients),
- credit_flow:ack(CPid, ?CREDIT_DISC_BOUND);
- noflow -> ok
- end,
- true = 0 =< ets:update_counter(CurFileCacheEts, MsgId, {3, -1}),
- case update_flying(-1, MsgId, CRef, State) of
- process ->
- [{MsgId, Msg, _PWC}] = ets:lookup(CurFileCacheEts, MsgId),
- noreply(write_message(MsgId, Msg, CRef, State));
- ignore ->
- %% A 'remove' has already been issued and eliminated the
- %% 'write'.
- State1 = blind_confirm(CRef, gb_sets:singleton(MsgId),
- ignored, State),
- %% If all writes get eliminated, cur_file_cache_ets could
- %% grow unbounded. To prevent that we delete the cache
- %% entry here, but only if the message isn't in the
- %% current file. That way reads of the message can
- %% continue to be done client side, from either the cache
- %% or the non-current files. If the message *is* in the
- %% current file then the cache entry will be removed by
- %% the normal logic for that in write_message/4 and
- %% maybe_roll_to_new_file/2.
- case index_lookup(MsgId, State1) of
- [#msg_location { file = File }]
- when File == State1 #msstate.current_file ->
- ok;
- _ ->
- true = ets:match_delete(CurFileCacheEts, {MsgId, '_', 0})
- end,
- noreply(State1)
- end;
-
-handle_cast({remove, CRef, MsgIds}, State) ->
- {RemovedMsgIds, State1} =
- lists:foldl(
- fun (MsgId, {Removed, State2}) ->
- case update_flying(+1, MsgId, CRef, State2) of
- process -> {[MsgId | Removed],
- remove_message(MsgId, CRef, State2)};
- ignore -> {Removed, State2}
- end
- end, {[], State}, MsgIds),
- noreply(maybe_compact(client_confirm(CRef, gb_sets:from_list(RemovedMsgIds),
- ignored, State1)));
-
-handle_cast({combine_files, Source, Destination, Reclaimed},
- State = #msstate { sum_file_size = SumFileSize,
- file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- clients = Clients }) ->
- ok = cleanup_after_file_deletion(Source, State),
- %% see comment in cleanup_after_file_deletion, and client_read3
- true = mark_handle_to_close(Clients, FileHandlesEts, Destination, false),
- true = ets:update_element(FileSummaryEts, Destination,
- {#file_summary.locked, false}),
- State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
- noreply(maybe_compact(run_pending([Source, Destination], State1)));
-
-handle_cast({delete_file, File, Reclaimed},
- State = #msstate { sum_file_size = SumFileSize }) ->
- ok = cleanup_after_file_deletion(File, State),
- State1 = State #msstate { sum_file_size = SumFileSize - Reclaimed },
- noreply(maybe_compact(run_pending([File], State1)));
-
-handle_cast({set_maximum_since_use, Age}, State) ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- noreply(State).
-
-handle_info(sync, State) ->
- noreply(internal_sync(State));
-
-handle_info(timeout, State) ->
- noreply(internal_sync(State));
-
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State}.
-
-terminate(_Reason, State = #msstate { index_state = IndexState,
- index_module = IndexModule,
- current_file_handle = CurHdl,
- gc_pid = GCPid,
- file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts,
- flying_ets = FlyingEts,
- clients = Clients,
- dir = Dir }) ->
- %% stop the gc first, otherwise it could be working and we pull
- %% out the ets tables from under it.
- ok = rabbit_msg_store_gc:stop(GCPid),
- State1 = case CurHdl of
- undefined -> State;
- _ -> State2 = internal_sync(State),
- ok = file_handle_cache:close(CurHdl),
- State2
- end,
- State3 = close_all_handles(State1),
- ok = store_file_summary(FileSummaryEts, Dir),
- [true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
- CurFileCacheEts, FlyingEts]],
- IndexModule:terminate(IndexState),
- ok = store_recovery_terms([{client_refs, dict:fetch_keys(Clients)},
- {index_module, IndexModule}], Dir),
- State3 #msstate { index_state = undefined,
- current_file_handle = undefined }.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
-
-%%----------------------------------------------------------------------------
-%% general helper functions
-%%----------------------------------------------------------------------------
-
-noreply(State) ->
- {State1, Timeout} = next_state(State),
- {noreply, State1, Timeout}.
-
-reply(Reply, State) ->
- {State1, Timeout} = next_state(State),
- {reply, Reply, State1, Timeout}.
-
-next_state(State = #msstate { sync_timer_ref = undefined,
- cref_to_msg_ids = CTM }) ->
- case dict:size(CTM) of
- 0 -> {State, hibernate};
- _ -> {start_sync_timer(State), 0}
- end;
-next_state(State = #msstate { cref_to_msg_ids = CTM }) ->
- case dict:size(CTM) of
- 0 -> {stop_sync_timer(State), hibernate};
- _ -> {State, 0}
- end.
-
-start_sync_timer(State) ->
- rabbit_misc:ensure_timer(State, #msstate.sync_timer_ref,
- ?SYNC_INTERVAL, sync).
-
-stop_sync_timer(State) ->
- rabbit_misc:stop_timer(State, #msstate.sync_timer_ref).
-
-internal_sync(State = #msstate { current_file_handle = CurHdl,
- cref_to_msg_ids = CTM }) ->
- State1 = stop_sync_timer(State),
- CGs = dict:fold(fun (CRef, MsgIds, NS) ->
- case gb_sets:is_empty(MsgIds) of
- true -> NS;
- false -> [{CRef, MsgIds} | NS]
- end
- end, [], CTM),
- ok = case CGs of
- [] -> ok;
- _ -> file_handle_cache:sync(CurHdl)
- end,
- lists:foldl(fun ({CRef, MsgIds}, StateN) ->
- client_confirm(CRef, MsgIds, written, StateN)
- end, State1, CGs).
-
-update_flying(Diff, MsgId, CRef, #msstate { flying_ets = FlyingEts }) ->
- Key = {MsgId, CRef},
- NDiff = -Diff,
- case ets:lookup(FlyingEts, Key) of
- [] -> ignore;
- [{_, Diff}] -> ignore; %% [1]
- [{_, NDiff}] -> ets:update_counter(FlyingEts, Key, {2, Diff}),
- true = ets:delete_object(FlyingEts, {Key, 0}),
- process;
- [{_, 0}] -> true = ets:delete_object(FlyingEts, {Key, 0}),
- ignore;
- [{_, Err}] -> throw({bad_flying_ets_record, Diff, Err, Key})
- end.
-%% [1] We can get here, for example, in the following scenario: There
-%% is a write followed by a remove in flight. The counter will be 0,
-%% so on processing the write the server attempts to delete the
-%% entry. If at that point the client injects another write it will
-%% either insert a new entry, containing +1, or increment the existing
-%% entry to +1, thus preventing its removal. Either way therefore when
-%% the server processes the read, the counter will be +1.
-
-write_action({true, not_found}, _MsgId, State) ->
- {ignore, undefined, State};
-write_action({true, #msg_location { file = File }}, _MsgId, State) ->
- {ignore, File, State};
-write_action({false, not_found}, _MsgId, State) ->
- {write, State};
-write_action({Mask, #msg_location { ref_count = 0, file = File,
- total_size = TotalSize }},
- MsgId, State = #msstate { file_summary_ets = FileSummaryEts }) ->
- case {Mask, ets:lookup(FileSummaryEts, File)} of
- {false, [#file_summary { locked = true }]} ->
- ok = index_delete(MsgId, State),
- {write, State};
- {false_if_increment, [#file_summary { locked = true }]} ->
- %% The msg for MsgId is older than the client death
- %% message, but as it is being GC'd currently we'll have
- %% to write a new copy, which will then be younger, so
- %% ignore this write.
- {ignore, File, State};
- {_Mask, [#file_summary {}]} ->
- ok = index_update_ref_count(MsgId, 1, State),
- State1 = adjust_valid_total_size(File, TotalSize, State),
- {confirm, File, State1}
- end;
-write_action({_Mask, #msg_location { ref_count = RefCount, file = File }},
- MsgId, State) ->
- ok = index_update_ref_count(MsgId, RefCount + 1, State),
- %% We already know about it, just update counter. Only update
- %% field otherwise bad interaction with concurrent GC
- {confirm, File, State}.
-
-write_message(MsgId, Msg, CRef,
- State = #msstate { cur_file_cache_ets = CurFileCacheEts }) ->
- case write_action(should_mask_action(CRef, MsgId, State), MsgId, State) of
- {write, State1} ->
- write_message(MsgId, Msg,
- record_pending_confirm(CRef, MsgId, State1));
- {ignore, CurFile, State1 = #msstate { current_file = CurFile }} ->
- State1;
- {ignore, _File, State1} ->
- true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
- State1;
- {confirm, CurFile, State1 = #msstate { current_file = CurFile }}->
- record_pending_confirm(CRef, MsgId, State1);
- {confirm, _File, State1} ->
- true = ets:delete_object(CurFileCacheEts, {MsgId, Msg, 0}),
- update_pending_confirms(
- fun (MsgOnDiskFun, CTM) ->
- MsgOnDiskFun(gb_sets:singleton(MsgId), written),
- CTM
- end, CRef, State1)
- end.
-
-remove_message(MsgId, CRef,
- State = #msstate { file_summary_ets = FileSummaryEts }) ->
- case should_mask_action(CRef, MsgId, State) of
- {true, _Location} ->
- State;
- {false_if_increment, #msg_location { ref_count = 0 }} ->
- %% CRef has tried to both write and remove this msg whilst
- %% it's being GC'd.
- %%
- %% ASSERTION: [#file_summary { locked = true }] =
- %% ets:lookup(FileSummaryEts, File),
- State;
- {_Mask, #msg_location { ref_count = RefCount, file = File,
- total_size = TotalSize }}
- when RefCount > 0 ->
- %% only update field, otherwise bad interaction with
- %% concurrent GC
- Dec = fun () -> index_update_ref_count(
- MsgId, RefCount - 1, State) end,
- case RefCount of
- %% don't remove from cur_file_cache_ets here because
- %% there may be further writes in the mailbox for the
- %% same msg.
- 1 -> case ets:lookup(FileSummaryEts, File) of
- [#file_summary { locked = true }] ->
- add_to_pending_gc_completion(
- {remove, MsgId, CRef}, File, State);
- [#file_summary {}] ->
- ok = Dec(),
- delete_file_if_empty(
- File, adjust_valid_total_size(
- File, -TotalSize, State))
- end;
- _ -> ok = Dec(),
- State
- end
- end.
-
-write_message(MsgId, Msg,
- State = #msstate { current_file_handle = CurHdl,
- current_file = CurFile,
- sum_valid_data = SumValid,
- sum_file_size = SumFileSize,
- file_summary_ets = FileSummaryEts }) ->
- {ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
- {ok, TotalSize} = rabbit_msg_file:append(CurHdl, MsgId, Msg),
- ok = index_insert(
- #msg_location { msg_id = MsgId, ref_count = 1, file = CurFile,
- offset = CurOffset, total_size = TotalSize }, State),
- [#file_summary { right = undefined, locked = false }] =
- ets:lookup(FileSummaryEts, CurFile),
- [_,_] = ets:update_counter(FileSummaryEts, CurFile,
- [{#file_summary.valid_total_size, TotalSize},
- {#file_summary.file_size, TotalSize}]),
- maybe_roll_to_new_file(CurOffset + TotalSize,
- State #msstate {
- sum_valid_data = SumValid + TotalSize,
- sum_file_size = SumFileSize + TotalSize }).
-
-read_message(MsgId, From, State) ->
- case index_lookup_positive_ref_count(MsgId, State) of
- not_found -> gen_server2:reply(From, not_found),
- State;
- MsgLocation -> read_message1(From, MsgLocation, State)
- end.
-
-read_message1(From, #msg_location { msg_id = MsgId, file = File,
- offset = Offset } = MsgLoc,
- State = #msstate { current_file = CurFile,
- current_file_handle = CurHdl,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts }) ->
- case File =:= CurFile of
- true -> {Msg, State1} =
- %% can return [] if msg in file existed on startup
- case ets:lookup(CurFileCacheEts, MsgId) of
- [] ->
- {ok, RawOffSet} =
- file_handle_cache:current_raw_offset(CurHdl),
- ok = case Offset >= RawOffSet of
- true -> file_handle_cache:flush(CurHdl);
- false -> ok
- end,
- read_from_disk(MsgLoc, State);
- [{MsgId, Msg1, _CacheRefCount}] ->
- {Msg1, State}
- end,
- gen_server2:reply(From, {ok, Msg}),
- State1;
- false -> [#file_summary { locked = Locked }] =
- ets:lookup(FileSummaryEts, File),
- case Locked of
- true -> add_to_pending_gc_completion({read, MsgId, From},
- File, State);
- false -> {Msg, State1} = read_from_disk(MsgLoc, State),
- gen_server2:reply(From, {ok, Msg}),
- State1
- end
- end.
-
-read_from_disk(#msg_location { msg_id = MsgId, file = File, offset = Offset,
- total_size = TotalSize }, State) ->
- {Hdl, State1} = get_read_handle(File, State),
- {ok, Offset} = file_handle_cache:position(Hdl, Offset),
- {ok, {MsgId, Msg}} =
- case rabbit_msg_file:read(Hdl, TotalSize) of
- {ok, {MsgId, _}} = Obj ->
- Obj;
- Rest ->
- {error, {misread, [{old_state, State},
- {file_num, File},
- {offset, Offset},
- {msg_id, MsgId},
- {read, Rest},
- {proc_dict, get()}
- ]}}
- end,
- {Msg, State1}.
-
-contains_message(MsgId, From,
- State = #msstate { pending_gc_completion = Pending }) ->
- case index_lookup_positive_ref_count(MsgId, State) of
- not_found ->
- gen_server2:reply(From, false),
- State;
- #msg_location { file = File } ->
- case orddict:is_key(File, Pending) of
- true -> add_to_pending_gc_completion(
- {contains, MsgId, From}, File, State);
- false -> gen_server2:reply(From, true),
- State
- end
- end.
-
-add_to_pending_gc_completion(
- Op, File, State = #msstate { pending_gc_completion = Pending }) ->
- State #msstate { pending_gc_completion =
- rabbit_misc:orddict_cons(File, Op, Pending) }.
-
-run_pending(Files, State) ->
- lists:foldl(
- fun (File, State1 = #msstate { pending_gc_completion = Pending }) ->
- Pending1 = orddict:erase(File, Pending),
- lists:foldl(
- fun run_pending_action/2,
- State1 #msstate { pending_gc_completion = Pending1 },
- lists:reverse(orddict:fetch(File, Pending)))
- end, State, Files).
-
-run_pending_action({read, MsgId, From}, State) ->
- read_message(MsgId, From, State);
-run_pending_action({contains, MsgId, From}, State) ->
- contains_message(MsgId, From, State);
-run_pending_action({remove, MsgId, CRef}, State) ->
- remove_message(MsgId, CRef, State).
-
-safe_ets_update_counter(Tab, Key, UpdateOp, SuccessFun, FailThunk) ->
- try
- SuccessFun(ets:update_counter(Tab, Key, UpdateOp))
- catch error:badarg -> FailThunk()
- end.
-
-update_msg_cache(CacheEts, MsgId, Msg) ->
- case ets:insert_new(CacheEts, {MsgId, Msg, 1}) of
- true -> ok;
- false -> safe_ets_update_counter(
- CacheEts, MsgId, {3, +1}, fun (_) -> ok end,
- fun () -> update_msg_cache(CacheEts, MsgId, Msg) end)
- end.
-
-adjust_valid_total_size(File, Delta, State = #msstate {
- sum_valid_data = SumValid,
- file_summary_ets = FileSummaryEts }) ->
- [_] = ets:update_counter(FileSummaryEts, File,
- [{#file_summary.valid_total_size, Delta}]),
- State #msstate { sum_valid_data = SumValid + Delta }.
-
-orddict_store(Key, Val, Dict) ->
- false = orddict:is_key(Key, Dict),
- orddict:store(Key, Val, Dict).
-
-update_pending_confirms(Fun, CRef,
- State = #msstate { clients = Clients,
- cref_to_msg_ids = CTM }) ->
- case dict:fetch(CRef, Clients) of
- {_CPid, undefined, _CloseFDsFun} -> State;
- {_CPid, MsgOnDiskFun, _CloseFDsFun} -> CTM1 = Fun(MsgOnDiskFun, CTM),
- State #msstate {
- cref_to_msg_ids = CTM1 }
- end.
-
-record_pending_confirm(CRef, MsgId, State) ->
- update_pending_confirms(
- fun (_MsgOnDiskFun, CTM) ->
- dict:update(CRef, fun (MsgIds) -> gb_sets:add(MsgId, MsgIds) end,
- gb_sets:singleton(MsgId), CTM)
- end, CRef, State).
-
-client_confirm(CRef, MsgIds, ActionTaken, State) ->
- update_pending_confirms(
- fun (MsgOnDiskFun, CTM) ->
- case dict:find(CRef, CTM) of
- {ok, Gs} -> MsgOnDiskFun(gb_sets:intersection(Gs, MsgIds),
- ActionTaken),
- MsgIds1 = rabbit_misc:gb_sets_difference(
- Gs, MsgIds),
- case gb_sets:is_empty(MsgIds1) of
- true -> dict:erase(CRef, CTM);
- false -> dict:store(CRef, MsgIds1, CTM)
- end;
- error -> CTM
- end
- end, CRef, State).
-
-blind_confirm(CRef, MsgIds, ActionTaken, State) ->
- update_pending_confirms(
- fun (MsgOnDiskFun, CTM) -> MsgOnDiskFun(MsgIds, ActionTaken), CTM end,
- CRef, State).
-
-%% Detect whether the MsgId is older or younger than the client's death
-%% msg (if there is one). If the msg is older than the client death
-%% msg, and it has a 0 ref_count we must only alter the ref_count, not
-%% rewrite the msg - rewriting it would make it younger than the death
-%% msg and thus should be ignored. Note that this (correctly) returns
-%% false when testing to remove the death msg itself.
-should_mask_action(CRef, MsgId,
- State = #msstate { dying_clients = DyingClients }) ->
- case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of
- {false, Location} ->
- {false, Location};
- {true, not_found} ->
- {true, not_found};
- {true, #msg_location { file = File, offset = Offset,
- ref_count = RefCount } = Location} ->
- #msg_location { file = DeathFile, offset = DeathOffset } =
- index_lookup(CRef, State),
- {case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
- {true, _} -> true;
- {false, 0} -> false_if_increment;
- {false, _} -> false
- end, Location}
- end.
-
-%%----------------------------------------------------------------------------
-%% file helper functions
-%%----------------------------------------------------------------------------
-
-open_file(Dir, FileName, Mode) ->
- file_handle_cache:open(form_filename(Dir, FileName), ?BINARY_MODE ++ Mode,
- [{write_buffer, ?HANDLE_CACHE_BUFFER_SIZE}]).
-
-close_handle(Key, CState = #client_msstate { file_handle_cache = FHC }) ->
- CState #client_msstate { file_handle_cache = close_handle(Key, FHC) };
-
-close_handle(Key, State = #msstate { file_handle_cache = FHC }) ->
- State #msstate { file_handle_cache = close_handle(Key, FHC) };
-
-close_handle(Key, FHC) ->
- case dict:find(Key, FHC) of
- {ok, Hdl} -> ok = file_handle_cache:close(Hdl),
- dict:erase(Key, FHC);
- error -> FHC
- end.
-
-mark_handle_open(FileHandlesEts, File, Ref) ->
- %% This is fine to fail (already exists). Note it could fail with
- %% the value being close, and not have it updated to open.
- ets:insert_new(FileHandlesEts, {{Ref, File}, open}),
- true.
-
-%% See comment in client_read3 - only call this when the file is locked
-mark_handle_to_close(ClientRefs, FileHandlesEts, File, Invoke) ->
- [ begin
- case (ets:update_element(FileHandlesEts, Key, {2, close})
- andalso Invoke) of
- true -> case dict:fetch(Ref, ClientRefs) of
- {_CPid, _MsgOnDiskFun, undefined} ->
- ok;
- {_CPid, _MsgOnDiskFun, CloseFDsFun} ->
- ok = CloseFDsFun()
- end;
- false -> ok
- end
- end || {{Ref, _File} = Key, open} <-
- ets:match_object(FileHandlesEts, {{'_', File}, open}) ],
- true.
-
-safe_file_delete_fun(File, Dir, FileHandlesEts) ->
- fun () -> safe_file_delete(File, Dir, FileHandlesEts) end.
-
-safe_file_delete(File, Dir, FileHandlesEts) ->
- %% do not match on any value - it's the absence of the row that
- %% indicates the client has really closed the file.
- case ets:match_object(FileHandlesEts, {{'_', File}, '_'}, 1) of
- {[_|_], _Cont} -> false;
- _ -> ok = file:delete(
- form_filename(Dir, filenum_to_name(File))),
- true
- end.
-
-close_all_indicated(#client_msstate { file_handles_ets = FileHandlesEts,
- client_ref = Ref } =
- CState) ->
- Objs = ets:match_object(FileHandlesEts, {{Ref, '_'}, close}),
- {ok, lists:foldl(fun ({Key = {_Ref, File}, close}, CStateM) ->
- true = ets:delete(FileHandlesEts, Key),
- close_handle(File, CStateM)
- end, CState, Objs)}.
-
-close_all_handles(CState = #client_msstate { file_handles_ets = FileHandlesEts,
- file_handle_cache = FHC,
- client_ref = Ref }) ->
- ok = dict:fold(fun (File, Hdl, ok) ->
- true = ets:delete(FileHandlesEts, {Ref, File}),
- file_handle_cache:close(Hdl)
- end, ok, FHC),
- CState #client_msstate { file_handle_cache = dict:new() };
-
-close_all_handles(State = #msstate { file_handle_cache = FHC }) ->
- ok = dict:fold(fun (_Key, Hdl, ok) -> file_handle_cache:close(Hdl) end,
- ok, FHC),
- State #msstate { file_handle_cache = dict:new() }.
-
-get_read_handle(FileNum, CState = #client_msstate { file_handle_cache = FHC,
- dir = Dir }) ->
- {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
- {Hdl, CState #client_msstate { file_handle_cache = FHC2 }};
-
-get_read_handle(FileNum, State = #msstate { file_handle_cache = FHC,
- dir = Dir }) ->
- {Hdl, FHC2} = get_read_handle(FileNum, FHC, Dir),
- {Hdl, State #msstate { file_handle_cache = FHC2 }}.
-
-get_read_handle(FileNum, FHC, Dir) ->
- case dict:find(FileNum, FHC) of
- {ok, Hdl} -> {Hdl, FHC};
- error -> {ok, Hdl} = open_file(Dir, filenum_to_name(FileNum),
- ?READ_MODE),
- {Hdl, dict:store(FileNum, Hdl, FHC)}
- end.
-
-preallocate(Hdl, FileSizeLimit, FinalPos) ->
- {ok, FileSizeLimit} = file_handle_cache:position(Hdl, FileSizeLimit),
- ok = file_handle_cache:truncate(Hdl),
- {ok, FinalPos} = file_handle_cache:position(Hdl, FinalPos),
- ok.
-
-truncate_and_extend_file(Hdl, Lowpoint, Highpoint) ->
- {ok, Lowpoint} = file_handle_cache:position(Hdl, Lowpoint),
- ok = file_handle_cache:truncate(Hdl),
- ok = preallocate(Hdl, Highpoint, Lowpoint).
-
-form_filename(Dir, Name) -> filename:join(Dir, Name).
-
-filenum_to_name(File) -> integer_to_list(File) ++ ?FILE_EXTENSION.
-
-filename_to_num(FileName) -> list_to_integer(filename:rootname(FileName)).
-
-list_sorted_filenames(Dir, Ext) ->
- lists:sort(fun (A, B) -> filename_to_num(A) < filename_to_num(B) end,
- filelib:wildcard("*" ++ Ext, Dir)).
-
-%%----------------------------------------------------------------------------
-%% index
-%%----------------------------------------------------------------------------
-
-index_lookup_positive_ref_count(Key, State) ->
- case index_lookup(Key, State) of
- not_found -> not_found;
- #msg_location { ref_count = 0 } -> not_found;
- #msg_location {} = MsgLocation -> MsgLocation
- end.
-
-index_update_ref_count(Key, RefCount, State) ->
- index_update_fields(Key, {#msg_location.ref_count, RefCount}, State).
-
-index_lookup(Key, #client_msstate { index_module = Index,
- index_state = State }) ->
- Index:lookup(Key, State);
-
-index_lookup(Key, #msstate { index_module = Index, index_state = State }) ->
- Index:lookup(Key, State).
-
-index_insert(Obj, #msstate { index_module = Index, index_state = State }) ->
- Index:insert(Obj, State).
-
-index_update(Obj, #msstate { index_module = Index, index_state = State }) ->
- Index:update(Obj, State).
-
-index_update_fields(Key, Updates, #msstate { index_module = Index,
- index_state = State }) ->
- Index:update_fields(Key, Updates, State).
-
-index_delete(Key, #msstate { index_module = Index, index_state = State }) ->
- Index:delete(Key, State).
-
-index_delete_by_file(File, #msstate { index_module = Index,
- index_state = State }) ->
- Index:delete_by_file(File, State).
-
-%%----------------------------------------------------------------------------
-%% shutdown and recovery
-%%----------------------------------------------------------------------------
-
-recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Server) ->
- {false, IndexModule:new(Dir), []};
-recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Server) ->
- rabbit_log:warning("~w: rebuilding indices from scratch~n", [Server]),
- {false, IndexModule:new(Dir), []};
-recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Server) ->
- Fresh = fun (ErrorMsg, ErrorArgs) ->
- rabbit_log:warning("~w: " ++ ErrorMsg ++ "~n"
- "rebuilding indices from scratch~n",
- [Server | ErrorArgs]),
- {false, IndexModule:new(Dir), []}
- end,
- case read_recovery_terms(Dir) of
- {false, Error} ->
- Fresh("failed to read recovery terms: ~p", [Error]);
- {true, Terms} ->
- RecClientRefs = proplists:get_value(client_refs, Terms, []),
- RecIndexModule = proplists:get_value(index_module, Terms),
- case (lists:sort(ClientRefs) =:= lists:sort(RecClientRefs)
- andalso IndexModule =:= RecIndexModule) of
- true -> case IndexModule:recover(Dir) of
- {ok, IndexState1} ->
- {true, IndexState1, ClientRefs};
- {error, Error} ->
- Fresh("failed to recover index: ~p", [Error])
- end;
- false -> Fresh("recovery terms differ from present", [])
- end
- end.
-
-store_recovery_terms(Terms, Dir) ->
- rabbit_file:write_term_file(filename:join(Dir, ?CLEAN_FILENAME), Terms).
-
-read_recovery_terms(Dir) ->
- Path = filename:join(Dir, ?CLEAN_FILENAME),
- case rabbit_file:read_term_file(Path) of
- {ok, Terms} -> case file:delete(Path) of
- ok -> {true, Terms};
- {error, Error} -> {false, Error}
- end;
- {error, Error} -> {false, Error}
- end.
-
-store_file_summary(Tid, Dir) ->
- ok = ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
- [{extended_info, [object_count]}]).
-
-recover_file_summary(false, _Dir) ->
- %% TODO: the only reason for this to be an *ordered*_set is so
- %% that a) maybe_compact can start a traversal from the eldest
- %% file, and b) build_index in fast recovery mode can easily
- %% identify the current file. It's awkward to have both that
- %% odering and the left/right pointers in the entries - replacing
- %% the former with some additional bit of state would be easy, but
- %% ditching the latter would be neater.
- {false, ets:new(rabbit_msg_store_file_summary,
- [ordered_set, public, {keypos, #file_summary.file}])};
-recover_file_summary(true, Dir) ->
- Path = filename:join(Dir, ?FILE_SUMMARY_FILENAME),
- case ets:file2tab(Path) of
- {ok, Tid} -> ok = file:delete(Path),
- {true, Tid};
- {error, _Error} -> recover_file_summary(false, Dir)
- end.
-
-count_msg_refs(Gen, Seed, State) ->
- case Gen(Seed) of
- finished ->
- ok;
- {_MsgId, 0, Next} ->
- count_msg_refs(Gen, Next, State);
- {MsgId, Delta, Next} ->
- ok = case index_lookup(MsgId, State) of
- not_found ->
- index_insert(#msg_location { msg_id = MsgId,
- file = undefined,
- ref_count = Delta },
- State);
- #msg_location { ref_count = RefCount } = StoreEntry ->
- NewRefCount = RefCount + Delta,
- case NewRefCount of
- 0 -> index_delete(MsgId, State);
- _ -> index_update(StoreEntry #msg_location {
- ref_count = NewRefCount },
- State)
- end
- end,
- count_msg_refs(Gen, Next, State)
- end.
-
-recover_crashed_compactions(Dir) ->
- FileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION),
- TmpFileNames = list_sorted_filenames(Dir, ?FILE_EXTENSION_TMP),
- lists:foreach(
- fun (TmpFileName) ->
- NonTmpRelatedFileName =
- filename:rootname(TmpFileName) ++ ?FILE_EXTENSION,
- true = lists:member(NonTmpRelatedFileName, FileNames),
- ok = recover_crashed_compaction(
- Dir, TmpFileName, NonTmpRelatedFileName)
- end, TmpFileNames),
- TmpFileNames == [].
-
-recover_crashed_compaction(Dir, TmpFileName, NonTmpRelatedFileName) ->
- %% Because a msg can legitimately appear multiple times in the
- %% same file, identifying the contents of the tmp file and where
- %% they came from is non-trivial. If we are recovering a crashed
- %% compaction then we will be rebuilding the index, which can cope
- %% with duplicates appearing. Thus the simplest and safest thing
- %% to do is to append the contents of the tmp file to its main
- %% file.
- {ok, TmpHdl} = open_file(Dir, TmpFileName, ?READ_MODE),
- {ok, MainHdl} = open_file(Dir, NonTmpRelatedFileName,
- ?READ_MODE ++ ?WRITE_MODE),
- {ok, _End} = file_handle_cache:position(MainHdl, eof),
- Size = filelib:file_size(form_filename(Dir, TmpFileName)),
- {ok, Size} = file_handle_cache:copy(TmpHdl, MainHdl, Size),
- ok = file_handle_cache:close(MainHdl),
- ok = file_handle_cache:delete(TmpHdl),
- ok.
-
-scan_file_for_valid_messages(Dir, FileName) ->
- case open_file(Dir, FileName, ?READ_MODE) of
- {ok, Hdl} -> Valid = rabbit_msg_file:scan(
- Hdl, filelib:file_size(
- form_filename(Dir, FileName)),
- fun scan_fun/2, []),
- ok = file_handle_cache:close(Hdl),
- Valid;
- {error, enoent} -> {ok, [], 0};
- {error, Reason} -> {error, {unable_to_scan_file, FileName, Reason}}
- end.
-
-scan_fun({MsgId, TotalSize, Offset, _Msg}, Acc) ->
- [{MsgId, TotalSize, Offset} | Acc].
-
-%% Takes the list in *ascending* order (i.e. eldest message
-%% first). This is the opposite of what scan_file_for_valid_messages
-%% produces. The list of msgs that is produced is youngest first.
-drop_contiguous_block_prefix(L) -> drop_contiguous_block_prefix(L, 0).
-
-drop_contiguous_block_prefix([], ExpectedOffset) ->
- {ExpectedOffset, []};
-drop_contiguous_block_prefix([#msg_location { offset = ExpectedOffset,
- total_size = TotalSize } | Tail],
- ExpectedOffset) ->
- ExpectedOffset1 = ExpectedOffset + TotalSize,
- drop_contiguous_block_prefix(Tail, ExpectedOffset1);
-drop_contiguous_block_prefix(MsgsAfterGap, ExpectedOffset) ->
- {ExpectedOffset, MsgsAfterGap}.
-
-build_index(true, _StartupFunState,
- State = #msstate { file_summary_ets = FileSummaryEts }) ->
- ets:foldl(
- fun (#file_summary { valid_total_size = ValidTotalSize,
- file_size = FileSize,
- file = File },
- {_Offset, State1 = #msstate { sum_valid_data = SumValid,
- sum_file_size = SumFileSize }}) ->
- {FileSize, State1 #msstate {
- sum_valid_data = SumValid + ValidTotalSize,
- sum_file_size = SumFileSize + FileSize,
- current_file = File }}
- end, {0, State}, FileSummaryEts);
-build_index(false, {MsgRefDeltaGen, MsgRefDeltaGenInit},
- State = #msstate { dir = Dir }) ->
- ok = count_msg_refs(MsgRefDeltaGen, MsgRefDeltaGenInit, State),
- {ok, Pid} = gatherer:start_link(),
- case [filename_to_num(FileName) ||
- FileName <- list_sorted_filenames(Dir, ?FILE_EXTENSION)] of
- [] -> build_index(Pid, undefined, [State #msstate.current_file],
- State);
- Files -> {Offset, State1} = build_index(Pid, undefined, Files, State),
- {Offset, lists:foldl(fun delete_file_if_empty/2,
- State1, Files)}
- end.
-
-build_index(Gatherer, Left, [],
- State = #msstate { file_summary_ets = FileSummaryEts,
- sum_valid_data = SumValid,
- sum_file_size = SumFileSize }) ->
- case gatherer:out(Gatherer) of
- empty ->
- unlink(Gatherer),
- ok = gatherer:stop(Gatherer),
- ok = index_delete_by_file(undefined, State),
- Offset = case ets:lookup(FileSummaryEts, Left) of
- [] -> 0;
- [#file_summary { file_size = FileSize }] -> FileSize
- end,
- {Offset, State #msstate { current_file = Left }};
- {value, #file_summary { valid_total_size = ValidTotalSize,
- file_size = FileSize } = FileSummary} ->
- true = ets:insert_new(FileSummaryEts, FileSummary),
- build_index(Gatherer, Left, [],
- State #msstate {
- sum_valid_data = SumValid + ValidTotalSize,
- sum_file_size = SumFileSize + FileSize })
- end;
-build_index(Gatherer, Left, [File|Files], State) ->
- ok = gatherer:fork(Gatherer),
- ok = worker_pool:submit_async(
- fun () -> build_index_worker(Gatherer, State,
- Left, File, Files)
- end),
- build_index(Gatherer, File, Files, State).
-
-build_index_worker(Gatherer, State = #msstate { dir = Dir },
- Left, File, Files) ->
- {ok, Messages, FileSize} =
- scan_file_for_valid_messages(Dir, filenum_to_name(File)),
- {ValidMessages, ValidTotalSize} =
- lists:foldl(
- fun (Obj = {MsgId, TotalSize, Offset}, {VMAcc, VTSAcc}) ->
- case index_lookup(MsgId, State) of
- #msg_location { file = undefined } = StoreEntry ->
- ok = index_update(StoreEntry #msg_location {
- file = File, offset = Offset,
- total_size = TotalSize },
- State),
- {[Obj | VMAcc], VTSAcc + TotalSize};
- _ ->
- {VMAcc, VTSAcc}
- end
- end, {[], 0}, Messages),
- {Right, FileSize1} =
- case Files of
- %% if it's the last file, we'll truncate to remove any
- %% rubbish above the last valid message. This affects the
- %% file size.
- [] -> {undefined, case ValidMessages of
- [] -> 0;
- _ -> {_MsgId, TotalSize, Offset} =
- lists:last(ValidMessages),
- Offset + TotalSize
- end};
- [F|_] -> {F, FileSize}
- end,
- ok = gatherer:in(Gatherer, #file_summary {
- file = File,
- valid_total_size = ValidTotalSize,
- left = Left,
- right = Right,
- file_size = FileSize1,
- locked = false,
- readers = 0 }),
- ok = gatherer:finish(Gatherer).
-
-%%----------------------------------------------------------------------------
-%% garbage collection / compaction / aggregation -- internal
-%%----------------------------------------------------------------------------
-
-maybe_roll_to_new_file(
- Offset,
- State = #msstate { dir = Dir,
- current_file_handle = CurHdl,
- current_file = CurFile,
- file_summary_ets = FileSummaryEts,
- cur_file_cache_ets = CurFileCacheEts,
- file_size_limit = FileSizeLimit })
- when Offset >= FileSizeLimit ->
- State1 = internal_sync(State),
- ok = file_handle_cache:close(CurHdl),
- NextFile = CurFile + 1,
- {ok, NextHdl} = open_file(Dir, filenum_to_name(NextFile), ?WRITE_MODE),
- true = ets:insert_new(FileSummaryEts, #file_summary {
- file = NextFile,
- valid_total_size = 0,
- left = CurFile,
- right = undefined,
- file_size = 0,
- locked = false,
- readers = 0 }),
- true = ets:update_element(FileSummaryEts, CurFile,
- {#file_summary.right, NextFile}),
- true = ets:match_delete(CurFileCacheEts, {'_', '_', 0}),
- maybe_compact(State1 #msstate { current_file_handle = NextHdl,
- current_file = NextFile });
-maybe_roll_to_new_file(_, State) ->
- State.
-
-maybe_compact(State = #msstate { sum_valid_data = SumValid,
- sum_file_size = SumFileSize,
- gc_pid = GCPid,
- pending_gc_completion = Pending,
- file_summary_ets = FileSummaryEts,
- file_size_limit = FileSizeLimit })
- when SumFileSize > 2 * FileSizeLimit andalso
- (SumFileSize - SumValid) / SumFileSize > ?GARBAGE_FRACTION ->
- %% TODO: the algorithm here is sub-optimal - it may result in a
- %% complete traversal of FileSummaryEts.
- First = ets:first(FileSummaryEts),
- case First =:= '$end_of_table' orelse
- orddict:size(Pending) >= ?MAXIMUM_SIMULTANEOUS_GC_FILES of
- true ->
- State;
- false ->
- case find_files_to_combine(FileSummaryEts, FileSizeLimit,
- ets:lookup(FileSummaryEts, First)) of
- not_found ->
- State;
- {Src, Dst} ->
- Pending1 = orddict_store(Dst, [],
- orddict_store(Src, [], Pending)),
- State1 = close_handle(Src, close_handle(Dst, State)),
- true = ets:update_element(FileSummaryEts, Src,
- {#file_summary.locked, true}),
- true = ets:update_element(FileSummaryEts, Dst,
- {#file_summary.locked, true}),
- ok = rabbit_msg_store_gc:combine(GCPid, Src, Dst),
- State1 #msstate { pending_gc_completion = Pending1 }
- end
- end;
-maybe_compact(State) ->
- State.
-
-find_files_to_combine(FileSummaryEts, FileSizeLimit,
- [#file_summary { file = Dst,
- valid_total_size = DstValid,
- right = Src,
- locked = DstLocked }]) ->
- case Src of
- undefined ->
- not_found;
- _ ->
- [#file_summary { file = Src,
- valid_total_size = SrcValid,
- left = Dst,
- right = SrcRight,
- locked = SrcLocked }] = Next =
- ets:lookup(FileSummaryEts, Src),
- case SrcRight of
- undefined -> not_found;
- _ -> case (DstValid + SrcValid =< FileSizeLimit) andalso
- (DstValid > 0) andalso (SrcValid > 0) andalso
- not (DstLocked orelse SrcLocked) of
- true -> {Src, Dst};
- false -> find_files_to_combine(
- FileSummaryEts, FileSizeLimit, Next)
- end
- end
- end.
-
-delete_file_if_empty(File, State = #msstate { current_file = File }) ->
- State;
-delete_file_if_empty(File, State = #msstate {
- gc_pid = GCPid,
- file_summary_ets = FileSummaryEts,
- pending_gc_completion = Pending }) ->
- [#file_summary { valid_total_size = ValidData,
- locked = false }] =
- ets:lookup(FileSummaryEts, File),
- case ValidData of
- %% don't delete the file_summary_ets entry for File here
- %% because we could have readers which need to be able to
- %% decrement the readers count.
- 0 -> true = ets:update_element(FileSummaryEts, File,
- {#file_summary.locked, true}),
- ok = rabbit_msg_store_gc:delete(GCPid, File),
- Pending1 = orddict_store(File, [], Pending),
- close_handle(File,
- State #msstate { pending_gc_completion = Pending1 });
- _ -> State
- end.
-
-cleanup_after_file_deletion(File,
- #msstate { file_handles_ets = FileHandlesEts,
- file_summary_ets = FileSummaryEts,
- clients = Clients }) ->
- %% Ensure that any clients that have open fhs to the file close
- %% them before using them again. This has to be done here (given
- %% it's done in the msg_store, and not the gc), and not when
- %% starting up the GC, because if done when starting up the GC,
- %% the client could find the close, and close and reopen the fh,
- %% whilst the GC is waiting for readers to disappear, before it's
- %% actually done the GC.
- true = mark_handle_to_close(Clients, FileHandlesEts, File, true),
- [#file_summary { left = Left,
- right = Right,
- locked = true,
- readers = 0 }] = ets:lookup(FileSummaryEts, File),
- %% We'll never delete the current file, so right is never undefined
- true = Right =/= undefined, %% ASSERTION
- true = ets:update_element(FileSummaryEts, Right,
- {#file_summary.left, Left}),
- %% ensure the double linked list is maintained
- true = case Left of
- undefined -> true; %% File is the eldest file (left-most)
- _ -> ets:update_element(FileSummaryEts, Left,
- {#file_summary.right, Right})
- end,
- true = ets:delete(FileSummaryEts, File),
- ok.
-
-%%----------------------------------------------------------------------------
-%% garbage collection / compaction / aggregation -- external
-%%----------------------------------------------------------------------------
-
-has_readers(File, #gc_state { file_summary_ets = FileSummaryEts }) ->
- [#file_summary { locked = true, readers = Count }] =
- ets:lookup(FileSummaryEts, File),
- Count /= 0.
-
-combine_files(Source, Destination,
- State = #gc_state { file_summary_ets = FileSummaryEts,
- file_handles_ets = FileHandlesEts,
- dir = Dir,
- msg_store = Server }) ->
- [#file_summary {
- readers = 0,
- left = Destination,
- valid_total_size = SourceValid,
- file_size = SourceFileSize,
- locked = true }] = ets:lookup(FileSummaryEts, Source),
- [#file_summary {
- readers = 0,
- right = Source,
- valid_total_size = DestinationValid,
- file_size = DestinationFileSize,
- locked = true }] = ets:lookup(FileSummaryEts, Destination),
-
- SourceName = filenum_to_name(Source),
- DestinationName = filenum_to_name(Destination),
- {ok, SourceHdl} = open_file(Dir, SourceName,
- ?READ_AHEAD_MODE),
- {ok, DestinationHdl} = open_file(Dir, DestinationName,
- ?READ_AHEAD_MODE ++ ?WRITE_MODE),
- TotalValidData = SourceValid + DestinationValid,
- %% if DestinationValid =:= DestinationContiguousTop then we don't
- %% need a tmp file
- %% if they're not equal, then we need to write out everything past
- %% the DestinationContiguousTop to a tmp file then truncate,
- %% copy back in, and then copy over from Source
- %% otherwise we just truncate straight away and copy over from Source
- {DestinationWorkList, DestinationValid} =
- load_and_vacuum_message_file(Destination, State),
- {DestinationContiguousTop, DestinationWorkListTail} =
- drop_contiguous_block_prefix(DestinationWorkList),
- case DestinationWorkListTail of
- [] -> ok = truncate_and_extend_file(
- DestinationHdl, DestinationContiguousTop, TotalValidData);
- _ -> Tmp = filename:rootname(DestinationName) ++ ?FILE_EXTENSION_TMP,
- {ok, TmpHdl} = open_file(Dir, Tmp, ?READ_AHEAD_MODE++?WRITE_MODE),
- ok = copy_messages(
- DestinationWorkListTail, DestinationContiguousTop,
- DestinationValid, DestinationHdl, TmpHdl, Destination,
- State),
- TmpSize = DestinationValid - DestinationContiguousTop,
- %% so now Tmp contains everything we need to salvage
- %% from Destination, and index_state has been updated to
- %% reflect the compaction of Destination so truncate
- %% Destination and copy from Tmp back to the end
- {ok, 0} = file_handle_cache:position(TmpHdl, 0),
- ok = truncate_and_extend_file(
- DestinationHdl, DestinationContiguousTop, TotalValidData),
- {ok, TmpSize} =
- file_handle_cache:copy(TmpHdl, DestinationHdl, TmpSize),
- %% position in DestinationHdl should now be DestinationValid
- ok = file_handle_cache:sync(DestinationHdl),
- ok = file_handle_cache:delete(TmpHdl)
- end,
- {SourceWorkList, SourceValid} = load_and_vacuum_message_file(Source, State),
- ok = copy_messages(SourceWorkList, DestinationValid, TotalValidData,
- SourceHdl, DestinationHdl, Destination, State),
- %% tidy up
- ok = file_handle_cache:close(DestinationHdl),
- ok = file_handle_cache:close(SourceHdl),
-
- %% don't update dest.right, because it could be changing at the
- %% same time
- true = ets:update_element(
- FileSummaryEts, Destination,
- [{#file_summary.valid_total_size, TotalValidData},
- {#file_summary.file_size, TotalValidData}]),
-
- Reclaimed = SourceFileSize + DestinationFileSize - TotalValidData,
- gen_server2:cast(Server, {combine_files, Source, Destination, Reclaimed}),
- safe_file_delete_fun(Source, Dir, FileHandlesEts).
-
-delete_file(File, State = #gc_state { file_summary_ets = FileSummaryEts,
- file_handles_ets = FileHandlesEts,
- dir = Dir,
- msg_store = Server }) ->
- [#file_summary { valid_total_size = 0,
- locked = true,
- file_size = FileSize,
- readers = 0 }] = ets:lookup(FileSummaryEts, File),
- {[], 0} = load_and_vacuum_message_file(File, State),
- gen_server2:cast(Server, {delete_file, File, FileSize}),
- safe_file_delete_fun(File, Dir, FileHandlesEts).
-
-load_and_vacuum_message_file(File, #gc_state { dir = Dir,
- index_module = Index,
- index_state = IndexState }) ->
- %% Messages here will be end-of-file at start-of-list
- {ok, Messages, _FileSize} =
- scan_file_for_valid_messages(Dir, filenum_to_name(File)),
- %% foldl will reverse so will end up with msgs in ascending offset order
- lists:foldl(
- fun ({MsgId, TotalSize, Offset}, Acc = {List, Size}) ->
- case Index:lookup(MsgId, IndexState) of
- #msg_location { file = File, total_size = TotalSize,
- offset = Offset, ref_count = 0 } = Entry ->
- ok = Index:delete_object(Entry, IndexState),
- Acc;
- #msg_location { file = File, total_size = TotalSize,
- offset = Offset } = Entry ->
- {[ Entry | List ], TotalSize + Size};
- _ ->
- Acc
- end
- end, {[], 0}, Messages).
-
-copy_messages(WorkList, InitOffset, FinalOffset, SourceHdl, DestinationHdl,
- Destination, #gc_state { index_module = Index,
- index_state = IndexState }) ->
- Copy = fun ({BlockStart, BlockEnd}) ->
- BSize = BlockEnd - BlockStart,
- {ok, BlockStart} =
- file_handle_cache:position(SourceHdl, BlockStart),
- {ok, BSize} =
- file_handle_cache:copy(SourceHdl, DestinationHdl, BSize)
- end,
- case
- lists:foldl(
- fun (#msg_location { msg_id = MsgId, offset = Offset,
- total_size = TotalSize },
- {CurOffset, Block = {BlockStart, BlockEnd}}) ->
- %% CurOffset is in the DestinationFile.
- %% Offset, BlockStart and BlockEnd are in the SourceFile
- %% update MsgLocation to reflect change of file and offset
- ok = Index:update_fields(MsgId,
- [{#msg_location.file, Destination},
- {#msg_location.offset, CurOffset}],
- IndexState),
- {CurOffset + TotalSize,
- case BlockEnd of
- undefined ->
- %% base case, called only for the first list elem
- {Offset, Offset + TotalSize};
- Offset ->
- %% extend the current block because the
- %% next msg follows straight on
- {BlockStart, BlockEnd + TotalSize};
- _ ->
- %% found a gap, so actually do the work for
- %% the previous block
- Copy(Block),
- {Offset, Offset + TotalSize}
- end}
- end, {InitOffset, {undefined, undefined}}, WorkList) of
- {FinalOffset, Block} ->
- case WorkList of
- [] -> ok;
- _ -> Copy(Block), %% do the last remaining block
- ok = file_handle_cache:sync(DestinationHdl)
- end;
- {FinalOffsetZ, _Block} ->
- {gc_error, [{expected, FinalOffset},
- {got, FinalOffsetZ},
- {destination, Destination}]}
- end.
-
-force_recovery(BaseDir, Store) ->
- Dir = filename:join(BaseDir, atom_to_list(Store)),
- case file:delete(filename:join(Dir, ?CLEAN_FILENAME)) of
- ok -> ok;
- {error, enoent} -> ok
- end,
- recover_crashed_compactions(BaseDir),
- ok.
-
-foreach_file(D, Fun, Files) ->
- [ok = Fun(filename:join(D, File)) || File <- Files].
-
-foreach_file(D1, D2, Fun, Files) ->
- [ok = Fun(filename:join(D1, File), filename:join(D2, File)) || File <- Files].
-
-transform_dir(BaseDir, Store, TransformFun) ->
- Dir = filename:join(BaseDir, atom_to_list(Store)),
- TmpDir = filename:join(Dir, ?TRANSFORM_TMP),
- TransformFile = fun (A, B) -> transform_msg_file(A, B, TransformFun) end,
- CopyFile = fun (Src, Dst) -> {ok, _Bytes} = file:copy(Src, Dst), ok end,
- case filelib:is_dir(TmpDir) of
- true -> throw({error, transform_failed_previously});
- false -> FileList = list_sorted_filenames(Dir, ?FILE_EXTENSION),
- foreach_file(Dir, TmpDir, TransformFile, FileList),
- foreach_file(Dir, fun file:delete/1, FileList),
- foreach_file(TmpDir, Dir, CopyFile, FileList),
- foreach_file(TmpDir, fun file:delete/1, FileList),
- ok = file:del_dir(TmpDir)
- end.
-
-transform_msg_file(FileOld, FileNew, TransformFun) ->
- ok = rabbit_file:ensure_parent_dirs_exist(FileNew),
- {ok, RefOld} = file_handle_cache:open(FileOld, [raw, binary, read], []),
- {ok, RefNew} = file_handle_cache:open(FileNew, [raw, binary, write],
- [{write_buffer,
- ?HANDLE_CACHE_BUFFER_SIZE}]),
- {ok, _Acc, _IgnoreSize} =
- rabbit_msg_file:scan(
- RefOld, filelib:file_size(FileOld),
- fun({MsgId, _Size, _Offset, BinMsg}, ok) ->
- {ok, MsgNew} = case binary_to_term(BinMsg) of
- <<>> -> {ok, <<>>}; %% dying client marker
- Msg -> TransformFun(Msg)
- end,
- {ok, _} = rabbit_msg_file:append(RefNew, MsgId, MsgNew),
- ok
- end, ok),
- ok = file_handle_cache:close(RefOld),
- ok = file_handle_cache:close(RefNew),
- ok.
diff --git a/src/rabbit_msg_store_ets_index.erl b/src/rabbit_msg_store_ets_index.erl
deleted file mode 100644
index c17ff2cb..00000000
--- a/src/rabbit_msg_store_ets_index.erl
+++ /dev/null
@@ -1,79 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_store_ets_index).
-
--include("rabbit_msg_store.hrl").
-
--behaviour(rabbit_msg_store_index).
-
--export([new/1, recover/1,
- lookup/2, insert/2, update/2, update_fields/3, delete/2,
- delete_object/2, delete_by_file/2, terminate/1]).
-
--define(MSG_LOC_NAME, rabbit_msg_store_ets_index).
--define(FILENAME, "msg_store_index.ets").
-
--record(state, { table, dir }).
-
-new(Dir) ->
- file:delete(filename:join(Dir, ?FILENAME)),
- Tid = ets:new(?MSG_LOC_NAME, [set, public, {keypos, #msg_location.msg_id}]),
- #state { table = Tid, dir = Dir }.
-
-recover(Dir) ->
- Path = filename:join(Dir, ?FILENAME),
- case ets:file2tab(Path) of
- {ok, Tid} -> file:delete(Path),
- {ok, #state { table = Tid, dir = Dir }};
- Error -> Error
- end.
-
-lookup(Key, State) ->
- case ets:lookup(State #state.table, Key) of
- [] -> not_found;
- [Entry] -> Entry
- end.
-
-insert(Obj, State) ->
- true = ets:insert_new(State #state.table, Obj),
- ok.
-
-update(Obj, State) ->
- true = ets:insert(State #state.table, Obj),
- ok.
-
-update_fields(Key, Updates, State) ->
- true = ets:update_element(State #state.table, Key, Updates),
- ok.
-
-delete(Key, State) ->
- true = ets:delete(State #state.table, Key),
- ok.
-
-delete_object(Obj, State) ->
- true = ets:delete_object(State #state.table, Obj),
- ok.
-
-delete_by_file(File, State) ->
- MatchHead = #msg_location { file = File, _ = '_' },
- ets:select_delete(State #state.table, [{MatchHead, [], [true]}]),
- ok.
-
-terminate(#state { table = MsgLocations, dir = Dir }) ->
- ok = ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME),
- [{extended_info, [object_count]}]),
- ets:delete(MsgLocations).
diff --git a/src/rabbit_msg_store_gc.erl b/src/rabbit_msg_store_gc.erl
deleted file mode 100644
index 1edd7d51..00000000
--- a/src/rabbit_msg_store_gc.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_store_gc).
-
--behaviour(gen_server2).
-
--export([start_link/1, combine/3, delete/2, no_readers/2, stop/1]).
-
--export([set_maximum_since_use/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3, prioritise_cast/3]).
-
--record(state,
- { pending_no_readers,
- on_action,
- msg_store_state
- }).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_msg_store:gc_state()) ->
- rabbit_types:ok_pid_or_error()).
--spec(combine/3 :: (pid(), rabbit_msg_store:file_num(),
- rabbit_msg_store:file_num()) -> 'ok').
--spec(delete/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
--spec(no_readers/2 :: (pid(), rabbit_msg_store:file_num()) -> 'ok').
--spec(stop/1 :: (pid()) -> 'ok').
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(MsgStoreState) ->
- gen_server2:start_link(?MODULE, [MsgStoreState],
- [{timeout, infinity}]).
-
-combine(Server, Source, Destination) ->
- gen_server2:cast(Server, {combine, Source, Destination}).
-
-delete(Server, File) ->
- gen_server2:cast(Server, {delete, File}).
-
-no_readers(Server, File) ->
- gen_server2:cast(Server, {no_readers, File}).
-
-stop(Server) ->
- gen_server2:call(Server, stop, infinity).
-
-set_maximum_since_use(Pid, Age) ->
- gen_server2:cast(Pid, {set_maximum_since_use, Age}).
-
-%%----------------------------------------------------------------------------
-
-init([MsgStoreState]) ->
- ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
- [self()]),
- {ok, #state { pending_no_readers = dict:new(),
- on_action = [],
- msg_store_state = MsgStoreState }, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
-prioritise_cast(_Msg, _Len, _State) -> 0.
-
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State}.
-
-handle_cast({combine, Source, Destination}, State) ->
- {noreply, attempt_action(combine, [Source, Destination], State), hibernate};
-
-handle_cast({delete, File}, State) ->
- {noreply, attempt_action(delete, [File], State), hibernate};
-
-handle_cast({no_readers, File},
- State = #state { pending_no_readers = Pending }) ->
- {noreply, case dict:find(File, Pending) of
- error ->
- State;
- {ok, {Action, Files}} ->
- Pending1 = dict:erase(File, Pending),
- attempt_action(
- Action, Files,
- State #state { pending_no_readers = Pending1 })
- end, hibernate};
-
-handle_cast({set_maximum_since_use, Age}, State) ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- {noreply, State, hibernate}.
-
-handle_info(Info, State) ->
- {stop, {unhandled_info, Info}, State}.
-
-terminate(_Reason, State) ->
- State.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-attempt_action(Action, Files,
- State = #state { pending_no_readers = Pending,
- on_action = Thunks,
- msg_store_state = MsgStoreState }) ->
- case [File || File <- Files,
- rabbit_msg_store:has_readers(File, MsgStoreState)] of
- [] -> State #state {
- on_action = lists:filter(
- fun (Thunk) -> not Thunk() end,
- [do_action(Action, Files, MsgStoreState) |
- Thunks]) };
- [File | _] -> Pending1 = dict:store(File, {Action, Files}, Pending),
- State #state { pending_no_readers = Pending1 }
- end.
-
-do_action(combine, [Source, Destination], MsgStoreState) ->
- rabbit_msg_store:combine_files(Source, Destination, MsgStoreState);
-do_action(delete, [File], MsgStoreState) ->
- rabbit_msg_store:delete_file(File, MsgStoreState).
diff --git a/src/rabbit_msg_store_index.erl b/src/rabbit_msg_store_index.erl
deleted file mode 100644
index bb5f11b0..00000000
--- a/src/rabbit_msg_store_index.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_store_index).
-
--include("rabbit_msg_store.hrl").
-
--ifdef(use_specs).
-
--type(dir() :: any()).
--type(index_state() :: any()).
--type(keyvalue() :: any()).
--type(fieldpos() :: non_neg_integer()).
--type(fieldvalue() :: any()).
-
--callback new(dir()) -> index_state().
--callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
--callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | keyvalue()).
--callback insert(keyvalue(), index_state()) -> 'ok'.
--callback update(keyvalue(), index_state()) -> 'ok'.
--callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
- [{fieldpos(), fieldvalue()}]),
- index_state()) -> 'ok'.
--callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
--callback delete_object(keyvalue(), index_state()) -> 'ok'.
--callback delete_by_file(fieldvalue(), index_state()) -> 'ok'.
--callback terminate(index_state()) -> any().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{new, 1},
- {recover, 1},
- {lookup, 2},
- {insert, 2},
- {update, 2},
- {update_fields, 3},
- {delete, 2},
- {delete_by_file, 2},
- {terminate, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl
deleted file mode 100644
index e8c96818..00000000
--- a/src/rabbit_net.erl
+++ /dev/null
@@ -1,232 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_net).
--include("rabbit.hrl").
-
--export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2,
- recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2,
- setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1,
- peercert/1, connection_string/2, socket_ends/2]).
-
-%%---------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([socket/0]).
-
--type(stat_option() ::
- 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
- 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend').
--type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())).
--type(ok_or_any_error() :: rabbit_types:ok_or_error(any())).
--type(socket() :: port() | #ssl_socket{}).
--type(opts() :: [{atom(), any()} |
- {raw, non_neg_integer(), non_neg_integer(), binary()}]).
--type(host_or_ip() :: binary() | inet:ip_address()).
--spec(is_ssl/1 :: (socket()) -> boolean()).
--spec(ssl_info/1 :: (socket())
- -> 'nossl' | ok_val_or_error(
- {atom(), {atom(), atom(), atom()}})).
--spec(controlling_process/2 :: (socket(), pid()) -> ok_or_any_error()).
--spec(getstat/2 ::
- (socket(), [stat_option()])
- -> ok_val_or_error([{stat_option(), integer()}])).
--spec(recv/1 :: (socket()) ->
- {'data', [char()] | binary()} | 'closed' |
- rabbit_types:error(any()) | {'other', any()}).
--spec(sync_recv/2 :: (socket(), integer()) -> rabbit_types:ok(binary()) |
- rabbit_types:error(any())).
--spec(async_recv/3 ::
- (socket(), integer(), timeout()) -> rabbit_types:ok(any())).
--spec(port_command/2 :: (socket(), iolist()) -> 'true').
--spec(getopts/2 :: (socket(), [atom() | {raw,
- non_neg_integer(),
- non_neg_integer(),
- non_neg_integer() | binary()}])
- -> ok_val_or_error(opts())).
--spec(setopts/2 :: (socket(), opts()) -> ok_or_any_error()).
--spec(send/2 :: (socket(), binary() | iolist()) -> ok_or_any_error()).
--spec(close/1 :: (socket()) -> ok_or_any_error()).
--spec(fast_close/1 :: (socket()) -> ok_or_any_error()).
--spec(sockname/1 ::
- (socket())
- -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peername/1 ::
- (socket())
- -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peercert/1 ::
- (socket())
- -> 'nossl' | ok_val_or_error(rabbit_ssl:certificate())).
--spec(connection_string/2 ::
- (socket(), 'inbound' | 'outbound') -> ok_val_or_error(string())).
--spec(socket_ends/2 ::
- (socket(), 'inbound' | 'outbound')
- -> ok_val_or_error({host_or_ip(), rabbit_networking:ip_port(),
- host_or_ip(), rabbit_networking:ip_port()})).
-
--endif.
-
-%%---------------------------------------------------------------------------
-
--define(SSL_CLOSE_TIMEOUT, 5000).
-
--define(IS_SSL(Sock), is_record(Sock, ssl_socket)).
-
-is_ssl(Sock) -> ?IS_SSL(Sock).
-
-ssl_info(Sock) when ?IS_SSL(Sock) ->
- ssl:connection_info(Sock#ssl_socket.ssl);
-ssl_info(_Sock) ->
- nossl.
-
-controlling_process(Sock, Pid) when ?IS_SSL(Sock) ->
- ssl:controlling_process(Sock#ssl_socket.ssl, Pid);
-controlling_process(Sock, Pid) when is_port(Sock) ->
- gen_tcp:controlling_process(Sock, Pid).
-
-getstat(Sock, Stats) when ?IS_SSL(Sock) ->
- inet:getstat(Sock#ssl_socket.tcp, Stats);
-getstat(Sock, Stats) when is_port(Sock) ->
- inet:getstat(Sock, Stats).
-
-recv(Sock) when ?IS_SSL(Sock) ->
- recv(Sock#ssl_socket.ssl, {ssl, ssl_closed, ssl_error});
-recv(Sock) when is_port(Sock) ->
- recv(Sock, {tcp, tcp_closed, tcp_error}).
-
-recv(S, {DataTag, ClosedTag, ErrorTag}) ->
- receive
- {DataTag, S, Data} -> {data, Data};
- {ClosedTag, S} -> closed;
- {ErrorTag, S, Reason} -> {error, Reason};
- Other -> {other, Other}
- end.
-
-sync_recv(Sock, Length) when ?IS_SSL(Sock) ->
- ssl:recv(Sock#ssl_socket.ssl, Length);
-sync_recv(Sock, Length) ->
- gen_tcp:recv(Sock, Length).
-
-async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) ->
- Pid = self(),
- Ref = make_ref(),
-
- spawn(fun () -> Pid ! {inet_async, Sock, Ref,
- ssl:recv(Sock#ssl_socket.ssl, Length, Timeout)}
- end),
-
- {ok, Ref};
-async_recv(Sock, Length, infinity) when is_port(Sock) ->
- prim_inet:async_recv(Sock, Length, -1);
-async_recv(Sock, Length, Timeout) when is_port(Sock) ->
- prim_inet:async_recv(Sock, Length, Timeout).
-
-port_command(Sock, Data) when ?IS_SSL(Sock) ->
- case ssl:send(Sock#ssl_socket.ssl, Data) of
- ok -> self() ! {inet_reply, Sock, ok},
- true;
- {error, Reason} -> erlang:error(Reason)
- end;
-port_command(Sock, Data) when is_port(Sock) ->
- erlang:port_command(Sock, Data).
-
-getopts(Sock, Options) when ?IS_SSL(Sock) ->
- ssl:getopts(Sock#ssl_socket.ssl, Options);
-getopts(Sock, Options) when is_port(Sock) ->
- inet:getopts(Sock, Options).
-
-setopts(Sock, Options) when ?IS_SSL(Sock) ->
- ssl:setopts(Sock#ssl_socket.ssl, Options);
-setopts(Sock, Options) when is_port(Sock) ->
- inet:setopts(Sock, Options).
-
-send(Sock, Data) when ?IS_SSL(Sock) -> ssl:send(Sock#ssl_socket.ssl, Data);
-send(Sock, Data) when is_port(Sock) -> gen_tcp:send(Sock, Data).
-
-close(Sock) when ?IS_SSL(Sock) -> ssl:close(Sock#ssl_socket.ssl);
-close(Sock) when is_port(Sock) -> gen_tcp:close(Sock).
-
-fast_close(Sock) when ?IS_SSL(Sock) ->
- %% We cannot simply port_close the underlying tcp socket since the
- %% TLS protocol is quite insistent that a proper closing handshake
- %% should take place (see RFC 5245 s7.2.1). So we call ssl:close
- %% instead, but that can block for a very long time, e.g. when
- %% there is lots of pending output and there is tcp backpressure,
- %% or the ssl_connection process has entered the the
- %% workaround_transport_delivery_problems function during
- %% termination, which, inexplicably, does a gen_tcp:recv(Socket,
- %% 0), which may never return if the client doesn't send a FIN or
- %% that gets swallowed by the network. Since there is no timeout
- %% variant of ssl:close, we construct our own.
- {Pid, MRef} = spawn_monitor(fun () -> ssl:close(Sock#ssl_socket.ssl) end),
- erlang:send_after(?SSL_CLOSE_TIMEOUT, self(), {Pid, ssl_close_timeout}),
- receive
- {Pid, ssl_close_timeout} ->
- erlang:demonitor(MRef, [flush]),
- exit(Pid, kill);
- {'DOWN', MRef, process, Pid, _Reason} ->
- ok
- end,
- catch port_close(Sock#ssl_socket.tcp),
- ok;
-fast_close(Sock) when is_port(Sock) ->
- catch port_close(Sock), ok.
-
-sockname(Sock) when ?IS_SSL(Sock) -> ssl:sockname(Sock#ssl_socket.ssl);
-sockname(Sock) when is_port(Sock) -> inet:sockname(Sock).
-
-peername(Sock) when ?IS_SSL(Sock) -> ssl:peername(Sock#ssl_socket.ssl);
-peername(Sock) when is_port(Sock) -> inet:peername(Sock).
-
-peercert(Sock) when ?IS_SSL(Sock) -> ssl:peercert(Sock#ssl_socket.ssl);
-peercert(Sock) when is_port(Sock) -> nossl.
-
-connection_string(Sock, Direction) ->
- case socket_ends(Sock, Direction) of
- {ok, {FromAddress, FromPort, ToAddress, ToPort}} ->
- {ok, rabbit_misc:format(
- "~s:~p -> ~s:~p",
- [maybe_ntoab(FromAddress), FromPort,
- maybe_ntoab(ToAddress), ToPort])};
- Error ->
- Error
- end.
-
-socket_ends(Sock, Direction) ->
- {From, To} = sock_funs(Direction),
- case {From(Sock), To(Sock)} of
- {{ok, {FromAddress, FromPort}}, {ok, {ToAddress, ToPort}}} ->
- {ok, {rdns(FromAddress), FromPort,
- rdns(ToAddress), ToPort}};
- {{error, _Reason} = Error, _} ->
- Error;
- {_, {error, _Reason} = Error} ->
- Error
- end.
-
-maybe_ntoab(Addr) when is_tuple(Addr) -> rabbit_misc:ntoab(Addr);
-maybe_ntoab(Host) -> Host.
-
-rdns(Addr) ->
- {ok, Lookup} = application:get_env(rabbit, reverse_dns_lookups),
- case Lookup of
- true -> list_to_binary(rabbit_networking:tcp_host(Addr));
- _ -> Addr
- end.
-
-sock_funs(inbound) -> {fun peername/1, fun sockname/1};
-sock_funs(outbound) -> {fun sockname/1, fun peername/1}.
diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl
deleted file mode 100644
index 46cfabe3..00000000
--- a/src/rabbit_networking.erl
+++ /dev/null
@@ -1,470 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_networking).
-
--export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2,
- stop_tcp_listener/1, on_node_down/1, active_listeners/0,
- node_listeners/1, register_connection/1, unregister_connection/1,
- connections/0, connection_info_keys/0,
- connection_info/1, connection_info/2,
- connection_info_all/0, connection_info_all/1,
- close_connection/2, force_connection_event_refresh/0, tcp_host/1]).
-
-%%used by TCP-based transports, e.g. STOMP adapter
--export([tcp_listener_addresses/1, tcp_listener_spec/6,
- ensure_ssl/0, ssl_transform_fun/1]).
-
--export([tcp_listener_started/3, tcp_listener_stopped/3,
- start_client/1, start_ssl_client/2]).
-
-%% Internal
--export([connections_local/0]).
-
--include("rabbit.hrl").
--include_lib("kernel/include/inet.hrl").
-
--define(SSL_TIMEOUT, 5). %% seconds
-
--define(FIRST_TEST_BIND_PORT, 10000).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([ip_port/0, hostname/0]).
-
--type(hostname() :: inet:hostname()).
--type(ip_port() :: inet:port_number()).
-
--type(family() :: atom()).
--type(listener_config() :: ip_port() |
- {hostname(), ip_port()} |
- {hostname(), ip_port(), family()}).
--type(address() :: {inet:ip_address(), ip_port(), family()}).
--type(name_prefix() :: atom()).
--type(protocol() :: atom()).
--type(label() :: string()).
-
--spec(start/0 :: () -> 'ok').
--spec(start_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(start_ssl_listener/2 ::
- (listener_config(), rabbit_types:infos()) -> 'ok').
--spec(stop_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(active_listeners/0 :: () -> [rabbit_types:listener()]).
--spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]).
--spec(register_connection/1 :: (pid()) -> ok).
--spec(unregister_connection/1 :: (pid()) -> ok).
--spec(connections/0 :: () -> [rabbit_types:connection()]).
--spec(connections_local/0 :: () -> [rabbit_types:connection()]).
--spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(connection_info/1 ::
- (rabbit_types:connection()) -> rabbit_types:infos()).
--spec(connection_info/2 ::
- (rabbit_types:connection(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(connection_info_all/0 :: () -> [rabbit_types:infos()]).
--spec(connection_info_all/1 ::
- (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(close_connection/2 :: (pid(), string()) -> 'ok').
--spec(force_connection_event_refresh/0 :: () -> 'ok').
-
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(tcp_listener_addresses/1 :: (listener_config()) -> [address()]).
--spec(tcp_listener_spec/6 ::
- (name_prefix(), address(), [gen_tcp:listen_option()], protocol(),
- label(), rabbit_types:mfargs()) -> supervisor:child_spec()).
--spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
--spec(ssl_transform_fun/1 ::
- (rabbit_types:infos())
- -> fun ((rabbit_net:socket())
- -> rabbit_types:ok_or_error(#ssl_socket{}))).
-
--spec(boot/0 :: () -> 'ok').
--spec(start_client/1 ::
- (port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
- atom() | pid() | port() | {atom(),atom()}).
--spec(start_ssl_client/2 ::
- (_,port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
- atom() | pid() | port() | {atom(),atom()}).
--spec(tcp_listener_started/3 ::
- (_,
- string() |
- {byte(),byte(),byte(),byte()} |
- {char(),char(),char(),char(),char(),char(),char(),char()},
- _) ->
- 'ok').
--spec(tcp_listener_stopped/3 ::
- (_,
- string() |
- {byte(),byte(),byte(),byte()} |
- {char(),char(),char(),char(),char(),char(),char(),char()},
- _) ->
- 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-boot() ->
- ok = start(),
- ok = boot_tcp(),
- ok = boot_ssl().
-
-boot_tcp() ->
- {ok, TcpListeners} = application:get_env(tcp_listeners),
- [ok = start_tcp_listener(Listener) || Listener <- TcpListeners],
- ok.
-
-boot_ssl() ->
- case application:get_env(ssl_listeners) of
- {ok, []} ->
- ok;
- {ok, SslListeners} ->
- [start_ssl_listener(Listener, ensure_ssl())
- || Listener <- SslListeners],
- ok
- end.
-
-start() -> rabbit_sup:start_supervisor_child(
- rabbit_tcp_client_sup, rabbit_client_sup,
- [{local, rabbit_tcp_client_sup},
- {rabbit_connection_sup,start_link,[]}]).
-
-ensure_ssl() ->
- {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
- ok = app_utils:start_applications(SslAppsConfig),
- {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options),
-
- % unknown_ca errors are silently ignored prior to R14B unless we
- % supply this verify_fun - remove when at least R14B is required
- case proplists:get_value(verify, SslOptsConfig, verify_none) of
- verify_none -> SslOptsConfig;
- verify_peer -> [{verify_fun, fun([]) -> true;
- ([_|_]) -> false
- end}
- | SslOptsConfig]
- end.
-
-ssl_transform_fun(SslOpts) ->
- fun (Sock) ->
- case catch ssl:ssl_accept(Sock, SslOpts, ?SSL_TIMEOUT * 1000) of
- {ok, SslSock} ->
- {ok, #ssl_socket{tcp = Sock, ssl = SslSock}};
- {error, timeout} ->
- {error, {ssl_upgrade_error, timeout}};
- {error, Reason} ->
- %% We have no idea what state the ssl_connection
- %% process is in - it could still be happily
- %% going, it might be stuck, or it could be just
- %% about to fail. There is little that our caller
- %% can do but close the TCP socket, but this could
- %% cause ssl alerts to get dropped (which is bad
- %% form, according to the TLS spec). So we give
- %% the ssl_connection a little bit of time to send
- %% such alerts.
- timer:sleep(?SSL_TIMEOUT * 1000),
- {error, {ssl_upgrade_error, Reason}};
- {'EXIT', Reason} ->
- {error, {ssl_upgrade_failure, Reason}}
- end
- end.
-
-tcp_listener_addresses(Port) when is_integer(Port) ->
- tcp_listener_addresses_auto(Port);
-tcp_listener_addresses({"auto", Port}) ->
- %% Variant to prevent lots of hacking around in bash and batch files
- tcp_listener_addresses_auto(Port);
-tcp_listener_addresses({Host, Port}) ->
- %% auto: determine family IPv4 / IPv6 after converting to IP address
- tcp_listener_addresses({Host, Port, auto});
-tcp_listener_addresses({Host, Port, Family0})
- when is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) ->
- [{IPAddress, Port, Family} ||
- {IPAddress, Family} <- getaddr(Host, Family0)];
-tcp_listener_addresses({_Host, Port, _Family0}) ->
- error_logger:error_msg("invalid port ~p - not 0..65535~n", [Port]),
- throw({error, {invalid_port, Port}}).
-
-tcp_listener_addresses_auto(Port) ->
- lists:append([tcp_listener_addresses(Listener) ||
- Listener <- port_to_listeners(Port)]).
-
-tcp_listener_spec(NamePrefix, {IPAddress, Port, Family}, SocketOpts,
- Protocol, Label, OnConnect) ->
- {rabbit_misc:tcp_name(NamePrefix, IPAddress, Port),
- {tcp_listener_sup, start_link,
- [IPAddress, Port, [Family | SocketOpts],
- {?MODULE, tcp_listener_started, [Protocol]},
- {?MODULE, tcp_listener_stopped, [Protocol]},
- OnConnect, Label]},
- transient, infinity, supervisor, [tcp_listener_sup]}.
-
-start_tcp_listener(Listener) ->
- start_listener(Listener, amqp, "TCP Listener",
- {?MODULE, start_client, []}).
-
-start_ssl_listener(Listener, SslOpts) ->
- start_listener(Listener, 'amqp/ssl', "SSL Listener",
- {?MODULE, start_ssl_client, [SslOpts]}).
-
-start_listener(Listener, Protocol, Label, OnConnect) ->
- [start_listener0(Address, Protocol, Label, OnConnect) ||
- Address <- tcp_listener_addresses(Listener)],
- ok.
-
-start_listener0(Address, Protocol, Label, OnConnect) ->
- Spec = tcp_listener_spec(rabbit_tcp_listener_sup, Address, tcp_opts(),
- Protocol, Label, OnConnect),
- case supervisor:start_child(rabbit_sup, Spec) of
- {ok, _} -> ok;
- {error, {shutdown, _}} -> {IPAddress, Port, _Family} = Address,
- exit({could_not_start_tcp_listener,
- {rabbit_misc:ntoa(IPAddress), Port}})
- end.
-
-stop_tcp_listener(Listener) ->
- [stop_tcp_listener0(Address) ||
- Address <- tcp_listener_addresses(Listener)],
- ok.
-
-stop_tcp_listener0({IPAddress, Port, _Family}) ->
- Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port),
- ok = supervisor:terminate_child(rabbit_sup, Name),
- ok = supervisor:delete_child(rabbit_sup, Name).
-
-tcp_listener_started(Protocol, IPAddress, Port) ->
- %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1
- %% We need the host so we can distinguish multiple instances of the above
- %% in a cluster.
- ok = mnesia:dirty_write(
- rabbit_listener,
- #listener{node = node(),
- protocol = Protocol,
- host = tcp_host(IPAddress),
- ip_address = IPAddress,
- port = Port}).
-
-tcp_listener_stopped(Protocol, IPAddress, Port) ->
- ok = mnesia:dirty_delete_object(
- rabbit_listener,
- #listener{node = node(),
- protocol = Protocol,
- host = tcp_host(IPAddress),
- ip_address = IPAddress,
- port = Port}).
-
-active_listeners() ->
- rabbit_misc:dirty_read_all(rabbit_listener).
-
-node_listeners(Node) ->
- mnesia:dirty_read(rabbit_listener, Node).
-
-on_node_down(Node) ->
- ok = mnesia:dirty_delete(rabbit_listener, Node).
-
-start_client(Sock, SockTransform) ->
- {ok, _Child, Reader} = supervisor:start_child(rabbit_tcp_client_sup, []),
- ok = rabbit_net:controlling_process(Sock, Reader),
- Reader ! {go, Sock, SockTransform},
-
- %% In the event that somebody floods us with connections, the
- %% reader processes can spew log events at error_logger faster
- %% than it can keep up, causing its mailbox to grow unbounded
- %% until we eat all the memory available and crash. So here is a
- %% meaningless synchronous call to the underlying gen_event
- %% mechanism. When it returns the mailbox is drained, and we
- %% return to our caller to accept more connetions.
- gen_event:which_handlers(error_logger),
-
- Reader.
-
-start_client(Sock) ->
- start_client(Sock, fun (S) -> {ok, S} end).
-
-start_ssl_client(SslOpts, Sock) ->
- start_client(Sock, ssl_transform_fun(SslOpts)).
-
-register_connection(Pid) -> pg_local:join(rabbit_connections, Pid).
-
-unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid).
-
-connections() ->
- rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
- rabbit_networking, connections_local, []).
-
-connections_local() -> pg_local:get_members(rabbit_connections).
-
-connection_info_keys() -> rabbit_reader:info_keys().
-
-connection_info(Pid) -> rabbit_reader:info(Pid).
-connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).
-
-connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
-connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
-
-close_connection(Pid, Explanation) ->
- rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
- case lists:member(Pid, connections()) of
- true -> rabbit_reader:shutdown(Pid, Explanation);
- false -> throw({error, {not_a_connection_pid, Pid}})
- end.
-
-force_connection_event_refresh() ->
- [rabbit_reader:force_event_refresh(C) || C <- connections()],
- ok.
-
-%%--------------------------------------------------------------------
-
-tcp_host({0,0,0,0}) ->
- hostname();
-
-tcp_host({0,0,0,0,0,0,0,0}) ->
- hostname();
-
-tcp_host(IPAddress) ->
- case inet:gethostbyaddr(IPAddress) of
- {ok, #hostent{h_name = Name}} -> Name;
- {error, _Reason} -> rabbit_misc:ntoa(IPAddress)
- end.
-
-hostname() ->
- {ok, Hostname} = inet:gethostname(),
- case inet:gethostbyname(Hostname) of
- {ok, #hostent{h_name = Name}} -> Name;
- {error, _Reason} -> Hostname
- end.
-
-cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).
-
-tcp_opts() ->
- {ok, Opts} = application:get_env(rabbit, tcp_listen_options),
- Opts.
-
-%% inet_parse:address takes care of ip string, like "0.0.0.0"
-%% inet:getaddr returns immediately for ip tuple {0,0,0,0},
-%% and runs 'inet_gethost' port process for dns lookups.
-%% On Windows inet:getaddr runs dns resolver for ip string, which may fail.
-getaddr(Host, Family) ->
- case inet_parse:address(Host) of
- {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}];
- {error, _} -> gethostaddr(Host, Family)
- end.
-
-gethostaddr(Host, auto) ->
- Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]],
- case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of
- [] -> host_lookup_error(Host, Lookups);
- IPs -> IPs
- end;
-
-gethostaddr(Host, Family) ->
- case inet:getaddr(Host, Family) of
- {ok, IPAddress} -> [{IPAddress, Family}];
- {error, Reason} -> host_lookup_error(Host, Reason)
- end.
-
-host_lookup_error(Host, Reason) ->
- error_logger:error_msg("invalid host ~p - ~p~n", [Host, Reason]),
- throw({error, {invalid_host, Host, Reason}}).
-
-resolve_family({_,_,_,_}, auto) -> inet;
-resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6;
-resolve_family(IP, auto) -> throw({error, {strange_family, IP}});
-resolve_family(_, F) -> F.
-
-%%--------------------------------------------------------------------
-
-%% There are three kinds of machine (for our purposes).
-%%
-%% * Those which treat IPv4 addresses as a special kind of IPv6 address
-%% ("Single stack")
-%% - Linux by default, Windows Vista and later
-%% - We also treat any (hypothetical?) IPv6-only machine the same way
-%% * Those which consider IPv6 and IPv4 to be completely separate things
-%% ("Dual stack")
-%% - OpenBSD, Windows XP / 2003, Linux if so configured
-%% * Those which do not support IPv6.
-%% - Ancient/weird OSes, Linux if so configured
-%%
-%% How to reconfigure Linux to test this:
-%% Single stack (default):
-%% echo 0 > /proc/sys/net/ipv6/bindv6only
-%% Dual stack:
-%% echo 1 > /proc/sys/net/ipv6/bindv6only
-%% IPv4 only:
-%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then
-%% sudo update-grub && sudo reboot
-%%
-%% This matters in (and only in) the case where the sysadmin (or the
-%% app descriptor) has only supplied a port and we wish to bind to
-%% "all addresses". This means different things depending on whether
-%% we're single or dual stack. On single stack binding to "::"
-%% implicitly includes all IPv4 addresses, and subsequently attempting
-%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will
-%% only bind to IPv6 addresses, and we need another listener bound to
-%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only
-%% want to bind to "0.0.0.0".
-%%
-%% Unfortunately it seems there is no way to detect single vs dual stack
-%% apart from attempting to bind to the port.
-port_to_listeners(Port) ->
- IPv4 = {"0.0.0.0", Port, inet},
- IPv6 = {"::", Port, inet6},
- case ipv6_status(?FIRST_TEST_BIND_PORT) of
- single_stack -> [IPv6];
- ipv6_only -> [IPv6];
- dual_stack -> [IPv6, IPv4];
- ipv4_only -> [IPv4]
- end.
-
-ipv6_status(TestPort) ->
- IPv4 = [inet, {ip, {0,0,0,0}}],
- IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}],
- case gen_tcp:listen(TestPort, IPv6) of
- {ok, LSock6} ->
- case gen_tcp:listen(TestPort, IPv4) of
- {ok, LSock4} ->
- %% Dual stack
- gen_tcp:close(LSock6),
- gen_tcp:close(LSock4),
- dual_stack;
- %% Checking the error here would only let us
- %% distinguish single stack IPv6 / IPv4 vs IPv6 only,
- %% which we figure out below anyway.
- {error, _} ->
- gen_tcp:close(LSock6),
- case gen_tcp:listen(TestPort, IPv4) of
- %% Single stack
- {ok, LSock4} -> gen_tcp:close(LSock4),
- single_stack;
- %% IPv6-only machine. Welcome to the future.
- {error, eafnosupport} -> ipv6_only; %% Linux
- {error, eprotonosupport}-> ipv6_only; %% FreeBSD
- %% Dual stack machine with something already
- %% on IPv4.
- {error, _} -> ipv6_status(TestPort + 1)
- end
- end;
- %% IPv4-only machine. Welcome to the 90s.
- {error, eafnosupport} -> %% Linux
- ipv4_only;
- {error, eprotonosupport} -> %% FreeBSD
- ipv4_only;
- %% Port in use
- {error, _} ->
- ipv6_status(TestPort + 1)
- end.
diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl
deleted file mode 100644
index 57dce7cd..00000000
--- a/src/rabbit_node_monitor.erl
+++ /dev/null
@@ -1,476 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_node_monitor).
-
--behaviour(gen_server).
-
--export([start_link/0]).
--export([running_nodes_filename/0,
- cluster_status_filename/0, prepare_cluster_status_files/0,
- write_cluster_status/1, read_cluster_status/0,
- update_cluster_status/0, reset_cluster_status/0]).
--export([notify_node_up/0, notify_joined_cluster/0, notify_left_cluster/1]).
--export([partitions/0, partitions/1, subscribe/1]).
-
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
- %% Utils
--export([all_rabbit_nodes_up/0, run_outside_applications/1]).
-
--define(SERVER, ?MODULE).
--define(RABBIT_UP_RPC_TIMEOUT, 2000).
--define(RABBIT_DOWN_PING_INTERVAL, 1000).
-
--record(state, {monitors, partitions, subscribers, down_ping_timer, autoheal}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-
--spec(running_nodes_filename/0 :: () -> string()).
--spec(cluster_status_filename/0 :: () -> string()).
--spec(prepare_cluster_status_files/0 :: () -> 'ok').
--spec(write_cluster_status/1 :: (rabbit_mnesia:cluster_status()) -> 'ok').
--spec(read_cluster_status/0 :: () -> rabbit_mnesia:cluster_status()).
--spec(update_cluster_status/0 :: () -> 'ok').
--spec(reset_cluster_status/0 :: () -> 'ok').
-
--spec(notify_node_up/0 :: () -> 'ok').
--spec(notify_joined_cluster/0 :: () -> 'ok').
--spec(notify_left_cluster/1 :: (node()) -> 'ok').
-
--spec(partitions/0 :: () -> [node()]).
--spec(partitions/1 :: ([node()]) -> [{node(), [node()]}]).
--spec(subscribe/1 :: (pid()) -> 'ok').
-
--spec(all_rabbit_nodes_up/0 :: () -> boolean()).
--spec(run_outside_applications/1 :: (fun (() -> any())) -> pid()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Start
-%%----------------------------------------------------------------------------
-
-start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
-
-%%----------------------------------------------------------------------------
-%% Cluster file operations
-%%----------------------------------------------------------------------------
-
-%% The cluster file information is kept in two files. The "cluster
-%% status file" contains all the clustered nodes and the disc nodes.
-%% The "running nodes file" contains the currently running nodes or
-%% the running nodes at shutdown when the node is down.
-%%
-%% We strive to keep the files up to date and we rely on this
-%% assumption in various situations. Obviously when mnesia is offline
-%% the information we have will be outdated, but it cannot be
-%% otherwise.
-
-running_nodes_filename() ->
- filename:join(rabbit_mnesia:dir(), "nodes_running_at_shutdown").
-
-cluster_status_filename() ->
- rabbit_mnesia:dir() ++ "/cluster_nodes.config".
-
-prepare_cluster_status_files() ->
- rabbit_mnesia:ensure_mnesia_dir(),
- Corrupt = fun(F) -> throw({error, corrupt_cluster_status_files, F}) end,
- RunningNodes1 = case try_read_file(running_nodes_filename()) of
- {ok, [Nodes]} when is_list(Nodes) -> Nodes;
- {ok, Other} -> Corrupt(Other);
- {error, enoent} -> []
- end,
- ThisNode = [node()],
- %% The running nodes file might contain a set or a list, in case
- %% of the legacy file
- RunningNodes2 = lists:usort(ThisNode ++ RunningNodes1),
- {AllNodes1, WantDiscNode} =
- case try_read_file(cluster_status_filename()) of
- {ok, [{AllNodes, DiscNodes0}]} ->
- {AllNodes, lists:member(node(), DiscNodes0)};
- {ok, [AllNodes0]} when is_list(AllNodes0) ->
- {legacy_cluster_nodes(AllNodes0),
- legacy_should_be_disc_node(AllNodes0)};
- {ok, Files} ->
- Corrupt(Files);
- {error, enoent} ->
- {legacy_cluster_nodes([]), true}
- end,
- AllNodes2 = lists:usort(AllNodes1 ++ RunningNodes2),
- DiscNodes = case WantDiscNode of
- true -> ThisNode;
- false -> []
- end,
- ok = write_cluster_status({AllNodes2, DiscNodes, RunningNodes2}).
-
-write_cluster_status({All, Disc, Running}) ->
- ClusterStatusFN = cluster_status_filename(),
- Res = case rabbit_file:write_term_file(ClusterStatusFN, [{All, Disc}]) of
- ok ->
- RunningNodesFN = running_nodes_filename(),
- {RunningNodesFN,
- rabbit_file:write_term_file(RunningNodesFN, [Running])};
- E1 = {error, _} ->
- {ClusterStatusFN, E1}
- end,
- case Res of
- {_, ok} -> ok;
- {FN, {error, E2}} -> throw({error, {could_not_write_file, FN, E2}})
- end.
-
-read_cluster_status() ->
- case {try_read_file(cluster_status_filename()),
- try_read_file(running_nodes_filename())} of
- {{ok, [{All, Disc}]}, {ok, [Running]}} when is_list(Running) ->
- {All, Disc, Running};
- {Stat, Run} ->
- throw({error, {corrupt_or_missing_cluster_files, Stat, Run}})
- end.
-
-update_cluster_status() ->
- {ok, Status} = rabbit_mnesia:cluster_status_from_mnesia(),
- write_cluster_status(Status).
-
-reset_cluster_status() ->
- write_cluster_status({[node()], [node()], [node()]}).
-
-%%----------------------------------------------------------------------------
-%% Cluster notifications
-%%----------------------------------------------------------------------------
-
-notify_node_up() ->
- Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
- gen_server:abcast(Nodes, ?SERVER,
- {node_up, node(), rabbit_mnesia:node_type()}),
- %% register other active rabbits with this rabbit
- DiskNodes = rabbit_mnesia:cluster_nodes(disc),
- [gen_server:cast(?SERVER, {node_up, N, case lists:member(N, DiskNodes) of
- true -> disc;
- false -> ram
- end}) || N <- Nodes],
- ok.
-
-notify_joined_cluster() ->
- Nodes = rabbit_mnesia:cluster_nodes(running) -- [node()],
- gen_server:abcast(Nodes, ?SERVER,
- {joined_cluster, node(), rabbit_mnesia:node_type()}),
- ok.
-
-notify_left_cluster(Node) ->
- Nodes = rabbit_mnesia:cluster_nodes(running),
- gen_server:abcast(Nodes, ?SERVER, {left_cluster, Node}),
- ok.
-
-%%----------------------------------------------------------------------------
-%% Server calls
-%%----------------------------------------------------------------------------
-
-partitions() ->
- gen_server:call(?SERVER, partitions, infinity).
-
-partitions(Nodes) ->
- {Replies, _} = gen_server:multi_call(Nodes, ?SERVER, partitions, infinity),
- Replies.
-
-subscribe(Pid) ->
- gen_server:cast(?SERVER, {subscribe, Pid}).
-
-%%----------------------------------------------------------------------------
-%% gen_server callbacks
-%%----------------------------------------------------------------------------
-
-init([]) ->
- %% We trap exits so that the supervisor will not just kill us. We
- %% want to be sure that we are not going to be killed while
- %% writing out the cluster status files - bad things can then
- %% happen.
- process_flag(trap_exit, true),
- net_kernel:monitor_nodes(true),
- {ok, _} = mnesia:subscribe(system),
- {ok, #state{monitors = pmon:new(),
- subscribers = pmon:new(),
- partitions = [],
- autoheal = rabbit_autoheal:init()}}.
-
-handle_call(partitions, _From, State = #state{partitions = Partitions}) ->
- {reply, Partitions, State};
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-%% Note: when updating the status file, we can't simply write the
-%% mnesia information since the message can (and will) overtake the
-%% mnesia propagation.
-handle_cast({node_up, Node, NodeType},
- State = #state{monitors = Monitors}) ->
- case pmon:is_monitored({rabbit, Node}, Monitors) of
- true -> {noreply, State};
- false -> rabbit_log:info("rabbit on node ~p up~n", [Node]),
- {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
- write_cluster_status({add_node(Node, AllNodes),
- case NodeType of
- disc -> add_node(Node, DiscNodes);
- ram -> DiscNodes
- end,
- add_node(Node, RunningNodes)}),
- ok = handle_live_rabbit(Node),
- {noreply, State#state{
- monitors = pmon:monitor({rabbit, Node}, Monitors)}}
- end;
-handle_cast({joined_cluster, Node, NodeType}, State) ->
- {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
- write_cluster_status({add_node(Node, AllNodes),
- case NodeType of
- disc -> add_node(Node, DiscNodes);
- ram -> DiscNodes
- end,
- RunningNodes}),
- {noreply, State};
-handle_cast({left_cluster, Node}, State) ->
- {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
- write_cluster_status({del_node(Node, AllNodes), del_node(Node, DiscNodes),
- del_node(Node, RunningNodes)}),
- {noreply, State};
-handle_cast({subscribe, Pid}, State = #state{subscribers = Subscribers}) ->
- {noreply, State#state{subscribers = pmon:monitor(Pid, Subscribers)}};
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({'DOWN', _MRef, process, {rabbit, Node}, _Reason},
- State = #state{monitors = Monitors, subscribers = Subscribers}) ->
- rabbit_log:info("rabbit on node ~p down~n", [Node]),
- {AllNodes, DiscNodes, RunningNodes} = read_cluster_status(),
- write_cluster_status({AllNodes, DiscNodes, del_node(Node, RunningNodes)}),
- ok = handle_dead_rabbit(Node),
- [P ! {node_down, Node} || P <- pmon:monitored(Subscribers)],
- {noreply, handle_dead_rabbit_state(
- Node,
- State#state{monitors = pmon:erase({rabbit, Node}, Monitors)})};
-
-handle_info({'DOWN', _MRef, process, Pid, _Reason},
- State = #state{subscribers = Subscribers}) ->
- {noreply, State#state{subscribers = pmon:erase(Pid, Subscribers)}};
-
-handle_info({nodedown, Node}, State) ->
- ok = handle_dead_node(Node),
- {noreply, State};
-
-handle_info({mnesia_system_event,
- {inconsistent_database, running_partitioned_network, Node}},
- State = #state{partitions = Partitions,
- monitors = Monitors,
- autoheal = AState}) ->
- %% We will not get a node_up from this node - yet we should treat it as
- %% up (mostly).
- State1 = case pmon:is_monitored({rabbit, Node}, Monitors) of
- true -> State;
- false -> State#state{
- monitors = pmon:monitor({rabbit, Node}, Monitors)}
- end,
- ok = handle_live_rabbit(Node),
- Partitions1 = ordsets:to_list(
- ordsets:add_element(Node, ordsets:from_list(Partitions))),
- {noreply, State1#state{partitions = Partitions1,
- autoheal = rabbit_autoheal:maybe_start(AState)}};
-
-handle_info({autoheal_msg, Msg}, State = #state{autoheal = AState,
- partitions = Partitions}) ->
- AState1 = rabbit_autoheal:handle_msg(Msg, AState, Partitions),
- {noreply, State#state{autoheal = AState1}};
-
-handle_info(ping_nodes, State) ->
- %% We ping nodes when some are down to ensure that we find out
- %% about healed partitions quickly. We ping all nodes rather than
- %% just the ones we know are down for simplicity; it's not expensive
- %% to ping the nodes that are up, after all.
- State1 = State#state{down_ping_timer = undefined},
- Self = self(),
- %% all_nodes_up() both pings all the nodes and tells us if we need to again.
- %%
- %% We ping in a separate process since in a partition it might
- %% take some noticeable length of time and we don't want to block
- %% the node monitor for that long.
- spawn_link(fun () ->
- case all_nodes_up() of
- true -> ok;
- false -> Self ! ping_again
- end
- end),
- {noreply, State1};
-
-handle_info(ping_again, State) ->
- {noreply, ensure_ping_timer(State)};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, State) ->
- rabbit_misc:stop_timer(State, #state.down_ping_timer),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-%% Functions that call the module specific hooks when nodes go up/down
-%%----------------------------------------------------------------------------
-
-%% TODO: This may turn out to be a performance hog when there are lots
-%% of nodes. We really only need to execute some of these statements
-%% on *one* node, rather than all of them.
-handle_dead_rabbit(Node) ->
- ok = rabbit_networking:on_node_down(Node),
- ok = rabbit_amqqueue:on_node_down(Node),
- ok = rabbit_alarm:on_node_down(Node),
- ok = rabbit_mnesia:on_node_down(Node),
- ok.
-
-handle_dead_node(_Node) ->
- %% In general in rabbit_node_monitor we care about whether the
- %% rabbit application is up rather than the node; we do this so
- %% that we can respond in the same way to "rabbitmqctl stop_app"
- %% and "rabbitmqctl stop" as much as possible.
- %%
- %% However, for pause_minority mode we can't do this, since we
- %% depend on looking at whether other nodes are up to decide
- %% whether to come back up ourselves - if we decide that based on
- %% the rabbit application we would go down and never come back.
- case application:get_env(rabbit, cluster_partition_handling) of
- {ok, pause_minority} ->
- case majority() of
- true -> ok;
- false -> await_cluster_recovery()
- end;
- {ok, ignore} ->
- ok;
- {ok, autoheal} ->
- ok;
- {ok, Term} ->
- rabbit_log:warning("cluster_partition_handling ~p unrecognised, "
- "assuming 'ignore'~n", [Term]),
- ok
- end.
-
-await_cluster_recovery() ->
- rabbit_log:warning("Cluster minority status detected - awaiting recovery~n",
- []),
- Nodes = rabbit_mnesia:cluster_nodes(all),
- run_outside_applications(fun () ->
- rabbit:stop(),
- wait_for_cluster_recovery(Nodes)
- end),
- ok.
-
-run_outside_applications(Fun) ->
- spawn(fun () ->
- %% If our group leader is inside an application we are about
- %% to stop, application:stop/1 does not return.
- group_leader(whereis(init), self()),
- %% Ensure only one such process at a time, the
- %% exit(badarg) is harmless if one is already running
- try register(rabbit_outside_app_process, self()) of
- true -> Fun()
- catch error:badarg -> ok
- end
- end).
-
-wait_for_cluster_recovery(Nodes) ->
- case majority() of
- true -> rabbit:start();
- false -> timer:sleep(?RABBIT_DOWN_PING_INTERVAL),
- wait_for_cluster_recovery(Nodes)
- end.
-
-handle_dead_rabbit_state(Node, State = #state{partitions = Partitions,
- autoheal = Autoheal}) ->
- %% If we have been partitioned, and we are now in the only remaining
- %% partition, we no longer care about partitions - forget them. Note
- %% that we do not attempt to deal with individual (other) partitions
- %% going away. It's only safe to forget anything about partitions when
- %% there are no partitions.
- Partitions1 = case Partitions -- (Partitions -- alive_rabbit_nodes()) of
- [] -> [];
- _ -> Partitions
- end,
- ensure_ping_timer(
- State#state{partitions = Partitions1,
- autoheal = rabbit_autoheal:node_down(Node, Autoheal)}).
-
-ensure_ping_timer(State) ->
- rabbit_misc:ensure_timer(
- State, #state.down_ping_timer, ?RABBIT_DOWN_PING_INTERVAL, ping_nodes).
-
-handle_live_rabbit(Node) ->
- ok = rabbit_alarm:on_node_up(Node),
- ok = rabbit_mnesia:on_node_up(Node).
-
-%%--------------------------------------------------------------------
-%% Internal utils
-%%--------------------------------------------------------------------
-
-try_read_file(FileName) ->
- case rabbit_file:read_term_file(FileName) of
- {ok, Term} -> {ok, Term};
- {error, enoent} -> {error, enoent};
- {error, E} -> throw({error, {cannot_read_file, FileName, E}})
- end.
-
-legacy_cluster_nodes(Nodes) ->
- %% We get all the info that we can, including the nodes from
- %% mnesia, which will be there if the node is a disc node (empty
- %% list otherwise)
- lists:usort(Nodes ++ mnesia:system_info(db_nodes)).
-
-legacy_should_be_disc_node(DiscNodes) ->
- DiscNodes == [] orelse lists:member(node(), DiscNodes).
-
-add_node(Node, Nodes) -> lists:usort([Node | Nodes]).
-
-del_node(Node, Nodes) -> Nodes -- [Node].
-
-%%--------------------------------------------------------------------
-
-%% mnesia:system_info(db_nodes) (and hence
-%% rabbit_mnesia:cluster_nodes(running)) does not give reliable
-%% results when partitioned. So we have a small set of replacement
-%% functions here. "rabbit" in a function's name implies we test if
-%% the rabbit application is up, not just the node.
-
-majority() ->
- Nodes = rabbit_mnesia:cluster_nodes(all),
- length(alive_nodes(Nodes)) / length(Nodes) > 0.5.
-
-all_nodes_up() ->
- Nodes = rabbit_mnesia:cluster_nodes(all),
- length(alive_nodes(Nodes)) =:= length(Nodes).
-
-all_rabbit_nodes_up() ->
- Nodes = rabbit_mnesia:cluster_nodes(all),
- length(alive_rabbit_nodes(Nodes)) =:= length(Nodes).
-
-alive_nodes(Nodes) -> [N || N <- Nodes, pong =:= net_adm:ping(N)].
-
-alive_rabbit_nodes() -> alive_rabbit_nodes(rabbit_mnesia:cluster_nodes(all)).
-
-alive_rabbit_nodes(Nodes) ->
- [N || N <- alive_nodes(Nodes), rabbit:is_running(N)].
diff --git a/src/rabbit_nodes.erl b/src/rabbit_nodes.erl
deleted file mode 100644
index b54fdd2e..00000000
--- a/src/rabbit_nodes.erl
+++ /dev/null
@@ -1,109 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_nodes).
-
--export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
- is_running/2, is_process_running/2]).
-
--define(EPMD_TIMEOUT, 30000).
-
-%%----------------------------------------------------------------------------
-%% Specs
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(names/1 :: (string()) -> rabbit_types:ok_or_error2(
- [{string(), integer()}], term())).
--spec(diagnostics/1 :: ([node()]) -> string()).
--spec(make/1 :: ({string(), string()} | string()) -> node()).
--spec(parts/1 :: (node() | string()) -> {string(), string()}).
--spec(cookie_hash/0 :: () -> string()).
--spec(is_running/2 :: (node(), atom()) -> boolean()).
--spec(is_process_running/2 :: (node(), atom()) -> boolean()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-names(Hostname) ->
- Self = self(),
- Ref = make_ref(),
- {Pid, MRef} = spawn_monitor(
- fun () -> Self ! {Ref, net_adm:names(Hostname)} end),
- timer:exit_after(?EPMD_TIMEOUT, Pid, timeout),
- receive
- {Ref, Names} -> erlang:demonitor(MRef, [flush]),
- Names;
- {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
- end.
-
-diagnostics(Nodes) ->
- Hosts = lists:usort([element(2, parts(Node)) || Node <- Nodes]),
- NodeDiags = [{"~nDIAGNOSTICS~n===========~n~n"
- "nodes in question: ~p~n~n"
- "hosts, their running nodes and ports:", [Nodes]}] ++
- [diagnostics_host(Host) || Host <- Hosts] ++
- diagnostics0(),
- rabbit_misc:format_many(lists:flatten(NodeDiags)).
-
-diagnostics0() ->
- [{"~ncurrent node details:~n- node name: ~w", [node()]},
- case init:get_argument(home) of
- {ok, [[Home]]} -> {"- home dir: ~s", [Home]};
- Other -> {"- no home dir: ~p", [Other]}
- end,
- {"- cookie hash: ~s", [cookie_hash()]}].
-
-diagnostics_host(Host) ->
- case names(Host) of
- {error, EpmdReason} ->
- {"- unable to connect to epmd on ~s: ~w (~s)",
- [Host, EpmdReason, rabbit_misc:format_inet_error(EpmdReason)]};
- {ok, NamePorts} ->
- {"- ~s: ~p",
- [Host, [{list_to_atom(Name), Port} ||
- {Name, Port} <- NamePorts]]}
- end.
-
-make({Prefix, Suffix}) -> list_to_atom(lists:append([Prefix, "@", Suffix]));
-make(NodeStr) -> make(parts(NodeStr)).
-
-parts(Node) when is_atom(Node) ->
- parts(atom_to_list(Node));
-parts(NodeStr) ->
- case lists:splitwith(fun (E) -> E =/= $@ end, NodeStr) of
- {Prefix, []} -> {_, Suffix} = parts(node()),
- {Prefix, Suffix};
- {Prefix, Suffix} -> {Prefix, tl(Suffix)}
- end.
-
-cookie_hash() ->
- base64:encode_to_string(erlang:md5(atom_to_list(erlang:get_cookie()))).
-
-is_running(Node, Application) ->
- case rpc:call(Node, rabbit_misc, which_applications, []) of
- {badrpc, _} -> false;
- Apps -> proplists:is_defined(Application, Apps)
- end.
-
-is_process_running(Node, Process) ->
- case rpc:call(Node, erlang, whereis, [Process]) of
- {badrpc, _} -> false;
- undefined -> false;
- P when is_pid(P) -> true
- end.
diff --git a/src/rabbit_parameter_validation.erl b/src/rabbit_parameter_validation.erl
deleted file mode 100644
index 0a878432..00000000
--- a/src/rabbit_parameter_validation.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_parameter_validation).
-
--export([number/2, binary/2, boolean/2, list/2, regex/2, proplist/3, enum/1]).
-
-number(_Name, Term) when is_number(Term) ->
- ok;
-
-number(Name, Term) ->
- {error, "~s should be number, actually was ~p", [Name, Term]}.
-
-binary(_Name, Term) when is_binary(Term) ->
- ok;
-
-binary(Name, Term) ->
- {error, "~s should be binary, actually was ~p", [Name, Term]}.
-
-boolean(_Name, Term) when is_boolean(Term) ->
- ok;
-boolean(Name, Term) ->
- {error, "~s should be boolean, actually was ~p", [Name, Term]}.
-
-list(_Name, Term) when is_list(Term) ->
- ok;
-
-list(Name, Term) ->
- {error, "~s should be list, actually was ~p", [Name, Term]}.
-
-regex(Name, Term) when is_binary(Term) ->
- case re:compile(Term) of
- {ok, _} -> ok;
- {error, Reason} -> {error, "~s should be regular expression "
- "but is invalid: ~p", [Name, Reason]}
- end;
-regex(Name, Term) ->
- {error, "~s should be a binary but was ~p", [Name, Term]}.
-
-proplist(Name, Constraints, Term) when is_list(Term) ->
- {Results, Remainder}
- = lists:foldl(
- fun ({Key, Fun, Needed}, {Results0, Term0}) ->
- case {lists:keytake(Key, 1, Term0), Needed} of
- {{value, {Key, Value}, Term1}, _} ->
- {[Fun(Key, Value) | Results0],
- Term1};
- {false, mandatory} ->
- {[{error, "Key \"~s\" not found in ~s",
- [Key, Name]} | Results0], Term0};
- {false, optional} ->
- {Results0, Term0}
- end
- end, {[], Term}, Constraints),
- case Remainder of
- [] -> Results;
- _ -> [{error, "Unrecognised terms ~p in ~s", [Remainder, Name]}
- | Results]
- end;
-
-proplist(Name, _Constraints, Term) ->
- {error, "~s not a list ~p", [Name, Term]}.
-
-enum(OptionsA) ->
- Options = [list_to_binary(atom_to_list(O)) || O <- OptionsA],
- fun (Name, Term) when is_binary(Term) ->
- case lists:member(Term, Options) of
- true -> ok;
- false -> {error, "~s should be one of ~p, actually was ~p",
- [Name, Options, Term]}
- end;
- (Name, Term) ->
- {error, "~s should be binary, actually was ~p", [Name, Term]}
- end.
diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl
deleted file mode 100644
index 168ced3c..00000000
--- a/src/rabbit_plugins.erl
+++ /dev/null
@@ -1,226 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_plugins).
--include("rabbit.hrl").
-
--export([setup/0, active/0, read_enabled/1, list/1, dependencies/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(plugin_name() :: atom()).
-
--spec(setup/0 :: () -> [plugin_name()]).
--spec(active/0 :: () -> [plugin_name()]).
--spec(list/1 :: (string()) -> [#plugin{}]).
--spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]).
--spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) ->
- [plugin_name()]).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% @doc Prepares the file system and installs all enabled plugins.
-setup() ->
- {ok, PluginDir} = application:get_env(rabbit, plugins_dir),
- {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
- {ok, EnabledFile} = application:get_env(rabbit, enabled_plugins_file),
- prepare_plugins(EnabledFile, PluginDir, ExpandDir).
-
-%% @doc Lists the plugins which are currently running.
-active() ->
- {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
- InstalledPlugins = [ P#plugin.name || P <- list(ExpandDir) ],
- [App || {App, _, _} <- rabbit_misc:which_applications(),
- lists:member(App, InstalledPlugins)].
-
-%% @doc Get the list of plugins which are ready to be enabled.
-list(PluginsDir) ->
- EZs = [{ez, EZ} || EZ <- filelib:wildcard("*.ez", PluginsDir)],
- FreeApps = [{app, App} ||
- App <- filelib:wildcard("*/ebin/*.app", PluginsDir)],
- {Plugins, Problems} =
- lists:foldl(fun ({error, EZ, Reason}, {Plugins1, Problems1}) ->
- {Plugins1, [{EZ, Reason} | Problems1]};
- (Plugin = #plugin{}, {Plugins1, Problems1}) ->
- {[Plugin|Plugins1], Problems1}
- end, {[], []},
- [plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]),
- case Problems of
- [] -> ok;
- _ -> error_logger:warning_msg(
- "Problem reading some plugins: ~p~n", [Problems])
- end,
- Plugins.
-
-%% @doc Read the list of enabled plugins from the supplied term file.
-read_enabled(PluginsFile) ->
- case rabbit_file:read_term_file(PluginsFile) of
- {ok, [Plugins]} -> Plugins;
- {ok, []} -> [];
- {ok, [_|_]} -> throw({error, {malformed_enabled_plugins_file,
- PluginsFile}});
- {error, enoent} -> [];
- {error, Reason} -> throw({error, {cannot_read_enabled_plugins_file,
- PluginsFile, Reason}})
- end.
-
-%% @doc Calculate the dependency graph from <i>Sources</i>.
-%% When Reverse =:= true the bottom/leaf level applications are returned in
-%% the resulting list, otherwise they're skipped.
-dependencies(Reverse, Sources, AllPlugins) ->
- {ok, G} = rabbit_misc:build_acyclic_graph(
- fun (App, _Deps) -> [{App, App}] end,
- fun (App, Deps) -> [{App, Dep} || Dep <- Deps] end,
- lists:ukeysort(
- 1, [{Name, Deps} ||
- #plugin{name = Name,
- dependencies = Deps} <- AllPlugins] ++
- [{Dep, []} ||
- #plugin{dependencies = Deps} <- AllPlugins,
- Dep <- Deps])),
- Dests = case Reverse of
- false -> digraph_utils:reachable(Sources, G);
- true -> digraph_utils:reaching(Sources, G)
- end,
- true = digraph:delete(G),
- Dests.
-
-%%----------------------------------------------------------------------------
-
-prepare_plugins(EnabledFile, PluginsDistDir, ExpandDir) ->
- AllPlugins = list(PluginsDistDir),
- Enabled = read_enabled(EnabledFile),
- ToUnpack = dependencies(false, Enabled, AllPlugins),
- ToUnpackPlugins = lookup_plugins(ToUnpack, AllPlugins),
-
- case Enabled -- plugin_names(ToUnpackPlugins) of
- [] -> ok;
- Missing -> error_logger:warning_msg(
- "The following enabled plugins were not found: ~p~n",
- [Missing])
- end,
-
- %% Eliminate the contents of the destination directory
- case delete_recursively(ExpandDir) of
- ok -> ok;
- {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
- [ExpandDir, E1]}})
- end,
- case filelib:ensure_dir(ExpandDir ++ "/") of
- ok -> ok;
- {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
- [ExpandDir, E2]}})
- end,
-
- [prepare_plugin(Plugin, ExpandDir) || Plugin <- ToUnpackPlugins],
-
- [prepare_dir_plugin(PluginAppDescPath) ||
- PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")].
-
-prepare_dir_plugin(PluginAppDescPath) ->
- code:add_path(filename:dirname(PluginAppDescPath)),
- list_to_atom(filename:basename(PluginAppDescPath, ".app")).
-
-%%----------------------------------------------------------------------------
-
-delete_recursively(Fn) ->
- case rabbit_file:recursive_delete([Fn]) of
- ok -> ok;
- {error, {Path, E}} -> {error, {cannot_delete, Path, E}};
- Error -> Error
- end.
-
-prepare_plugin(#plugin{type = ez, location = Location}, ExpandDir) ->
- zip:unzip(Location, [{cwd, ExpandDir}]);
-prepare_plugin(#plugin{type = dir, name = Name, location = Location},
- ExpandDir) ->
- rabbit_file:recursive_copy(Location, filename:join([ExpandDir, Name])).
-
-plugin_info(Base, {ez, EZ0}) ->
- EZ = filename:join([Base, EZ0]),
- case read_app_file(EZ) of
- {application, Name, Props} -> mkplugin(Name, Props, ez, EZ);
- {error, Reason} -> {error, EZ, Reason}
- end;
-plugin_info(Base, {app, App0}) ->
- App = filename:join([Base, App0]),
- case rabbit_file:read_term_file(App) of
- {ok, [{application, Name, Props}]} ->
- mkplugin(Name, Props, dir,
- filename:absname(
- filename:dirname(filename:dirname(App))));
- {error, Reason} ->
- {error, App, {invalid_app, Reason}}
- end.
-
-mkplugin(Name, Props, Type, Location) ->
- Version = proplists:get_value(vsn, Props, "0"),
- Description = proplists:get_value(description, Props, ""),
- Dependencies =
- filter_applications(proplists:get_value(applications, Props, [])),
- #plugin{name = Name, version = Version, description = Description,
- dependencies = Dependencies, location = Location, type = Type}.
-
-read_app_file(EZ) ->
- case zip:list_dir(EZ) of
- {ok, [_|ZippedFiles]} ->
- case find_app_files(ZippedFiles) of
- [AppPath|_] ->
- {ok, [{AppPath, AppFile}]} =
- zip:extract(EZ, [{file_list, [AppPath]}, memory]),
- parse_binary(AppFile);
- [] ->
- {error, no_app_file}
- end;
- {error, Reason} ->
- {error, {invalid_ez, Reason}}
- end.
-
-find_app_files(ZippedFiles) ->
- {ok, RE} = re:compile("^.*/ebin/.*.app$"),
- [Path || {zip_file, Path, _, _, _, _} <- ZippedFiles,
- re:run(Path, RE, [{capture, none}]) =:= match].
-
-parse_binary(Bin) ->
- try
- {ok, Ts, _} = erl_scan:string(binary_to_list(Bin)),
- {ok, Term} = erl_parse:parse_term(Ts),
- Term
- catch
- Err -> {error, {invalid_app, Err}}
- end.
-
-filter_applications(Applications) ->
- [Application || Application <- Applications,
- not is_available_app(Application)].
-
-is_available_app(Application) ->
- case application:load(Application) of
- {error, {already_loaded, _}} -> true;
- ok -> application:unload(Application),
- true;
- _ -> false
- end.
-
-plugin_names(Plugins) ->
- [Name || #plugin{name = Name} <- Plugins].
-
-lookup_plugins(Names, AllPlugins) ->
- [P || P = #plugin{name = Name} <- AllPlugins, lists:member(Name, Names)].
diff --git a/src/rabbit_plugins_main.erl b/src/rabbit_plugins_main.erl
deleted file mode 100644
index 6355f935..00000000
--- a/src/rabbit_plugins_main.erl
+++ /dev/null
@@ -1,287 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_plugins_main).
--include("rabbit.hrl").
-
--export([start/0, stop/0]).
-
--define(VERBOSE_OPT, "-v").
--define(MINIMAL_OPT, "-m").
--define(ENABLED_OPT, "-E").
--define(ENABLED_ALL_OPT, "-e").
-
--define(VERBOSE_DEF, {?VERBOSE_OPT, flag}).
--define(MINIMAL_DEF, {?MINIMAL_OPT, flag}).
--define(ENABLED_DEF, {?ENABLED_OPT, flag}).
--define(ENABLED_ALL_DEF, {?ENABLED_ALL_OPT, flag}).
-
--define(GLOBAL_DEFS, []).
-
--define(COMMANDS,
- [{list, [?VERBOSE_DEF, ?MINIMAL_DEF, ?ENABLED_DEF, ?ENABLED_ALL_DEF]},
- enable,
- disable]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
--spec(usage/0 :: () -> no_return()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start() ->
- {ok, [[PluginsFile|_]|_]} =
- init:get_argument(enabled_plugins_file),
- {ok, [[PluginsDir|_]|_]} = init:get_argument(plugins_dist_dir),
- {Command, Opts, Args} =
- case rabbit_misc:parse_arguments(?COMMANDS, ?GLOBAL_DEFS,
- init:get_plain_arguments())
- of
- {ok, Res} -> Res;
- no_command -> print_error("could not recognise command", []),
- usage()
- end,
-
- PrintInvalidCommandError =
- fun () ->
- print_error("invalid command '~s'",
- [string:join([atom_to_list(Command) | Args], " ")])
- end,
-
- case catch action(Command, Args, Opts, PluginsFile, PluginsDir) of
- ok ->
- rabbit_misc:quit(0);
- {'EXIT', {function_clause, [{?MODULE, action, _} | _]}} ->
- PrintInvalidCommandError(),
- usage();
- {'EXIT', {function_clause, [{?MODULE, action, _, _} | _]}} ->
- PrintInvalidCommandError(),
- usage();
- {error, Reason} ->
- print_error("~p", [Reason]),
- rabbit_misc:quit(2);
- {error_string, Reason} ->
- print_error("~s", [Reason]),
- rabbit_misc:quit(2);
- Other ->
- print_error("~p", [Other]),
- rabbit_misc:quit(2)
- end.
-
-stop() ->
- ok.
-
-%%----------------------------------------------------------------------------
-
-action(list, [], Opts, PluginsFile, PluginsDir) ->
- action(list, [".*"], Opts, PluginsFile, PluginsDir);
-action(list, [Pat], Opts, PluginsFile, PluginsDir) ->
- format_plugins(Pat, Opts, PluginsFile, PluginsDir);
-
-action(enable, ToEnable0, _Opts, PluginsFile, PluginsDir) ->
- case ToEnable0 of
- [] -> throw({error_string, "Not enough arguments for 'enable'"});
- _ -> ok
- end,
- AllPlugins = rabbit_plugins:list(PluginsDir),
- Enabled = rabbit_plugins:read_enabled(PluginsFile),
- ImplicitlyEnabled = rabbit_plugins:dependencies(false,
- Enabled, AllPlugins),
- ToEnable = [list_to_atom(Name) || Name <- ToEnable0],
- Missing = ToEnable -- plugin_names(AllPlugins),
- NewEnabled = lists:usort(Enabled ++ ToEnable),
- NewImplicitlyEnabled = rabbit_plugins:dependencies(false,
- NewEnabled, AllPlugins),
- MissingDeps = (NewImplicitlyEnabled -- plugin_names(AllPlugins)) -- Missing,
- case {Missing, MissingDeps} of
- {[], []} -> ok;
- {Miss, []} -> throw({error_string, fmt_missing("plugins", Miss)});
- {[], Miss} -> throw({error_string, fmt_missing("dependencies", Miss)});
- {_, _} -> throw({error_string,
- fmt_missing("plugins", Missing) ++
- fmt_missing("dependencies", MissingDeps)})
- end,
- write_enabled_plugins(PluginsFile, NewEnabled),
- maybe_warn_mochiweb(NewImplicitlyEnabled),
- case NewEnabled -- ImplicitlyEnabled of
- [] -> io:format("Plugin configuration unchanged.~n");
- _ -> print_list("The following plugins have been enabled:",
- NewImplicitlyEnabled -- ImplicitlyEnabled),
- report_change()
- end;
-
-action(disable, ToDisable0, _Opts, PluginsFile, PluginsDir) ->
- case ToDisable0 of
- [] -> throw({error_string, "Not enough arguments for 'disable'"});
- _ -> ok
- end,
- ToDisable = [list_to_atom(Name) || Name <- ToDisable0],
- Enabled = rabbit_plugins:read_enabled(PluginsFile),
- AllPlugins = rabbit_plugins:list(PluginsDir),
- Missing = ToDisable -- plugin_names(AllPlugins),
- case Missing of
- [] -> ok;
- _ -> print_list("Warning: the following plugins could not be found:",
- Missing)
- end,
- ToDisableDeps = rabbit_plugins:dependencies(true, ToDisable, AllPlugins),
- NewEnabled = Enabled -- ToDisableDeps,
- case length(Enabled) =:= length(NewEnabled) of
- true -> io:format("Plugin configuration unchanged.~n");
- false -> ImplicitlyEnabled =
- rabbit_plugins:dependencies(false, Enabled, AllPlugins),
- NewImplicitlyEnabled =
- rabbit_plugins:dependencies(false,
- NewEnabled, AllPlugins),
- print_list("The following plugins have been disabled:",
- ImplicitlyEnabled -- NewImplicitlyEnabled),
- write_enabled_plugins(PluginsFile, NewEnabled),
- report_change()
- end.
-
-%%----------------------------------------------------------------------------
-
-print_error(Format, Args) ->
- rabbit_misc:format_stderr("Error: " ++ Format ++ "~n", Args).
-
-usage() ->
- io:format("~s", [rabbit_plugins_usage:usage()]),
- rabbit_misc:quit(1).
-
-%% Pretty print a list of plugins.
-format_plugins(Pattern, Opts, PluginsFile, PluginsDir) ->
- Verbose = proplists:get_bool(?VERBOSE_OPT, Opts),
- Minimal = proplists:get_bool(?MINIMAL_OPT, Opts),
- Format = case {Verbose, Minimal} of
- {false, false} -> normal;
- {true, false} -> verbose;
- {false, true} -> minimal;
- {true, true} -> throw({error_string,
- "Cannot specify -m and -v together"})
- end,
- OnlyEnabled = proplists:get_bool(?ENABLED_OPT, Opts),
- OnlyEnabledAll = proplists:get_bool(?ENABLED_ALL_OPT, Opts),
-
- AvailablePlugins = rabbit_plugins:list(PluginsDir),
- EnabledExplicitly = rabbit_plugins:read_enabled(PluginsFile),
- EnabledImplicitly =
- rabbit_plugins:dependencies(false, EnabledExplicitly,
- AvailablePlugins) -- EnabledExplicitly,
- Missing = [#plugin{name = Name, dependencies = []} ||
- Name <- ((EnabledExplicitly ++ EnabledImplicitly) --
- plugin_names(AvailablePlugins))],
- {ok, RE} = re:compile(Pattern),
- Plugins = [ Plugin ||
- Plugin = #plugin{name = Name} <- AvailablePlugins ++ Missing,
- re:run(atom_to_list(Name), RE, [{capture, none}]) =:= match,
- if OnlyEnabled -> lists:member(Name, EnabledExplicitly);
- OnlyEnabledAll -> (lists:member(Name,
- EnabledExplicitly) or
- lists:member(Name, EnabledImplicitly));
- true -> true
- end],
- Plugins1 = usort_plugins(Plugins),
- MaxWidth = lists:max([length(atom_to_list(Name)) ||
- #plugin{name = Name} <- Plugins1] ++ [0]),
- [format_plugin(P, EnabledExplicitly, EnabledImplicitly,
- plugin_names(Missing), Format, MaxWidth) || P <- Plugins1],
- ok.
-
-format_plugin(#plugin{name = Name, version = Version,
- description = Description, dependencies = Deps},
- EnabledExplicitly, EnabledImplicitly, Missing,
- Format, MaxWidth) ->
- Glyph = case {lists:member(Name, EnabledExplicitly),
- lists:member(Name, EnabledImplicitly),
- lists:member(Name, Missing)} of
- {true, false, false} -> "[E]";
- {false, true, false} -> "[e]";
- {_, _, true} -> "[!]";
- _ -> "[ ]"
- end,
- Opt = fun (_F, A, A) -> ok;
- ( F, A, _) -> io:format(F, [A])
- end,
- case Format of
- minimal -> io:format("~s~n", [Name]);
- normal -> io:format("~s ~-" ++ integer_to_list(MaxWidth) ++ "w ",
- [Glyph, Name]),
- Opt("~s", Version, undefined),
- io:format("~n");
- verbose -> io:format("~s ~w~n", [Glyph, Name]),
- Opt(" Version: \t~s~n", Version, undefined),
- Opt(" Dependencies:\t~p~n", Deps, []),
- Opt(" Description: \t~s~n", Description, undefined),
- io:format("~n")
- end.
-
-print_list(Header, Plugins) ->
- io:format(fmt_list(Header, Plugins)).
-
-fmt_list(Header, Plugins) ->
- lists:flatten(
- [Header, $\n, [io_lib:format(" ~s~n", [P]) || P <- Plugins]]).
-
-fmt_missing(Desc, Missing) ->
- fmt_list("The following " ++ Desc ++ " could not be found:", Missing).
-
-usort_plugins(Plugins) ->
- lists:usort(fun plugins_cmp/2, Plugins).
-
-plugins_cmp(#plugin{name = N1, version = V1},
- #plugin{name = N2, version = V2}) ->
- {N1, V1} =< {N2, V2}.
-
-%% Return the names of the given plugins.
-plugin_names(Plugins) ->
- [Name || #plugin{name = Name} <- Plugins].
-
-%% Write the enabled plugin names on disk.
-write_enabled_plugins(PluginsFile, Plugins) ->
- case rabbit_file:write_term_file(PluginsFile, [Plugins]) of
- ok -> ok;
- {error, Reason} -> throw({error, {cannot_write_enabled_plugins_file,
- PluginsFile, Reason}})
- end.
-
-maybe_warn_mochiweb(Enabled) ->
- V = erlang:system_info(otp_release),
- case lists:member(mochiweb, Enabled) andalso V < "R13B01" of
- true ->
- Stars = string:copies("*", 80),
- io:format("~n~n~s~n"
- " Warning: Mochiweb enabled and Erlang version ~s "
- "detected.~n"
- " Enabling plugins that depend on Mochiweb is not "
- "supported on this Erlang~n"
- " version. At least R13B01 is required.~n~n"
- " RabbitMQ will not start successfully in this "
- "configuration. You *must*~n"
- " disable the Mochiweb plugin, or upgrade Erlang.~n"
- "~s~n~n~n", [Stars, V, Stars]);
- false ->
- ok
- end.
-
-report_change() ->
- io:format("Plugin configuration has changed. "
- "Restart RabbitMQ for changes to take effect.~n").
diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl
deleted file mode 100644
index 91ca88dd..00000000
--- a/src/rabbit_policy.erl
+++ /dev/null
@@ -1,259 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_policy).
-
-%% TODO specs
-
--behaviour(rabbit_runtime_parameter).
-
--include("rabbit.hrl").
-
--import(rabbit_misc, [pget/2]).
-
--export([register/0]).
--export([name/1, get/2, set/1]).
--export([validate/4, notify/4, notify_clear/3]).
--export([parse_set/5, set/5, delete/2, lookup/2, list/0, list/1,
- list_formatted/1, info_keys/0]).
-
--rabbit_boot_step({?MODULE,
- [{description, "policy parameters"},
- {mfa, {rabbit_policy, register, []}},
- {requires, rabbit_registry},
- {enables, recovery}]}).
-
-register() ->
- rabbit_registry:register(runtime_parameter, <<"policy">>, ?MODULE).
-
-name(#amqqueue{policy = Policy}) -> name0(Policy);
-name(#exchange{policy = Policy}) -> name0(Policy).
-
-name0(undefined) -> none;
-name0(Policy) -> pget(name, Policy).
-
-set(Q = #amqqueue{name = Name}) -> Q#amqqueue{policy = set0(Name)};
-set(X = #exchange{name = Name}) -> rabbit_exchange_decorator:set(
- X#exchange{policy = set0(Name)}).
-
-set0(Name = #resource{virtual_host = VHost}) -> match(Name, list(VHost)).
-
-get(Name, #amqqueue{policy = Policy}) -> get0(Name, Policy);
-get(Name, #exchange{policy = Policy}) -> get0(Name, Policy);
-%% Caution - SLOW.
-get(Name, EntityName = #resource{virtual_host = VHost}) ->
- get0(Name, match(EntityName, list(VHost))).
-
-get0(_Name, undefined) -> {error, not_found};
-get0(Name, List) -> case pget(definition, List) of
- undefined -> {error, not_found};
- Policy -> case pget(Name, Policy) of
- undefined -> {error, not_found};
- Value -> {ok, Value}
- end
- end.
-
-%%----------------------------------------------------------------------------
-
-parse_set(VHost, Name, Pattern, Definition, undefined) ->
- parse_set0(VHost, Name, Pattern, Definition, 0);
-parse_set(VHost, Name, Pattern, Definition, Priority) ->
- try list_to_integer(Priority) of
- Num -> parse_set0(VHost, Name, Pattern, Definition, Num)
- catch
- error:badarg -> {error, "~p priority must be a number", [Priority]}
- end.
-
-parse_set0(VHost, Name, Pattern, Defn, Priority) ->
- case rabbit_misc:json_decode(Defn) of
- {ok, JSON} ->
- set0(VHost, Name,
- [{<<"pattern">>, list_to_binary(Pattern)},
- {<<"definition">>, rabbit_misc:json_to_term(JSON)},
- {<<"priority">>, Priority}]);
- error ->
- {error_string, "JSON decoding error"}
- end.
-
-set(VHost, Name, Pattern, Definition, Priority) ->
- PolicyProps = [{<<"pattern">>, Pattern},
- {<<"definition">>, Definition},
- {<<"priority">>, case Priority of
- undefined -> 0;
- _ -> Priority
- end}],
- set0(VHost, Name, PolicyProps).
-
-set0(VHost, Name, Term) ->
- rabbit_runtime_parameters:set_any(VHost, <<"policy">>, Name, Term).
-
-delete(VHost, Name) ->
- rabbit_runtime_parameters:clear_any(VHost, <<"policy">>, Name).
-
-lookup(VHost, Name) ->
- case rabbit_runtime_parameters:lookup(VHost, <<"policy">>, Name) of
- not_found -> not_found;
- P -> p(P, fun ident/1)
- end.
-
-list() ->
- list('_').
-
-list(VHost) ->
- list0(VHost, fun ident/1).
-
-list_formatted(VHost) ->
- order_policies(list0(VHost, fun format/1)).
-
-list0(VHost, DefnFun) ->
- [p(P, DefnFun) || P <- rabbit_runtime_parameters:list(VHost, <<"policy">>)].
-
-order_policies(PropList) ->
- lists:sort(fun (A, B) -> pget(priority, A) < pget(priority, B) end,
- PropList).
-
-p(Parameter, DefnFun) ->
- Value = pget(value, Parameter),
- [{vhost, pget(vhost, Parameter)},
- {name, pget(name, Parameter)},
- {pattern, pget(<<"pattern">>, Value)},
- {definition, DefnFun(pget(<<"definition">>, Value))},
- {priority, pget(<<"priority">>, Value)}].
-
-format(Term) ->
- {ok, JSON} = rabbit_misc:json_encode(rabbit_misc:term_to_json(Term)),
- list_to_binary(JSON).
-
-ident(X) -> X.
-
-info_keys() -> [vhost, name, pattern, definition, priority].
-
-%%----------------------------------------------------------------------------
-
-validate(_VHost, <<"policy">>, Name, Term) ->
- rabbit_parameter_validation:proplist(
- Name, policy_validation(), Term).
-
-notify(VHost, <<"policy">>, _Name, _Term) ->
- update_policies(VHost).
-
-notify_clear(VHost, <<"policy">>, _Name) ->
- update_policies(VHost).
-
-%%----------------------------------------------------------------------------
-
-update_policies(VHost) ->
- Policies = list(VHost),
- {Xs, Qs} = rabbit_misc:execute_mnesia_transaction(
- fun() ->
- {[update_exchange(X, Policies) ||
- X <- rabbit_exchange:list(VHost)],
- [update_queue(Q, Policies) ||
- Q <- rabbit_amqqueue:list(VHost)]}
- end),
- [catch notify(X) || X <- Xs],
- [catch notify(Q) || Q <- Qs],
- ok.
-
-update_exchange(X = #exchange{name = XName, policy = OldPolicy}, Policies) ->
- case match(XName, Policies) of
- OldPolicy -> no_change;
- NewPolicy -> case rabbit_exchange:update(
- XName, fun (X0) ->
- rabbit_exchange_decorator:set(
- X0 #exchange{policy = NewPolicy})
- end) of
- #exchange{} = X1 -> {X, X1};
- not_found -> {X, X }
- end
- end.
-
-update_queue(Q = #amqqueue{name = QName, policy = OldPolicy}, Policies) ->
- case match(QName, Policies) of
- OldPolicy -> no_change;
- NewPolicy -> rabbit_amqqueue:update(
- QName, fun(Q1) -> Q1#amqqueue{policy = NewPolicy} end),
- {Q, Q#amqqueue{policy = NewPolicy}}
- end.
-
-notify(no_change)->
- ok;
-notify({X1 = #exchange{}, X2 = #exchange{}}) ->
- rabbit_exchange:policy_changed(X1, X2);
-notify({Q1 = #amqqueue{}, Q2 = #amqqueue{}}) ->
- rabbit_amqqueue:policy_changed(Q1, Q2).
-
-match(Name, Policies) ->
- case lists:sort(fun sort_pred/2, [P || P <- Policies, matches(Name, P)]) of
- [] -> undefined;
- [Policy | _Rest] -> Policy
- end.
-
-matches(#resource{name = Name}, Policy) ->
- match =:= re:run(Name, pget(pattern, Policy), [{capture, none}]).
-
-sort_pred(A, B) -> pget(priority, A) >= pget(priority, B).
-
-%%----------------------------------------------------------------------------
-
-policy_validation() ->
- [{<<"priority">>, fun rabbit_parameter_validation:number/2, mandatory},
- {<<"pattern">>, fun rabbit_parameter_validation:regex/2, mandatory},
- {<<"definition">>, fun validation/2, mandatory}].
-
-validation(_Name, []) ->
- {error, "no policy provided", []};
-validation(_Name, Terms) when is_list(Terms) ->
- {Keys, Modules} = lists:unzip(
- rabbit_registry:lookup_all(policy_validator)),
- [] = dups(Keys), %% ASSERTION
- Validators = lists:zipwith(fun (M, K) -> {M, a2b(K)} end, Modules, Keys),
- case is_proplist(Terms) of
- true -> {TermKeys, _} = lists:unzip(Terms),
- case dups(TermKeys) of
- [] -> validation0(Validators, Terms);
- Dup -> {error, "~p duplicate keys not allowed", [Dup]}
- end;
- false -> {error, "definition must be a dictionary: ~p", [Terms]}
- end;
-validation(_Name, Term) ->
- {error, "parse error while reading policy: ~p", [Term]}.
-
-validation0(Validators, Terms) ->
- case lists:foldl(
- fun (Mod, {ok, TermsLeft}) ->
- ModKeys = proplists:get_all_values(Mod, Validators),
- case [T || {Key, _} = T <- TermsLeft,
- lists:member(Key, ModKeys)] of
- [] -> {ok, TermsLeft};
- Scope -> {Mod:validate_policy(Scope), TermsLeft -- Scope}
- end;
- (_, Acc) ->
- Acc
- end, {ok, Terms}, proplists:get_keys(Validators)) of
- {ok, []} ->
- ok;
- {ok, Unvalidated} ->
- {error, "~p are not recognised policy settings", [Unvalidated]};
- {Error, _} ->
- Error
- end.
-
-a2b(A) -> list_to_binary(atom_to_list(A)).
-
-dups(L) -> L -- lists:usort(L).
-
-is_proplist(L) -> length(L) =:= length([I || I = {_, _} <- L]).
diff --git a/src/rabbit_policy_validator.erl b/src/rabbit_policy_validator.erl
deleted file mode 100644
index 661db73d..00000000
--- a/src/rabbit_policy_validator.erl
+++ /dev/null
@@ -1,39 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_policy_validator).
-
--ifdef(use_specs).
-
--export_type([validate_results/0]).
-
--type(validate_results() ::
- 'ok' | {error, string(), [term()]} | [validate_results()]).
-
--callback validate_policy([{binary(), term()}]) -> validate_results().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [
- {validate_policy, 1}
- ];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_prelaunch.erl b/src/rabbit_prelaunch.erl
deleted file mode 100644
index be407a02..00000000
--- a/src/rabbit_prelaunch.erl
+++ /dev/null
@@ -1,72 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_prelaunch).
-
--export([start/0, stop/0]).
-
--include("rabbit.hrl").
-
--define(BaseApps, [rabbit]).
--define(ERROR_CODE, 1).
-
-%%----------------------------------------------------------------------------
-%% Specs
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/0 :: () -> no_return()).
--spec(stop/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start() ->
- [NodeStr] = init:get_plain_arguments(),
- ok = duplicate_node_check(NodeStr),
- rabbit_misc:quit(0),
- ok.
-
-stop() ->
- ok.
-
-%%----------------------------------------------------------------------------
-
-%% Check whether a node with the same name is already running
-duplicate_node_check([]) ->
- %% Ignore running node while installing windows service
- ok;
-duplicate_node_check(NodeStr) ->
- Node = rabbit_nodes:make(NodeStr),
- {NodeName, NodeHost} = rabbit_nodes:parts(Node),
- case rabbit_nodes:names(NodeHost) of
- {ok, NamePorts} ->
- case proplists:is_defined(NodeName, NamePorts) of
- true -> io:format("ERROR: node with name ~p "
- "already running on ~p~n",
- [NodeName, NodeHost]),
- io:format(rabbit_nodes:diagnostics([Node]) ++ "~n"),
- rabbit_misc:quit(?ERROR_CODE);
- false -> ok
- end;
- {error, EpmdReason} ->
- io:format("ERROR: epmd error for host ~p: ~p (~s)~n",
- [NodeHost, EpmdReason,
- rabbit_misc:format_inet_error(EpmdReason)]),
- rabbit_misc:quit(?ERROR_CODE)
- end.
diff --git a/src/rabbit_queue_collector.erl b/src/rabbit_queue_collector.erl
deleted file mode 100644
index 6406f7e9..00000000
--- a/src/rabbit_queue_collector.erl
+++ /dev/null
@@ -1,90 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_queue_collector).
-
--behaviour(gen_server).
-
--export([start_link/0, register/2, delete_all/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(state, {monitors, delete_from}).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(register/2 :: (pid(), pid()) -> 'ok').
--spec(delete_all/1 :: (pid()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server:start_link(?MODULE, [], []).
-
-register(CollectorPid, Q) ->
- gen_server:call(CollectorPid, {register, Q}, infinity).
-
-delete_all(CollectorPid) ->
- gen_server:call(CollectorPid, delete_all, infinity).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, #state{monitors = pmon:new(), delete_from = undefined}}.
-
-%%--------------------------------------------------------------------------
-
-handle_call({register, QPid}, _From,
- State = #state{monitors = QMons, delete_from = Deleting}) ->
- case Deleting of
- undefined -> ok;
- _ -> ok = rabbit_amqqueue:delete_immediately([QPid])
- end,
- {reply, ok, State#state{monitors = pmon:monitor(QPid, QMons)}};
-
-handle_call(delete_all, From, State = #state{monitors = QMons,
- delete_from = undefined}) ->
- case pmon:monitored(QMons) of
- [] -> {reply, ok, State#state{delete_from = From}};
- QPids -> ok = rabbit_amqqueue:delete_immediately(QPids),
- {noreply, State#state{delete_from = From}}
- end.
-
-handle_cast(Msg, State) ->
- {stop, {unhandled_cast, Msg}, State}.
-
-handle_info({'DOWN', _MRef, process, DownPid, _Reason},
- State = #state{monitors = QMons, delete_from = Deleting}) ->
- QMons1 = pmon:erase(DownPid, QMons),
- case Deleting =/= undefined andalso pmon:is_empty(QMons1) of
- true -> gen_server:reply(Deleting, ok);
- false -> ok
- end,
- {noreply, State#state{monitors = QMons1}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl
deleted file mode 100644
index 0908fb73..00000000
--- a/src/rabbit_queue_index.erl
+++ /dev/null
@@ -1,1119 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_queue_index).
-
--export([init/2, shutdown_terms/1, recover/5,
- terminate/2, delete_and_terminate/1,
- publish/5, deliver/2, ack/2, sync/1, needs_sync/1, flush/1,
- read/3, next_segment_boundary/1, bounds/1, recover/1]).
-
--export([scan/3]).
-
--export([add_queue_ttl/0]).
-
--define(CLEAN_FILENAME, "clean.dot").
-
-%%----------------------------------------------------------------------------
-
-%% The queue index is responsible for recording the order of messages
-%% within a queue on disk.
-%%
-%% Because of the fact that the queue can decide at any point to send
-%% a queue entry to disk, you can not rely on publishes appearing in
-%% order. The only thing you can rely on is a message being published,
-%% then delivered, then ack'd.
-%%
-%% In order to be able to clean up ack'd messages, we write to segment
-%% files. These files have a fixed maximum size: ?SEGMENT_ENTRY_COUNT
-%% publishes, delivers and acknowledgements. They are numbered, and so
-%% it is known that the 0th segment contains messages 0 ->
-%% ?SEGMENT_ENTRY_COUNT - 1, the 1st segment contains messages
-%% ?SEGMENT_ENTRY_COUNT -> 2*?SEGMENT_ENTRY_COUNT - 1 and so on. As
-%% such, in the segment files, we only refer to message sequence ids
-%% by the LSBs as SeqId rem ?SEGMENT_ENTRY_COUNT. This gives them a
-%% fixed size.
-%%
-%% However, transient messages which are not sent to disk at any point
-%% will cause gaps to appear in segment files. Therefore, we delete a
-%% segment file whenever the number of publishes == number of acks
-%% (note that although it is not fully enforced, it is assumed that a
-%% message will never be ackd before it is delivered, thus this test
-%% also implies == number of delivers). In practise, this does not
-%% cause disk churn in the pathological case because of the journal
-%% and caching (see below).
-%%
-%% Because of the fact that publishes, delivers and acks can occur all
-%% over, we wish to avoid lots of seeking. Therefore we have a fixed
-%% sized journal to which all actions are appended. When the number of
-%% entries in this journal reaches max_journal_entries, the journal
-%% entries are scattered out to their relevant files, and the journal
-%% is truncated to zero size. Note that entries in the journal must
-%% carry the full sequence id, thus the format of entries in the
-%% journal is different to that in the segments.
-%%
-%% The journal is also kept fully in memory, pre-segmented: the state
-%% contains a mapping from segment numbers to state-per-segment (this
-%% state is held for all segments which have been "seen": thus a
-%% segment which has been read but has no pending entries in the
-%% journal is still held in this mapping. Also note that a dict is
-%% used for this mapping, not an array because with an array, you will
-%% always have entries from 0). Actions are stored directly in this
-%% state. Thus at the point of flushing the journal, firstly no
-%% reading from disk is necessary, but secondly if the known number of
-%% acks and publishes in a segment are equal, given the known state of
-%% the segment file combined with the journal, no writing needs to be
-%% done to the segment file either (in fact it is deleted if it exists
-%% at all). This is safe given that the set of acks is a subset of the
-%% set of publishes. When it is necessary to sync messages, it is
-%% sufficient to fsync on the journal: when entries are distributed
-%% from the journal to segment files, those segments appended to are
-%% fsync'd prior to the journal being truncated.
-%%
-%% This module is also responsible for scanning the queue index files
-%% and seeding the message store on start up.
-%%
-%% Note that in general, the representation of a message's state as
-%% the tuple: {('no_pub'|{MsgId, MsgProps, IsPersistent}),
-%% ('del'|'no_del'), ('ack'|'no_ack')} is richer than strictly
-%% necessary for most operations. However, for startup, and to ensure
-%% the safe and correct combination of journal entries with entries
-%% read from the segment on disk, this richer representation vastly
-%% simplifies and clarifies the code.
-%%
-%% For notes on Clean Shutdown and startup, see documentation in
-%% variable_queue.
-%%
-%%----------------------------------------------------------------------------
-
-%% ---- Journal details ----
-
--define(JOURNAL_FILENAME, "journal.jif").
-
--define(PUB_PERSIST_JPREFIX, 2#00).
--define(PUB_TRANS_JPREFIX, 2#01).
--define(DEL_JPREFIX, 2#10).
--define(ACK_JPREFIX, 2#11).
--define(JPREFIX_BITS, 2).
--define(SEQ_BYTES, 8).
--define(SEQ_BITS, ((?SEQ_BYTES * 8) - ?JPREFIX_BITS)).
-
-%% ---- Segment details ----
-
--define(SEGMENT_EXTENSION, ".idx").
-
-%% TODO: The segment size would be configurable, but deriving all the
-%% other values is quite hairy and quite possibly noticably less
-%% efficient, depending on how clever the compiler is when it comes to
-%% binary generation/matching with constant vs variable lengths.
-
--define(REL_SEQ_BITS, 14).
--define(SEGMENT_ENTRY_COUNT, 16384). %% trunc(math:pow(2,?REL_SEQ_BITS))).
-
-%% seq only is binary 00 followed by 14 bits of rel seq id
-%% (range: 0 - 16383)
--define(REL_SEQ_ONLY_PREFIX, 00).
--define(REL_SEQ_ONLY_PREFIX_BITS, 2).
--define(REL_SEQ_ONLY_RECORD_BYTES, 2).
-
-%% publish record is binary 1 followed by a bit for is_persistent,
-%% then 14 bits of rel seq id, 64 bits for message expiry and 128 bits
-%% of md5sum msg id
--define(PUB_PREFIX, 1).
--define(PUB_PREFIX_BITS, 1).
-
--define(EXPIRY_BYTES, 8).
--define(EXPIRY_BITS, (?EXPIRY_BYTES * 8)).
--define(NO_EXPIRY, 0).
-
--define(MSG_ID_BYTES, 16). %% md5sum is 128 bit or 16 bytes
--define(MSG_ID_BITS, (?MSG_ID_BYTES * 8)).
-
-%% 16 bytes for md5sum + 8 for expiry
--define(PUB_RECORD_BODY_BYTES, (?MSG_ID_BYTES + ?EXPIRY_BYTES)).
-%% + 2 for seq, bits and prefix
--define(PUB_RECORD_BYTES, (?PUB_RECORD_BODY_BYTES + 2)).
-
-%% 1 publish, 1 deliver, 1 ack per msg
--define(SEGMENT_TOTAL_SIZE, ?SEGMENT_ENTRY_COUNT *
- (?PUB_RECORD_BYTES + (2 * ?REL_SEQ_ONLY_RECORD_BYTES))).
-
-%% ---- misc ----
-
--define(PUB, {_, _, _}). %% {MsgId, MsgProps, IsPersistent}
-
--define(READ_MODE, [binary, raw, read]).
--define(READ_AHEAD_MODE, [{read_ahead, ?SEGMENT_TOTAL_SIZE} | ?READ_MODE]).
--define(WRITE_MODE, [write | ?READ_MODE]).
-
-%%----------------------------------------------------------------------------
-
--record(qistate, { dir, segments, journal_handle, dirty_count,
- max_journal_entries, on_sync, unconfirmed }).
-
--record(segment, { num, path, journal_entries, unacked }).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--rabbit_upgrade({add_queue_ttl, local, []}).
-
--ifdef(use_specs).
-
--type(hdl() :: ('undefined' | any())).
--type(segment() :: ('undefined' |
- #segment { num :: non_neg_integer(),
- path :: file:filename(),
- journal_entries :: array(),
- unacked :: non_neg_integer()
- })).
--type(seq_id() :: integer()).
--type(seg_dict() :: {dict(), [segment()]}).
--type(on_sync_fun() :: fun ((gb_set()) -> ok)).
--type(qistate() :: #qistate { dir :: file:filename(),
- segments :: 'undefined' | seg_dict(),
- journal_handle :: hdl(),
- dirty_count :: integer(),
- max_journal_entries :: non_neg_integer(),
- on_sync :: on_sync_fun(),
- unconfirmed :: gb_set()
- }).
--type(contains_predicate() :: fun ((rabbit_types:msg_id()) -> boolean())).
--type(walker(A) :: fun ((A) -> 'finished' |
- {rabbit_types:msg_id(), non_neg_integer(), A})).
--type(shutdown_terms() :: [any()]).
-
--spec(init/2 :: (rabbit_amqqueue:name(), on_sync_fun()) -> qistate()).
--spec(shutdown_terms/1 :: (rabbit_amqqueue:name()) -> shutdown_terms()).
--spec(recover/5 :: (rabbit_amqqueue:name(), shutdown_terms(), boolean(),
- contains_predicate(), on_sync_fun()) ->
- {'undefined' | non_neg_integer(), qistate()}).
--spec(terminate/2 :: ([any()], qistate()) -> qistate()).
--spec(delete_and_terminate/1 :: (qistate()) -> qistate()).
--spec(publish/5 :: (rabbit_types:msg_id(), seq_id(),
- rabbit_types:message_properties(), boolean(), qistate())
- -> qistate()).
--spec(deliver/2 :: ([seq_id()], qistate()) -> qistate()).
--spec(ack/2 :: ([seq_id()], qistate()) -> qistate()).
--spec(sync/1 :: (qistate()) -> qistate()).
--spec(needs_sync/1 :: (qistate()) -> 'confirms' | 'other' | 'false').
--spec(flush/1 :: (qistate()) -> qistate()).
--spec(read/3 :: (seq_id(), seq_id(), qistate()) ->
- {[{rabbit_types:msg_id(), seq_id(),
- rabbit_types:message_properties(),
- boolean(), boolean()}], qistate()}).
--spec(next_segment_boundary/1 :: (seq_id()) -> seq_id()).
--spec(bounds/1 :: (qistate()) ->
- {non_neg_integer(), non_neg_integer(), qistate()}).
--spec(recover/1 :: ([rabbit_amqqueue:name()]) -> {[[any()]], {walker(A), A}}).
-
--spec(scan/3 :: (file:filename(),
- fun ((seq_id(), rabbit_types:msg_id(),
- rabbit_types:message_properties(), boolean(),
- ('del' | 'no_del'), ('ack' | 'no_ack'), A) -> A),
- A) -> A).
-
--spec(add_queue_ttl/0 :: () -> 'ok').
-
--endif.
-
-
-%%----------------------------------------------------------------------------
-%% public API
-%%----------------------------------------------------------------------------
-
-init(Name, OnSyncFun) ->
- State = #qistate { dir = Dir } = blank_state(Name),
- false = rabbit_file:is_file(Dir), %% is_file == is file or dir
- State #qistate { on_sync = OnSyncFun }.
-
-shutdown_terms(Name) ->
- #qistate { dir = Dir } = blank_state(Name),
- case read_shutdown_terms(Dir) of
- {error, _} -> [];
- {ok, Terms1} -> Terms1
- end.
-
-recover(Name, Terms, MsgStoreRecovered, ContainsCheckFun, OnSyncFun) ->
- State = #qistate { dir = Dir } = blank_state(Name),
- State1 = State #qistate { on_sync = OnSyncFun },
- CleanShutdown = detect_clean_shutdown(Dir),
- case CleanShutdown andalso MsgStoreRecovered of
- true -> RecoveredCounts = proplists:get_value(segments, Terms, []),
- init_clean(RecoveredCounts, State1);
- false -> init_dirty(CleanShutdown, ContainsCheckFun, State1)
- end.
-
-terminate(Terms, State) ->
- {SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
- store_clean_shutdown([{segments, SegmentCounts} | Terms], Dir),
- State1.
-
-delete_and_terminate(State) ->
- {_SegmentCounts, State1 = #qistate { dir = Dir }} = terminate(State),
- ok = rabbit_file:recursive_delete([Dir]),
- State1.
-
-publish(MsgId, SeqId, MsgProps, IsPersistent,
- State = #qistate { unconfirmed = Unconfirmed })
- when is_binary(MsgId) ->
- ?MSG_ID_BYTES = size(MsgId),
- {JournalHdl, State1} =
- get_journal_handle(
- case MsgProps#message_properties.needs_confirming of
- true -> Unconfirmed1 = gb_sets:add_element(MsgId, Unconfirmed),
- State #qistate { unconfirmed = Unconfirmed1 };
- false -> State
- end),
- ok = file_handle_cache:append(
- JournalHdl, [<<(case IsPersistent of
- true -> ?PUB_PERSIST_JPREFIX;
- false -> ?PUB_TRANS_JPREFIX
- end):?JPREFIX_BITS,
- SeqId:?SEQ_BITS>>,
- create_pub_record_body(MsgId, MsgProps)]),
- maybe_flush_journal(
- add_to_journal(SeqId, {MsgId, MsgProps, IsPersistent}, State1)).
-
-deliver(SeqIds, State) ->
- deliver_or_ack(del, SeqIds, State).
-
-ack(SeqIds, State) ->
- deliver_or_ack(ack, SeqIds, State).
-
-%% This is called when there are outstanding confirms or when the
-%% queue is idle and the journal needs syncing (see needs_sync/1).
-sync(State = #qistate { journal_handle = undefined }) ->
- State;
-sync(State = #qistate { journal_handle = JournalHdl }) ->
- ok = file_handle_cache:sync(JournalHdl),
- notify_sync(State).
-
-needs_sync(#qistate { journal_handle = undefined }) ->
- false;
-needs_sync(#qistate { journal_handle = JournalHdl, unconfirmed = UC }) ->
- case gb_sets:is_empty(UC) of
- true -> case file_handle_cache:needs_sync(JournalHdl) of
- true -> other;
- false -> false
- end;
- false -> confirms
- end.
-
-flush(State = #qistate { dirty_count = 0 }) -> State;
-flush(State) -> flush_journal(State).
-
-read(StartEnd, StartEnd, State) ->
- {[], State};
-read(Start, End, State = #qistate { segments = Segments,
- dir = Dir }) when Start =< End ->
- %% Start is inclusive, End is exclusive.
- LowerB = {StartSeg, _StartRelSeq} = seq_id_to_seg_and_rel_seq_id(Start),
- UpperB = {EndSeg, _EndRelSeq} = seq_id_to_seg_and_rel_seq_id(End - 1),
- {Messages, Segments1} =
- lists:foldr(fun (Seg, Acc) ->
- read_bounded_segment(Seg, LowerB, UpperB, Acc, Dir)
- end, {[], Segments}, lists:seq(StartSeg, EndSeg)),
- {Messages, State #qistate { segments = Segments1 }}.
-
-next_segment_boundary(SeqId) ->
- {Seg, _RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
- reconstruct_seq_id(Seg + 1, 0).
-
-bounds(State = #qistate { segments = Segments }) ->
- %% This is not particularly efficient, but only gets invoked on
- %% queue initialisation.
- SegNums = lists:sort(segment_nums(Segments)),
- %% Don't bother trying to figure out the lowest seq_id, merely the
- %% seq_id of the start of the lowest segment. That seq_id may not
- %% actually exist, but that's fine. The important thing is that
- %% the segment exists and the seq_id reported is on a segment
- %% boundary.
- %%
- %% We also don't really care about the max seq_id. Just start the
- %% next segment: it makes life much easier.
- %%
- %% SegNums is sorted, ascending.
- {LowSeqId, NextSeqId} =
- case SegNums of
- [] -> {0, 0};
- [MinSeg|_] -> {reconstruct_seq_id(MinSeg, 0),
- reconstruct_seq_id(1 + lists:last(SegNums), 0)}
- end,
- {LowSeqId, NextSeqId, State}.
-
-recover(DurableQueues) ->
- DurableDict = dict:from_list([ {queue_name_to_dir_name(Queue), Queue} ||
- Queue <- DurableQueues ]),
- QueuesDir = queues_dir(),
- QueueDirNames = all_queue_directory_names(QueuesDir),
- DurableDirectories = sets:from_list(dict:fetch_keys(DurableDict)),
- {DurableQueueNames, DurableTerms} =
- lists:foldl(
- fun (QueueDirName, {DurableAcc, TermsAcc}) ->
- QueueDirPath = filename:join(QueuesDir, QueueDirName),
- case sets:is_element(QueueDirName, DurableDirectories) of
- true ->
- TermsAcc1 =
- case read_shutdown_terms(QueueDirPath) of
- {error, _} -> TermsAcc;
- {ok, Terms} -> [Terms | TermsAcc]
- end,
- {[dict:fetch(QueueDirName, DurableDict) | DurableAcc],
- TermsAcc1};
- false ->
- ok = rabbit_file:recursive_delete([QueueDirPath]),
- {DurableAcc, TermsAcc}
- end
- end, {[], []}, QueueDirNames),
- {DurableTerms, {fun queue_index_walker/1, {start, DurableQueueNames}}}.
-
-all_queue_directory_names(Dir) ->
- case rabbit_file:list_dir(Dir) of
- {ok, Entries} -> [ Entry || Entry <- Entries,
- rabbit_file:is_dir(
- filename:join(Dir, Entry)) ];
- {error, enoent} -> []
- end.
-
-%%----------------------------------------------------------------------------
-%% startup and shutdown
-%%----------------------------------------------------------------------------
-
-blank_state(QueueName) ->
- blank_state_dir(
- filename:join(queues_dir(), queue_name_to_dir_name(QueueName))).
-
-blank_state_dir(Dir) ->
- {ok, MaxJournal} =
- application:get_env(rabbit, queue_index_max_journal_entries),
- #qistate { dir = Dir,
- segments = segments_new(),
- journal_handle = undefined,
- dirty_count = 0,
- max_journal_entries = MaxJournal,
- on_sync = fun (_) -> ok end,
- unconfirmed = gb_sets:new() }.
-
-clean_filename(Dir) -> filename:join(Dir, ?CLEAN_FILENAME).
-
-detect_clean_shutdown(Dir) ->
- case rabbit_file:delete(clean_filename(Dir)) of
- ok -> true;
- {error, enoent} -> false
- end.
-
-read_shutdown_terms(Dir) ->
- rabbit_file:read_term_file(clean_filename(Dir)).
-
-store_clean_shutdown(Terms, Dir) ->
- CleanFileName = clean_filename(Dir),
- ok = rabbit_file:ensure_dir(CleanFileName),
- rabbit_file:write_term_file(CleanFileName, Terms).
-
-init_clean(RecoveredCounts, State) ->
- %% Load the journal. Since this is a clean recovery this (almost)
- %% gets us back to where we were on shutdown.
- State1 = #qistate { dir = Dir, segments = Segments } = load_journal(State),
- %% The journal loading only creates records for segments touched
- %% by the journal, and the counts are based on the journal entries
- %% only. We need *complete* counts for *all* segments. By an
- %% amazing coincidence we stored that information on shutdown.
- Segments1 =
- lists:foldl(
- fun ({Seg, UnackedCount}, SegmentsN) ->
- Segment = segment_find_or_new(Seg, Dir, SegmentsN),
- segment_store(Segment #segment { unacked = UnackedCount },
- SegmentsN)
- end, Segments, RecoveredCounts),
- %% the counts above include transient messages, which would be the
- %% wrong thing to return
- {undefined, State1 # qistate { segments = Segments1 }}.
-
-init_dirty(CleanShutdown, ContainsCheckFun, State) ->
- %% Recover the journal completely. This will also load segments
- %% which have entries in the journal and remove duplicates. The
- %% counts will correctly reflect the combination of the segment
- %% and the journal.
- State1 = #qistate { dir = Dir, segments = Segments } =
- recover_journal(State),
- {Segments1, Count} =
- %% Load each segment in turn and filter out messages that are
- %% not in the msg_store, by adding acks to the journal. These
- %% acks only go to the RAM journal as it doesn't matter if we
- %% lose them. Also mark delivered if not clean shutdown. Also
- %% find the number of unacked messages.
- lists:foldl(
- fun (Seg, {Segments2, CountAcc}) ->
- Segment = #segment { unacked = UnackedCount } =
- recover_segment(ContainsCheckFun, CleanShutdown,
- segment_find_or_new(Seg, Dir, Segments2)),
- {segment_store(Segment, Segments2), CountAcc + UnackedCount}
- end, {Segments, 0}, all_segment_nums(State1)),
- %% Unconditionally flush since the dirty_count doesn't get updated
- %% by the above foldl.
- State2 = flush_journal(State1 #qistate { segments = Segments1 }),
- {Count, State2}.
-
-terminate(State = #qistate { journal_handle = JournalHdl,
- segments = Segments }) ->
- ok = case JournalHdl of
- undefined -> ok;
- _ -> file_handle_cache:close(JournalHdl)
- end,
- SegmentCounts =
- segment_fold(
- fun (#segment { num = Seg, unacked = UnackedCount }, Acc) ->
- [{Seg, UnackedCount} | Acc]
- end, [], Segments),
- {SegmentCounts, State #qistate { journal_handle = undefined,
- segments = undefined }}.
-
-recover_segment(ContainsCheckFun, CleanShutdown,
- Segment = #segment { journal_entries = JEntries }) ->
- {SegEntries, UnackedCount} = load_segment(false, Segment),
- {SegEntries1, UnackedCountDelta} =
- segment_plus_journal(SegEntries, JEntries),
- array:sparse_foldl(
- fun (RelSeq, {{MsgId, _MsgProps, _IsPersistent}, Del, no_ack},
- Segment1) ->
- recover_message(ContainsCheckFun(MsgId), CleanShutdown,
- Del, RelSeq, Segment1)
- end,
- Segment #segment { unacked = UnackedCount + UnackedCountDelta },
- SegEntries1).
-
-recover_message( true, true, _Del, _RelSeq, Segment) ->
- Segment;
-recover_message( true, false, del, _RelSeq, Segment) ->
- Segment;
-recover_message( true, false, no_del, RelSeq, Segment) ->
- add_to_journal(RelSeq, del, Segment);
-recover_message(false, _, del, RelSeq, Segment) ->
- add_to_journal(RelSeq, ack, Segment);
-recover_message(false, _, no_del, RelSeq, Segment) ->
- add_to_journal(RelSeq, ack, add_to_journal(RelSeq, del, Segment)).
-
-queue_name_to_dir_name(Name = #resource { kind = queue }) ->
- <<Num:128>> = erlang:md5(term_to_binary(Name)),
- rabbit_misc:format("~.36B", [Num]).
-
-queues_dir() ->
- filename:join(rabbit_mnesia:dir(), "queues").
-
-%%----------------------------------------------------------------------------
-%% msg store startup delta function
-%%----------------------------------------------------------------------------
-
-queue_index_walker({start, DurableQueues}) when is_list(DurableQueues) ->
- {ok, Gatherer} = gatherer:start_link(),
- [begin
- ok = gatherer:fork(Gatherer),
- ok = worker_pool:submit_async(
- fun () -> link(Gatherer),
- ok = queue_index_walker_reader(QueueName, Gatherer),
- unlink(Gatherer),
- ok
- end)
- end || QueueName <- DurableQueues],
- queue_index_walker({next, Gatherer});
-
-queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
- case gatherer:out(Gatherer) of
- empty ->
- unlink(Gatherer),
- ok = gatherer:stop(Gatherer),
- finished;
- {value, {MsgId, Count}} ->
- {MsgId, Count, {next, Gatherer}}
- end.
-
-queue_index_walker_reader(QueueName, Gatherer) ->
- State = blank_state(QueueName),
- ok = scan_segments(
- fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok) ->
- gatherer:sync_in(Gatherer, {MsgId, 1});
- (_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
- _IsAcked, Acc) ->
- Acc
- end, ok, State),
- ok = gatherer:finish(Gatherer).
-
-scan(Dir, Fun, Acc) ->
- scan_segments(Fun, Acc, blank_state_dir(Dir)).
-
-scan_segments(Fun, Acc, State) ->
- State1 = #qistate { segments = Segments, dir = Dir } =
- recover_journal(State),
- Result = lists:foldr(
- fun (Seg, AccN) ->
- segment_entries_foldr(
- fun (RelSeq, {{MsgId, MsgProps, IsPersistent},
- IsDelivered, IsAcked}, AccM) ->
- Fun(reconstruct_seq_id(Seg, RelSeq), MsgId, MsgProps,
- IsPersistent, IsDelivered, IsAcked, AccM)
- end, AccN, segment_find_or_new(Seg, Dir, Segments))
- end, Acc, all_segment_nums(State1)),
- {_SegmentCounts, _State} = terminate(State1),
- Result.
-
-%%----------------------------------------------------------------------------
-%% expiry/binary manipulation
-%%----------------------------------------------------------------------------
-
-create_pub_record_body(MsgId, #message_properties { expiry = Expiry }) ->
- [MsgId, expiry_to_binary(Expiry)].
-
-expiry_to_binary(undefined) -> <<?NO_EXPIRY:?EXPIRY_BITS>>;
-expiry_to_binary(Expiry) -> <<Expiry:?EXPIRY_BITS>>.
-
-parse_pub_record_body(<<MsgIdNum:?MSG_ID_BITS, Expiry:?EXPIRY_BITS>>) ->
- %% work around for binary data fragmentation. See
- %% rabbit_msg_file:read_next/2
- <<MsgId:?MSG_ID_BYTES/binary>> = <<MsgIdNum:?MSG_ID_BITS>>,
- Exp = case Expiry of
- ?NO_EXPIRY -> undefined;
- X -> X
- end,
- {MsgId, #message_properties { expiry = Exp }}.
-
-%%----------------------------------------------------------------------------
-%% journal manipulation
-%%----------------------------------------------------------------------------
-
-add_to_journal(SeqId, Action, State = #qistate { dirty_count = DCount,
- segments = Segments,
- dir = Dir }) ->
- {Seg, RelSeq} = seq_id_to_seg_and_rel_seq_id(SeqId),
- Segment = segment_find_or_new(Seg, Dir, Segments),
- Segment1 = add_to_journal(RelSeq, Action, Segment),
- State #qistate { dirty_count = DCount + 1,
- segments = segment_store(Segment1, Segments) };
-
-add_to_journal(RelSeq, Action,
- Segment = #segment { journal_entries = JEntries,
- unacked = UnackedCount }) ->
- Segment #segment {
- journal_entries = add_to_journal(RelSeq, Action, JEntries),
- unacked = UnackedCount + case Action of
- ?PUB -> +1;
- del -> 0;
- ack -> -1
- end};
-
-add_to_journal(RelSeq, Action, JEntries) ->
- case array:get(RelSeq, JEntries) of
- undefined ->
- array:set(RelSeq,
- case Action of
- ?PUB -> {Action, no_del, no_ack};
- del -> {no_pub, del, no_ack};
- ack -> {no_pub, no_del, ack}
- end, JEntries);
- ({Pub, no_del, no_ack}) when Action == del ->
- array:set(RelSeq, {Pub, del, no_ack}, JEntries);
- ({no_pub, del, no_ack}) when Action == ack ->
- array:set(RelSeq, {no_pub, del, ack}, JEntries);
- ({?PUB, del, no_ack}) when Action == ack ->
- array:reset(RelSeq, JEntries)
- end.
-
-maybe_flush_journal(State = #qistate { dirty_count = DCount,
- max_journal_entries = MaxJournal })
- when DCount > MaxJournal ->
- flush_journal(State);
-maybe_flush_journal(State) ->
- State.
-
-flush_journal(State = #qistate { segments = Segments }) ->
- Segments1 =
- segment_fold(
- fun (#segment { unacked = 0, path = Path }, SegmentsN) ->
- case rabbit_file:is_file(Path) of
- true -> ok = rabbit_file:delete(Path);
- false -> ok
- end,
- SegmentsN;
- (#segment {} = Segment, SegmentsN) ->
- segment_store(append_journal_to_segment(Segment), SegmentsN)
- end, segments_new(), Segments),
- {JournalHdl, State1} =
- get_journal_handle(State #qistate { segments = Segments1 }),
- ok = file_handle_cache:clear(JournalHdl),
- notify_sync(State1 #qistate { dirty_count = 0 }).
-
-append_journal_to_segment(#segment { journal_entries = JEntries,
- path = Path } = Segment) ->
- case array:sparse_size(JEntries) of
- 0 -> Segment;
- _ -> {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
- [{write_buffer, infinity}]),
- array:sparse_foldl(fun write_entry_to_segment/3, Hdl, JEntries),
- ok = file_handle_cache:close(Hdl),
- Segment #segment { journal_entries = array_new() }
- end.
-
-get_journal_handle(State = #qistate { journal_handle = undefined,
- dir = Dir }) ->
- Path = filename:join(Dir, ?JOURNAL_FILENAME),
- ok = rabbit_file:ensure_dir(Path),
- {ok, Hdl} = file_handle_cache:open(Path, ?WRITE_MODE,
- [{write_buffer, infinity}]),
- {Hdl, State #qistate { journal_handle = Hdl }};
-get_journal_handle(State = #qistate { journal_handle = Hdl }) ->
- {Hdl, State}.
-
-%% Loading Journal. This isn't idempotent and will mess up the counts
-%% if you call it more than once on the same state. Assumes the counts
-%% are 0 to start with.
-load_journal(State) ->
- {JournalHdl, State1} = get_journal_handle(State),
- {ok, 0} = file_handle_cache:position(JournalHdl, 0),
- load_journal_entries(State1).
-
-%% ditto
-recover_journal(State) ->
- State1 = #qistate { segments = Segments } = load_journal(State),
- Segments1 =
- segment_map(
- fun (Segment = #segment { journal_entries = JEntries,
- unacked = UnackedCountInJournal }) ->
- %% We want to keep ack'd entries in so that we can
- %% remove them if duplicates are in the journal. The
- %% counts here are purely from the segment itself.
- {SegEntries, UnackedCountInSeg} = load_segment(true, Segment),
- {JEntries1, UnackedCountDuplicates} =
- journal_minus_segment(JEntries, SegEntries),
- Segment #segment { journal_entries = JEntries1,
- unacked = (UnackedCountInJournal +
- UnackedCountInSeg -
- UnackedCountDuplicates) }
- end, Segments),
- State1 #qistate { segments = Segments1 }.
-
-load_journal_entries(State = #qistate { journal_handle = Hdl }) ->
- case file_handle_cache:read(Hdl, ?SEQ_BYTES) of
- {ok, <<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>} ->
- case Prefix of
- ?DEL_JPREFIX ->
- load_journal_entries(add_to_journal(SeqId, del, State));
- ?ACK_JPREFIX ->
- load_journal_entries(add_to_journal(SeqId, ack, State));
- _ ->
- case file_handle_cache:read(Hdl, ?PUB_RECORD_BODY_BYTES) of
- {ok, Bin} ->
- {MsgId, MsgProps} = parse_pub_record_body(Bin),
- IsPersistent = case Prefix of
- ?PUB_PERSIST_JPREFIX -> true;
- ?PUB_TRANS_JPREFIX -> false
- end,
- load_journal_entries(
- add_to_journal(
- SeqId, {MsgId, MsgProps, IsPersistent}, State));
- _ErrOrEoF -> %% err, we've lost at least a publish
- State
- end
- end;
- _ErrOrEoF -> State
- end.
-
-deliver_or_ack(_Kind, [], State) ->
- State;
-deliver_or_ack(Kind, SeqIds, State) ->
- JPrefix = case Kind of ack -> ?ACK_JPREFIX; del -> ?DEL_JPREFIX end,
- {JournalHdl, State1} = get_journal_handle(State),
- ok = file_handle_cache:append(
- JournalHdl,
- [<<JPrefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>> || SeqId <- SeqIds]),
- maybe_flush_journal(lists:foldl(fun (SeqId, StateN) ->
- add_to_journal(SeqId, Kind, StateN)
- end, State1, SeqIds)).
-
-notify_sync(State = #qistate { unconfirmed = UC, on_sync = OnSyncFun }) ->
- case gb_sets:is_empty(UC) of
- true -> State;
- false -> OnSyncFun(UC),
- State #qistate { unconfirmed = gb_sets:new() }
- end.
-
-%%----------------------------------------------------------------------------
-%% segment manipulation
-%%----------------------------------------------------------------------------
-
-seq_id_to_seg_and_rel_seq_id(SeqId) ->
- { SeqId div ?SEGMENT_ENTRY_COUNT, SeqId rem ?SEGMENT_ENTRY_COUNT }.
-
-reconstruct_seq_id(Seg, RelSeq) ->
- (Seg * ?SEGMENT_ENTRY_COUNT) + RelSeq.
-
-all_segment_nums(#qistate { dir = Dir, segments = Segments }) ->
- lists:sort(
- sets:to_list(
- lists:foldl(
- fun (SegName, Set) ->
- sets:add_element(
- list_to_integer(
- lists:takewhile(fun (C) -> $0 =< C andalso C =< $9 end,
- SegName)), Set)
- end, sets:from_list(segment_nums(Segments)),
- rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)))).
-
-segment_find_or_new(Seg, Dir, Segments) ->
- case segment_find(Seg, Segments) of
- {ok, Segment} -> Segment;
- error -> SegName = integer_to_list(Seg) ++ ?SEGMENT_EXTENSION,
- Path = filename:join(Dir, SegName),
- #segment { num = Seg,
- path = Path,
- journal_entries = array_new(),
- unacked = 0 }
- end.
-
-segment_find(Seg, {_Segments, [Segment = #segment { num = Seg } |_]}) ->
- {ok, Segment}; %% 1 or (2, matches head)
-segment_find(Seg, {_Segments, [_, Segment = #segment { num = Seg }]}) ->
- {ok, Segment}; %% 2, matches tail
-segment_find(Seg, {Segments, _}) -> %% no match
- dict:find(Seg, Segments).
-
-segment_store(Segment = #segment { num = Seg }, %% 1 or (2, matches head)
- {Segments, [#segment { num = Seg } | Tail]}) ->
- {Segments, [Segment | Tail]};
-segment_store(Segment = #segment { num = Seg }, %% 2, matches tail
- {Segments, [SegmentA, #segment { num = Seg }]}) ->
- {Segments, [Segment, SegmentA]};
-segment_store(Segment = #segment { num = Seg }, {Segments, []}) ->
- {dict:erase(Seg, Segments), [Segment]};
-segment_store(Segment = #segment { num = Seg }, {Segments, [SegmentA]}) ->
- {dict:erase(Seg, Segments), [Segment, SegmentA]};
-segment_store(Segment = #segment { num = Seg },
- {Segments, [SegmentA, SegmentB]}) ->
- {dict:store(SegmentB#segment.num, SegmentB, dict:erase(Seg, Segments)),
- [Segment, SegmentA]}.
-
-segment_fold(Fun, Acc, {Segments, CachedSegments}) ->
- dict:fold(fun (_Seg, Segment, Acc1) -> Fun(Segment, Acc1) end,
- lists:foldl(Fun, Acc, CachedSegments), Segments).
-
-segment_map(Fun, {Segments, CachedSegments}) ->
- {dict:map(fun (_Seg, Segment) -> Fun(Segment) end, Segments),
- lists:map(Fun, CachedSegments)}.
-
-segment_nums({Segments, CachedSegments}) ->
- lists:map(fun (#segment { num = Num }) -> Num end, CachedSegments) ++
- dict:fetch_keys(Segments).
-
-segments_new() ->
- {dict:new(), []}.
-
-write_entry_to_segment(_RelSeq, {?PUB, del, ack}, Hdl) ->
- Hdl;
-write_entry_to_segment(RelSeq, {Pub, Del, Ack}, Hdl) ->
- ok = case Pub of
- no_pub ->
- ok;
- {MsgId, MsgProps, IsPersistent} ->
- file_handle_cache:append(
- Hdl, [<<?PUB_PREFIX:?PUB_PREFIX_BITS,
- (bool_to_int(IsPersistent)):1,
- RelSeq:?REL_SEQ_BITS>>,
- create_pub_record_body(MsgId, MsgProps)])
- end,
- ok = case {Del, Ack} of
- {no_del, no_ack} ->
- ok;
- _ ->
- Binary = <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
- RelSeq:?REL_SEQ_BITS>>,
- file_handle_cache:append(
- Hdl, case {Del, Ack} of
- {del, ack} -> [Binary, Binary];
- _ -> Binary
- end)
- end,
- Hdl.
-
-read_bounded_segment(Seg, {StartSeg, StartRelSeq}, {EndSeg, EndRelSeq},
- {Messages, Segments}, Dir) ->
- Segment = segment_find_or_new(Seg, Dir, Segments),
- {segment_entries_foldr(
- fun (RelSeq, {{MsgId, MsgProps, IsPersistent}, IsDelivered, no_ack}, Acc)
- when (Seg > StartSeg orelse StartRelSeq =< RelSeq) andalso
- (Seg < EndSeg orelse EndRelSeq >= RelSeq) ->
- [ {MsgId, reconstruct_seq_id(StartSeg, RelSeq), MsgProps,
- IsPersistent, IsDelivered == del} | Acc ];
- (_RelSeq, _Value, Acc) ->
- Acc
- end, Messages, Segment),
- segment_store(Segment, Segments)}.
-
-segment_entries_foldr(Fun, Init,
- Segment = #segment { journal_entries = JEntries }) ->
- {SegEntries, _UnackedCount} = load_segment(false, Segment),
- {SegEntries1, _UnackedCountD} = segment_plus_journal(SegEntries, JEntries),
- array:sparse_foldr(Fun, Init, SegEntries1).
-
-%% Loading segments
-%%
-%% Does not do any combining with the journal at all.
-load_segment(KeepAcked, #segment { path = Path }) ->
- Empty = {array_new(), 0},
- case rabbit_file:is_file(Path) of
- false -> Empty;
- true -> {ok, Hdl} = file_handle_cache:open(Path, ?READ_AHEAD_MODE, []),
- {ok, 0} = file_handle_cache:position(Hdl, bof),
- Res = case file_handle_cache:read(Hdl, ?SEGMENT_TOTAL_SIZE) of
- {ok, SegData} -> load_segment_entries(
- KeepAcked, SegData, Empty);
- eof -> Empty
- end,
- ok = file_handle_cache:close(Hdl),
- Res
- end.
-
-load_segment_entries(KeepAcked,
- <<?PUB_PREFIX:?PUB_PREFIX_BITS,
- IsPersistentNum:1, RelSeq:?REL_SEQ_BITS,
- PubRecordBody:?PUB_RECORD_BODY_BYTES/binary,
- SegData/binary>>,
- {SegEntries, UnackedCount}) ->
- {MsgId, MsgProps} = parse_pub_record_body(PubRecordBody),
- Obj = {{MsgId, MsgProps, 1 == IsPersistentNum}, no_del, no_ack},
- SegEntries1 = array:set(RelSeq, Obj, SegEntries),
- load_segment_entries(KeepAcked, SegData, {SegEntries1, UnackedCount + 1});
-load_segment_entries(KeepAcked,
- <<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
- RelSeq:?REL_SEQ_BITS, SegData/binary>>,
- {SegEntries, UnackedCount}) ->
- {UnackedCountDelta, SegEntries1} =
- case array:get(RelSeq, SegEntries) of
- {Pub, no_del, no_ack} ->
- { 0, array:set(RelSeq, {Pub, del, no_ack}, SegEntries)};
- {Pub, del, no_ack} when KeepAcked ->
- {-1, array:set(RelSeq, {Pub, del, ack}, SegEntries)};
- {_Pub, del, no_ack} ->
- {-1, array:reset(RelSeq, SegEntries)}
- end,
- load_segment_entries(KeepAcked, SegData,
- {SegEntries1, UnackedCount + UnackedCountDelta});
-load_segment_entries(_KeepAcked, _SegData, Res) ->
- Res.
-
-array_new() ->
- array:new([{default, undefined}, fixed, {size, ?SEGMENT_ENTRY_COUNT}]).
-
-bool_to_int(true ) -> 1;
-bool_to_int(false) -> 0.
-
-%%----------------------------------------------------------------------------
-%% journal & segment combination
-%%----------------------------------------------------------------------------
-
-%% Combine what we have just read from a segment file with what we're
-%% holding for that segment in memory. There must be no duplicates.
-segment_plus_journal(SegEntries, JEntries) ->
- array:sparse_foldl(
- fun (RelSeq, JObj, {SegEntriesOut, AdditionalUnacked}) ->
- SegEntry = array:get(RelSeq, SegEntriesOut),
- {Obj, AdditionalUnackedDelta} =
- segment_plus_journal1(SegEntry, JObj),
- {case Obj of
- undefined -> array:reset(RelSeq, SegEntriesOut);
- _ -> array:set(RelSeq, Obj, SegEntriesOut)
- end,
- AdditionalUnacked + AdditionalUnackedDelta}
- end, {SegEntries, 0}, JEntries).
-
-%% Here, the result is a tuple with the first element containing the
-%% item which we may be adding to (for items only in the journal),
-%% modifying in (bits in both), or, when returning 'undefined',
-%% erasing from (ack in journal, not segment) the segment array. The
-%% other element of the tuple is the delta for AdditionalUnacked.
-segment_plus_journal1(undefined, {?PUB, no_del, no_ack} = Obj) ->
- {Obj, 1};
-segment_plus_journal1(undefined, {?PUB, del, no_ack} = Obj) ->
- {Obj, 1};
-segment_plus_journal1(undefined, {?PUB, del, ack}) ->
- {undefined, 0};
-
-segment_plus_journal1({?PUB = Pub, no_del, no_ack}, {no_pub, del, no_ack}) ->
- {{Pub, del, no_ack}, 0};
-segment_plus_journal1({?PUB, no_del, no_ack}, {no_pub, del, ack}) ->
- {undefined, -1};
-segment_plus_journal1({?PUB, del, no_ack}, {no_pub, no_del, ack}) ->
- {undefined, -1}.
-
-%% Remove from the journal entries for a segment, items that are
-%% duplicates of entries found in the segment itself. Used on start up
-%% to clean up the journal.
-journal_minus_segment(JEntries, SegEntries) ->
- array:sparse_foldl(
- fun (RelSeq, JObj, {JEntriesOut, UnackedRemoved}) ->
- SegEntry = array:get(RelSeq, SegEntries),
- {Obj, UnackedRemovedDelta} =
- journal_minus_segment1(JObj, SegEntry),
- {case Obj of
- keep -> JEntriesOut;
- undefined -> array:reset(RelSeq, JEntriesOut);
- _ -> array:set(RelSeq, Obj, JEntriesOut)
- end,
- UnackedRemoved + UnackedRemovedDelta}
- end, {JEntries, 0}, JEntries).
-
-%% Here, the result is a tuple with the first element containing the
-%% item we are adding to or modifying in the (initially fresh) journal
-%% array. If the item is 'undefined' we leave the journal array
-%% alone. The other element of the tuple is the deltas for
-%% UnackedRemoved.
-
-%% Both the same. Must be at least the publish
-journal_minus_segment1({?PUB, _Del, no_ack} = Obj, Obj) ->
- {undefined, 1};
-journal_minus_segment1({?PUB, _Del, ack} = Obj, Obj) ->
- {undefined, 0};
-
-%% Just publish in journal
-journal_minus_segment1({?PUB, no_del, no_ack}, undefined) ->
- {keep, 0};
-
-%% Publish and deliver in journal
-journal_minus_segment1({?PUB, del, no_ack}, undefined) ->
- {keep, 0};
-journal_minus_segment1({?PUB = Pub, del, no_ack}, {Pub, no_del, no_ack}) ->
- {{no_pub, del, no_ack}, 1};
-
-%% Publish, deliver and ack in journal
-journal_minus_segment1({?PUB, del, ack}, undefined) ->
- {keep, 0};
-journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, no_del, no_ack}) ->
- {{no_pub, del, ack}, 1};
-journal_minus_segment1({?PUB = Pub, del, ack}, {Pub, del, no_ack}) ->
- {{no_pub, no_del, ack}, 1};
-
-%% Just deliver in journal
-journal_minus_segment1({no_pub, del, no_ack}, {?PUB, no_del, no_ack}) ->
- {keep, 0};
-journal_minus_segment1({no_pub, del, no_ack}, {?PUB, del, no_ack}) ->
- {undefined, 0};
-
-%% Just ack in journal
-journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, no_ack}) ->
- {keep, 0};
-journal_minus_segment1({no_pub, no_del, ack}, {?PUB, del, ack}) ->
- {undefined, -1};
-
-%% Deliver and ack in journal
-journal_minus_segment1({no_pub, del, ack}, {?PUB, no_del, no_ack}) ->
- {keep, 0};
-journal_minus_segment1({no_pub, del, ack}, {?PUB, del, no_ack}) ->
- {{no_pub, no_del, ack}, 0};
-journal_minus_segment1({no_pub, del, ack}, {?PUB, del, ack}) ->
- {undefined, -1};
-
-%% Missing segment. If flush_journal/1 is interrupted after deleting
-%% the segment but before truncating the journal we can get these
-%% cases: a delivery and an acknowledgement in the journal, or just an
-%% acknowledgement in the journal, but with no segment. In both cases
-%% we have really forgotten the message; so ignore what's in the
-%% journal.
-journal_minus_segment1({no_pub, no_del, ack}, undefined) ->
- {undefined, 0};
-journal_minus_segment1({no_pub, del, ack}, undefined) ->
- {undefined, 0}.
-
-%%----------------------------------------------------------------------------
-%% upgrade
-%%----------------------------------------------------------------------------
-
-add_queue_ttl() ->
- foreach_queue_index({fun add_queue_ttl_journal/1,
- fun add_queue_ttl_segment/1}).
-
-add_queue_ttl_journal(<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
- Rest/binary>>) ->
- {<<?DEL_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
-add_queue_ttl_journal(<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS,
- Rest/binary>>) ->
- {<<?ACK_JPREFIX:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, Rest};
-add_queue_ttl_journal(<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS,
- MsgId:?MSG_ID_BYTES/binary, Rest/binary>>) ->
- {[<<Prefix:?JPREFIX_BITS, SeqId:?SEQ_BITS>>, MsgId,
- expiry_to_binary(undefined)], Rest};
-add_queue_ttl_journal(_) ->
- stop.
-
-add_queue_ttl_segment(<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1,
- RelSeq:?REL_SEQ_BITS, MsgId:?MSG_ID_BYTES/binary,
- Rest/binary>>) ->
- {[<<?PUB_PREFIX:?PUB_PREFIX_BITS, IsPersistentNum:1, RelSeq:?REL_SEQ_BITS>>,
- MsgId, expiry_to_binary(undefined)], Rest};
-add_queue_ttl_segment(<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS,
- RelSeq:?REL_SEQ_BITS, Rest/binary>>) ->
- {<<?REL_SEQ_ONLY_PREFIX:?REL_SEQ_ONLY_PREFIX_BITS, RelSeq:?REL_SEQ_BITS>>,
- Rest};
-add_queue_ttl_segment(_) ->
- stop.
-
-%%----------------------------------------------------------------------------
-
-foreach_queue_index(Funs) ->
- QueuesDir = queues_dir(),
- QueueDirNames = all_queue_directory_names(QueuesDir),
- {ok, Gatherer} = gatherer:start_link(),
- [begin
- ok = gatherer:fork(Gatherer),
- ok = worker_pool:submit_async(
- fun () ->
- transform_queue(filename:join(QueuesDir, QueueDirName),
- Gatherer, Funs)
- end)
- end || QueueDirName <- QueueDirNames],
- empty = gatherer:out(Gatherer),
- unlink(Gatherer),
- ok = gatherer:stop(Gatherer).
-
-transform_queue(Dir, Gatherer, {JournalFun, SegmentFun}) ->
- ok = transform_file(filename:join(Dir, ?JOURNAL_FILENAME), JournalFun),
- [ok = transform_file(filename:join(Dir, Seg), SegmentFun)
- || Seg <- rabbit_file:wildcard(".*\\" ++ ?SEGMENT_EXTENSION, Dir)],
- ok = gatherer:finish(Gatherer).
-
-transform_file(Path, Fun) ->
- PathTmp = Path ++ ".upgrade",
- case rabbit_file:file_size(Path) of
- 0 -> ok;
- Size -> {ok, PathTmpHdl} =
- file_handle_cache:open(PathTmp, ?WRITE_MODE,
- [{write_buffer, infinity}]),
-
- {ok, PathHdl} = file_handle_cache:open(
- Path, [{read_ahead, Size} | ?READ_MODE], []),
- {ok, Content} = file_handle_cache:read(PathHdl, Size),
- ok = file_handle_cache:close(PathHdl),
-
- ok = drive_transform_fun(Fun, PathTmpHdl, Content),
-
- ok = file_handle_cache:close(PathTmpHdl),
- ok = rabbit_file:rename(PathTmp, Path)
- end.
-
-drive_transform_fun(Fun, Hdl, Contents) ->
- case Fun(Contents) of
- stop -> ok;
- {Output, Contents1} -> ok = file_handle_cache:append(Hdl, Output),
- drive_transform_fun(Fun, Hdl, Contents1)
- end.
diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl
deleted file mode 100644
index 9b6039d1..00000000
--- a/src/rabbit_reader.erl
+++ /dev/null
@@ -1,1059 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_reader).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([start_link/3, info_keys/0, info/1, info/2, force_event_refresh/1,
- shutdown/2]).
-
--export([system_continue/3, system_terminate/4, system_code_change/4]).
-
--export([init/4, mainloop/2, recvloop/2]).
-
--export([conserve_resources/3, server_properties/1]).
-
--define(HANDSHAKE_TIMEOUT, 10).
--define(NORMAL_TIMEOUT, 3).
--define(CLOSING_TIMEOUT, 30).
--define(CHANNEL_TERMINATION_TIMEOUT, 3).
--define(SILENT_CLOSE_DELAY, 3).
-
-%%--------------------------------------------------------------------------
-
--record(v1, {parent, sock, connection, callback, recv_len, pending_recv,
- connection_state, queue_collector, heartbeater, stats_timer,
- ch_sup3_pid, channel_sup_sup_pid, start_heartbeat_fun,
- buf, buf_len, throttle}).
-
--record(connection, {name, host, peer_host, port, peer_port,
- protocol, user, timeout_sec, frame_max, vhost,
- client_properties, capabilities,
- auth_mechanism, auth_state}).
-
--record(throttle, {conserve_resources, last_blocked_by, last_blocked_at}).
-
--define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
- send_pend, state, last_blocked_by, last_blocked_age,
- channels]).
-
--define(CREATION_EVENT_KEYS,
- [pid, name, port, peer_port, host,
- peer_host, ssl, peer_cert_subject, peer_cert_issuer,
- peer_cert_validity, auth_mechanism, ssl_protocol,
- ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
- timeout, frame_max, client_properties]).
-
--define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
-
--define(IS_RUNNING(State),
- (State#v1.connection_state =:= running orelse
- State#v1.connection_state =:= blocking orelse
- State#v1.connection_state =:= blocked)).
-
--define(IS_STOPPING(State),
- (State#v1.connection_state =:= closing orelse
- State#v1.connection_state =:= closed)).
-
-%%--------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/3 :: (pid(), pid(), rabbit_heartbeat:start_heartbeat_fun()) ->
- rabbit_types:ok(pid())).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(force_event_refresh/1 :: (pid()) -> 'ok').
--spec(shutdown/2 :: (pid(), string()) -> 'ok').
--spec(conserve_resources/3 :: (pid(), atom(), boolean()) -> 'ok').
--spec(server_properties/1 :: (rabbit_types:protocol()) ->
- rabbit_framing:amqp_table()).
-
-%% These specs only exists to add no_return() to keep dialyzer happy
--spec(init/4 :: (pid(), pid(), pid(), rabbit_heartbeat:start_heartbeat_fun())
- -> no_return()).
--spec(start_connection/7 ::
- (pid(), pid(), pid(), rabbit_heartbeat:start_heartbeat_fun(), any(),
- rabbit_net:socket(),
- fun ((rabbit_net:socket()) ->
- rabbit_types:ok_or_error2(
- rabbit_net:socket(), any()))) -> no_return()).
-
--spec(mainloop/2 :: (_,#v1{}) -> any()).
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,#v1{}) -> any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
-
--endif.
-
-%%--------------------------------------------------------------------------
-
-start_link(ChannelSup3Pid, Collector, StartHeartbeatFun) ->
- {ok, proc_lib:spawn_link(?MODULE, init, [self(), ChannelSup3Pid,
- Collector, StartHeartbeatFun])}.
-
-shutdown(Pid, Explanation) ->
- gen_server:call(Pid, {shutdown, Explanation}, infinity).
-
-init(Parent, ChSup3Pid, Collector, StartHeartbeatFun) ->
- Deb = sys:debug_options([]),
- receive
- {go, Sock, SockTransform} ->
- start_connection(
- Parent, ChSup3Pid, Collector, StartHeartbeatFun, Deb, Sock,
- SockTransform)
- end.
-
-system_continue(Parent, Deb, State) ->
- ?MODULE:mainloop(Deb, State#v1{parent = Parent}).
-
-system_terminate(Reason, _Parent, _Deb, _State) ->
- exit(Reason).
-
-system_code_change(Misc, _Module, _OldVsn, _Extra) ->
- {ok, Misc}.
-
-info_keys() -> ?INFO_KEYS.
-
-info(Pid) ->
- gen_server:call(Pid, info, infinity).
-
-info(Pid, Items) ->
- case gen_server:call(Pid, {info, Items}, infinity) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-force_event_refresh(Pid) ->
- gen_server:cast(Pid, force_event_refresh).
-
-conserve_resources(Pid, _Source, Conserve) ->
- Pid ! {conserve_resources, Conserve},
- ok.
-
-server_properties(Protocol) ->
- {ok, Product} = application:get_key(rabbit, id),
- {ok, Version} = application:get_key(rabbit, vsn),
-
- %% Get any configuration-specified server properties
- {ok, RawConfigServerProps} = application:get_env(rabbit,
- server_properties),
-
- %% Normalize the simplifed (2-tuple) and unsimplified (3-tuple) forms
- %% from the config and merge them with the generated built-in properties
- NormalizedConfigServerProps =
- [{<<"capabilities">>, table, server_capabilities(Protocol)} |
- [case X of
- {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)),
- longstr,
- list_to_binary(Value)};
- {BinKey, Type, Value} -> {BinKey, Type, Value}
- end || X <- RawConfigServerProps ++
- [{product, Product},
- {version, Version},
- {platform, "Erlang/OTP"},
- {copyright, ?COPYRIGHT_MESSAGE},
- {information, ?INFORMATION_MESSAGE}]]],
-
- %% Filter duplicated properties in favour of config file provided values
- lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end,
- NormalizedConfigServerProps).
-
-server_capabilities(rabbit_framing_amqp_0_9_1) ->
- [{<<"publisher_confirms">>, bool, true},
- {<<"exchange_exchange_bindings">>, bool, true},
- {<<"basic.nack">>, bool, true},
- {<<"consumer_cancel_notify">>, bool, true}];
-server_capabilities(_) ->
- [].
-
-%%--------------------------------------------------------------------------
-
-log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
-
-socket_error(Reason) ->
- log(error, "error on AMQP connection ~p: ~p (~s)~n",
- [self(), Reason, rabbit_misc:format_inet_error(Reason)]).
-
-inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
-
-socket_op(Sock, Fun) ->
- case Fun(Sock) of
- {ok, Res} -> Res;
- {error, Reason} -> socket_error(Reason),
- %% NB: this is tcp socket, even in case of ssl
- rabbit_net:fast_close(Sock),
- exit(normal)
- end.
-
-start_connection(Parent, ChSup3Pid, Collector, StartHeartbeatFun, Deb,
- Sock, SockTransform) ->
- process_flag(trap_exit, true),
- Name = case rabbit_net:connection_string(Sock, inbound) of
- {ok, Str} -> Str;
- {error, enotconn} -> rabbit_net:fast_close(Sock),
- exit(normal);
- {error, Reason} -> socket_error(Reason),
- rabbit_net:fast_close(Sock),
- exit(normal)
- end,
- log(info, "accepting AMQP connection ~p (~s)~n", [self(), Name]),
- ClientSock = socket_op(Sock, SockTransform),
- erlang:send_after(?HANDSHAKE_TIMEOUT * 1000, self(), handshake_timeout),
- {PeerHost, PeerPort, Host, Port} =
- socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
- State = #v1{parent = Parent,
- sock = ClientSock,
- connection = #connection{
- name = list_to_binary(Name),
- host = Host,
- peer_host = PeerHost,
- port = Port,
- peer_port = PeerPort,
- protocol = none,
- user = none,
- timeout_sec = ?HANDSHAKE_TIMEOUT,
- frame_max = ?FRAME_MIN_SIZE,
- vhost = none,
- client_properties = none,
- capabilities = [],
- auth_mechanism = none,
- auth_state = none},
- callback = uninitialized_callback,
- recv_len = 0,
- pending_recv = false,
- connection_state = pre_init,
- queue_collector = Collector,
- heartbeater = none,
- ch_sup3_pid = ChSup3Pid,
- channel_sup_sup_pid = none,
- start_heartbeat_fun = StartHeartbeatFun,
- buf = [],
- buf_len = 0,
- throttle = #throttle{
- conserve_resources = false,
- last_blocked_by = none,
- last_blocked_at = never}},
- try
- run({?MODULE, recvloop,
- [Deb, switch_callback(rabbit_event:init_stats_timer(
- State, #v1.stats_timer),
- handshake, 8)]}),
- log(info, "closing AMQP connection ~p (~s)~n", [self(), Name])
- catch
- Ex -> log(case Ex of
- connection_closed_abruptly -> warning;
- _ -> error
- end, "closing AMQP connection ~p (~s):~n~p~n",
- [self(), Name, Ex])
- after
- %% We don't call gen_tcp:close/1 here since it waits for
- %% pending output to be sent, which results in unnecessary
- %% delays. We could just terminate - the reader is the
- %% controlling process and hence its termination will close
- %% the socket. However, to keep the file_handle_cache
- %% accounting as accurate as possible we ought to close the
- %% socket w/o delay before termination.
- rabbit_net:fast_close(ClientSock),
- rabbit_networking:unregister_connection(self()),
- rabbit_event:notify(connection_closed, [{pid, self()}])
- end,
- done.
-
-run({M, F, A}) ->
- try apply(M, F, A)
- catch {become, MFA} -> run(MFA)
- end.
-
-recvloop(Deb, State = #v1{pending_recv = true}) ->
- mainloop(Deb, State);
-recvloop(Deb, State = #v1{connection_state = blocked}) ->
- mainloop(Deb, State);
-recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen})
- when BufLen < RecvLen ->
- ok = rabbit_net:setopts(Sock, [{active, once}]),
- mainloop(Deb, State#v1{pending_recv = true});
-recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) ->
- {Data, Rest} = split_binary(case Buf of
- [B] -> B;
- _ -> list_to_binary(lists:reverse(Buf))
- end, RecvLen),
- recvloop(Deb, handle_input(State#v1.callback, Data,
- State#v1{buf = [Rest],
- buf_len = BufLen - RecvLen})).
-
-mainloop(Deb, State = #v1{sock = Sock, buf = Buf, buf_len = BufLen}) ->
- case rabbit_net:recv(Sock) of
- {data, Data} ->
- recvloop(Deb, State#v1{buf = [Data | Buf],
- buf_len = BufLen + size(Data),
- pending_recv = false});
- closed when State#v1.connection_state =:= closed ->
- ok;
- closed ->
- maybe_emit_stats(State),
- throw(connection_closed_abruptly);
- {error, Reason} ->
- maybe_emit_stats(State),
- throw({inet_error, Reason});
- {other, {system, From, Request}} ->
- sys:handle_system_msg(Request, From, State#v1.parent,
- ?MODULE, Deb, State);
- {other, Other} ->
- case handle_other(Other, State) of
- stop -> ok;
- NewState -> recvloop(Deb, NewState)
- end
- end.
-
-handle_other({conserve_resources, Conserve},
- State = #v1{throttle = Throttle}) ->
- Throttle1 = Throttle#throttle{conserve_resources = Conserve},
- control_throttle(State#v1{throttle = Throttle1});
-handle_other({channel_closing, ChPid}, State) ->
- ok = rabbit_channel:ready_for_close(ChPid),
- channel_cleanup(ChPid),
- maybe_close(control_throttle(State));
-handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
- terminate(io_lib:format("broker forced connection closure "
- "with reason '~w'", [Reason]), State),
- %% this is what we are expected to do according to
- %% http://www.erlang.org/doc/man/sys.html
- %%
- %% If we wanted to be *really* nice we should wait for a while for
- %% clients to close the socket at their end, just as we do in the
- %% ordinary error case. However, since this termination is
- %% initiated by our parent it is probably more important to exit
- %% quickly.
- maybe_emit_stats(State),
- exit(Reason);
-handle_other({channel_exit, _Channel, E = {writer, send_failed, _E}}, State) ->
- maybe_emit_stats(State),
- throw(E);
-handle_other({channel_exit, Channel, Reason}, State) ->
- handle_exception(State, Channel, Reason);
-handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
- handle_dependent_exit(ChPid, Reason, State);
-handle_other(terminate_connection, State) ->
- maybe_emit_stats(State),
- stop;
-handle_other(handshake_timeout, State)
- when ?IS_RUNNING(State) orelse ?IS_STOPPING(State) ->
- State;
-handle_other(handshake_timeout, State) ->
- maybe_emit_stats(State),
- throw({handshake_timeout, State#v1.callback});
-handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
- State;
-handle_other(heartbeat_timeout, State = #v1{connection_state = S}) ->
- maybe_emit_stats(State),
- throw({heartbeat_timeout, S});
-handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
- {ForceTermination, NewState} = terminate(Explanation, State),
- gen_server:reply(From, ok),
- case ForceTermination of
- force -> stop;
- normal -> NewState
- end;
-handle_other({'$gen_call', From, info}, State) ->
- gen_server:reply(From, infos(?INFO_KEYS, State)),
- State;
-handle_other({'$gen_call', From, {info, Items}}, State) ->
- gen_server:reply(From, try {ok, infos(Items, State)}
- catch Error -> {error, Error}
- end),
- State;
-handle_other({'$gen_cast', force_event_refresh}, State)
- when ?IS_RUNNING(State) ->
- rabbit_event:notify(connection_created,
- [{type, network} | infos(?CREATION_EVENT_KEYS, State)]),
- State;
-handle_other({'$gen_cast', force_event_refresh}, State) ->
- %% Ignore, we will emit a created event once we start running.
- State;
-handle_other(ensure_stats, State) ->
- ensure_stats_timer(State);
-handle_other(emit_stats, State) ->
- emit_stats(State);
-handle_other({bump_credit, Msg}, State) ->
- credit_flow:handle_bump_msg(Msg),
- control_throttle(State);
-handle_other(Other, State) ->
- %% internal error -> something worth dying for
- maybe_emit_stats(State),
- exit({unexpected_message, Other}).
-
-switch_callback(State, Callback, Length) ->
- State#v1{callback = Callback, recv_len = Length}.
-
-terminate(Explanation, State) when ?IS_RUNNING(State) ->
- {normal, handle_exception(State, 0,
- rabbit_misc:amqp_error(
- connection_forced, Explanation, [], none))};
-terminate(_Explanation, State) ->
- {force, State}.
-
-control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) ->
- case {CS, (Throttle#throttle.conserve_resources orelse
- credit_flow:blocked())} of
- {running, true} -> State#v1{connection_state = blocking};
- {blocking, false} -> State#v1{connection_state = running};
- {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
- State#v1.heartbeater),
- State#v1{connection_state = running};
- {blocked, true} -> State#v1{throttle = update_last_blocked_by(
- Throttle)};
- {_, _} -> State
- end.
-
-maybe_block(State = #v1{connection_state = blocking, throttle = Throttle}) ->
- ok = rabbit_heartbeat:pause_monitor(State#v1.heartbeater),
- State#v1{connection_state = blocked,
- throttle = update_last_blocked_by(
- Throttle#throttle{last_blocked_at = erlang:now()})};
-maybe_block(State) ->
- State.
-
-update_last_blocked_by(Throttle = #throttle{conserve_resources = true}) ->
- Throttle#throttle{last_blocked_by = resource};
-update_last_blocked_by(Throttle = #throttle{conserve_resources = false}) ->
- Throttle#throttle{last_blocked_by = flow}.
-
-%%--------------------------------------------------------------------------
-%% error handling / termination
-
-close_connection(State = #v1{queue_collector = Collector,
- connection = #connection{
- timeout_sec = TimeoutSec}}) ->
- %% The spec says "Exclusive queues may only be accessed by the
- %% current connection, and are deleted when that connection
- %% closes." This does not strictly imply synchrony, but in
- %% practice it seems to be what people assume.
- rabbit_queue_collector:delete_all(Collector),
- %% We terminate the connection after the specified interval, but
- %% no later than ?CLOSING_TIMEOUT seconds.
- erlang:send_after((if TimeoutSec > 0 andalso
- TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
- true -> ?CLOSING_TIMEOUT
- end) * 1000, self(), terminate_connection),
- State#v1{connection_state = closed}.
-
-handle_dependent_exit(ChPid, Reason, State) ->
- case {channel_cleanup(ChPid), termination_kind(Reason)} of
- {undefined, controlled} -> State;
- {undefined, uncontrolled} -> exit({abnormal_dependent_exit,
- ChPid, Reason});
- {_Channel, controlled} -> maybe_close(control_throttle(State));
- {Channel, uncontrolled} -> State1 = handle_exception(
- State, Channel, Reason),
- maybe_close(control_throttle(State1))
- end.
-
-terminate_channels() ->
- NChannels =
- length([rabbit_channel:shutdown(ChPid) || ChPid <- all_channels()]),
- if NChannels > 0 ->
- Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * NChannels,
- TimerRef = erlang:send_after(Timeout, self(), cancel_wait),
- wait_for_channel_termination(NChannels, TimerRef);
- true -> ok
- end.
-
-wait_for_channel_termination(0, TimerRef) ->
- case erlang:cancel_timer(TimerRef) of
- false -> receive
- cancel_wait -> ok
- end;
- _ -> ok
- end;
-
-wait_for_channel_termination(N, TimerRef) ->
- receive
- {'DOWN', _MRef, process, ChPid, Reason} ->
- case {channel_cleanup(ChPid), termination_kind(Reason)} of
- {undefined, _} ->
- exit({abnormal_dependent_exit, ChPid, Reason});
- {_Channel, controlled} ->
- wait_for_channel_termination(N-1, TimerRef);
- {Channel, uncontrolled} ->
- log(error,
- "AMQP connection ~p, channel ~p - "
- "error while terminating:~n~p~n",
- [self(), Channel, Reason]),
- wait_for_channel_termination(N-1, TimerRef)
- end;
- cancel_wait ->
- exit(channel_termination_timeout)
- end.
-
-maybe_close(State = #v1{connection_state = closing,
- connection = #connection{protocol = Protocol},
- sock = Sock}) ->
- case all_channels() of
- [] ->
- NewState = close_connection(State),
- ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
- NewState;
- _ -> State
- end;
-maybe_close(State) ->
- State.
-
-termination_kind(normal) -> controlled;
-termination_kind(_) -> uncontrolled.
-
-handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
- log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
- [self(), closed, Channel, Reason]),
- State;
-handle_exception(State = #v1{connection = #connection{protocol = Protocol},
- connection_state = CS},
- Channel, Reason)
- when ?IS_RUNNING(State) orelse CS =:= closing ->
- log(error, "AMQP connection ~p (~p), channel ~p - error:~n~p~n",
- [self(), CS, Channel, Reason]),
- {0, CloseMethod} =
- rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
- terminate_channels(),
- State1 = close_connection(State),
- ok = send_on_channel0(State1#v1.sock, CloseMethod, Protocol),
- State1;
-handle_exception(State, Channel, Reason) ->
- %% We don't trust the client at this point - force them to wait
- %% for a bit so they can't DOS us with repeated failed logins etc.
- timer:sleep(?SILENT_CLOSE_DELAY * 1000),
- throw({handshake_error, State#v1.connection_state, Channel, Reason}).
-
-%% we've "lost sync" with the client and hence must not accept any
-%% more input
-fatal_frame_error(Error, Type, Channel, Payload, State) ->
- frame_error(Error, Type, Channel, Payload, State),
- %% grace period to allow transmission of error
- timer:sleep(?SILENT_CLOSE_DELAY * 1000),
- throw(fatal_frame_error).
-
-frame_error(Error, Type, Channel, Payload, State) ->
- {Str, Bin} = payload_snippet(Payload),
- handle_exception(State, Channel,
- rabbit_misc:amqp_error(frame_error,
- "type ~p, ~s octets = ~p: ~p",
- [Type, Str, Bin, Error], none)).
-
-unexpected_frame(Type, Channel, Payload, State) ->
- {Str, Bin} = payload_snippet(Payload),
- handle_exception(State, Channel,
- rabbit_misc:amqp_error(unexpected_frame,
- "type ~p, ~s octets = ~p",
- [Type, Str, Bin], none)).
-
-payload_snippet(Payload) when size(Payload) =< 16 ->
- {"all", Payload};
-payload_snippet(<<Snippet:16/binary, _/binary>>) ->
- {"first 16", Snippet}.
-
-%%--------------------------------------------------------------------------
-
-create_channel(Channel, State) ->
- #v1{sock = Sock, queue_collector = Collector,
- channel_sup_sup_pid = ChanSupSup,
- connection = #connection{name = Name,
- protocol = Protocol,
- frame_max = FrameMax,
- user = User,
- vhost = VHost,
- capabilities = Capabilities}} = State,
- {ok, _ChSupPid, {ChPid, AState}} =
- rabbit_channel_sup_sup:start_channel(
- ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), Name,
- Protocol, User, VHost, Capabilities, Collector}),
- MRef = erlang:monitor(process, ChPid),
- put({ch_pid, ChPid}, {Channel, MRef}),
- put({channel, Channel}, {ChPid, AState}),
- {ChPid, AState}.
-
-channel_cleanup(ChPid) ->
- case get({ch_pid, ChPid}) of
- undefined -> undefined;
- {Channel, MRef} -> credit_flow:peer_down(ChPid),
- erase({channel, Channel}),
- erase({ch_pid, ChPid}),
- erlang:demonitor(MRef, [flush]),
- Channel
- end.
-
-all_channels() -> [ChPid || {{ch_pid, ChPid}, _ChannelMRef} <- get()].
-
-%%--------------------------------------------------------------------------
-
-handle_frame(Type, 0, Payload,
- State = #v1{connection = #connection{protocol = Protocol}})
- when ?IS_STOPPING(State) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- {method, MethodName, FieldsBin} ->
- handle_method0(MethodName, FieldsBin, State);
- _Other -> State
- end;
-handle_frame(Type, 0, Payload,
- State = #v1{connection = #connection{protocol = Protocol}}) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- error -> frame_error(unknown_frame, Type, 0, Payload, State);
- heartbeat -> State;
- {method, MethodName, FieldsBin} ->
- handle_method0(MethodName, FieldsBin, State);
- _Other -> unexpected_frame(Type, 0, Payload, State)
- end;
-handle_frame(Type, Channel, Payload,
- State = #v1{connection = #connection{protocol = Protocol}})
- when ?IS_RUNNING(State) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- error -> frame_error(unknown_frame, Type, Channel, Payload, State);
- heartbeat -> unexpected_frame(Type, Channel, Payload, State);
- Frame -> process_frame(Frame, Channel, State)
- end;
-handle_frame(_Type, _Channel, _Payload, State) when ?IS_STOPPING(State) ->
- State;
-handle_frame(Type, Channel, Payload, State) ->
- unexpected_frame(Type, Channel, Payload, State).
-
-process_frame(Frame, Channel, State) ->
- ChKey = {channel, Channel},
- {ChPid, AState} = case get(ChKey) of
- undefined -> create_channel(Channel, State);
- Other -> Other
- end,
- case rabbit_command_assembler:process(Frame, AState) of
- {ok, NewAState} ->
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, State);
- {ok, Method, NewAState} ->
- rabbit_channel:do(ChPid, Method),
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, State);
- {ok, Method, Content, NewAState} ->
- rabbit_channel:do_flow(ChPid, Method, Content),
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, control_throttle(State));
- {error, Reason} ->
- handle_exception(State, Channel, Reason)
- end.
-
-post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
- channel_cleanup(ChPid),
- %% This is not strictly necessary, but more obviously
- %% correct. Also note that we do not need to call maybe_close/1
- %% since we cannot possibly be in the 'closing' state.
- control_throttle(State);
-post_process_frame({content_header, _, _, _, _}, _ChPid, State) ->
- maybe_block(State);
-post_process_frame({content_body, _}, _ChPid, State) ->
- maybe_block(State);
-post_process_frame(_Frame, _ChPid, State) ->
- State.
-
-%%--------------------------------------------------------------------------
-
-%% We allow clients to exceed the frame size a little bit since quite
-%% a few get it wrong - off-by 1 or 8 (empty frame size) are typical.
--define(FRAME_SIZE_FUDGE, ?EMPTY_FRAME_SIZE).
-
-handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32>>,
- State = #v1{connection = #connection{frame_max = FrameMax}})
- when FrameMax /= 0 andalso
- PayloadSize > FrameMax - ?EMPTY_FRAME_SIZE + ?FRAME_SIZE_FUDGE ->
- fatal_frame_error(
- {frame_too_large, PayloadSize, FrameMax - ?EMPTY_FRAME_SIZE},
- Type, Channel, <<>>, State);
-handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32>>, State) ->
- ensure_stats_timer(
- switch_callback(State, {frame_payload, Type, Channel, PayloadSize},
- PayloadSize + 1));
-
-handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) ->
- <<Payload:PayloadSize/binary, EndMarker>> = Data,
- case EndMarker of
- ?FRAME_END -> State1 = handle_frame(Type, Channel, Payload, State),
- switch_callback(State1, frame_header, 7);
- _ -> fatal_frame_error({invalid_frame_end_marker, EndMarker},
- Type, Channel, Payload, State)
- end;
-
-%% The two rules pertaining to version negotiation:
-%%
-%% * If the server cannot support the protocol specified in the
-%% protocol header, it MUST respond with a valid protocol header and
-%% then close the socket connection.
-%%
-%% * The server MUST provide a protocol version that is lower than or
-%% equal to that requested by the client in the protocol header.
-handle_input(handshake, <<"AMQP", 0, 0, 9, 1>>, State) ->
- start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
-
-%% This is the protocol header for 0-9, which we can safely treat as
-%% though it were 0-9-1.
-handle_input(handshake, <<"AMQP", 1, 1, 0, 9>>, State) ->
- start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
-
-%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
-%% defines the version as 8-0.
-handle_input(handshake, <<"AMQP", 1, 1, 8, 0>>, State) ->
- start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
-
-%% The 0-8 spec as on the AMQP web site actually has this as the
-%% protocol header; some libraries e.g., py-amqplib, send it when they
-%% want 0-8.
-handle_input(handshake, <<"AMQP", 1, 1, 9, 1>>, State) ->
- start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
-
-%% ... and finally, the 1.0 spec is crystal clear! Note that the
-handle_input(handshake, <<"AMQP", Id, 1, 0, 0>>, State) ->
- become_1_0(Id, State);
-
-handle_input(handshake, <<"AMQP", A, B, C, D>>, #v1{sock = Sock}) ->
- refuse_connection(Sock, {bad_version, {A, B, C, D}});
-
-handle_input(handshake, Other, #v1{sock = Sock}) ->
- refuse_connection(Sock, {bad_header, Other});
-
-handle_input(Callback, Data, _State) ->
- throw({bad_input, Callback, Data}).
-
-%% Offer a protocol version to the client. Connection.start only
-%% includes a major and minor version number, Luckily 0-9 and 0-9-1
-%% are similar enough that clients will be happy with either.
-start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision},
- Protocol,
- State = #v1{sock = Sock, connection = Connection}) ->
- rabbit_networking:register_connection(self()),
- Start = #'connection.start'{
- version_major = ProtocolMajor,
- version_minor = ProtocolMinor,
- server_properties = server_properties(Protocol),
- mechanisms = auth_mechanisms_binary(Sock),
- locales = <<"en_US">> },
- ok = send_on_channel0(Sock, Start, Protocol),
- switch_callback(State#v1{connection = Connection#connection{
- timeout_sec = ?NORMAL_TIMEOUT,
- protocol = Protocol},
- connection_state = starting},
- frame_header, 7).
-
-refuse_connection(Sock, Exception, {A, B, C, D}) ->
- ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
- throw(Exception).
-
--ifdef(use_specs).
--spec(refuse_connection/2 :: (rabbit_net:socket(), any()) -> no_return()).
--endif.
-refuse_connection(Sock, Exception) ->
- refuse_connection(Sock, Exception, {0, 0, 9, 1}).
-
-ensure_stats_timer(State = #v1{connection_state = running}) ->
- rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats);
-ensure_stats_timer(State) ->
- State.
-
-%%--------------------------------------------------------------------------
-
-handle_method0(MethodName, FieldsBin,
- State = #v1{connection = #connection{protocol = Protocol}}) ->
- try
- handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
- State)
- catch exit:#amqp_error{method = none} = Reason ->
- handle_exception(State, 0, Reason#amqp_error{method = MethodName});
- Type:Reason ->
- Stack = erlang:get_stacktrace(),
- handle_exception(State, 0, {Type, Reason, MethodName, Stack})
- end.
-
-handle_method0(#'connection.start_ok'{mechanism = Mechanism,
- response = Response,
- client_properties = ClientProperties},
- State0 = #v1{connection_state = starting,
- connection = Connection,
- sock = Sock}) ->
- AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
- Capabilities =
- case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of
- {table, Capabilities1} -> Capabilities1;
- _ -> []
- end,
- State = State0#v1{connection_state = securing,
- connection =
- Connection#connection{
- client_properties = ClientProperties,
- capabilities = Capabilities,
- auth_mechanism = {Mechanism, AuthMechanism},
- auth_state = AuthMechanism:init(Sock)}},
- auth_phase(Response, State);
-
-handle_method0(#'connection.secure_ok'{response = Response},
- State = #v1{connection_state = securing}) ->
- auth_phase(Response, State);
-
-handle_method0(#'connection.tune_ok'{frame_max = FrameMax,
- heartbeat = ClientHeartbeat},
- State = #v1{connection_state = tuning,
- connection = Connection,
- sock = Sock,
- start_heartbeat_fun = SHF}) ->
- ServerFrameMax = server_frame_max(),
- if FrameMax /= 0 andalso FrameMax < ?FRAME_MIN_SIZE ->
- rabbit_misc:protocol_error(
- not_allowed, "frame_max=~w < ~w min size",
- [FrameMax, ?FRAME_MIN_SIZE]);
- ServerFrameMax /= 0 andalso FrameMax > ServerFrameMax ->
- rabbit_misc:protocol_error(
- not_allowed, "frame_max=~w > ~w max size",
- [FrameMax, ServerFrameMax]);
- true ->
- Frame = rabbit_binary_generator:build_heartbeat_frame(),
- SendFun = fun() -> catch rabbit_net:send(Sock, Frame) end,
- Parent = self(),
- ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
- Heartbeater = SHF(Sock, ClientHeartbeat, SendFun,
- ClientHeartbeat, ReceiveFun),
- State#v1{connection_state = opening,
- connection = Connection#connection{
- timeout_sec = ClientHeartbeat,
- frame_max = FrameMax},
- heartbeater = Heartbeater}
- end;
-
-handle_method0(#'connection.open'{virtual_host = VHostPath},
- State = #v1{connection_state = opening,
- connection = Connection = #connection{
- user = User,
- protocol = Protocol},
- ch_sup3_pid = ChSup3Pid,
- sock = Sock,
- throttle = Throttle}) ->
- ok = rabbit_access_control:check_vhost_access(User, VHostPath),
- NewConnection = Connection#connection{vhost = VHostPath},
- ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
- Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
- Throttle1 = Throttle#throttle{conserve_resources = Conserve},
- {ok, ChannelSupSupPid} =
- supervisor2:start_child(
- ChSup3Pid,
- {channel_sup_sup, {rabbit_channel_sup_sup, start_link, []},
- intrinsic, infinity, supervisor, [rabbit_channel_sup_sup]}),
- State1 = control_throttle(
- State#v1{connection_state = running,
- connection = NewConnection,
- channel_sup_sup_pid = ChannelSupSupPid,
- throttle = Throttle1}),
- rabbit_event:notify(connection_created,
- [{type, network} |
- infos(?CREATION_EVENT_KEYS, State1)]),
- maybe_emit_stats(State1),
- State1;
-handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
- lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
- maybe_close(State#v1{connection_state = closing});
-handle_method0(#'connection.close'{},
- State = #v1{connection = #connection{protocol = Protocol},
- sock = Sock})
- when ?IS_STOPPING(State) ->
- %% We're already closed or closing, so we don't need to cleanup
- %% anything.
- ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
- State;
-handle_method0(#'connection.close_ok'{},
- State = #v1{connection_state = closed}) ->
- self() ! terminate_connection,
- State;
-handle_method0(_Method, State) when ?IS_STOPPING(State) ->
- State;
-handle_method0(_Method, #v1{connection_state = S}) ->
- rabbit_misc:protocol_error(
- channel_error, "unexpected method in connection state ~w", [S]).
-
-server_frame_max() ->
- {ok, FrameMax} = application:get_env(rabbit, frame_max),
- FrameMax.
-
-server_heartbeat() ->
- {ok, Heartbeat} = application:get_env(rabbit, heartbeat),
- Heartbeat.
-
-send_on_channel0(Sock, Method, Protocol) ->
- ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol).
-
-auth_mechanism_to_module(TypeBin, Sock) ->
- case rabbit_registry:binary_to_type(TypeBin) of
- {error, not_found} ->
- rabbit_misc:protocol_error(
- command_invalid, "unknown authentication mechanism '~s'",
- [TypeBin]);
- T ->
- case {lists:member(T, auth_mechanisms(Sock)),
- rabbit_registry:lookup_module(auth_mechanism, T)} of
- {true, {ok, Module}} ->
- Module;
- _ ->
- rabbit_misc:protocol_error(
- command_invalid,
- "invalid authentication mechanism '~s'", [T])
- end
- end.
-
-auth_mechanisms(Sock) ->
- {ok, Configured} = application:get_env(auth_mechanisms),
- [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
- Module:should_offer(Sock), lists:member(Name, Configured)].
-
-auth_mechanisms_binary(Sock) ->
- list_to_binary(
- string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")).
-
-auth_phase(Response,
- State = #v1{connection = Connection =
- #connection{protocol = Protocol,
- auth_mechanism = {Name, AuthMechanism},
- auth_state = AuthState},
- sock = Sock}) ->
- case AuthMechanism:handle_response(Response, AuthState) of
- {refused, Msg, Args} ->
- rabbit_misc:protocol_error(
- access_refused, "~s login refused: ~s",
- [Name, io_lib:format(Msg, Args)]);
- {protocol_error, Msg, Args} ->
- rabbit_misc:protocol_error(syntax_error, Msg, Args);
- {challenge, Challenge, AuthState1} ->
- Secure = #'connection.secure'{challenge = Challenge},
- ok = send_on_channel0(Sock, Secure, Protocol),
- State#v1{connection = Connection#connection{
- auth_state = AuthState1}};
- {ok, User} ->
- Tune = #'connection.tune'{channel_max = 0,
- frame_max = server_frame_max(),
- heartbeat = server_heartbeat()},
- ok = send_on_channel0(Sock, Tune, Protocol),
- State#v1{connection_state = tuning,
- connection = Connection#connection{user = User,
- auth_state = none}}
- end.
-
-%%--------------------------------------------------------------------------
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(pid, #v1{}) -> self();
-i(SockStat, S) when SockStat =:= recv_oct;
- SockStat =:= recv_cnt;
- SockStat =:= send_oct;
- SockStat =:= send_cnt;
- SockStat =:= send_pend ->
- socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
- fun ([{_, I}]) -> I end, S);
-i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
-i(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
-i(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
-i(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
-i(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
-i(peer_cert_issuer, S) -> cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
-i(peer_cert_subject, S) -> cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
-i(peer_cert_validity, S) -> cert_info(fun rabbit_ssl:peer_cert_validity/1, S);
-i(state, #v1{connection_state = CS}) -> CS;
-i(last_blocked_by, #v1{throttle = #throttle{last_blocked_by = By}}) -> By;
-i(last_blocked_age, #v1{throttle = #throttle{last_blocked_at = never}}) ->
- infinity;
-i(last_blocked_age, #v1{throttle = #throttle{last_blocked_at = T}}) ->
- timer:now_diff(erlang:now(), T) / 1000000;
-i(channels, #v1{}) -> length(all_channels());
-i(Item, #v1{connection = Conn}) -> ic(Item, Conn).
-
-ic(name, #connection{name = Name}) -> Name;
-ic(host, #connection{host = Host}) -> Host;
-ic(peer_host, #connection{peer_host = PeerHost}) -> PeerHost;
-ic(port, #connection{port = Port}) -> Port;
-ic(peer_port, #connection{peer_port = PeerPort}) -> PeerPort;
-ic(protocol, #connection{protocol = none}) -> none;
-ic(protocol, #connection{protocol = P}) -> P:version();
-ic(user, #connection{user = none}) -> '';
-ic(user, #connection{user = U}) -> U#user.username;
-ic(vhost, #connection{vhost = VHost}) -> VHost;
-ic(timeout, #connection{timeout_sec = Timeout}) -> Timeout;
-ic(frame_max, #connection{frame_max = FrameMax}) -> FrameMax;
-ic(client_properties, #connection{client_properties = CP}) -> CP;
-ic(auth_mechanism, #connection{auth_mechanism = none}) -> none;
-ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name;
-ic(Item, #connection{}) -> throw({bad_argument, Item}).
-
-socket_info(Get, Select, #v1{sock = Sock}) ->
- case Get(Sock) of
- {ok, T} -> Select(T);
- {error, _} -> ''
- end.
-
-ssl_info(F, #v1{sock = Sock}) ->
- %% The first ok form is R14
- %% The second is R13 - the extra term is exportability (by inspection,
- %% the docs are wrong)
- case rabbit_net:ssl_info(Sock) of
- nossl -> '';
- {error, _} -> '';
- {ok, {P, {K, C, H}}} -> F({P, {K, C, H}});
- {ok, {P, {K, C, H, _}}} -> F({P, {K, C, H}})
- end.
-
-cert_info(F, #v1{sock = Sock}) ->
- case rabbit_net:peercert(Sock) of
- nossl -> '';
- {error, no_peercert} -> '';
- {ok, Cert} -> list_to_binary(F(Cert))
- end.
-
-maybe_emit_stats(State) ->
- rabbit_event:if_enabled(State, #v1.stats_timer,
- fun() -> emit_stats(State) end).
-
-emit_stats(State) ->
- rabbit_event:notify(connection_stats, infos(?STATISTICS_KEYS, State)),
- rabbit_event:reset_stats_timer(State, #v1.stats_timer).
-
-%% 1.0 stub
--ifdef(use_specs).
--spec(become_1_0/2 :: (non_neg_integer(), #v1{}) -> no_return()).
--endif.
-become_1_0(Id, State = #v1{sock = Sock}) ->
- case code:is_loaded(rabbit_amqp1_0_reader) of
- false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
- _ -> Mode = case Id of
- 0 -> amqp;
- 3 -> sasl;
- _ -> refuse_connection(
- Sock, {unsupported_amqp1_0_protocol_id, Id},
- {3, 1, 0, 0})
- end,
- throw({become, {rabbit_amqp1_0_reader, init,
- [Mode, pack_for_1_0(State)]}})
- end.
-
-pack_for_1_0(#v1{parent = Parent,
- sock = Sock,
- recv_len = RecvLen,
- pending_recv = PendingRecv,
- queue_collector = QueueCollector,
- ch_sup3_pid = ChSup3Pid,
- start_heartbeat_fun = SHF,
- buf = Buf,
- buf_len = BufLen}) ->
- {Parent, Sock, RecvLen, PendingRecv, QueueCollector, ChSup3Pid, SHF,
- Buf, BufLen}.
diff --git a/src/rabbit_registry.erl b/src/rabbit_registry.erl
deleted file mode 100644
index f933e4e9..00000000
--- a/src/rabbit_registry.erl
+++ /dev/null
@@ -1,163 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_registry).
-
--behaviour(gen_server).
-
--export([start_link/0]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
--export([register/3, unregister/2,
- binary_to_type/1, lookup_module/2, lookup_all/1]).
-
--define(SERVER, ?MODULE).
--define(ETS_NAME, ?MODULE).
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(register/3 :: (atom(), binary(), atom()) -> 'ok').
--spec(unregister/2 :: (atom(), binary()) -> 'ok').
--spec(binary_to_type/1 ::
- (binary()) -> atom() | rabbit_types:error('not_found')).
--spec(lookup_module/2 ::
- (atom(), atom()) -> rabbit_types:ok_or_error2(atom(), 'not_found')).
--spec(lookup_all/1 :: (atom()) -> [{atom(), atom()}]).
-
--endif.
-
-%%---------------------------------------------------------------------------
-
-start_link() ->
- gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
-
-%%---------------------------------------------------------------------------
-
-register(Class, TypeName, ModuleName) ->
- gen_server:call(?SERVER, {register, Class, TypeName, ModuleName}, infinity).
-
-unregister(Class, TypeName) ->
- gen_server:call(?SERVER, {unregister, Class, TypeName}, infinity).
-
-%% This is used with user-supplied arguments (e.g., on exchange
-%% declare), so we restrict it to existing atoms only. This means it
-%% can throw a badarg, indicating that the type cannot have been
-%% registered.
-binary_to_type(TypeBin) when is_binary(TypeBin) ->
- case catch list_to_existing_atom(binary_to_list(TypeBin)) of
- {'EXIT', {badarg, _}} -> {error, not_found};
- TypeAtom -> TypeAtom
- end.
-
-lookup_module(Class, T) when is_atom(T) ->
- case ets:lookup(?ETS_NAME, {Class, T}) of
- [{_, Module}] ->
- {ok, Module};
- [] ->
- {error, not_found}
- end.
-
-lookup_all(Class) ->
- [{K, V} || [K, V] <- ets:match(?ETS_NAME, {{Class, '$1'}, '$2'})].
-
-%%---------------------------------------------------------------------------
-
-internal_binary_to_type(TypeBin) when is_binary(TypeBin) ->
- list_to_atom(binary_to_list(TypeBin)).
-
-internal_register(Class, TypeName, ModuleName)
- when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) ->
- ok = sanity_check_module(class_module(Class), ModuleName),
- RegArg = {{Class, internal_binary_to_type(TypeName)}, ModuleName},
- true = ets:insert(?ETS_NAME, RegArg),
- conditional_register(RegArg),
- ok.
-
-internal_unregister(Class, TypeName) ->
- UnregArg = {Class, internal_binary_to_type(TypeName)},
- conditional_unregister(UnregArg),
- true = ets:delete(?ETS_NAME, UnregArg),
- ok.
-
-%% register exchange decorator route callback only when implemented,
-%% in order to avoid unnecessary decorator calls on the fast
-%% publishing path
-conditional_register({{exchange_decorator, Type}, ModuleName}) ->
- case erlang:function_exported(ModuleName, route, 2) of
- true -> true = ets:insert(?ETS_NAME,
- {{exchange_decorator_route, Type},
- ModuleName});
- false -> ok
- end;
-conditional_register(_) ->
- ok.
-
-conditional_unregister({exchange_decorator, Type}) ->
- true = ets:delete(?ETS_NAME, {exchange_decorator_route, Type}),
- ok;
-conditional_unregister(_) ->
- ok.
-
-sanity_check_module(ClassModule, Module) ->
- case catch lists:member(ClassModule,
- lists:flatten(
- [Bs || {Attr, Bs} <-
- Module:module_info(attributes),
- Attr =:= behavior orelse
- Attr =:= behaviour])) of
- {'EXIT', {undef, _}} -> {error, not_module};
- false -> {error, {not_type, ClassModule}};
- true -> ok
- end.
-
-class_module(exchange) -> rabbit_exchange_type;
-class_module(auth_mechanism) -> rabbit_auth_mechanism;
-class_module(runtime_parameter) -> rabbit_runtime_parameter;
-class_module(exchange_decorator) -> rabbit_exchange_decorator;
-class_module(policy_validator) -> rabbit_policy_validator;
-class_module(ha_mode) -> rabbit_mirror_queue_mode.
-
-%%---------------------------------------------------------------------------
-
-init([]) ->
- ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]),
- {ok, none}.
-
-handle_call({register, Class, TypeName, ModuleName}, _From, State) ->
- ok = internal_register(Class, TypeName, ModuleName),
- {reply, ok, State};
-
-handle_call({unregister, Class, TypeName}, _From, State) ->
- ok = internal_unregister(Class, TypeName),
- {reply, ok, State};
-
-handle_call(Request, _From, State) ->
- {stop, {unhandled_call, Request}, State}.
-
-handle_cast(Request, State) ->
- {stop, {unhandled_cast, Request}, State}.
-
-handle_info(Message, State) ->
- {stop, {unhandled_info, Message}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/rabbit_restartable_sup.erl b/src/rabbit_restartable_sup.erl
deleted file mode 100644
index 65a2ca0a..00000000
--- a/src/rabbit_restartable_sup.erl
+++ /dev/null
@@ -1,43 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_restartable_sup).
-
--behaviour(supervisor).
-
--export([start_link/2]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/2 :: (atom(), rabbit_types:mfargs()) ->
- rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Name, {_M, _F, _A} = Fun) ->
- supervisor:start_link({local, Name}, ?MODULE, [Fun]).
-
-init([{Mod, _F, _A} = Fun]) ->
- {ok, {{one_for_one, 10, 10},
- [{Mod, Fun, transient, ?MAX_WAIT, worker, [Mod]}]}}.
diff --git a/src/rabbit_router.erl b/src/rabbit_router.erl
deleted file mode 100644
index 00343570..00000000
--- a/src/rabbit_router.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_router).
--include_lib("stdlib/include/qlc.hrl").
--include("rabbit.hrl").
-
--export([match_bindings/2, match_routing_key/2]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([routing_key/0, match_result/0]).
-
--type(routing_key() :: binary()).
--type(match_result() :: [rabbit_types:binding_destination()]).
-
--spec(match_bindings/2 :: (rabbit_types:binding_source(),
- fun ((rabbit_types:binding()) -> boolean())) ->
- match_result()).
--spec(match_routing_key/2 :: (rabbit_types:binding_source(),
- [routing_key()] | ['_']) ->
- match_result()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% TODO: Maybe this should be handled by a cursor instead.
-%% TODO: This causes a full scan for each entry with the same source
-match_bindings(SrcName, Match) ->
- Query = qlc:q([DestinationName ||
- #route{binding = Binding = #binding{
- source = SrcName1,
- destination = DestinationName}} <-
- mnesia:table(rabbit_route),
- SrcName == SrcName1,
- Match(Binding)]),
- mnesia:async_dirty(fun qlc:e/1, [Query]).
-
-match_routing_key(SrcName, [RoutingKey]) ->
- find_routes(#route{binding = #binding{source = SrcName,
- destination = '$1',
- key = RoutingKey,
- _ = '_'}},
- []);
-match_routing_key(SrcName, [_|_] = RoutingKeys) ->
- find_routes(#route{binding = #binding{source = SrcName,
- destination = '$1',
- key = '$2',
- _ = '_'}},
- [list_to_tuple(['orelse' | [{'=:=', '$2', RKey} ||
- RKey <- RoutingKeys]])]).
-
-%%--------------------------------------------------------------------
-
-%% Normally we'd call mnesia:dirty_select/2 here, but that is quite
-%% expensive for the same reasons as above, and, additionally, due to
-%% mnesia 'fixing' the table with ets:safe_fixtable/2, which is wholly
-%% unnecessary. According to the ets docs (and the code in erl_db.c),
-%% 'select' is safe anyway ("Functions that internally traverse over a
-%% table, like select and match, will give the same guarantee as
-%% safe_fixtable.") and, furthermore, even the lower level iterators
-%% ('first' and 'next') are safe on ordered_set tables ("Note that for
-%% tables of the ordered_set type, safe_fixtable/2 is not necessary as
-%% calls to first/1 and next/2 will always succeed."), which
-%% rabbit_route is.
-find_routes(MatchHead, Conditions) ->
- ets:select(rabbit_route, [{MatchHead, Conditions, ['$1']}]).
diff --git a/src/rabbit_runtime_parameter.erl b/src/rabbit_runtime_parameter.erl
deleted file mode 100644
index ee48165b..00000000
--- a/src/rabbit_runtime_parameter.erl
+++ /dev/null
@@ -1,42 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_runtime_parameter).
-
--ifdef(use_specs).
-
--type(validate_results() ::
- 'ok' | {error, string(), [term()]} | [validate_results()]).
-
--callback validate(rabbit_types:vhost(), binary(), binary(),
- term()) -> validate_results().
--callback notify(rabbit_types:vhost(), binary(), binary(), term()) -> 'ok'.
--callback notify_clear(rabbit_types:vhost(), binary(), binary()) -> 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [
- {validate, 4},
- {notify, 4},
- {notify_clear, 3}
- ];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_runtime_parameters.erl b/src/rabbit_runtime_parameters.erl
deleted file mode 100644
index c13c333e..00000000
--- a/src/rabbit_runtime_parameters.erl
+++ /dev/null
@@ -1,221 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_runtime_parameters).
-
--include("rabbit.hrl").
-
--export([parse_set/4, set/4, set_any/4, clear/3, clear_any/3, list/0, list/1,
- list_component/1, list/2, list_formatted/1, lookup/3,
- value/3, value/4, info_keys/0]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(ok_or_error_string() :: 'ok' | {'error_string', string()}).
-
--spec(parse_set/4 :: (rabbit_types:vhost(), binary(), binary(), string())
- -> ok_or_error_string()).
--spec(set/4 :: (rabbit_types:vhost(), binary(), binary(), term())
- -> ok_or_error_string()).
--spec(set_any/4 :: (rabbit_types:vhost(), binary(), binary(), term())
- -> ok_or_error_string()).
--spec(clear/3 :: (rabbit_types:vhost(), binary(), binary())
- -> ok_or_error_string()).
--spec(clear_any/3 :: (rabbit_types:vhost(), binary(), binary())
- -> ok_or_error_string()).
--spec(list/0 :: () -> [rabbit_types:infos()]).
--spec(list/1 :: (rabbit_types:vhost() | '_') -> [rabbit_types:infos()]).
--spec(list_component/1 :: (binary()) -> [rabbit_types:infos()]).
--spec(list/2 :: (rabbit_types:vhost() | '_', binary() | '_')
- -> [rabbit_types:infos()]).
--spec(list_formatted/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(lookup/3 :: (rabbit_types:vhost(), binary(), binary())
- -> rabbit_types:infos() | 'not_found').
--spec(value/3 :: (rabbit_types:vhost(), binary(), binary()) -> term()).
--spec(value/4 :: (rabbit_types:vhost(), binary(), binary(), term()) -> term()).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-
--endif.
-
-%%---------------------------------------------------------------------------
-
--import(rabbit_misc, [pget/2, pset/3]).
-
--define(TABLE, rabbit_runtime_parameters).
-
-%%---------------------------------------------------------------------------
-
-parse_set(_, <<"policy">>, _, _) ->
- {error_string, "policies may not be set using this method"};
-parse_set(VHost, Component, Name, String) ->
- case rabbit_misc:json_decode(String) of
- {ok, JSON} -> set(VHost, Component, Name,
- rabbit_misc:json_to_term(JSON));
- error -> {error_string, "JSON decoding error"}
- end.
-
-set(_, <<"policy">>, _, _) ->
- {error_string, "policies may not be set using this method"};
-set(VHost, Component, Name, Term) ->
- set_any(VHost, Component, Name, Term).
-
-format_error(L) ->
- {error_string, rabbit_misc:format_many([{"Validation failed~n", []} | L])}.
-
-set_any(VHost, Component, Name, Term) ->
- case set_any0(VHost, Component, Name, Term) of
- ok -> ok;
- {errors, L} -> format_error(L)
- end.
-
-set_any0(VHost, Component, Name, Term) ->
- case lookup_component(Component) of
- {ok, Mod} ->
- case flatten_errors(Mod:validate(VHost, Component, Name, Term)) of
- ok ->
- case mnesia_update(VHost, Component, Name, Term) of
- {old, Term} -> ok;
- _ -> Mod:notify(VHost, Component, Name, Term)
- end,
- ok;
- E ->
- E
- end;
- E ->
- E
- end.
-
-mnesia_update(VHost, Comp, Name, Term) ->
- F = fun () ->
- Res = case mnesia:read(?TABLE, {VHost, Comp, Name}, read) of
- [] -> new;
- [Params] -> {old, Params#runtime_parameters.value}
- end,
- ok = mnesia:write(?TABLE, c(VHost, Comp, Name, Term), write),
- Res
- end,
- rabbit_misc:execute_mnesia_transaction(rabbit_vhost:with(VHost, F)).
-
-clear(_, <<"policy">> , _) ->
- {error_string, "policies may not be cleared using this method"};
-clear(VHost, Component, Name) ->
- clear_any(VHost, Component, Name).
-
-clear_any(VHost, Component, Name) ->
- case lookup(VHost, Component, Name) of
- not_found -> {error_string, "Parameter does not exist"};
- _ -> mnesia_clear(VHost, Component, Name),
- case lookup_component(Component) of
- {ok, Mod} -> Mod:notify_clear(VHost, Component, Name);
- _ -> ok
- end
- end.
-
-mnesia_clear(VHost, Component, Name) ->
- F = fun () ->
- ok = mnesia:delete(?TABLE, {VHost, Component, Name}, write)
- end,
- ok = rabbit_misc:execute_mnesia_transaction(rabbit_vhost:with(VHost, F)).
-
-list() ->
- [p(P) || #runtime_parameters{ key = {_VHost, Comp, _Name}} = P <-
- rabbit_misc:dirty_read_all(?TABLE), Comp /= <<"policy">>].
-
-list(VHost) -> list(VHost, '_').
-list_component(Component) -> list('_', Component).
-
-list(VHost, Component) ->
- case VHost of
- '_' -> ok;
- _ -> rabbit_vhost:assert(VHost)
- end,
- Match = #runtime_parameters{key = {VHost, Component, '_'}, _ = '_'},
- [p(P) || #runtime_parameters{key = {_VHost, Comp, _Name}} = P <-
- mnesia:dirty_match_object(?TABLE, Match),
- Comp =/= <<"policy">> orelse Component =:= <<"policy">>].
-
-list_formatted(VHost) ->
- [pset(value, format(pget(value, P)), P) || P <- list(VHost)].
-
-lookup(VHost, Component, Name) ->
- case lookup0(VHost, Component, Name, rabbit_misc:const(not_found)) of
- not_found -> not_found;
- Params -> p(Params)
- end.
-
-value(VHost, Component, Name) ->
- case lookup0(VHost, Component, Name, rabbit_misc:const(not_found)) of
- not_found -> not_found;
- Params -> Params#runtime_parameters.value
- end.
-
-value(VHost, Component, Name, Default) ->
- Params = lookup0(VHost, Component, Name,
- fun () ->
- lookup_missing(VHost, Component, Name, Default)
- end),
- Params#runtime_parameters.value.
-
-lookup0(VHost, Component, Name, DefaultFun) ->
- case mnesia:dirty_read(?TABLE, {VHost, Component, Name}) of
- [] -> DefaultFun();
- [R] -> R
- end.
-
-lookup_missing(VHost, Component, Name, Default) ->
- rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:read(?TABLE, {VHost, Component, Name}, read) of
- [] -> Record = c(VHost, Component, Name, Default),
- mnesia:write(?TABLE, Record, write),
- Record;
- [R] -> R
- end
- end).
-
-c(VHost, Component, Name, Default) ->
- #runtime_parameters{key = {VHost, Component, Name},
- value = Default}.
-
-p(#runtime_parameters{key = {VHost, Component, Name}, value = Value}) ->
- [{vhost, VHost},
- {component, Component},
- {name, Name},
- {value, Value}].
-
-info_keys() -> [component, name, value].
-
-%%---------------------------------------------------------------------------
-
-lookup_component(Component) ->
- case rabbit_registry:lookup_module(
- runtime_parameter, list_to_atom(binary_to_list(Component))) of
- {error, not_found} -> {errors,
- [{"component ~s not found", [Component]}]};
- {ok, Module} -> {ok, Module}
- end.
-
-format(Term) ->
- {ok, JSON} = rabbit_misc:json_encode(rabbit_misc:term_to_json(Term)),
- list_to_binary(JSON).
-
-flatten_errors(L) ->
- case [{F, A} || I <- lists:flatten([L]), {error, F, A} <- [I]] of
- [] -> ok;
- E -> {errors, E}
- end.
diff --git a/src/rabbit_runtime_parameters_test.erl b/src/rabbit_runtime_parameters_test.erl
deleted file mode 100644
index 05c85881..00000000
--- a/src/rabbit_runtime_parameters_test.erl
+++ /dev/null
@@ -1,64 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_runtime_parameters_test).
--behaviour(rabbit_runtime_parameter).
--behaviour(rabbit_policy_validator).
-
--export([validate/4, notify/4, notify_clear/3]).
--export([register/0, unregister/0]).
--export([validate_policy/1]).
--export([register_policy_validator/0, unregister_policy_validator/0]).
-
-%----------------------------------------------------------------------------
-
-register() ->
- rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
-
-unregister() ->
- rabbit_registry:unregister(runtime_parameter, <<"test">>).
-
-validate(_, <<"test">>, <<"good">>, _Term) -> ok;
-validate(_, <<"test">>, <<"maybe">>, <<"good">>) -> ok;
-validate(_, <<"test">>, _, _) -> {error, "meh", []}.
-
-notify(_, _, _, _) -> ok.
-notify_clear(_, _, _) -> ok.
-
-%----------------------------------------------------------------------------
-
-register_policy_validator() ->
- rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
- rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
-
-unregister_policy_validator() ->
- rabbit_registry:unregister(policy_validator, <<"testeven">>),
- rabbit_registry:unregister(policy_validator, <<"testpos">>).
-
-validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
- case length(Terms) rem 2 =:= 0 of
- true -> ok;
- false -> {error, "meh", []}
- end;
-
-validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
- case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
- true -> ok;
- false -> {error, "meh", []}
- end;
-
-validate_policy(_) ->
- {error, "meh", []}.
diff --git a/src/rabbit_sasl_report_file_h.erl b/src/rabbit_sasl_report_file_h.erl
deleted file mode 100644
index 39a10ac3..00000000
--- a/src/rabbit_sasl_report_file_h.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_sasl_report_file_h).
-
--behaviour(gen_event).
-
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
- code_change/3]).
-
-%% rabbit_sasl_report_file_h is a wrapper around the sasl_report_file_h
-%% module because the original's init/1 does not match properly
-%% with the result of closing the old handler when swapping handlers.
-%% The first init/1 additionally allows for simple log rotation
-%% when the suffix is not the empty string.
-%% The original init/1 also opened the file in 'write' mode, thus
-%% overwriting old logs. To remedy this, init/1 from
-%% lib/sasl/src/sasl_report_file_h.erl from R14B3 was copied as
-%% init_file/1 and changed so that it opens the file in 'append' mode.
-
-%% Used only when swapping handlers and performing
-%% log rotation
-init({{File, Suffix}, []}) ->
- case rabbit_file:append_file(File, Suffix) of
- ok -> file:delete(File),
- ok;
- {error, Error} ->
- rabbit_log:error("Failed to append contents of "
- "sasl log file '~s' to '~s':~n~p~n",
- [File, [File, Suffix], Error])
- end,
- init(File);
-%% Used only when swapping handlers and the original handler
-%% failed to terminate or was never installed
-init({{File, _}, error}) ->
- init(File);
-%% Used only when swapping handlers without
-%% doing any log rotation
-init({File, []}) ->
- init(File);
-init({File, _Type} = FileInfo) ->
- rabbit_file:ensure_parent_dirs_exist(File),
- init_file(FileInfo);
-init(File) ->
- rabbit_file:ensure_parent_dirs_exist(File),
- init_file({File, sasl_error_logger_type()}).
-
-init_file({File, Type}) ->
- process_flag(trap_exit, true),
- case file:open(File, [append]) of
- {ok,Fd} -> {ok, {Fd, File, Type}};
- Error -> Error
- end.
-
-handle_event(Event, State) ->
- sasl_report_file_h:handle_event(Event, State).
-
-handle_info(Event, State) ->
- sasl_report_file_h:handle_info(Event, State).
-
-handle_call(Event, State) ->
- sasl_report_file_h:handle_call(Event, State).
-
-terminate(Reason, State) ->
- sasl_report_file_h:terminate(Reason, State).
-
-code_change(_OldVsn, State, _Extra) ->
- %% There is no sasl_report_file_h:code_change/3
- {ok, State}.
-
-%%----------------------------------------------------------------------
-
-sasl_error_logger_type() ->
- case application:get_env(sasl, errlog_type) of
- {ok, error} -> error;
- {ok, progress} -> progress;
- {ok, all} -> all;
- {ok, Bad} -> throw({error, {wrong_errlog_type, Bad}});
- _ -> all
- end.
diff --git a/src/rabbit_ssl.erl b/src/rabbit_ssl.erl
deleted file mode 100644
index 109bff30..00000000
--- a/src/rabbit_ssl.erl
+++ /dev/null
@@ -1,302 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_ssl).
-
--include("rabbit.hrl").
-
--include_lib("public_key/include/public_key.hrl").
-
--export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]).
--export([peer_cert_subject_items/2, peer_cert_auth_name/1]).
-
-%%--------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([certificate/0]).
-
--type(certificate() :: binary()).
-
--spec(peer_cert_issuer/1 :: (certificate()) -> string()).
--spec(peer_cert_subject/1 :: (certificate()) -> string()).
--spec(peer_cert_validity/1 :: (certificate()) -> string()).
--spec(peer_cert_subject_items/2 ::
- (certificate(), tuple()) -> [string()] | 'not_found').
--spec(peer_cert_auth_name/1 ::
- (certificate()) -> binary() | 'not_found' | 'unsafe').
-
--endif.
-
-%%--------------------------------------------------------------------------
-%% High-level functions used by reader
-%%--------------------------------------------------------------------------
-
-%% Return a string describing the certificate's issuer.
-peer_cert_issuer(Cert) ->
- cert_info(fun(#'OTPCertificate' {
- tbsCertificate = #'OTPTBSCertificate' {
- issuer = Issuer }}) ->
- format_rdn_sequence(Issuer)
- end, Cert).
-
-%% Return a string describing the certificate's subject, as per RFC4514.
-peer_cert_subject(Cert) ->
- cert_info(fun(#'OTPCertificate' {
- tbsCertificate = #'OTPTBSCertificate' {
- subject = Subject }}) ->
- format_rdn_sequence(Subject)
- end, Cert).
-
-%% Return the parts of the certificate's subject.
-peer_cert_subject_items(Cert, Type) ->
- cert_info(fun(#'OTPCertificate' {
- tbsCertificate = #'OTPTBSCertificate' {
- subject = Subject }}) ->
- find_by_type(Type, Subject)
- end, Cert).
-
-%% Return a string describing the certificate's validity.
-peer_cert_validity(Cert) ->
- cert_info(fun(#'OTPCertificate' {
- tbsCertificate = #'OTPTBSCertificate' {
- validity = {'Validity', Start, End} }}) ->
- rabbit_misc:format("~s - ~s", [format_asn1_value(Start),
- format_asn1_value(End)])
- end, Cert).
-
-%% Extract a username from the certificate
-peer_cert_auth_name(Cert) ->
- {ok, Mode} = application:get_env(rabbit, ssl_cert_login_from),
- peer_cert_auth_name(Mode, Cert).
-
-peer_cert_auth_name(distinguished_name, Cert) ->
- case auth_config_sane() of
- true -> iolist_to_binary(peer_cert_subject(Cert));
- false -> unsafe
- end;
-
-peer_cert_auth_name(common_name, Cert) ->
- %% If there is more than one CN then we join them with "," in a
- %% vaguely DN-like way. But this is more just so we do something
- %% more intelligent than crashing, if you actually want to escape
- %% things properly etc, use DN mode.
- case auth_config_sane() of
- true -> case peer_cert_subject_items(Cert, ?'id-at-commonName') of
- not_found -> not_found;
- CNs -> list_to_binary(string:join(CNs, ","))
- end;
- false -> unsafe
- end.
-
-auth_config_sane() ->
- {ok, Opts} = application:get_env(rabbit, ssl_options),
- case {proplists:get_value(fail_if_no_peer_cert, Opts),
- proplists:get_value(verify, Opts)} of
- {true, verify_peer} ->
- true;
- {F, V} ->
- rabbit_log:warning("SSL certificate authentication disabled, "
- "fail_if_no_peer_cert=~p; "
- "verify=~p~n", [F, V]),
- false
- end.
-
-%%--------------------------------------------------------------------------
-
-cert_info(F, Cert) ->
- F(case public_key:pkix_decode_cert(Cert, otp) of
- {ok, DecCert} -> DecCert; %%pre R14B
- DecCert -> DecCert %%R14B onwards
- end).
-
-find_by_type(Type, {rdnSequence, RDNs}) ->
- case [V || #'AttributeTypeAndValue'{type = T, value = V}
- <- lists:flatten(RDNs),
- T == Type] of
- [] -> not_found;
- L -> [format_asn1_value(V) || V <- L]
- end.
-
-%%--------------------------------------------------------------------------
-%% Formatting functions
-%%--------------------------------------------------------------------------
-
-%% Format and rdnSequence as a RFC4514 subject string.
-format_rdn_sequence({rdnSequence, Seq}) ->
- string:join(lists:reverse([format_complex_rdn(RDN) || RDN <- Seq]), ",").
-
-%% Format an RDN set.
-format_complex_rdn(RDNs) ->
- string:join([format_rdn(RDN) || RDN <- RDNs], "+").
-
-%% Format an RDN. If the type name is unknown, use the dotted decimal
-%% representation. See RFC4514, section 2.3.
-format_rdn(#'AttributeTypeAndValue'{type = T, value = V}) ->
- FV = escape_rdn_value(format_asn1_value(V)),
- Fmts = [{?'id-at-surname' , "SN"},
- {?'id-at-givenName' , "GIVENNAME"},
- {?'id-at-initials' , "INITIALS"},
- {?'id-at-generationQualifier' , "GENERATIONQUALIFIER"},
- {?'id-at-commonName' , "CN"},
- {?'id-at-localityName' , "L"},
- {?'id-at-stateOrProvinceName' , "ST"},
- {?'id-at-organizationName' , "O"},
- {?'id-at-organizationalUnitName' , "OU"},
- {?'id-at-title' , "TITLE"},
- {?'id-at-countryName' , "C"},
- {?'id-at-serialNumber' , "SERIALNUMBER"},
- {?'id-at-pseudonym' , "PSEUDONYM"},
- {?'id-domainComponent' , "DC"},
- {?'id-emailAddress' , "EMAILADDRESS"},
- {?'street-address' , "STREET"},
- {{0,9,2342,19200300,100,1,1} , "UID"}], %% Not in public_key.hrl
- case proplists:lookup(T, Fmts) of
- {_, Fmt} ->
- rabbit_misc:format(Fmt ++ "=~s", [FV]);
- none when is_tuple(T) ->
- TypeL = [rabbit_misc:format("~w", [X]) || X <- tuple_to_list(T)],
- rabbit_misc:format("~s=~s", [string:join(TypeL, "."), FV]);
- none ->
- rabbit_misc:format("~p=~s", [T, FV])
- end.
-
-%% Escape a string as per RFC4514.
-escape_rdn_value(V) ->
- escape_rdn_value(V, start).
-
-escape_rdn_value([], _) ->
- [];
-escape_rdn_value([C | S], start) when C =:= $ ; C =:= $# ->
- [$\\, C | escape_rdn_value(S, middle)];
-escape_rdn_value(S, start) ->
- escape_rdn_value(S, middle);
-escape_rdn_value([$ ], middle) ->
- [$\\, $ ];
-escape_rdn_value([C | S], middle) when C =:= $"; C =:= $+; C =:= $,; C =:= $;;
- C =:= $<; C =:= $>; C =:= $\\ ->
- [$\\, C | escape_rdn_value(S, middle)];
-escape_rdn_value([C | S], middle) when C < 32 ; C >= 126 ->
- %% Of ASCII characters only U+0000 needs escaping, but for display
- %% purposes it's handy to escape all non-printable chars. All non-ASCII
- %% characters get converted to UTF-8 sequences and then escaped. We've
- %% already got a UTF-8 sequence here, so just escape it.
- rabbit_misc:format("\\~2.16.0B", [C]) ++ escape_rdn_value(S, middle);
-escape_rdn_value([C | S], middle) ->
- [C | escape_rdn_value(S, middle)].
-
-%% Get the string representation of an OTPCertificate field.
-format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString;
- ST =:= universalString; ST =:= utf8String;
- ST =:= bmpString ->
- format_directory_string(ST, S);
-format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2,
- Min1, Min2, S1, S2, $Z]}) ->
- rabbit_misc:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ",
- [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]);
-%% We appear to get an untagged value back for an ia5string
-%% (e.g. domainComponent).
-format_asn1_value(V) when is_list(V) ->
- V;
-format_asn1_value(V) when is_binary(V) ->
- %% OTP does not decode some values when combined with an unknown
- %% type. That's probably wrong, so as a last ditch effort let's
- %% try manually decoding. 'DirectoryString' is semi-arbitrary -
- %% but it is the type which covers the various string types we
- %% handle below.
- try
- {ST, S} = public_key:der_decode('DirectoryString', V),
- format_directory_string(ST, S)
- catch _:_ ->
- rabbit_misc:format("~p", [V])
- end;
-format_asn1_value(V) ->
- rabbit_misc:format("~p", [V]).
-
-%% DirectoryString { INTEGER : maxSize } ::= CHOICE {
-%% teletexString TeletexString (SIZE (1..maxSize)),
-%% printableString PrintableString (SIZE (1..maxSize)),
-%% bmpString BMPString (SIZE (1..maxSize)),
-%% universalString UniversalString (SIZE (1..maxSize)),
-%% uTF8String UTF8String (SIZE (1..maxSize)) }
-%%
-%% Precise definitions of printable / teletexString are hard to come
-%% by. This is what I reconstructed:
-%%
-%% printableString:
-%% "intended to represent the limited character sets available to
-%% mainframe input terminals"
-%% A-Z a-z 0-9 ' ( ) + , - . / : = ? [space]
-%% http://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx
-%%
-%% teletexString:
-%% "a sizable volume of software in the world treats TeletexString
-%% (T61String) as a simple 8-bit string with mostly Windows Latin 1
-%% (superset of iso-8859-1) encoding"
-%% http://www.mail-archive.com/asn1@asn1.org/msg00460.html
-%%
-%% (However according to that link X.680 actually defines
-%% TeletexString in some much more involved and crazy way. I suggest
-%% we treat it as ISO-8859-1 since Erlang does not support Windows
-%% Latin 1).
-%%
-%% bmpString:
-%% UCS-2 according to RFC 3641. Hence cannot represent Unicode
-%% characters above 65535 (outside the "Basic Multilingual Plane").
-%%
-%% universalString:
-%% UCS-4 according to RFC 3641.
-%%
-%% utf8String:
-%% UTF-8 according to RFC 3641.
-%%
-%% Within Rabbit we assume UTF-8 encoding. Since printableString is a
-%% subset of ASCII it is also a subset of UTF-8. The others need
-%% converting. Fortunately since the Erlang SSL library does the
-%% decoding for us (albeit into a weird format, see below), we just
-%% need to handle encoding into UTF-8. Note also that utf8Strings come
-%% back as binary.
-%%
-%% Note for testing: the default Ubuntu configuration for openssl will
-%% only create printableString or teletexString types no matter what
-%% you do. Edit string_mask in the [req] section of
-%% /etc/ssl/openssl.cnf to change this (see comments there). You
-%% probably also need to set utf8 = yes to get it to accept UTF-8 on
-%% the command line. Also note I could not get openssl to generate a
-%% universalString.
-
-format_directory_string(printableString, S) -> S;
-format_directory_string(teletexString, S) -> utf8_list_from(S);
-format_directory_string(bmpString, S) -> utf8_list_from(S);
-format_directory_string(universalString, S) -> utf8_list_from(S);
-format_directory_string(utf8String, S) -> binary_to_list(S).
-
-utf8_list_from(S) ->
- binary_to_list(
- unicode:characters_to_binary(flatten_ssl_list(S), utf32, utf8)).
-
-%% The Erlang SSL implementation invents its own representation for
-%% non-ascii strings - looking like [97,{0,0,3,187}] (that's LATIN
-%% SMALL LETTER A followed by GREEK SMALL LETTER LAMDA). We convert
-%% this into a list of unicode characters, which we can tell
-%% unicode:characters_to_binary is utf32.
-
-flatten_ssl_list(L) -> [flatten_ssl_list_item(I) || I <- L].
-
-flatten_ssl_list_item({A, B, C, D}) ->
- A * (1 bsl 24) + B * (1 bsl 16) + C * (1 bsl 8) + D;
-flatten_ssl_list_item(N) when is_number (N) ->
- N.
diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl
deleted file mode 100644
index c1deb14b..00000000
--- a/src/rabbit_sup.erl
+++ /dev/null
@@ -1,95 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_sup).
-
--behaviour(supervisor).
-
--export([start_link/0, start_child/1, start_child/2, start_child/3,
- start_supervisor_child/1, start_supervisor_child/2,
- start_supervisor_child/3,
- start_restartable_child/1, start_restartable_child/2, stop_child/1]).
-
--export([init/1]).
-
--include("rabbit.hrl").
-
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_child/1 :: (atom()) -> 'ok').
--spec(start_child/2 :: (atom(), [any()]) -> 'ok').
--spec(start_child/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(start_supervisor_child/1 :: (atom()) -> 'ok').
--spec(start_supervisor_child/2 :: (atom(), [any()]) -> 'ok').
--spec(start_supervisor_child/3 :: (atom(), atom(), [any()]) -> 'ok').
--spec(start_restartable_child/1 :: (atom()) -> 'ok').
--spec(start_restartable_child/2 :: (atom(), [any()]) -> 'ok').
--spec(stop_child/1 :: (atom()) -> rabbit_types:ok_or_error(any())).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() -> supervisor:start_link({local, ?SERVER}, ?MODULE, []).
-
-start_child(Mod) -> start_child(Mod, []).
-
-start_child(Mod, Args) -> start_child(Mod, Mod, Args).
-
-start_child(ChildId, Mod, Args) ->
- child_reply(supervisor:start_child(
- ?SERVER,
- {ChildId, {Mod, start_link, Args},
- transient, ?MAX_WAIT, worker, [Mod]})).
-
-start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
-
-start_supervisor_child(Mod, Args) -> start_supervisor_child(Mod, Mod, Args).
-
-start_supervisor_child(ChildId, Mod, Args) ->
- child_reply(supervisor:start_child(
- ?SERVER,
- {ChildId, {Mod, start_link, Args},
- transient, infinity, supervisor, [Mod]})).
-
-start_restartable_child(Mod) -> start_restartable_child(Mod, []).
-
-start_restartable_child(Mod, Args) ->
- Name = list_to_atom(atom_to_list(Mod) ++ "_sup"),
- child_reply(supervisor:start_child(
- ?SERVER,
- {Name, {rabbit_restartable_sup, start_link,
- [Name, {Mod, start_link, Args}]},
- transient, infinity, supervisor, [rabbit_restartable_sup]})).
-
-stop_child(ChildId) ->
- case supervisor:terminate_child(?SERVER, ChildId) of
- ok -> supervisor:delete_child(?SERVER, ChildId);
- E -> E
- end.
-
-init([]) -> {ok, {{one_for_all, 0, 1}, []}}.
-
-
-%%----------------------------------------------------------------------------
-
-child_reply({ok, _}) -> ok;
-child_reply(X) -> X.
diff --git a/src/rabbit_table.erl b/src/rabbit_table.erl
deleted file mode 100644
index a29c57d5..00000000
--- a/src/rabbit_table.erl
+++ /dev/null
@@ -1,311 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_table).
-
--export([create/0, create_local_copy/1, wait_for_replicated/0, wait/1,
- force_load/0, is_present/0, is_empty/0,
- check_schema_integrity/0, clear_ram_only_tables/0]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(create/0 :: () -> 'ok').
--spec(create_local_copy/1 :: ('disc' | 'ram') -> 'ok').
--spec(wait_for_replicated/0 :: () -> 'ok').
--spec(wait/1 :: ([atom()]) -> 'ok').
--spec(force_load/0 :: () -> 'ok').
--spec(is_present/0 :: () -> boolean()).
--spec(is_empty/0 :: () -> boolean()).
--spec(check_schema_integrity/0 :: () -> rabbit_types:ok_or_error(any())).
--spec(clear_ram_only_tables/0 :: () -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Main interface
-%%----------------------------------------------------------------------------
-
-create() ->
- lists:foreach(fun ({Tab, TabDef}) ->
- TabDef1 = proplists:delete(match, TabDef),
- case mnesia:create_table(Tab, TabDef1) of
- {atomic, ok} -> ok;
- {aborted, Reason} ->
- throw({error, {table_creation_failed,
- Tab, TabDef1, Reason}})
- end
- end, definitions()),
- ok.
-
-%% The sequence in which we delete the schema and then the other
-%% tables is important: if we delete the schema first when moving to
-%% RAM mnesia will loudly complain since it doesn't make much sense to
-%% do that. But when moving to disc, we need to move the schema first.
-create_local_copy(disc) ->
- create_local_copy(schema, disc_copies),
- create_local_copies(disc);
-create_local_copy(ram) ->
- create_local_copies(ram),
- create_local_copy(schema, ram_copies).
-
-wait_for_replicated() ->
- wait([Tab || {Tab, TabDef} <- definitions(),
- not lists:member({local_content, true}, TabDef)]).
-
-wait(TableNames) ->
- case mnesia:wait_for_tables(TableNames, 30000) of
- ok ->
- ok;
- {timeout, BadTabs} ->
- throw({error, {timeout_waiting_for_tables, BadTabs}});
- {error, Reason} ->
- throw({error, {failed_waiting_for_tables, Reason}})
- end.
-
-force_load() -> [mnesia:force_load_table(T) || T <- names()], ok.
-
-is_present() -> names() -- mnesia:system_info(tables) =:= [].
-
-is_empty() ->
- lists:all(fun (Tab) -> mnesia:dirty_first(Tab) == '$end_of_table' end,
- names()).
-
-check_schema_integrity() ->
- Tables = mnesia:system_info(tables),
- case check(fun (Tab, TabDef) ->
- case lists:member(Tab, Tables) of
- false -> {error, {table_missing, Tab}};
- true -> check_attributes(Tab, TabDef)
- end
- end) of
- ok -> ok = wait(names()),
- check(fun check_content/2);
- Other -> Other
- end.
-
-clear_ram_only_tables() ->
- Node = node(),
- lists:foreach(
- fun (TabName) ->
- case lists:member(Node, mnesia:table_info(TabName, ram_copies)) of
- true -> {atomic, ok} = mnesia:clear_table(TabName);
- false -> ok
- end
- end, names()),
- ok.
-
-%%--------------------------------------------------------------------
-%% Internal helpers
-%%--------------------------------------------------------------------
-
-create_local_copies(Type) ->
- lists:foreach(
- fun ({Tab, TabDef}) ->
- HasDiscCopies = has_copy_type(TabDef, disc_copies),
- HasDiscOnlyCopies = has_copy_type(TabDef, disc_only_copies),
- LocalTab = proplists:get_bool(local_content, TabDef),
- StorageType =
- if
- Type =:= disc orelse LocalTab ->
- if
- HasDiscCopies -> disc_copies;
- HasDiscOnlyCopies -> disc_only_copies;
- true -> ram_copies
- end;
- Type =:= ram ->
- ram_copies
- end,
- ok = create_local_copy(Tab, StorageType)
- end, definitions(Type)),
- ok.
-
-create_local_copy(Tab, Type) ->
- StorageType = mnesia:table_info(Tab, storage_type),
- {atomic, ok} =
- if
- StorageType == unknown ->
- mnesia:add_table_copy(Tab, node(), Type);
- StorageType /= Type ->
- mnesia:change_table_copy_type(Tab, node(), Type);
- true -> {atomic, ok}
- end,
- ok.
-
-has_copy_type(TabDef, DiscType) ->
- lists:member(node(), proplists:get_value(DiscType, TabDef, [])).
-
-check_attributes(Tab, TabDef) ->
- {_, ExpAttrs} = proplists:lookup(attributes, TabDef),
- case mnesia:table_info(Tab, attributes) of
- ExpAttrs -> ok;
- Attrs -> {error, {table_attributes_mismatch, Tab, ExpAttrs, Attrs}}
- end.
-
-check_content(Tab, TabDef) ->
- {_, Match} = proplists:lookup(match, TabDef),
- case mnesia:dirty_first(Tab) of
- '$end_of_table' ->
- ok;
- Key ->
- ObjList = mnesia:dirty_read(Tab, Key),
- MatchComp = ets:match_spec_compile([{Match, [], ['$_']}]),
- case ets:match_spec_run(ObjList, MatchComp) of
- ObjList -> ok;
- _ -> {error, {table_content_invalid, Tab, Match, ObjList}}
- end
- end.
-
-check(Fun) ->
- case [Error || {Tab, TabDef} <- definitions(),
- case Fun(Tab, TabDef) of
- ok -> Error = none, false;
- {error, Error} -> true
- end] of
- [] -> ok;
- Errors -> {error, Errors}
- end.
-
-%%--------------------------------------------------------------------
-%% Table definitions
-%%--------------------------------------------------------------------
-
-names() -> [Tab || {Tab, _} <- definitions()].
-
-%% The tables aren't supposed to be on disk on a ram node
-definitions(disc) ->
- definitions();
-definitions(ram) ->
- [{Tab, [{disc_copies, []}, {ram_copies, [node()]} |
- proplists:delete(
- ram_copies, proplists:delete(disc_copies, TabDef))]} ||
- {Tab, TabDef} <- definitions()].
-
-definitions() ->
- [{rabbit_user,
- [{record_name, internal_user},
- {attributes, record_info(fields, internal_user)},
- {disc_copies, [node()]},
- {match, #internal_user{_='_'}}]},
- {rabbit_user_permission,
- [{record_name, user_permission},
- {attributes, record_info(fields, user_permission)},
- {disc_copies, [node()]},
- {match, #user_permission{user_vhost = #user_vhost{_='_'},
- permission = #permission{_='_'},
- _='_'}}]},
- {rabbit_vhost,
- [{record_name, vhost},
- {attributes, record_info(fields, vhost)},
- {disc_copies, [node()]},
- {match, #vhost{_='_'}}]},
- {rabbit_listener,
- [{record_name, listener},
- {attributes, record_info(fields, listener)},
- {type, bag},
- {match, #listener{_='_'}}]},
- {rabbit_durable_route,
- [{record_name, route},
- {attributes, record_info(fields, route)},
- {disc_copies, [node()]},
- {match, #route{binding = binding_match(), _='_'}}]},
- {rabbit_semi_durable_route,
- [{record_name, route},
- {attributes, record_info(fields, route)},
- {type, ordered_set},
- {match, #route{binding = binding_match(), _='_'}}]},
- {rabbit_route,
- [{record_name, route},
- {attributes, record_info(fields, route)},
- {type, ordered_set},
- {match, #route{binding = binding_match(), _='_'}}]},
- {rabbit_reverse_route,
- [{record_name, reverse_route},
- {attributes, record_info(fields, reverse_route)},
- {type, ordered_set},
- {match, #reverse_route{reverse_binding = reverse_binding_match(),
- _='_'}}]},
- {rabbit_topic_trie_node,
- [{record_name, topic_trie_node},
- {attributes, record_info(fields, topic_trie_node)},
- {type, ordered_set},
- {match, #topic_trie_node{trie_node = trie_node_match(), _='_'}}]},
- {rabbit_topic_trie_edge,
- [{record_name, topic_trie_edge},
- {attributes, record_info(fields, topic_trie_edge)},
- {type, ordered_set},
- {match, #topic_trie_edge{trie_edge = trie_edge_match(), _='_'}}]},
- {rabbit_topic_trie_binding,
- [{record_name, topic_trie_binding},
- {attributes, record_info(fields, topic_trie_binding)},
- {type, ordered_set},
- {match, #topic_trie_binding{trie_binding = trie_binding_match(),
- _='_'}}]},
- {rabbit_durable_exchange,
- [{record_name, exchange},
- {attributes, record_info(fields, exchange)},
- {disc_copies, [node()]},
- {match, #exchange{name = exchange_name_match(), _='_'}}]},
- {rabbit_exchange,
- [{record_name, exchange},
- {attributes, record_info(fields, exchange)},
- {match, #exchange{name = exchange_name_match(), _='_'}}]},
- {rabbit_exchange_serial,
- [{record_name, exchange_serial},
- {attributes, record_info(fields, exchange_serial)},
- {match, #exchange_serial{name = exchange_name_match(), _='_'}}]},
- {rabbit_runtime_parameters,
- [{record_name, runtime_parameters},
- {attributes, record_info(fields, runtime_parameters)},
- {disc_copies, [node()]},
- {match, #runtime_parameters{_='_'}}]},
- {rabbit_durable_queue,
- [{record_name, amqqueue},
- {attributes, record_info(fields, amqqueue)},
- {disc_copies, [node()]},
- {match, #amqqueue{name = queue_name_match(), _='_'}}]},
- {rabbit_queue,
- [{record_name, amqqueue},
- {attributes, record_info(fields, amqqueue)},
- {match, #amqqueue{name = queue_name_match(), _='_'}}]}]
- ++ gm:table_definitions()
- ++ mirrored_supervisor:table_definitions().
-
-binding_match() ->
- #binding{source = exchange_name_match(),
- destination = binding_destination_match(),
- _='_'}.
-reverse_binding_match() ->
- #reverse_binding{destination = binding_destination_match(),
- source = exchange_name_match(),
- _='_'}.
-binding_destination_match() ->
- resource_match('_').
-trie_node_match() ->
- #trie_node{ exchange_name = exchange_name_match(), _='_'}.
-trie_edge_match() ->
- #trie_edge{ exchange_name = exchange_name_match(), _='_'}.
-trie_binding_match() ->
- #trie_binding{exchange_name = exchange_name_match(), _='_'}.
-exchange_name_match() ->
- resource_match(exchange).
-queue_name_match() ->
- resource_match(queue).
-resource_match(Kind) ->
- #resource{kind = Kind, _='_'}.
diff --git a/src/rabbit_tests.erl b/src/rabbit_tests.erl
deleted file mode 100644
index 30cf9114..00000000
--- a/src/rabbit_tests.erl
+++ /dev/null
@@ -1,2870 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_tests).
-
--compile([export_all]).
-
--export([all_tests/0]).
-
--import(rabbit_misc, [pget/2]).
-
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("kernel/include/file.hrl").
-
--define(PERSISTENT_MSG_STORE, msg_store_persistent).
--define(TRANSIENT_MSG_STORE, msg_store_transient).
--define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
--define(TIMEOUT, 5000).
-
-all_tests() ->
- ok = setup_cluster(),
- ok = supervisor2_tests:test_all(),
- passed = gm_tests:all_tests(),
- passed = mirrored_supervisor_tests:all_tests(),
- application:set_env(rabbit, file_handles_high_watermark, 10, infinity),
- ok = file_handle_cache:set_limit(10),
- passed = test_version_equivalance(),
- passed = test_multi_call(),
- passed = test_file_handle_cache(),
- passed = test_backing_queue(),
- passed = test_rabbit_basic_header_handling(),
- passed = test_priority_queue(),
- passed = test_pg_local(),
- passed = test_unfold(),
- passed = test_supervisor_delayed_restart(),
- passed = test_table_codec(),
- passed = test_content_framing(),
- passed = test_content_transcoding(),
- passed = test_topic_matching(),
- passed = test_log_management(),
- passed = test_app_management(),
- passed = test_log_management_during_startup(),
- passed = test_statistics(),
- passed = test_arguments_parser(),
- passed = test_dynamic_mirroring(),
- passed = test_user_management(),
- passed = test_runtime_parameters(),
- passed = test_policy_validation(),
- passed = test_ha_policy_validation(),
- passed = test_server_status(),
- passed = test_amqp_connection_refusal(),
- passed = test_confirms(),
- passed = test_with_state(),
- passed =
- do_if_secondary_node(
- fun run_cluster_dependent_tests/1,
- fun (SecondaryNode) ->
- io:format("Skipping cluster dependent tests with node ~p~n",
- [SecondaryNode]),
- passed
- end),
- passed = test_configurable_server_properties(),
- passed.
-
-
-do_if_secondary_node(Up, Down) ->
- SecondaryNode = rabbit_nodes:make("hare"),
-
- case net_adm:ping(SecondaryNode) of
- pong -> Up(SecondaryNode);
- pang -> Down(SecondaryNode)
- end.
-
-setup_cluster() ->
- do_if_secondary_node(
- fun (SecondaryNode) ->
- cover:stop(SecondaryNode),
- ok = control_action(stop_app, []),
- %% 'cover' does not cope at all well with nodes disconnecting,
- %% which happens as part of reset. So we turn it off
- %% temporarily. That is ok even if we're not in general using
- %% cover, it just turns the engine on / off and doesn't log
- %% anything. Note that this way cover won't be on when joining
- %% the cluster, but this is OK since we're testing the clustering
- %% interface elsewere anyway.
- cover:stop(nodes()),
- ok = control_action(join_cluster,
- [atom_to_list(SecondaryNode)]),
- cover:start(nodes()),
- ok = control_action(start_app, []),
- ok = control_action(start_app, SecondaryNode, [], [])
- end,
- fun (_) -> ok end).
-
-maybe_run_cluster_dependent_tests() ->
- do_if_secondary_node(
- fun (SecondaryNode) ->
- passed = run_cluster_dependent_tests(SecondaryNode)
- end,
- fun (SecondaryNode) ->
- io:format("Skipping cluster dependent tests with node ~p~n",
- [SecondaryNode])
- end).
-
-run_cluster_dependent_tests(SecondaryNode) ->
- io:format("Running cluster dependent tests with node ~p~n", [SecondaryNode]),
- passed = test_delegates_async(SecondaryNode),
- passed = test_delegates_sync(SecondaryNode),
- passed = test_queue_cleanup(SecondaryNode),
- passed = test_declare_on_dead_queue(SecondaryNode),
- passed = test_refresh_events(SecondaryNode),
-
- %% we now run the tests remotely, so that code coverage on the
- %% local node picks up more of the delegate
- Node = node(),
- Self = self(),
- Remote = spawn(SecondaryNode,
- fun () -> Rs = [ test_delegates_async(Node),
- test_delegates_sync(Node),
- test_queue_cleanup(Node),
- test_declare_on_dead_queue(Node),
- test_refresh_events(Node) ],
- Self ! {self(), Rs}
- end),
- receive
- {Remote, Result} ->
- Result = lists:duplicate(length(Result), passed)
- after 30000 ->
- throw(timeout)
- end,
-
- passed.
-
-test_version_equivalance() ->
- true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
- true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
- true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
- passed.
-
-test_multi_call() ->
- Fun = fun() ->
- receive
- {'$gen_call', {From, Mref}, request} ->
- From ! {Mref, response}
- end,
- receive
- never -> ok
- end
- end,
- Pid1 = spawn(Fun),
- Pid2 = spawn(Fun),
- Pid3 = spawn(Fun),
- exit(Pid2, bang),
- {[{Pid1, response}, {Pid3, response}], [{Pid2, _Fail}]} =
- rabbit_misc:multi_call([Pid1, Pid2, Pid3], request),
- exit(Pid1, bang),
- exit(Pid3, bang),
- passed.
-
-test_rabbit_basic_header_handling() ->
- passed = write_table_with_invalid_existing_type_test(),
- passed = invalid_existing_headers_test(),
- passed = disparate_invalid_header_entries_accumulate_separately_test(),
- passed = corrupt_or_invalid_headers_are_overwritten_test(),
- passed = invalid_same_header_entry_accumulation_test(),
- passed.
-
--define(XDEATH_TABLE,
- [{<<"reason">>, longstr, <<"blah">>},
- {<<"queue">>, longstr, <<"foo.bar.baz">>},
- {<<"exchange">>, longstr, <<"my-exchange">>},
- {<<"routing-keys">>, array, []}]).
-
--define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
-
--define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
--define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
--define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
-
-write_table_with_invalid_existing_type_test() ->
- prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]),
- passed.
-
-invalid_existing_headers_test() ->
- Headers =
- prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
- {array, [{table, ?ROUTE_TABLE}]} =
- rabbit_misc:table_lookup(Headers, <<"header2">>),
- passed.
-
-disparate_invalid_header_entries_accumulate_separately_test() ->
- BadHeaders = [?BAD_HEADER("header2")],
- Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
- Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
- [?BAD_HEADER("header1") | Headers]),
- {table, [?FOUND_BAD_HEADER("header1"),
- ?FOUND_BAD_HEADER("header2")]} =
- rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
- passed.
-
-corrupt_or_invalid_headers_are_overwritten_test() ->
- Headers0 = [?BAD_HEADER("header1"),
- ?BAD_HEADER("x-invalid-headers")],
- Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
- {table,[?FOUND_BAD_HEADER("header1"),
- ?FOUND_BAD_HEADER("x-invalid-headers")]} =
- rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
- passed.
-
-invalid_same_header_entry_accumulation_test() ->
- BadHeader1 = ?BAD_HEADER2("header1", "a"),
- Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
- Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
- [?BAD_HEADER2("header1", "b") | Headers]),
- {table, InvalidHeaders} =
- rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
- {array, [{longstr,<<"bad header1b">>},
- {longstr,<<"bad header1a">>}]} =
- rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
- passed.
-
-prepend_check(HeaderKey, HeaderTable, Headers) ->
- Headers1 = rabbit_basic:prepend_table_header(
- HeaderKey, HeaderTable, Headers),
- {table, Invalid} =
- rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
- {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
- {array, [{Type, Value} | _]} =
- rabbit_misc:table_lookup(Invalid, HeaderKey),
- Headers1.
-
-test_priority_queue() ->
-
- false = priority_queue:is_queue(not_a_queue),
-
- %% empty Q
- Q = priority_queue:new(),
- {true, true, 0, [], []} = test_priority_queue(Q),
-
- %% 1-4 element no-priority Q
- true = lists:all(fun (X) -> X =:= passed end,
- lists:map(fun test_simple_n_element_queue/1,
- lists:seq(1, 4))),
-
- %% 1-element priority Q
- Q1 = priority_queue:in(foo, 1, priority_queue:new()),
- {true, false, 1, [{1, foo}], [foo]} =
- test_priority_queue(Q1),
-
- %% 2-element same-priority Q
- Q2 = priority_queue:in(bar, 1, Q1),
- {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
- test_priority_queue(Q2),
-
- %% 2-element different-priority Q
- Q3 = priority_queue:in(bar, 2, Q1),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q3),
-
- %% 1-element negative priority Q
- Q4 = priority_queue:in(foo, -1, priority_queue:new()),
- {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
-
- %% merge 2 * 1-element no-priority Qs
- Q5 = priority_queue:join(priority_queue:in(foo, Q),
- priority_queue:in(bar, Q)),
- {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q5),
-
- %% merge 1-element no-priority Q with 1-element priority Q
- Q6 = priority_queue:join(priority_queue:in(foo, Q),
- priority_queue:in(bar, 1, Q)),
- {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
- test_priority_queue(Q6),
-
- %% merge 1-element priority Q with 1-element no-priority Q
- Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, Q)),
- {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q7),
-
- %% merge 2 * 1-element same-priority Qs
- Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, 1, Q)),
- {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
- test_priority_queue(Q8),
-
- %% merge 2 * 1-element different-priority Qs
- Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, 2, Q)),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q9),
-
- %% merge 2 * 1-element different-priority Qs (other way around)
- Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
- priority_queue:in(foo, 1, Q)),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q10),
-
- %% merge 2 * 2-element multi-different-priority Qs
- Q11 = priority_queue:join(Q6, Q5),
- {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
- [bar, foo, foo, bar]} = test_priority_queue(Q11),
-
- %% and the other way around
- Q12 = priority_queue:join(Q5, Q6),
- {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
- [bar, foo, bar, foo]} = test_priority_queue(Q12),
-
- %% merge with negative priorities
- Q13 = priority_queue:join(Q4, Q5),
- {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
- test_priority_queue(Q13),
-
- %% and the other way around
- Q14 = priority_queue:join(Q5, Q4),
- {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
- test_priority_queue(Q14),
-
- %% joins with empty queues:
- Q1 = priority_queue:join(Q, Q1),
- Q1 = priority_queue:join(Q1, Q),
-
- %% insert with priority into non-empty zero-priority queue
- Q15 = priority_queue:in(baz, 1, Q5),
- {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
- test_priority_queue(Q15),
-
- %% 1-element infinity priority Q
- Q16 = priority_queue:in(foo, infinity, Q),
- {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
-
- %% add infinity to 0-priority Q
- Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
- {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q17),
-
- %% and the other way around
- Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
- {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q18),
-
- %% add infinity to mixed-priority Q
- Q19 = priority_queue:in(qux, infinity, Q3),
- {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
- test_priority_queue(Q19),
-
- %% merge the above with a negative priority Q
- Q20 = priority_queue:join(Q19, Q4),
- {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
- [qux, bar, foo, foo]} = test_priority_queue(Q20),
-
- %% merge two infinity priority queues
- Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
- priority_queue:in(bar, infinity, Q)),
- {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
- test_priority_queue(Q21),
-
- %% merge two mixed priority with infinity queues
- Q22 = priority_queue:join(Q18, Q20),
- {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
- {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
- test_priority_queue(Q22),
-
- passed.
-
-priority_queue_in_all(Q, L) ->
- lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
-
-priority_queue_out_all(Q) ->
- case priority_queue:out(Q) of
- {empty, _} -> [];
- {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
- end.
-
-test_priority_queue(Q) ->
- {priority_queue:is_queue(Q),
- priority_queue:is_empty(Q),
- priority_queue:len(Q),
- priority_queue:to_list(Q),
- priority_queue_out_all(Q)}.
-
-test_simple_n_element_queue(N) ->
- Items = lists:seq(1, N),
- Q = priority_queue_in_all(priority_queue:new(), Items),
- ToListRes = [{0, X} || X <- Items],
- {true, false, N, ToListRes, Items} = test_priority_queue(Q),
- passed.
-
-test_pg_local() ->
- [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
- check_pg_local(ok, [], []),
- check_pg_local(pg_local:join(a, P), [P], []),
- check_pg_local(pg_local:join(b, P), [P], [P]),
- check_pg_local(pg_local:join(a, P), [P, P], [P]),
- check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
- check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
- check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
- check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
- check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
- check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
- check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
- [begin X ! done,
- Ref = erlang:monitor(process, X),
- receive {'DOWN', Ref, process, X, _Info} -> ok end
- end || X <- [P, Q]],
- check_pg_local(ok, [], []),
- passed.
-
-check_pg_local(ok, APids, BPids) ->
- ok = pg_local:sync(),
- [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
- {Key, Pids} <- [{a, APids}, {b, BPids}]].
-
-test_unfold() ->
- {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
- List = lists:seq(2,20,2),
- {List, 0} = rabbit_misc:unfold(fun (0) -> false;
- (N) -> {true, N*2, N-1}
- end, 10),
- passed.
-
-test_table_codec() ->
- %% FIXME this does not test inexact numbers (double and float) yet,
- %% because they won't pass the equality assertions
- Table = [{<<"longstr">>, longstr, <<"Here is a long string">>},
- {<<"signedint">>, signedint, 12345},
- {<<"decimal">>, decimal, {3, 123456}},
- {<<"timestamp">>, timestamp, 109876543209876},
- {<<"table">>, table, [{<<"one">>, signedint, 54321},
- {<<"two">>, longstr,
- <<"A long string">>}]},
- {<<"byte">>, byte, 255},
- {<<"long">>, long, 1234567890},
- {<<"short">>, short, 655},
- {<<"bool">>, bool, true},
- {<<"binary">>, binary, <<"a binary string">>},
- {<<"void">>, void, undefined},
- {<<"array">>, array, [{signedint, 54321},
- {longstr, <<"A long string">>}]}
- ],
- Binary = <<
- 7,"longstr", "S", 21:32, "Here is a long string",
- 9,"signedint", "I", 12345:32/signed,
- 7,"decimal", "D", 3, 123456:32,
- 9,"timestamp", "T", 109876543209876:64,
- 5,"table", "F", 31:32, % length of table
- 3,"one", "I", 54321:32,
- 3,"two", "S", 13:32, "A long string",
- 4,"byte", "b", 255:8,
- 4,"long", "l", 1234567890:64,
- 5,"short", "s", 655:16,
- 4,"bool", "t", 1,
- 6,"binary", "x", 15:32, "a binary string",
- 4,"void", "V",
- 5,"array", "A", 23:32,
- "I", 54321:32,
- "S", 13:32, "A long string"
- >>,
- Binary = rabbit_binary_generator:generate_table(Table),
- Table = rabbit_binary_parser:parse_table(Binary),
- passed.
-
-%% Test that content frames don't exceed frame-max
-test_content_framing(FrameMax, BodyBin) ->
- [Header | Frames] =
- rabbit_binary_generator:build_simple_content_frames(
- 1,
- rabbit_binary_generator:ensure_content_encoded(
- rabbit_basic:build_content(#'P_basic'{}, BodyBin),
- rabbit_framing_amqp_0_9_1),
- FrameMax,
- rabbit_framing_amqp_0_9_1),
- %% header is formatted correctly and the size is the total of the
- %% fragments
- <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
- BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
- BodySize = size(BodyBin),
- true = lists:all(
- fun (ContentFrame) ->
- FrameBinary = list_to_binary(ContentFrame),
- %% assert
- <<_TypeAndChannel:3/binary,
- Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
- FrameBinary,
- size(FrameBinary) =< FrameMax
- end, Frames),
- passed.
-
-test_content_framing() ->
- %% no content
- passed = test_content_framing(4096, <<>>),
- %% easily fit in one frame
- passed = test_content_framing(4096, <<"Easy">>),
- %% exactly one frame (empty frame = 8 bytes)
- passed = test_content_framing(11, <<"One">>),
- %% more than one frame
- passed = test_content_framing(11, <<"More than one frame">>),
- passed.
-
-test_content_transcoding() ->
- %% there are no guarantees provided by 'clear' - it's just a hint
- ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
- ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
- EnsureDecoded =
- fun (C0) ->
- C1 = rabbit_binary_parser:ensure_content_decoded(C0),
- true = C1#content.properties =/= none,
- C1
- end,
- EnsureEncoded =
- fun (Protocol) ->
- fun (C0) ->
- C1 = rabbit_binary_generator:ensure_content_encoded(
- C0, Protocol),
- true = C1#content.properties_bin =/= none,
- C1
- end
- end,
- %% Beyond the assertions in Ensure*, the only testable guarantee
- %% is that the operations should never fail.
- %%
- %% If we were using quickcheck we'd simply stuff all the above
- %% into a generator for sequences of operations. In the absence of
- %% quickcheck we pick particularly interesting sequences that:
- %%
- %% - execute every op twice since they are idempotent
- %% - invoke clear_decoded, clear_encoded, decode and transcode
- %% with one or both of decoded and encoded content present
- [begin
- sequence_with_content([Op]),
- sequence_with_content([ClearEncoded, Op]),
- sequence_with_content([ClearDecoded, Op])
- end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
- EnsureEncoded(rabbit_framing_amqp_0_9_1),
- EnsureEncoded(rabbit_framing_amqp_0_8)]],
- passed.
-
-sequence_with_content(Sequence) ->
- lists:foldl(fun (F, V) -> F(F(V)) end,
- rabbit_binary_generator:ensure_content_encoded(
- rabbit_basic:build_content(#'P_basic'{}, <<>>),
- rabbit_framing_amqp_0_9_1),
- Sequence).
-
-test_topic_matching() ->
- XName = #resource{virtual_host = <<"/">>,
- kind = exchange,
- name = <<"test_exchange">>},
- X0 = #exchange{name = XName, type = topic, durable = false,
- auto_delete = false, arguments = []},
- X = rabbit_exchange_decorator:set(X0),
- %% create
- rabbit_exchange_type_topic:validate(X),
- exchange_op_callback(X, create, []),
-
- %% add some bindings
- Bindings = [#binding{source = XName,
- key = list_to_binary(Key),
- destination = #resource{virtual_host = <<"/">>,
- kind = queue,
- name = list_to_binary(Q)}} ||
- {Key, Q} <- [{"a.b.c", "t1"},
- {"a.*.c", "t2"},
- {"a.#.b", "t3"},
- {"a.b.b.c", "t4"},
- {"#", "t5"},
- {"#.#", "t6"},
- {"#.b", "t7"},
- {"*.*", "t8"},
- {"a.*", "t9"},
- {"*.b.c", "t10"},
- {"a.#", "t11"},
- {"a.#.#", "t12"},
- {"b.b.c", "t13"},
- {"a.b.b", "t14"},
- {"a.b", "t15"},
- {"b.c", "t16"},
- {"", "t17"},
- {"*.*.*", "t18"},
- {"vodka.martini", "t19"},
- {"a.b.c", "t20"},
- {"*.#", "t21"},
- {"#.*.#", "t22"},
- {"*.#.#", "t23"},
- {"#.#.#", "t24"},
- {"*", "t25"},
- {"#.b.#", "t26"}]],
- lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
- Bindings),
-
- %% test some matches
- test_topic_expect_match(
- X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
- "t18", "t20", "t21", "t22", "t23", "t24",
- "t26"]},
- {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
- "t12", "t15", "t21", "t22", "t23", "t24",
- "t26"]},
- {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
- "t18", "t21", "t22", "t23", "t24", "t26"]},
- {"", ["t5", "t6", "t17", "t24"]},
- {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23",
- "t24", "t26"]},
- {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22",
- "t23", "t24"]},
- {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23",
- "t24"]},
- {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
- "t24"]},
- {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21",
- "t22", "t23", "t24", "t26"]},
- {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
- {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24",
- "t25"]}]),
-
- %% remove some bindings
- RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
- lists:nth(11, Bindings), lists:nth(19, Bindings),
- lists:nth(21, Bindings)],
- exchange_op_callback(X, remove_bindings, [RemovedBindings]),
- RemainingBindings = ordsets:to_list(
- ordsets:subtract(ordsets:from_list(Bindings),
- ordsets:from_list(RemovedBindings))),
-
- %% test some matches
- test_topic_expect_match(
- X,
- [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
- "t23", "t24", "t26"]},
- {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
- "t22", "t23", "t24", "t26"]},
- {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
- "t23", "t24", "t26"]},
- {"", ["t6", "t17", "t24"]},
- {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]},
- {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]},
- {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]},
- {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]},
- {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23",
- "t24", "t26"]},
- {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
- {"oneword", ["t6", "t22", "t23", "t24", "t25"]}]),
-
- %% remove the entire exchange
- exchange_op_callback(X, delete, [RemainingBindings]),
- %% none should match now
- test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
- passed.
-
-exchange_op_callback(X, Fun, Args) ->
- rabbit_misc:execute_mnesia_transaction(
- fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
- rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
-
-test_topic_expect_match(X, List) ->
- lists:foreach(
- fun ({Key, Expected}) ->
- BinKey = list_to_binary(Key),
- Message = rabbit_basic:message(X#exchange.name, BinKey,
- #'P_basic'{}, <<>>),
- Res = rabbit_exchange_type_topic:route(
- X, #delivery{mandatory = false,
- sender = self(),
- message = Message}),
- ExpectedRes = lists:map(
- fun (Q) -> #resource{virtual_host = <<"/">>,
- kind = queue,
- name = list_to_binary(Q)}
- end, Expected),
- true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
- end, List).
-
-test_app_management() ->
- control_action(wait, [rabbit_mnesia:dir() ++ ".pid"]),
- %% Starting, stopping and diagnostics. Note that we don't try
- %% 'report' when the rabbit app is stopped and that we enable
- %% tracing for the duration of this function.
- ok = control_action(trace_on, []),
- ok = control_action(stop_app, []),
- ok = control_action(stop_app, []),
- ok = control_action(status, []),
- ok = control_action(cluster_status, []),
- ok = control_action(environment, []),
- ok = control_action(start_app, []),
- ok = control_action(start_app, []),
- ok = control_action(status, []),
- ok = control_action(report, []),
- ok = control_action(cluster_status, []),
- ok = control_action(environment, []),
- ok = control_action(trace_off, []),
- passed.
-
-test_log_management() ->
- MainLog = rabbit:log_location(kernel),
- SaslLog = rabbit:log_location(sasl),
- Suffix = ".1",
-
- %% prepare basic logs
- file:delete([MainLog, Suffix]),
- file:delete([SaslLog, Suffix]),
-
- %% simple logs reopening
- ok = control_action(rotate_logs, []),
- [true, true] = empty_files([MainLog, SaslLog]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% simple log rotation
- ok = control_action(rotate_logs, [Suffix]),
- [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
- [true, true] = empty_files([MainLog, SaslLog]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% reopening logs with log rotation performed first
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = control_action(rotate_logs, []),
- ok = file:rename(MainLog, [MainLog, Suffix]),
- ok = file:rename(SaslLog, [SaslLog, Suffix]),
- ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]),
- ok = control_action(rotate_logs, []),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% log rotation on empty files (the main log will have a ctl action logged)
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = control_action(rotate_logs, []),
- ok = control_action(rotate_logs, [Suffix]),
- [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
-
- %% logs with suffix are not writable
- ok = control_action(rotate_logs, [Suffix]),
- ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]),
- ok = control_action(rotate_logs, [Suffix]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% rotate when original log files are not writable
- ok = make_files_non_writable([MainLog, SaslLog]),
- ok = control_action(rotate_logs, []),
-
- %% logging directed to tty (first, remove handlers)
- ok = delete_log_handlers([rabbit_sasl_report_file_h,
- rabbit_error_logger_file_h]),
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = application:set_env(rabbit, sasl_error_logger, tty),
- ok = application:set_env(rabbit, error_logger, tty),
- ok = control_action(rotate_logs, []),
- [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
- %% rotate logs when logging is turned off
- ok = application:set_env(rabbit, sasl_error_logger, false),
- ok = application:set_env(rabbit, error_logger, silent),
- ok = control_action(rotate_logs, []),
- [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
- %% cleanup
- ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
- ok = application:set_env(rabbit, error_logger, {file, MainLog}),
- ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
- {rabbit_sasl_report_file_h, SaslLog}]),
- passed.
-
-test_log_management_during_startup() ->
- MainLog = rabbit:log_location(kernel),
- SaslLog = rabbit:log_location(sasl),
-
- %% start application with simple tty logging
- ok = control_action(stop_app, []),
- ok = application:set_env(rabbit, error_logger, tty),
- ok = application:set_env(rabbit, sasl_error_logger, tty),
- ok = add_log_handlers([{error_logger_tty_h, []},
- {sasl_report_tty_h, []}]),
- ok = control_action(start_app, []),
-
- %% start application with tty logging and
- %% proper handlers not installed
- ok = control_action(stop_app, []),
- ok = error_logger:tty(false),
- ok = delete_log_handlers([sasl_report_tty_h]),
- ok = case catch control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotation_tty_no_handlers_test});
- {badrpc, {'EXIT', {rabbit,failure_during_boot,
- {error,{cannot_log_to_tty,
- _, not_installed}}}}} -> ok
- end,
-
- %% fix sasl logging
- ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
-
- %% start application with logging to non-existing directory
- TmpLog = "/tmp/rabbit-tests/test.log",
- delete_file(TmpLog),
- ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
-
- ok = delete_log_handlers([rabbit_error_logger_file_h]),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = control_action(start_app, []),
-
- %% start application with logging to directory with no
- %% write permissions
- TmpDir = "/tmp/rabbit-tests",
- ok = set_permissions(TmpDir, 8#00400),
- ok = delete_log_handlers([rabbit_error_logger_file_h]),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = case control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotation_no_write_permission_dir_test});
- {badrpc, {'EXIT',
- {rabbit, failure_during_boot,
- {error, {cannot_log_to_file, _, _}}}}} -> ok
- end,
-
- %% start application with logging to a subdirectory which
- %% parent directory has no write permissions
- TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
- ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = case control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotatation_parent_dirs_test});
- {badrpc,
- {'EXIT', {rabbit,failure_during_boot,
- {error, {cannot_log_to_file, _,
- {error,
- {cannot_create_parent_dirs, _, eacces}}}}}}} -> ok
- end,
- ok = set_permissions(TmpDir, 8#00700),
- ok = set_permissions(TmpLog, 8#00600),
- ok = delete_file(TmpLog),
- ok = file:del_dir(TmpDir),
-
- %% start application with standard error_logger_file_h
- %% handler not installed
- ok = application:set_env(rabbit, error_logger, {file, MainLog}),
- ok = control_action(start_app, []),
- ok = control_action(stop_app, []),
-
- %% start application with standard sasl handler not installed
- %% and rabbit main log handler installed correctly
- ok = delete_log_handlers([rabbit_sasl_report_file_h]),
- ok = control_action(start_app, []),
- passed.
-
-test_arguments_parser() ->
- GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
- Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
-
- GetOptions =
- fun (Args) ->
- rabbit_misc:parse_arguments(Commands1, GlobalOpts1, Args)
- end,
-
- check_parse_arguments(no_command, GetOptions, []),
- check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
- GetOptions, ["command1"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
- GetOptions, ["command1", "-o1", "blah"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
- GetOptions, ["command1", "-f1"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
- GetOptions, ["-o1", "blah", "command1"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
- GetOptions, ["-o1", "blah", "command1", "quux"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
- GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
- %% For duplicate flags, the last one counts
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
- GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
- %% If the flag "eats" the command, the command won't be recognised
- check_parse_arguments(no_command, GetOptions,
- ["-o1", "command1", "quux"]),
- %% If a flag eats another flag, the eaten flag won't be recognised
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
- GetOptions, ["command1", "-o1", "-f1"]),
-
- %% Now for some command-specific flags...
- check_parse_arguments(
- {ok, {command2, [{"-f1", false}, {"-f2", false},
- {"-o1", "foo"}, {"-o2", "bar"}], []}},
- GetOptions, ["command2"]),
-
- check_parse_arguments(
- {ok, {command2, [{"-f1", false}, {"-f2", true},
- {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
- GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
-
- passed.
-
%% Unit test of the HA mirror node-selection logic
%% (M:suggested_queue_nodes/5 for the <<"all">>, <<"nodes">> and
%% <<"exactly">> policies).  Multi-node behaviour is covered by the
%% integration tests; this only exercises the pure selection function.
test_dynamic_mirroring() ->
    %% Just unit tests of the node selection logic, see multi node
    %% tests for the rest...
    %% Test({ExpMaster, ExpSlaves, ExtraSlaveCount}, Policy, Params,
    %%      {CurMaster, CurSlaves, SyncedSlaves}, AllNodes)
    Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
                {MNode, SNodes, SSNodes}, All) ->
                   {ok, M} = rabbit_mirror_queue_misc:module(Policy),
                   {NewM, NewSs0} = M:suggested_queue_nodes(
                                      Params, MNode, SNodes, SSNodes, All),
                   NewSs1 = lists:sort(NewSs0),
                   %% dm_list_match/3 tolerates exactly ExtraSs
                   %% unspecified extra slaves (used by "exactly" mode,
                   %% where we don't care which nodes get picked).
                   case dm_list_match(NewSs, NewSs1, ExtraSs) of
                       ok    -> ok;
                       error -> exit({no_match, NewSs, NewSs1, ExtraSs})
                   end
           end,

    Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]),
    Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
    Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]),

    %% "nodes" policy params are binaries, not atoms
    N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,

    %% Add a node
    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
    Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
    %% Add two nodes and drop one
    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
    %% Don't try to include nodes that are not running
    Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
    %% If we can't find any of the nodes listed then just keep the master
    Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
    %% And once that's happened, still keep the master even when not listed,
    %% if nothing is synced
    Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]),
    Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]),
    %% But if something is synced we can lose the master - but make
    %% sure we pick the new master from the nodes which are synced!
    Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]),
    Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]),

    Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]),
    Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]),
    Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]),
    Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]),
    Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
    Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),

    passed.
-
%% Check that Expected matches Actual, where Actual must contain
%% exactly Extra additional (arbitrary) elements, interleaved at any
%% position.  Returns ok on a match, error otherwise.
dm_list_match(Expected, Actual, Extra) ->
    case {Expected, Actual} of
        {[], []} when Extra =:= 0 -> ok;
        {_,  []}                  -> error;
        {[H | TExp], [H | TAct]}  -> dm_list_match(TExp, TAct, Extra);
        {_,  [_ | TAct]}          -> dm_list_match(Expected, TAct, Extra - 1)
    end.
-
%% Exercises rabbitmqctl-style user/vhost/permission management via
%% control_action/2,3: error cases against missing users/vhosts first,
%% then the full create/modify/list/delete lifecycle.
test_user_management() ->

    %% lots of stuff that should fail
    {error, {no_such_user, _}} =
        control_action(delete_user, ["foo"]),
    {error, {no_such_user, _}} =
        control_action(change_password, ["foo", "baz"]),
    {error, {no_such_vhost, _}} =
        control_action(delete_vhost, ["/testhost"]),
    {error, {no_such_user, _}} =
        control_action(set_permissions, ["foo", ".*", ".*", ".*"]),
    {error, {no_such_user, _}} =
        control_action(clear_permissions, ["foo"]),
    {error, {no_such_user, _}} =
        control_action(list_user_permissions, ["foo"]),
    {error, {no_such_vhost, _}} =
        control_action(list_permissions, [], [{"-p", "/testhost"}]),
    {error, {invalid_regexp, _, _}} =
        control_action(set_permissions, ["guest", "+foo", ".*", ".*"]),
    {error, {no_such_user, _}} =
        control_action(set_user_tags, ["foo", "bar"]),

    %% user creation
    ok = control_action(add_user, ["foo", "bar"]),
    {error, {user_already_exists, _}} =
        control_action(add_user, ["foo", "bar"]),
    ok = control_action(clear_password, ["foo"]),
    ok = control_action(change_password, ["foo", "baz"]),

    %% tags set via ctl must be visible through the internal auth backend
    TestTags = fun (Tags) ->
                       Args = ["foo" | [atom_to_list(T) || T <- Tags]],
                       ok = control_action(set_user_tags, Args),
                       {ok, #internal_user{tags = Tags}} =
                           rabbit_auth_backend_internal:lookup_user(<<"foo">>),
                       ok = control_action(list_users, [])
               end,
    TestTags([foo, bar, baz]),
    TestTags([administrator]),
    TestTags([]),

    %% vhost creation
    ok = control_action(add_vhost, ["/testhost"]),
    {error, {vhost_already_exists, _}} =
        control_action(add_vhost, ["/testhost"]),
    ok = control_action(list_vhosts, []),

    %% user/vhost mapping (setting permissions is idempotent)
    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
                        [{"-p", "/testhost"}]),
    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
                        [{"-p", "/testhost"}]),
    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
                        [{"-p", "/testhost"}]),
    ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
    ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
    ok = control_action(list_user_permissions, ["foo"]),

    %% user/vhost unmapping (also idempotent)
    ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
    ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),

    %% vhost deletion
    ok = control_action(delete_vhost, ["/testhost"]),
    {error, {no_such_vhost, _}} =
        control_action(delete_vhost, ["/testhost"]),

    %% deleting a populated vhost
    ok = control_action(add_vhost, ["/testhost"]),
    ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
                        [{"-p", "/testhost"}]),
    ok = control_action(delete_vhost, ["/testhost"]),

    %% user deletion
    ok = control_action(delete_user, ["foo"]),
    {error, {no_such_user, _}} =
        control_action(delete_user, ["foo"]),

    passed.
-
%% Set/list/clear runtime parameters through rabbitmqctl, using the
%% "test" component registered by rabbit_runtime_parameters_test:
%% valid JSON values, invalid JSON, and the component validation hook.
test_runtime_parameters() ->
    rabbit_runtime_parameters_test:register(),
    Good = fun(L) -> ok = control_action(set_parameter, L) end,
    Bad = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,

    %% Acceptable for bijection
    Good(["test", "good", "\"ignore\""]),
    Good(["test", "good", "123"]),
    Good(["test", "good", "true"]),
    Good(["test", "good", "false"]),
    Good(["test", "good", "null"]),
    Good(["test", "good", "{\"key\": \"value\"}"]),

    %% Invalid json
    Bad(["test", "good", "atom"]),
    Bad(["test", "good", "{\"foo\": \"bar\""]),
    Bad(["test", "good", "{foo: \"bar\"}"]),

    %% Test actual validation hook
    Good(["test", "maybe", "\"good\""]),
    Bad(["test", "maybe", "\"bad\""]),

    ok = control_action(list_parameters, []),

    ok = control_action(clear_parameter, ["test", "good"]),
    ok = control_action(clear_parameter, ["test", "maybe"]),
    {error_string, _} =
        control_action(clear_parameter, ["test", "neverexisted"]),

    %% We can delete for a component that no longer exists
    Good(["test", "good", "\"ignore\""]),
    rabbit_runtime_parameters_test:unregister(),
    ok = control_action(clear_parameter, ["test", "good"]),
    passed.
-
%% Policy bodies go through the registered policy validator: per the
%% test validator, "testeven" requires an even-length list and
%% "testpos" a list of positive integers.
test_policy_validation() ->
    rabbit_runtime_parameters_test:register_policy_validator(),
    SetPol =
        fun (Key, Val) ->
                control_action(
                  set_policy,
                  ["name", ".*", rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
        end,

    ok = SetPol("testeven", []),
    ok = SetPol("testeven", [1, 2]),
    ok = SetPol("testeven", [1, 2, 3, 4]),
    ok = SetPol("testpos", [2, 5, 5678]),

    {error_string, _} = SetPol("testpos", [-1, 0, 1]),
    {error_string, _} = SetPol("testeven", [ 1, 2, 3]),

    ok = control_action(clear_policy, ["name"]),
    rabbit_runtime_parameters_test:unregister_policy_validator(),
    passed.
-
%% Validation of the ha-* policy keys: ha-params must match ha-mode
%% ("nodes" needs a list of node names, "exactly" a count), and
%% ha-params / ha-sync-mode are rejected without an ha-mode.
test_ha_policy_validation() ->
    Set  = fun (JSON) -> control_action(set_policy, ["name", ".*", JSON]) end,
    OK   = fun (JSON) -> ok = Set(JSON) end,
    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,

    OK ("{\"ha-mode\":\"all\"}"),
    Fail("{\"ha-mode\":\"made_up\"}"),

    Fail("{\"ha-mode\":\"nodes\"}"),
    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
    OK ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}"),
    Fail("{\"ha-params\":[\"a\",\"b\"]}"),

    Fail("{\"ha-mode\":\"exactly\"}"),
    Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
    OK ("{\"ha-mode\":\"exactly\",\"ha-params\":2}"),
    Fail("{\"ha-params\":2}"),

    OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}"),
    OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}"),
    Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
    Fail("{\"ha-sync-mode\":\"manual\"}"),
    Fail("{\"ha-sync-mode\":\"automatic\"}"),

    ok = control_action(clear_policy, ["name"]),
    passed.
-
%% Smoke-test the rabbitmqctl listing/status commands against a live
%% broker: queues, exchanges, bindings, connections, channels,
%% consumers, the VM memory high watermark, and eval.
test_server_status() ->
    %% create a few things so there is some useful information to list
    {_Writer, Limiter, Ch} = test_channel(),
    [Q, Q2] = [Queue || Name <- [<<"foo">>, <<"bar">>],
                        {new, Queue = #amqqueue{}} <-
                            [rabbit_amqqueue:declare(
                               rabbit_misc:r(<<"/">>, queue, Name),
                               false, false, [], none)]],
    ok = rabbit_amqqueue:basic_consume(
           Q, true, Ch, Limiter, false, <<"ctag">>, true, none, undefined),

    %% list queues
    ok = info_action(list_queues, rabbit_amqqueue:info_keys(), true),

    %% list exchanges
    ok = info_action(list_exchanges, rabbit_exchange:info_keys(), true),

    %% list bindings
    ok = info_action(list_bindings, rabbit_binding:info_keys(), true),
    %% misc binding listing APIs
    [_|_] = rabbit_binding:list_for_source(
              rabbit_misc:r(<<"/">>, exchange, <<"">>)),
    [_] = rabbit_binding:list_for_destination(
            rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
    [_] = rabbit_binding:list_for_source_and_destination(
            rabbit_misc:r(<<"/">>, exchange, <<"">>),
            rabbit_misc:r(<<"/">>, queue, <<"foo">>)),

    %% list connections - open a raw TCP connection and send the AMQP
    %% protocol header so a connection process exists to list
    {H, P} = find_listener(),
    {ok, C} = gen_tcp:connect(H, P, []),
    gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
    timer:sleep(100),
    ok = info_action(list_connections,
                     rabbit_networking:connection_info_keys(), false),
    %% close_connection
    [ConnPid] = rabbit_networking:connections(),
    ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid),
                                           "go away"]),

    %% list channels
    ok = info_action(list_channels, rabbit_channel:info_keys(), false),

    %% list consumers
    ok = control_action(list_consumers, []),

    %% set vm memory high watermark
    HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
    ok = control_action(set_vm_memory_high_watermark, ["1"]),
    ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
    %% this will trigger an alarm
    ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
    %% reset
    ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),

    %% eval: two malformed expressions, then a valid one
    {error_string, _} = control_action(eval, ["\""]),
    {error_string, _} = control_action(eval, ["a("]),
    ok = control_action(eval, ["a."]),

    %% cleanup
    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],

    unlink(Ch),
    ok = rabbit_channel:shutdown(Ch),

    passed.
-
%% A client sending an unsupported protocol header must be sent the
%% server's supported version header (AMQP 0-9-1) before being
%% disconnected.  Tries a bad version, an unknown version and garbage.
test_amqp_connection_refusal() ->
    [passed = test_amqp_connection_refusal(V) ||
        V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
    passed.

%% Connect to the local listener, send Header, and expect the 8-byte
%% <<"AMQP",0,0,9,1>> version response within 100ms.
test_amqp_connection_refusal(Header) ->
    {H, P} = find_listener(),
    {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
    ok = gen_tcp:send(C, Header),
    {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
    ok = gen_tcp:close(C),
    passed.
-
%% Return {Host, Port} of the first active listener registered for
%% this node; crashes (badmatch on []) if none is running.
find_listener() ->
    [#listener{host = H, port = P} | _] =
        [L || L = #listener{node = N} <- rabbit_networking:active_listeners(),
              N =:= node()],
    {H, P}.
-
%% Minimal stand-in for a channel writer process: acknowledges
%% 'flush' gen_server calls, forwards any sent command method to Pid,
%% and terminates on 'shutdown'.
test_writer(Pid) ->
    receive
        {'$gen_call', From, flush} -> gen_server:reply(From, ok),
                                      test_writer(Pid);
        {send_command, Method}     -> Pid ! Method,
                                      test_writer(Pid);
        shutdown                   -> ok
    end.
-
%% Start a fake writer (test_writer/1), a limiter, and a real channel
%% process wired to them as user "guest" on vhost "/".
%% Returns {WriterPid, LimiterPid, ChannelPid}.
test_channel() ->
    Me = self(),
    Writer = spawn(fun () -> test_writer(Me) end),
    {ok, Limiter} = rabbit_limiter:start_link(),
    {ok, Ch} = rabbit_channel:start_link(
                 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1,
                 user(<<"guest">>), <<"/">>, [], Me, Limiter),
    {Writer, Limiter, Ch}.
-
%% Open a channel on the local node and wait for channel.open_ok;
%% returns {Writer, Ch}.  Throws on timeout.
test_spawn() ->
    {Writer, _Limiter, Ch} = test_channel(),
    ok = rabbit_channel:do(Ch, #'channel.open'{}),
    receive #'channel.open_ok'{} -> ok
    after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
    end,
    {Writer, Ch}.

%% Same, but on a remote node, via test_spawn_remote/0 (see below for
%% why a plain rpc'd test_spawn/0 would not work).
test_spawn(Node) ->
    rpc:call(Node, ?MODULE, test_spawn_remote, []).
-
%% Spawn an arbitrary long lived process, so we don't end up linking
%% the channel to the short-lived process (RPC, here) spun up by the
%% RPC server.
test_spawn_remote() ->
    RPC = self(),
    spawn(fun () ->
                  {Writer, Ch} = test_spawn(),
                  RPC ! {Writer, Ch},
                  link(Ch),
                  %% keep the link-holding process alive until anything
                  %% at all is sent to it
                  receive
                      _ -> ok
                  end
          end),
    receive Res -> Res
    after ?TIMEOUT -> throw(failed_to_receive_result)
    end.
-
%% Build a #user{} record for Username with administrator tags,
%% backed by the internal auth backend.
user(Username) ->
    #user{username = Username,
          tags = [administrator],
          auth_backend = rabbit_auth_backend_internal,
          impl = #internal_user{username = Username,
                                tags = [administrator]}}.
-
%% Publisher-confirms behaviour: crashing a queue that holds an
%% unconfirmed persistent message must produce a basic.nack (and no
%% stray basic.ack) on the publishing channel.
test_confirms() ->
    {_Writer, Ch} = test_spawn(),
    DeclareBindDurableQueue =
        fun() ->
                rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
                receive #'queue.declare_ok'{queue = Q0} ->
                        rabbit_channel:do(Ch, #'queue.bind'{
                                                 queue = Q0,
                                                 exchange = <<"amq.direct">>,
                                                 routing_key = "magic" }),
                        receive #'queue.bind_ok'{} -> Q0
                        after ?TIMEOUT -> throw(failed_to_bind_queue)
                        end
                after ?TIMEOUT -> throw(failed_to_declare_queue)
                end
        end,
    %% Declare and bind two queues
    QName1 = DeclareBindDurableQueue(),
    QName2 = DeclareBindDurableQueue(),
    %% Get the first one's pid (we'll crash it later)
    {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
    QPid1 = Q1#amqqueue.pid,
    %% Enable confirms
    rabbit_channel:do(Ch, #'confirm.select'{}),
    receive
        #'confirm.select_ok'{} -> ok
    after ?TIMEOUT -> throw(failed_to_enable_confirms)
    end,
    %% Publish a message (delivery_mode = 2, i.e. persistent)
    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
                                           routing_key = "magic"
                                          },
                      rabbit_basic:build_content(
                        #'P_basic'{delivery_mode = 2}, <<"">>)),
    %% We must not kill the queue before the channel has processed the
    %% 'publish'.
    ok = rabbit_channel:flush(Ch),
    %% Crash the queue
    QPid1 ! boom,
    %% Wait for a nack
    receive
        #'basic.nack'{} -> ok;
        #'basic.ack'{}  -> throw(received_ack_instead_of_nack)
    after ?TIMEOUT-> throw(did_not_receive_nack)
    end,
    %% ... and make sure no ack sneaks in afterwards
    receive
        #'basic.ack'{} -> throw(received_ack_when_none_expected)
    after 1000 -> ok
    end,
    %% Cleanup
    rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
    receive
        #'queue.delete_ok'{} -> ok
    after ?TIMEOUT -> throw(failed_to_cleanup_queue)
    end,
    unlink(Ch),
    ok = rabbit_channel:shutdown(Ch),

    passed.
-
%% gen_server2:with_state/2 must let us peek at a server's state; the
%% file_handle_cache state record's tag is fhc_state.
test_with_state() ->
    fhc_state = gen_server2:with_state(file_handle_cache,
                                       fun (S) -> element(1, S) end),
    passed.
-
%% Relay every message this process receives to Pid, forever.
test_statistics_event_receiver(Pid) ->
    receive
        Foo -> Pid ! Foo, test_statistics_event_receiver(Pid)
    end.
-
%% Flush the channel, ask it to emit stats, and return the props of
%% the first channel_stats event satisfying Matcher.
test_statistics_receive_event(Ch, Matcher) ->
    rabbit_channel:flush(Ch),
    Ch ! emit_stats,
    test_statistics_receive_event1(Ch, Matcher).
-
%% Wait for a channel_stats event whose props satisfy Matcher,
%% discarding non-matching ones; throws after ?TIMEOUT.
test_statistics_receive_event1(Ch, Matcher) ->
    receive #event{type = channel_stats, props = Props} ->
            case Matcher(Props) of
                true -> Props;
                _    -> test_statistics_receive_event1(Ch, Matcher)
            end
    after ?TIMEOUT -> throw(failed_to_receive_event)
    end.
-
%% Fine-grained channel statistics: per-queue, per-exchange and
%% queue-exchange stats must appear after publish/get and be pruned
%% when the queue is deleted.
test_statistics() ->
    application:set_env(rabbit, collect_statistics, fine),

    %% ATM this just tests the queue / exchange stats in channels. That's
    %% by far the most complex code though.

    %% Set up a channel and queue
    {_Writer, Ch} = test_spawn(),
    rabbit_channel:do(Ch, #'queue.declare'{}),
    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
            end,
    QRes = rabbit_misc:r(<<"/">>, queue, QName),
    X = rabbit_misc:r(<<"/">>, exchange, <<"">>),

    rabbit_tests_event_receiver:start(self(), [node()], [channel_stats]),

    %% Check stats empty
    Event = test_statistics_receive_event(Ch, fun (_) -> true end),
    [] = proplists:get_value(channel_queue_stats, Event),
    [] = proplists:get_value(channel_exchange_stats, Event),
    [] = proplists:get_value(channel_queue_exchange_stats, Event),

    %% Publish and get a message
    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
                                           routing_key = QName},
                      rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
    rabbit_channel:do(Ch, #'basic.get'{queue = QName}),

    %% Check the stats reflect that
    Event2 = test_statistics_receive_event(
               Ch,
               fun (E) ->
                       length(proplists:get_value(
                                channel_queue_exchange_stats, E)) > 0
               end),
    [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats, Event2),
    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
    [{{QRes,X},[{publish,1}]}] =
        proplists:get_value(channel_queue_exchange_stats, Event2),

    %% Check the stats remove stuff on queue deletion
    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
    Event3 = test_statistics_receive_event(
               Ch,
               fun (E) ->
                       length(proplists:get_value(
                                channel_queue_exchange_stats, E)) == 0
               end),

    [] = proplists:get_value(channel_queue_stats, Event3),
    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
    [] = proplists:get_value(channel_queue_exchange_stats, Event3),

    rabbit_channel:shutdown(Ch),
    rabbit_tests_event_receiver:stop(),
    passed.
-
%% rabbit:force_event_refresh/0 must re-emit channel_created and
%% queue_created events for already-existing channels (local and
%% remote) and queues; expect_events/3 checks both emissions.
test_refresh_events(SecondaryNode) ->
    rabbit_tests_event_receiver:start(self(), [node(), SecondaryNode],
                                      [channel_created, queue_created]),

    {_Writer, Ch} = test_spawn(),
    expect_events(pid, Ch, channel_created),
    rabbit_channel:shutdown(Ch),

    {_Writer2, Ch2} = test_spawn(SecondaryNode),
    expect_events(pid, Ch2, channel_created),
    rabbit_channel:shutdown(Ch2),

    {new, #amqqueue{name = QName} = Q} =
        rabbit_amqqueue:declare(test_queue(), false, false, [], none),
    expect_events(name, QName, queue_created),
    rabbit_amqqueue:delete(Q, false, false),

    rabbit_tests_event_receiver:stop(),
    passed.
-
%% Expect the event once, then force a refresh and expect it to be
%% emitted again.
expect_events(Tag, Key, Type) ->
    expect_event(Tag, Key, Type),
    rabbit:force_event_refresh(),
    expect_event(Tag, Key, Type).
-
%% Wait for an event of Type whose Tag property equals Key, skipping
%% other events of the same type; throws after ?TIMEOUT.
expect_event(Tag, Key, Type) ->
    receive #event{type = Type, props = Props} ->
            case pget(Tag, Props) of
                Key -> ok;
                _   -> expect_event(Tag, Key, Type)
            end
    after ?TIMEOUT -> throw({failed_to_receive_event, Type})
    end.
-
%% delegate:invoke_no_result/2 must deliver to single pids and to
%% mixed local/remote pid lists; each responder replies so we can
%% count arrivals with await_response/1.
test_delegates_async(SecondaryNode) ->
    Self = self(),
    Sender = fun (Pid) -> Pid ! {invoked, Self} end,

    Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),

    ok = delegate:invoke_no_result(spawn(Responder), Sender),
    ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
    await_response(2),

    LocalPids = spawn_responders(node(), Responder, 10),
    RemotePids = spawn_responders(SecondaryNode, Responder, 10),
    ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
    await_response(20),

    passed.
-
%% Build a fun (for spawning) that handles exactly one message with
%% FMsg, throwing Throw (default 'timeout') if none arrives within
%% ?TIMEOUT.
make_responder(FMsg) -> make_responder(FMsg, timeout).
make_responder(FMsg, Throw) ->
    fun () ->
            receive Msg -> FMsg(Msg)
            after ?TIMEOUT -> throw(Throw)
            end
    end.
-
%% Spawn Count instances of the Responder fun on Node, returning
%% their pids (in spawn order).
spawn_responders(Node, Responder, Count) ->
    lists:map(fun (_I) -> spawn(Node, Responder) end, lists:seq(1, Count)).
-
%% Wait for Count 'response' messages; throws 'timeout' if any fails
%% to arrive within ?TIMEOUT.
%% Fix: dropped the dead `ok,` expression that preceded the recursive
%% call (its value was discarded and served no purpose).
await_response(0) ->
    ok;
await_response(Count) ->
    receive
        response -> await_response(Count - 1)
    after ?TIMEOUT -> throw(timeout)
    end.
-
%% Assert that Fun exits: returns ok when it does, otherwise throws
%% exit_not_thrown.  Errors and throws raised by Fun itself propagate
%% unchanged (only exits are caught).
must_exit(Fun) ->
    try Fun() of
        _Result -> throw(exit_not_thrown)
    catch
        exit:_ -> ok
    end.
-
%% delegate:invoke/2 with synchronous senders: single/multiple pids,
%% local and remote, well-behaved and crashing senders, plus pids on
%% a nonexistent node which must come back as nodedown errors.
test_delegates_sync(SecondaryNode) ->
    Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
    BadSender = fun (_Pid) -> exit(exception) end,

    Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
                                       gen_server:reply(From, response)
                               end),

    BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
                                          gen_server:reply(From, response)
                                  end, bad_responder_died),

    %% single-pid successes, local and remote
    response = delegate:invoke(spawn(Responder), Sender),
    response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),

    %% a crashing sender must make delegate:invoke/2 exit
    must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
    must_exit(fun () ->
                      delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),

    LocalGoodPids = spawn_responders(node(), Responder, 2),
    RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
    LocalBadPids = spawn_responders(node(), BadResponder, 2),
    RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),

    %% multi-pid: all good responders answer, none errors
    {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
    true = lists:all(fun ({_, response}) -> true end, GoodRes),
    GoodResPids = [Pid || {Pid, _} <- GoodRes],

    Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
    Good = lists:usort(GoodResPids),

    %% multi-pid with a crashing sender: every pid lands in the error list
    {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
    true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
    BadResPids = [Pid || {Pid, _} <- BadRes],

    Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
    Bad = lists:usort(BadResPids),

    %% pids on a node that does not exist -> nodedown errors
    MagicalPids = [rabbit_misc:string_to_pid(Str) ||
                      Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
    {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
    true = lists:all(
             fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
             BadNodes),
    BadNodesPids = [Pid || {Pid, _} <- BadNodes],

    Magical = lists:usort(MagicalPids),
    Magical = lists:usort(BadNodesPids),

    passed.
-
%% A non-durable queue must be gone after a broker restart: a passive
%% redeclare afterwards gets channel.close with NOT_FOUND.
test_queue_cleanup(_SecondaryNode) ->
    {_Writer, Ch} = test_spawn(),
    rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
    receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
            ok
    after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
    end,
    rabbit_channel:shutdown(Ch),
    rabbit:stop(),
    rabbit:start(),
    {_Writer2, Ch2} = test_spawn(),
    rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
                                             queue = ?CLEANUP_QUEUE_NAME }),
    receive
        #'channel.close'{reply_code = ?NOT_FOUND} ->
            ok
    after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
    end,
    rabbit_channel:shutdown(Ch2),
    passed.
-
%% Declaring a queue whose process has just been killed: the first
%% declare still finds the stale #amqqueue{} record ('existing' with
%% the dead pid), after which a fresh declare must succeed and yield
%% a live queue process.
test_declare_on_dead_queue(SecondaryNode) ->
    QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
    Self = self(),
    Pid = spawn(SecondaryNode,
                fun () ->
                        {new, #amqqueue{name = QueueName, pid = QPid}} =
                            rabbit_amqqueue:declare(QueueName, false, false, [],
                                                    none),
                        exit(QPid, kill),
                        Self ! {self(), killed, QPid}
                end),
    receive
        {Pid, killed, QPid} ->
            {existing, #amqqueue{name = QueueName,
                                 pid = QPid}} =
                rabbit_amqqueue:declare(QueueName, false, false, [], none),
            false = rabbit_misc:is_process_alive(QPid),
            {new, Q} = rabbit_amqqueue:declare(QueueName, false, false, [],
                                               none),
            true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
            {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
            passed
    after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
    end.
-
-%%---------------------------------------------------------------------
-
%% Run a rabbitmqctl action against the local (or given) node,
%% printing progress to stdout.  Returns ok on success or the error
%% term otherwise.  NOTE(review): this uses old-style `catch`, so
%% crashes surface as {'EXIT', Reason} in the failure branch - the
%% callers' error patterns depend on that shape.
control_action(Command, Args) ->
    control_action(Command, node(), Args, default_options()).

%% As control_action/2 but with extra options merged over the
%% defaults (explicit NewOpts win - see expand_options/2).
control_action(Command, Args, NewOpts) ->
    control_action(Command, node(), Args,
                   expand_options(default_options(), NewOpts)).

control_action(Command, Node, Args, Opts) ->
    case catch rabbit_control_main:action(
                 Command, Node, Args, Opts,
                 fun (Format, Args1) ->
                         io:format(Format ++ " ...~n", Args1)
                 end) of
        ok ->
            io:format("done.~n"),
            ok;
        Other ->
            io:format("failed.~n"),
            Other
    end.
-
%% Run a listing Command with no keys, (optionally) with an explicit
%% vhost, and with every supported info key; a bogus key must be
%% rejected with bad_argument.
info_action(Command, Args, CheckVHost) ->
    ok = control_action(Command, []),
    if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]);
       true       -> ok
    end,
    ok = control_action(Command, lists:map(fun atom_to_list/1, Args)),
    {bad_argument, dummy} = control_action(Command, ["dummy"]),
    ok.
-
%% Default rabbitmqctl options: vhost "/" and non-quiet output.
default_options() -> [{"-p", "/"}, {"-q", "false"}].
-
%% Merge the default proplist As into Bs: each entry of As is
%% prepended only when Bs does not already define that key, so
%% explicit values in Bs always win.
expand_options(As, Bs) ->
    AddDefault = fun ({Key, _} = Default, Acc) ->
                         case proplists:is_defined(Key, Acc) of
                             false -> [Default | Acc];
                             true  -> Acc
                         end
                 end,
    lists:foldl(AddDefault, Bs, As).
-
%% Assert that Fun(As) equals ExpRes, ignoring the ordering of the
%% option key/value list inside an {ok, ...} result.  Returns true
%% (crashes with badmatch on mismatch).
check_parse_arguments(ExpRes, Fun, As) ->
    Normalise = fun (no_command) ->
                        no_command;
                    ({ok, {Command, Opts, Tail}}) ->
                        {ok, {Command, lists:sort(Opts), Tail}}
                end,
    true = Normalise(ExpRes) =:= Normalise(Fun(As)).
-
%% For each path return true when the file exists with size zero,
%% false when it is non-empty, or the {error, Reason} tuple from
%% file:read_file_info/1 when it cannot be inspected.
empty_files(Files) ->
    lists:map(
      fun (File) ->
              case file:read_file_info(File) of
                  {ok, #file_info{size = Size}} -> Size == 0;
                  Error                         -> Error
              end
      end, Files).
-
%% Negation of empty_files/1: true when the file is non-empty, false
%% when empty; {error, Reason} values pass through unchanged.
non_empty_files(Files) ->
    lists:map(fun ({error, Reason}) -> {error, Reason};
                  (IsEmpty)         -> not IsEmpty
              end, empty_files(Files)).
-
%% Log via rabbit_log and error_logger, then check that both log
%% files are non-empty.
test_logs_working(MainLogFile, SaslLogFile) ->
    ok = rabbit_log:error("foo bar"),
    ok = error_logger:error_report(crash_report, [foo, bar]),
    %% give the error loggers some time to catch up
    timer:sleep(100),
    [true, true] = non_empty_files([MainLogFile, SaslLogFile]),
    ok.
-
%% Set the mode bits of Path, preserving its other file_info fields;
%% returns the result of write_file_info/2, or the read error.
set_permissions(Path, Mode) ->
    case file:read_file_info(Path) of
        {ok, FInfo} -> file:write_file_info(
                         Path,
                         FInfo#file_info{mode=Mode});
        Error -> Error
    end.
-
%% Delete each log file and its rotated counterpart (File ++ Suffix,
%% as an iolist path); crashes with badmatch if a deletion fails for
%% any reason other than the file already being absent.
clean_logs(Files, Suffix) ->
    lists:foreach(fun (File) ->
                          ok = delete_file(File),
                          ok = delete_file([File, Suffix])
                  end, Files),
    ok.
-
%% Exit unless this node is a RAM mnesia node.
assert_ram_node() ->
    case rabbit_mnesia:node_type() of
        disc -> exit('not_ram_node');
        ram  -> ok
    end.
-
%% Exit unless this node is a disc mnesia node.
assert_disc_node() ->
    case rabbit_mnesia:node_type() of
        disc -> ok;
        ram  -> exit('not_disc_node')
    end.
-
%% Delete File, treating "already absent" (enoent) as success; any
%% other error tuple is returned to the caller unchanged.
delete_file(File) ->
    case file:delete(File) of
        {error, enoent} -> ok;
        Result          -> Result
    end.
-
%% Set the mode bits of each file to 0 (no permissions at all), so
%% subsequent writes fail; crashes if any write_file_info/2 fails.
make_files_non_writable(Files) ->
    [ok = file:write_file_info(File, #file_info{mode=0}) ||
        File <- Files],
    ok.
-
%% Install each {Handler, Args} as an error_logger report handler;
%% crashes with badmatch if any installation fails.
add_log_handlers(Handlers) ->
    lists:foreach(fun ({Handler, Args}) ->
                          ok = error_logger:add_report_handler(Handler, Args)
                  end, Handlers),
    ok.
-
%% Remove each handler from error_logger; each removal must return []
%% (the handler's terminate result) or we crash with badmatch.
delete_log_handlers(Handlers) ->
    lists:foreach(fun (Handler) ->
                          [] = error_logger:delete_report_handler(Handler)
                  end, Handlers),
    ok.
-
%% Delegates to the test_sup module, which exercises supervisor
%% delayed-restart behaviour.
test_supervisor_delayed_restart() ->
    test_sup:test_supervisor_delayed_restart().
-
%% With the fhc limit squeezed down to 2 handles, a copy blocked on
%% handle acquisition must be cleaned out of the fhc's pending queue
%% when the blocked process dies.
test_file_handle_cache() ->
    %% test copying when there is just one spare handle
    Limit = file_handle_cache:get_limit(),
    ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
    TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
    ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
    [Src1, Dst1, Src2, Dst2] = Files =
        [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
    Content = <<"foo">>,
    %% write Content to Src via prim_file, then copy Src->Dst through
    %% the fhc (this is what needs two handles at once)
    CopyFun = fun (Src, Dst) ->
                      {ok, Hdl} = prim_file:open(Src, [binary, write]),
                      ok = prim_file:write(Hdl, Content),
                      ok = prim_file:sync(Hdl),
                      prim_file:close(Hdl),

                      {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
                      {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
                      Size = size(Content),
                      {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
                      ok = file_handle_cache:delete(SrcHdl),
                      ok = file_handle_cache:delete(DstHdl)
              end,
    %% a process that holds one handle, and once poked attempts a copy
    %% that can never get its two handles under the squeezed limit
    Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
                                        filename:join(TmpDir, "file5"),
                                        [write], []),
                          receive {next, Pid1} -> Pid1 ! {next, self()} end,
                          file_handle_cache:delete(Hdl),
                          %% This will block and never return, so we
                          %% exercise the fhc tidying up the pending
                          %% queue on the death of a process.
                          ok = CopyFun(Src1, Dst1)
                end),
    ok = CopyFun(Src1, Dst1),
    ok = file_handle_cache:set_limit(2),
    Pid ! {next, self()},
    receive {next, Pid} -> ok end,
    timer:sleep(100),
    Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
    timer:sleep(100),
    erlang:monitor(process, Pid),
    erlang:monitor(process, Pid1),
    exit(Pid, kill),
    exit(Pid1, kill),
    receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
    receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
    [file:delete(File) || File <- Files],
    ok = file_handle_cache:set_limit(Limit),
    passed.
-
%% Run the backing-queue sub-suite (msg_store, queue_index, variable
%% queue, recovery) with shrunken store/journal limits so the
%% file-rolling paths are exercised, restoring the original settings
%% and restarting the app afterwards.  No-op when a non-default
%% backing_queue_module is configured.
test_backing_queue() ->
    case application:get_env(rabbit, backing_queue_module) of
        {ok, rabbit_variable_queue} ->
            {ok, FileSizeLimit} =
                application:get_env(rabbit, msg_store_file_size_limit),
            application:set_env(rabbit, msg_store_file_size_limit, 512,
                                infinity),
            {ok, MaxJournal} =
                application:get_env(rabbit, queue_index_max_journal_entries),
            application:set_env(rabbit, queue_index_max_journal_entries, 128,
                                infinity),
            passed = test_msg_store(),
            application:set_env(rabbit, msg_store_file_size_limit,
                                FileSizeLimit, infinity),
            passed = test_queue_index(),
            passed = test_queue_index_props(),
            passed = test_variable_queue(),
            passed = test_variable_queue_delete_msg_store_files_callback(),
            passed = test_queue_recover(),
            application:set_env(rabbit, queue_index_max_journal_entries,
                                MaxJournal, infinity),
            %% We will have restarted the message store, and thus changed
            %% the order of the children of rabbit_sup. This will cause
            %% problems if there are subsequent failures - see bug 24262.
            ok = restart_app(),
            passed;
        _ ->
            passed
    end.
-
%% Stop the message store and restart it with no client refs and a
%% trivial startup fun.
restart_msg_store_empty() ->
    ok = rabbit_variable_queue:stop_msg_store(),
    ok = rabbit_variable_queue:start_msg_store(
           undefined, {fun (ok) -> finished end, ok}).
-
%% Derive a deterministic 16-byte message id from an arbitrary term.
msg_id_bin(X) ->
    Serialised = term_to_binary(X),
    erlang:md5(Serialised).
-
%% Start a message store client for Ref with no confirm callback.
msg_store_client_init(MsgStore, Ref) ->
    rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
-
%% Process that collects on_disk confirms from the message store.
%% Idle state: wait for an {await, MsgIds, Pid} request or 'stop'.
on_disk_capture() ->
    receive
        {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
        stop -> done
    end.

%% Active state.  OnDisk accumulates confirms that were NOT awaited;
%% as soon as it is non-empty we report 'surplus'.  Once Awaiting
%% drains we linger briefly (200ms) to catch late surplus confirms,
%% then report 'arrived' and go idle; with ids still outstanding we
%% give up after ?TIMEOUT and report 'timeout'.
on_disk_capture([_|_], _Awaiting, Pid) ->
    Pid ! {self(), surplus};
on_disk_capture(OnDisk, Awaiting, Pid) ->
    receive
        {on_disk, MsgIdsS} ->
            MsgIds = gb_sets:to_list(MsgIdsS),
            on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
                            Pid);
        stop ->
            done
    after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
            case Awaiting of
                [] -> Pid ! {self(), arrived}, on_disk_capture();
                _  -> Pid ! {self(), timeout}
            end
    end.
-
%% Ask the capture process to wait until MsgIds are confirmed on
%% disk; returns ok, or the failure atom (surplus | timeout).
on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
    Pid ! {await, MsgIds, self()},
    receive
        {Pid, arrived} -> ok;
        {Pid, Error}   -> Error
    end.
-
%% Stop the capture process and wait for it to terminate.
on_disk_stop(Pid) ->
    MRef = erlang:monitor(process, Pid),
    Pid ! stop,
    receive {'DOWN', MRef, process, Pid, _Reason} ->
            ok
    end.
-
%% Like msg_store_client_init/2 but wires the confirm callback to a
%% fresh on_disk_capture process; returns {CapturePid, ClientState}.
msg_store_client_init_capture(MsgStore, Ref) ->
    Pid = spawn(fun on_disk_capture/0),
    {Pid, rabbit_msg_store:client_init(
            MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
                                   Pid ! {on_disk, MsgIds}
                           end, undefined)}.
-
%% Assert that contains/2 returns Atom (true or false) for every id:
%% the fold's guard crashes (function_clause) as soon as any call
%% disagrees, and the final badmatch-style `Atom =` checks the last.
msg_store_contains(Atom, MsgIds, MSCState) ->
    Atom = lists:foldl(
             fun (MsgId, Atom1) when Atom1 =:= Atom ->
                     rabbit_msg_store:contains(MsgId, MSCState) end,
             Atom, MsgIds).
-
%% Read every id, asserting each read returns the id itself as the
%% payload (the tests store MsgId as its own body); returns the final
%% client state.
msg_store_read(MsgIds, MSCState) ->
    lists:foldl(fun (MsgId, MSCStateM) ->
                        {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
                                                     MsgId, MSCStateM),
                        MSCStateN
                end, MSCState, MsgIds).
-
%% Write each id as its own payload; the fold's `ok` accumulator
%% asserts every individual write returned ok.
msg_store_write(MsgIds, MSCState) ->
    ok = lists:foldl(fun (MsgId, ok) ->
                             rabbit_msg_store:write(MsgId, MsgId, MSCState)
                     end, ok, MsgIds).
-
%% Remove MsgIds using an existing client state.
msg_store_remove(MsgIds, MSCState) ->
    rabbit_msg_store:remove(MsgIds, MSCState).

%% Remove MsgIds under a throwaway client for Ref; the client is
%% terminated afterwards by with_msg_store_client/3.
msg_store_remove(MsgStore, Ref, MsgIds) ->
    with_msg_store_client(MsgStore, Ref,
                          fun (MSCStateM) ->
                                  ok = msg_store_remove(MsgIds, MSCStateM),
                                  MSCStateM
                          end).
-
%% Run Fun with a fresh client for Ref, terminating the client state
%% Fun returns.
with_msg_store_client(MsgStore, Ref, Fun) ->
    rabbit_msg_store:client_terminate(
      Fun(msg_store_client_init(MsgStore, Ref))).
-
%% Fold Fun over the ids in L, threading a fresh client state for
%% Ref, then terminate the final state.
foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
    rabbit_msg_store:client_terminate(
      lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
                  msg_store_client_init(MsgStore, Ref), L)).
-
-test_msg_store() ->
- restart_msg_store_empty(),
- MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
- {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
- Ref = rabbit_guid:gen(),
- {Cap, MSCState} = msg_store_client_init_capture(
- ?PERSISTENT_MSG_STORE, Ref),
- Ref2 = rabbit_guid:gen(),
- {Cap2, MSC2State} = msg_store_client_init_capture(
- ?PERSISTENT_MSG_STORE, Ref2),
- %% check we don't contain any of the msgs we're about to publish
- false = msg_store_contains(false, MsgIds, MSCState),
- %% test confirm logic
- passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
- %% check we don't contain any of the msgs we're about to publish
- false = msg_store_contains(false, MsgIds, MSCState),
- %% publish the first half
- ok = msg_store_write(MsgIds1stHalf, MSCState),
- %% sync on the first half
- ok = on_disk_await(Cap, MsgIds1stHalf),
- %% publish the second half
- ok = msg_store_write(MsgIds2ndHalf, MSCState),
- %% check they're all in there
- true = msg_store_contains(true, MsgIds, MSCState),
- %% publish the latter half twice so we hit the caching and ref
- %% count code. We need to do this through a 2nd client since a
- %% single client is not supposed to write the same message more
- %% than once without first removing it.
- ok = msg_store_write(MsgIds2ndHalf, MSC2State),
- %% check they're still all in there
- true = msg_store_contains(true, MsgIds, MSCState),
- %% sync on the 2nd half
- ok = on_disk_await(Cap2, MsgIds2ndHalf),
- %% cleanup
- ok = on_disk_stop(Cap2),
- ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
- ok = on_disk_stop(Cap),
- %% read them all
- MSCState1 = msg_store_read(MsgIds, MSCState),
- %% read them all again - this will hit the cache, not disk
- MSCState2 = msg_store_read(MsgIds, MSCState1),
- %% remove them all
- ok = msg_store_remove(MsgIds, MSCState2),
- %% check first half doesn't exist
- false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
- %% check second half does exist
- true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
- %% read the second half again
- MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
- %% read the second half again, just for fun (aka code coverage)
- MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
- ok = rabbit_msg_store:client_terminate(MSCState4),
- %% stop and restart, preserving every other msg in 2nd half
- ok = rabbit_variable_queue:stop_msg_store(),
- ok = rabbit_variable_queue:start_msg_store(
- [], {fun ([]) -> finished;
- ([MsgId|MsgIdsTail])
- when length(MsgIdsTail) rem 2 == 0 ->
- {MsgId, 1, MsgIdsTail};
- ([MsgId|MsgIdsTail]) ->
- {MsgId, 0, MsgIdsTail}
- end, MsgIds2ndHalf}),
- MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- %% check we have the right msgs left
- lists:foldl(
- fun (MsgId, Bool) ->
- not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
- end, false, MsgIds2ndHalf),
- ok = rabbit_msg_store:client_terminate(MSCState5),
- %% restart empty
- restart_msg_store_empty(),
- MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- %% check we don't contain any of the msgs
- false = msg_store_contains(false, MsgIds, MSCState6),
- %% publish the first half again
- ok = msg_store_write(MsgIds1stHalf, MSCState6),
- %% this should force some sort of sync internally otherwise misread
- ok = rabbit_msg_store:client_terminate(
- msg_store_read(MsgIds1stHalf, MSCState6)),
- MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- ok = msg_store_remove(MsgIds1stHalf, MSCState7),
- ok = rabbit_msg_store:client_terminate(MSCState7),
- %% restart empty
- restart_msg_store_empty(), %% now safe to reuse msg_ids
- %% push a lot of msgs in... at least 100 files worth
- {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
- PayloadSizeBits = 65536,
- BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
- MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
- Payload = << 0:PayloadSizeBits >>,
- ok = with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MSCStateM) ->
- [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
- MsgId <- MsgIdsBig],
- MSCStateM
- end),
- %% now read them to ensure we hit the fast client-side reading
- ok = foreach_with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MsgId, MSCStateM) ->
- {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
- MsgId, MSCStateM),
- MSCStateN
- end, MsgIdsBig),
- %% .., then 3s by 1...
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
- %% .., then remove 3s by 2, from the young end first. This hits
- %% GC (under 50% good data left, but no empty files. Must GC).
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
- %% .., then remove 3s by 3, from the young end first. This hits
- %% GC...
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
- %% ensure empty
- ok = with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MSCStateM) ->
- false = msg_store_contains(false, MsgIdsBig, MSCStateM),
- MSCStateM
- end),
- %%
- passed = test_msg_store_client_delete_and_terminate(),
- %% restart empty
- restart_msg_store_empty(),
- passed.
-
-test_msg_store_confirms(MsgIds, Cap, MSCState) ->
- %% write -> confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% remove -> _
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, []),
- %% write, remove -> confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% write, remove, write -> confirmed, confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds ++ MsgIds),
- %% remove, write -> confirmed
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% remove, write, remove -> confirmed
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% confirmation on timer-based sync
- passed = test_msg_store_confirm_timer(),
- passed.
-
-test_msg_store_confirm_timer() ->
- Ref = rabbit_guid:gen(),
- MsgId = msg_id_bin(1),
- Self = self(),
- MSCState = rabbit_msg_store:client_init(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MsgIds, _ActionTaken) ->
- case gb_sets:is_member(MsgId, MsgIds) of
- true -> Self ! on_disk;
- false -> ok
- end
- end, undefined),
- ok = msg_store_write([MsgId], MSCState),
- ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState),
- ok = msg_store_remove([MsgId], MSCState),
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- passed.
-
-msg_store_keep_busy_until_confirm(MsgIds, MSCState) ->
- receive
- on_disk -> ok
- after 0 ->
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- msg_store_keep_busy_until_confirm(MsgIds, MSCState)
- end.
-
-test_msg_store_client_delete_and_terminate() ->
- restart_msg_store_empty(),
- MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
- Ref = rabbit_guid:gen(),
- MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- ok = msg_store_write(MsgIds, MSCState),
- %% test the 'dying client' fast path for writes
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- passed.
-
-queue_name(Name) ->
- rabbit_misc:r(<<"/">>, queue, Name).
-
-test_queue() ->
- queue_name(<<"test">>).
-
-init_test_queue() ->
- TestQueue = test_queue(),
- Terms = rabbit_queue_index:shutdown_terms(TestQueue),
- PRef = proplists:get_value(persistent_ref, Terms, rabbit_guid:gen()),
- PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
- Res = rabbit_queue_index:recover(
- TestQueue, Terms, false,
- fun (MsgId) ->
- rabbit_msg_store:contains(MsgId, PersistentClient)
- end,
- fun nop/1),
- ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
- Res.
-
-restart_test_queue(Qi) ->
- _ = rabbit_queue_index:terminate([], Qi),
- ok = rabbit_variable_queue:stop(),
- ok = rabbit_variable_queue:start([test_queue()]),
- init_test_queue().
-
-empty_test_queue() ->
- ok = rabbit_variable_queue:stop(),
- ok = rabbit_variable_queue:start([]),
- {0, Qi} = init_test_queue(),
- _ = rabbit_queue_index:delete_and_terminate(Qi),
- ok.
-
-with_empty_test_queue(Fun) ->
- ok = empty_test_queue(),
- {0, Qi} = init_test_queue(),
- rabbit_queue_index:delete_and_terminate(Fun(Qi)).
-
-restart_app() ->
- rabbit:stop(),
- rabbit:start().
-
-queue_index_publish(SeqIds, Persistent, Qi) ->
- Ref = rabbit_guid:gen(),
- MsgStore = case Persistent of
- true -> ?PERSISTENT_MSG_STORE;
- false -> ?TRANSIENT_MSG_STORE
- end,
- MSCState = msg_store_client_init(MsgStore, Ref),
- {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
- lists:foldl(
- fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
- MsgId = rabbit_guid:gen(),
- QiM = rabbit_queue_index:publish(
- MsgId, SeqId, #message_properties{}, Persistent, QiN),
- ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
- {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
- end, {Qi, []}, SeqIds),
- %% do this just to force all of the publishes through to the msg_store:
- true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- {A, B}.
-
-verify_read_with_published(_Delivered, _Persistent, [], _) ->
- ok;
-verify_read_with_published(Delivered, Persistent,
- [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
- [{SeqId, MsgId}|Published]) ->
- verify_read_with_published(Delivered, Persistent, Read, Published);
-verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
- ko.
-
-test_queue_index_props() ->
- with_empty_test_queue(
- fun(Qi0) ->
- MsgId = rabbit_guid:gen(),
- Props = #message_properties{expiry=12345},
- Qi1 = rabbit_queue_index:publish(MsgId, 1, Props, true, Qi0),
- {[{MsgId, 1, Props, _, _}], Qi2} =
- rabbit_queue_index:read(1, 2, Qi1),
- Qi2
- end),
-
- ok = rabbit_variable_queue:stop(),
- ok = rabbit_variable_queue:start([]),
-
- passed.
-
-test_queue_index() ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
- TwoSegs = SegmentSize + SegmentSize,
- MostOfASegment = trunc(SegmentSize*0.75),
- SeqIdsA = lists:seq(0, MostOfASegment-1),
- SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
- SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
- SeqIdsD = lists:seq(0, SegmentSize*4),
-
- with_empty_test_queue(
- fun (Qi0) ->
- {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
- {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
- {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
- {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
- ok = verify_read_with_published(false, false, ReadA,
- lists:reverse(SeqIdsMsgIdsA)),
- %% should get length back as 0, as all the msgs were transient
- {0, Qi6} = restart_test_queue(Qi4),
- {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
- {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
- {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
- {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
- ok = verify_read_with_published(false, true, ReadB,
- lists:reverse(SeqIdsMsgIdsB)),
- %% should get length back as MostOfASegment
- LenB = length(SeqIdsB),
- {LenB, Qi12} = restart_test_queue(Qi10),
- {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
- Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
- {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
- ok = verify_read_with_published(true, true, ReadC,
- lists:reverse(SeqIdsMsgIdsB)),
- Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
- Qi17 = rabbit_queue_index:flush(Qi16),
- %% Everything will have gone now because #pubs == #acks
- {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
- %% should get length back as 0 because all persistent
- %% msgs have been acked
- {0, Qi19} = restart_test_queue(Qi18),
- Qi19
- end),
-
- %% These next bits are just to hit the auto deletion of segment files.
- %% First, partials:
- %% a) partial pub+del+ack, then move to new segment
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
- Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
- Qi4 = rabbit_queue_index:flush(Qi3),
- {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
- false, Qi4),
- Qi5
- end),
-
- %% b) partial pub+del, then move to new segment, then ack all in old segment
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
- {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
- false, Qi2),
- Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
- rabbit_queue_index:flush(Qi4)
- end),
-
- %% c) just fill up several segments of all pubs, then +dels, then +acks
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
- Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
- rabbit_queue_index:flush(Qi3)
- end),
-
- %% d) get messages in all states to a segment, then flush, then do
- %% the same again, don't flush and read. This will hit all
- %% possibilities in combining the segment with the journal.
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
- Qi3 = rabbit_queue_index:ack([0], Qi2),
- Qi4 = rabbit_queue_index:flush(Qi3),
- {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
- Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
- Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
- {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
- {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
- ok = verify_read_with_published(true, false, ReadD,
- [Four, Five, Six]),
- {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
- ok = verify_read_with_published(false, false, ReadE,
- [Seven, Eight]),
- Qi10
- end),
-
- %% e) as for (d), but use terminate instead of read, which will
- %% exercise journal_minus_segment, not segment_plus_journal.
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
- true, Qi0),
- Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
- Qi3 = rabbit_queue_index:ack([0], Qi2),
- {5, Qi4} = restart_test_queue(Qi3),
- {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
- Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
- Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
- {5, Qi8} = restart_test_queue(Qi7),
- Qi8
- end),
-
- ok = rabbit_variable_queue:stop(),
- ok = rabbit_variable_queue:start([]),
-
- passed.
-
-variable_queue_init(Q, Recover) ->
- rabbit_variable_queue:init(
- Q, Recover, fun nop/2, fun nop/2, fun nop/1).
-
-variable_queue_publish(IsPersistent, Count, VQ) ->
- variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
-
-variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
- variable_queue_publish(IsPersistent, 1, Count, PropFun,
- fun (_N) -> <<>> end, VQ).
-
-variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
- lists:foldl(
- fun (N, VQN) ->
- rabbit_variable_queue:publish(
- rabbit_basic:message(
- rabbit_misc:r(<<>>, exchange, <<>>),
- <<>>, #'P_basic'{delivery_mode = case IsPersistent of
- true -> 2;
- false -> 1
- end},
- PayloadFun(N)),
- PropFun(N, #message_properties{}), false, self(), VQN)
- end, VQ, lists:seq(Start, Start + Count - 1)).
-
-variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
- lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
- Rem = Len - N,
- {{#basic_message { is_persistent = IsPersistent },
- IsDelivered, AckTagN}, VQM} =
- rabbit_variable_queue:fetch(true, VQN),
- Rem = rabbit_variable_queue:len(VQM),
- {VQM, [AckTagN | AckTagsAcc]}
- end, {VQ, []}, lists:seq(1, Count)).
-
-assert_prop(List, Prop, Value) ->
- Value = proplists:get_value(Prop, List).
-
-assert_props(List, PropVals) ->
- [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
-
-test_amqqueue(Durable) ->
- (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
- #amqqueue { durable = Durable }.
-
-with_fresh_variable_queue(Fun) ->
- Ref = make_ref(),
- Me = self(),
- %% Run in a separate process since rabbit_msg_store will send
- %% bump_credit messages and we want to ignore them
- spawn_link(fun() ->
- ok = empty_test_queue(),
- VQ = variable_queue_init(test_amqqueue(true), false),
- S0 = rabbit_variable_queue:status(VQ),
- assert_props(S0, [{q1, 0}, {q2, 0},
- {delta,
- {delta, undefined, 0, undefined}},
- {q3, 0}, {q4, 0},
- {len, 0}]),
- _ = rabbit_variable_queue:delete_and_terminate(
- shutdown, Fun(VQ)),
- Me ! Ref
- end),
- receive
- Ref -> ok
- end,
- passed.
-
-publish_and_confirm(Q, Payload, Count) ->
- Seqs = lists:seq(1, Count),
- [begin
- Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
- <<>>, #'P_basic'{delivery_mode = 2},
- Payload),
- Delivery = #delivery{mandatory = false, sender = self(),
- message = Msg, msg_seq_no = Seq},
- {routed, _} = rabbit_amqqueue:deliver([Q], Delivery)
- end || Seq <- Seqs],
- wait_for_confirms(gb_sets:from_list(Seqs)).
-
-wait_for_confirms(Unconfirmed) ->
- case gb_sets:is_empty(Unconfirmed) of
- true -> ok;
- false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
- wait_for_confirms(
- rabbit_misc:gb_sets_difference(
- Unconfirmed, gb_sets:from_list(Confirmed)))
- after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
- end
- end.
-
-test_variable_queue() ->
- [passed = with_fresh_variable_queue(F) ||
- F <- [fun test_variable_queue_dynamic_duration_change/1,
- fun test_variable_queue_partial_segments_delta_thing/1,
- fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1,
- fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1,
- fun test_drop/1,
- fun test_variable_queue_fold_msg_on_disk/1,
- fun test_dropfetchwhile/1,
- fun test_dropwhile_varying_ram_duration/1,
- fun test_fetchwhile_varying_ram_duration/1,
- fun test_variable_queue_ack_limiting/1,
- fun test_variable_queue_purge/1,
- fun test_variable_queue_requeue/1,
- fun test_variable_queue_requeue_ram_beta/1,
- fun test_variable_queue_fold/1]],
- passed.
-
-test_variable_queue_fold(VQ0) ->
- {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
- variable_queue_with_holes(VQ0),
- Count = rabbit_variable_queue:depth(VQ1),
- Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
- lists:foldl(fun (Cut, VQ2) ->
- test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
- end, VQ1, [0, 1, 2, Count div 2,
- Count - 1, Count, Count + 1, Count * 2]).
-
-test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
- {Acc, VQ1} = rabbit_variable_queue:fold(
- fun (M, _, Pending, A) ->
- MInt = msg2int(M),
- Pending = lists:member(MInt, PendingMsgs), %% assert
- case MInt =< Cut of
- true -> {cont, [MInt | A]};
- false -> {stop, A}
- end
- end, [], VQ0),
- Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
- Expected = lists:reverse(Acc), %% assertion
- VQ1.
-
-msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
- binary_to_term(list_to_binary(lists:reverse(P))).
-
-ack_subset(AckSeqs, Interval, Rem) ->
- lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
-
-requeue_one_by_one(Acks, VQ) ->
- lists:foldl(fun (AckTag, VQN) ->
- {_MsgId, VQM} = rabbit_variable_queue:requeue(
- [AckTag], VQN),
- VQM
- end, VQ, Acks).
-
-%% Create a vq with messages in q1, delta, and q3, and holes (in the
-%% form of pending acks) in the latter two.
-variable_queue_with_holes(VQ0) ->
- Interval = 64,
- Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
- Seq = lists:seq(1, Count),
- VQ1 = rabbit_variable_queue:set_ram_duration_target(0, VQ0),
- VQ2 = variable_queue_publish(
- false, 1, Count,
- fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
- {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
- Acks = lists:reverse(AcksR),
- AckSeqs = lists:zip(Acks, Seq),
- [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
- [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
- %% we requeue in three phases in order to exercise requeuing logic
- %% in various vq states
- {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
- Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
- VQ5 = requeue_one_by_one(Subset1, VQ4),
- %% by now we have some messages (and holes) in delt
- VQ6 = requeue_one_by_one(Subset2, VQ5),
- VQ7 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ6),
- %% add the q1 tail
- VQ8 = variable_queue_publish(
- true, Count + 1, 64,
- fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
- %% assertions
- [false = case V of
- {delta, _, 0, _} -> true;
- 0 -> true;
- _ -> false
- end || {K, V} <- rabbit_variable_queue:status(VQ8),
- lists:member(K, [q1, delta, q3])],
- Depth = Count + 64,
- Depth = rabbit_variable_queue:depth(VQ8),
- Len = Depth - length(Subset3),
- Len = rabbit_variable_queue:len(VQ8),
- {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + 64), VQ8}.
-
-test_variable_queue_requeue(VQ0) ->
- {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
- variable_queue_with_holes(VQ0),
- Msgs =
- lists:zip(RequeuedMsgs,
- lists:duplicate(length(RequeuedMsgs), true)) ++
- lists:zip(FreshMsgs,
- lists:duplicate(length(FreshMsgs), false)),
- VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
- {{M, MRequeued, _}, VQb} =
- rabbit_variable_queue:fetch(true, VQa),
- Requeued = MRequeued, %% assertion
- I = msg2int(M), %% assertion
- VQb
- end, VQ1, Msgs),
- {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
- VQ3.
-
-%% requeue from ram_pending_ack into q3, move to delta and then empty queue
-test_variable_queue_requeue_ram_beta(VQ0) ->
- Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
- VQ1 = rabbit_tests:variable_queue_publish(false, Count, VQ0),
- {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
- {Back, Front} = lists:split(Count div 2, AcksR),
- {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
- VQ4 = rabbit_variable_queue:set_ram_duration_target(0, VQ3),
- {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
- VQ6 = requeue_one_by_one(Front, VQ5),
- {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
- {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
- VQ8.
-
-test_variable_queue_purge(VQ0) ->
- LenDepth = fun (VQ) ->
- {rabbit_variable_queue:len(VQ),
- rabbit_variable_queue:depth(VQ)}
- end,
- VQ1 = variable_queue_publish(false, 10, VQ0),
- {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
- {4, VQ3} = rabbit_variable_queue:purge(VQ2),
- {0, 6} = LenDepth(VQ3),
- {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
- {2, 6} = LenDepth(VQ4),
- VQ5 = rabbit_variable_queue:purge_acks(VQ4),
- {2, 2} = LenDepth(VQ5),
- VQ5.
-
-test_variable_queue_ack_limiting(VQ0) ->
- %% start by sending in a bunch of messages
- Len = 1024,
- VQ1 = variable_queue_publish(false, Len, VQ0),
-
- %% squeeze and relax queue
- Churn = Len div 32,
- VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
- %% update stats for duration
- {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
-
- %% fetch half the messages
- {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
-
- VQ5 = check_variable_queue_status(VQ4, [{len , Len div 2},
- {ram_ack_count, Len div 2},
- {ram_msg_count, Len div 2}]),
-
- %% ensure all acks go to disk on 0 duration target
- VQ6 = check_variable_queue_status(
- rabbit_variable_queue:set_ram_duration_target(0, VQ5),
- [{len, Len div 2},
- {target_ram_count, 0},
- {ram_msg_count, 0},
- {ram_ack_count, 0}]),
-
- VQ6.
-
-test_drop(VQ0) ->
- %% start by sending a messages
- VQ1 = variable_queue_publish(false, 1, VQ0),
- %% drop message with AckRequired = true
- {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
- true = rabbit_variable_queue:is_empty(VQ2),
- true = AckTag =/= undefinded,
- %% drop again -> empty
- {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
- %% requeue
- {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
- %% drop message with AckRequired = false
- {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
- true = rabbit_variable_queue:is_empty(VQ5),
- VQ5.
-
-test_dropfetchwhile(VQ0) ->
- Count = 10,
-
- %% add messages with sequential expiry
- VQ1 = variable_queue_publish(
- false, 1, Count,
- fun (N, Props) -> Props#message_properties{expiry = N} end,
- fun erlang:term_to_binary/1, VQ0),
-
- %% fetch the first 5 messages
- {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
- rabbit_variable_queue:fetchwhile(
- fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
- fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
- {[Msg | MsgAcc], [AckTag | AckAcc]}
- end, {[], []}, VQ1),
- true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
-
- %% requeue them
- {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
-
- %% drop the first 5 messages
- {#message_properties{expiry = 6}, VQ4} =
- rabbit_variable_queue:dropwhile(
- fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
-
- %% fetch 5
- VQ5 = lists:foldl(fun (N, VQN) ->
- {{Msg, _, _}, VQM} =
- rabbit_variable_queue:fetch(false, VQN),
- true = msg2int(Msg) == N,
- VQM
- end, VQ4, lists:seq(6, Count)),
-
- %% should be empty now
- true = rabbit_variable_queue:is_empty(VQ5),
-
- VQ5.
-
-test_dropwhile_varying_ram_duration(VQ0) ->
- test_dropfetchwhile_varying_ram_duration(
- fun (VQ1) ->
- {_, VQ2} = rabbit_variable_queue:dropwhile(
- fun (_) -> false end, VQ1),
- VQ2
- end, VQ0).
-
-test_fetchwhile_varying_ram_duration(VQ0) ->
- test_dropfetchwhile_varying_ram_duration(
- fun (VQ1) ->
- {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
- fun (_) -> false end,
- fun (_, _, A) -> A end,
- ok, VQ1),
- VQ2
- end, VQ0).
-
-test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
- VQ1 = variable_queue_publish(false, 1, VQ0),
- VQ2 = rabbit_variable_queue:set_ram_duration_target(0, VQ1),
- VQ3 = Fun(VQ2),
- VQ4 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ3),
- VQ5 = variable_queue_publish(false, 1, VQ4),
- VQ6 = Fun(VQ5),
- VQ6.
-
-test_variable_queue_dynamic_duration_change(VQ0) ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
-
- %% start by sending in a couple of segments worth
- Len = 2*SegmentSize,
- VQ1 = variable_queue_publish(false, Len, VQ0),
- %% squeeze and relax queue
- Churn = Len div 32,
- VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
- {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
- VQ7 = lists:foldl(
- fun (Duration1, VQ4) ->
- {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
- io:format("~p:~n~p~n",
- [Duration1, rabbit_variable_queue:status(VQ5)]),
- VQ6 = rabbit_variable_queue:set_ram_duration_target(
- Duration1, VQ5),
- publish_fetch_and_ack(Churn, Len, VQ6)
- end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
-
- %% drain
- {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
- {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
- {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
-
- VQ10.
-
-publish_fetch_and_ack(0, _Len, VQ0) ->
- VQ0;
-publish_fetch_and_ack(N, Len, VQ0) ->
- VQ1 = variable_queue_publish(false, 1, VQ0),
- {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
- Len = rabbit_variable_queue:len(VQ2),
- {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
- publish_fetch_and_ack(N-1, Len, VQ3).
-
-test_variable_queue_partial_segments_delta_thing(VQ0) ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
- HalfSegment = SegmentSize div 2,
- OneAndAHalfSegment = SegmentSize + HalfSegment,
- VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
- {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
- VQ3 = check_variable_queue_status(
- rabbit_variable_queue:set_ram_duration_target(0, VQ2),
- %% one segment in q3, and half a segment in delta
- [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
- {q3, SegmentSize},
- {len, SegmentSize + HalfSegment}]),
- VQ4 = rabbit_variable_queue:set_ram_duration_target(infinity, VQ3),
- VQ5 = check_variable_queue_status(
- variable_queue_publish(true, 1, VQ4),
- %% one alpha, but it's in the same segment as the deltas
- [{q1, 1},
- {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
- {q3, SegmentSize},
- {len, SegmentSize + HalfSegment + 1}]),
- {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
- SegmentSize + HalfSegment + 1, VQ5),
- VQ7 = check_variable_queue_status(
- VQ6,
- %% the half segment should now be in q3
- [{q1, 1},
- {delta, {delta, undefined, 0, undefined}},
- {q3, HalfSegment},
- {len, HalfSegment + 1}]),
- {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
- HalfSegment + 1, VQ7),
- {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
- %% should be empty now
- {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
- VQ10.
-
-check_variable_queue_status(VQ0, Props) ->
- VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
- S = rabbit_variable_queue:status(VQ1),
- io:format("~p~n", [S]),
- assert_props(S, Props),
- VQ1.
-
-variable_queue_wait_for_shuffling_end(VQ) ->
- case rabbit_variable_queue:needs_timeout(VQ) of
- false -> VQ;
- _ -> variable_queue_wait_for_shuffling_end(
- rabbit_variable_queue:timeout(VQ))
- end.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) ->
- Count = 2 * rabbit_queue_index:next_segment_boundary(0),
- VQ1 = variable_queue_publish(true, Count, VQ0),
- VQ2 = variable_queue_publish(false, Count, VQ1),
- VQ3 = rabbit_variable_queue:set_ram_duration_target(0, VQ2),
- {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
- Count + Count, VQ3),
- {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
- Count, VQ4),
- _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
- VQ7 = variable_queue_init(test_amqqueue(true), true),
- {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
- Count1 = rabbit_variable_queue:len(VQ8),
- VQ9 = variable_queue_publish(false, 1, VQ8),
- VQ10 = rabbit_variable_queue:set_ram_duration_target(0, VQ9),
- {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
- {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
- VQ12.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) ->
- VQ1 = rabbit_variable_queue:set_ram_duration_target(0, VQ0),
- VQ2 = variable_queue_publish(false, 4, VQ1),
- {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
- {_Guids, VQ4} =
- rabbit_variable_queue:requeue(AckTags, VQ3),
- VQ5 = rabbit_variable_queue:timeout(VQ4),
- _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
- VQ7 = variable_queue_init(test_amqqueue(true), true),
- {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
- VQ8.
-
-test_variable_queue_fold_msg_on_disk(VQ0) ->
- VQ1 = variable_queue_publish(true, 1, VQ0),
- {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
- {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
- ok, VQ2, AckTags),
- VQ3.
-
-test_queue_recover() ->
- Count = 2 * rabbit_queue_index:next_segment_boundary(0),
- {new, #amqqueue { pid = QPid, name = QName } = Q} =
- rabbit_amqqueue:declare(test_queue(), true, false, [], none),
- publish_and_confirm(Q, <<>>, Count),
-
- exit(QPid, kill),
- MRef = erlang:monitor(process, QPid),
- receive {'DOWN', MRef, process, QPid, _Info} -> ok
- after 10000 -> exit(timeout_waiting_for_queue_death)
- end,
- rabbit_amqqueue:stop(),
- rabbit_amqqueue:start(rabbit_amqqueue:recover()),
- {ok, Limiter} = rabbit_limiter:start_link(),
- rabbit_amqqueue:with_or_die(
- QName,
- fun (Q1 = #amqqueue { pid = QPid1 }) ->
- CountMinusOne = Count - 1,
- {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
- rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
- exit(QPid1, shutdown),
- VQ1 = variable_queue_init(Q, true),
- {{_Msg1, true, _AckTag1}, VQ2} =
- rabbit_variable_queue:fetch(true, VQ1),
- CountMinusOne = rabbit_variable_queue:len(VQ2),
- _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
- rabbit_amqqueue:internal_delete(QName)
- end),
- passed.
-
-test_variable_queue_delete_msg_store_files_callback() ->
- ok = restart_msg_store_empty(),
- {new, #amqqueue { pid = QPid, name = QName } = Q} =
- rabbit_amqqueue:declare(test_queue(), true, false, [], none),
- Payload = <<0:8388608>>, %% 1MB
- Count = 30,
- publish_and_confirm(Q, Payload, Count),
-
- rabbit_amqqueue:set_ram_duration_target(QPid, 0),
-
- {ok, Limiter} = rabbit_limiter:start_link(),
-
- CountMinusOne = Count - 1,
- {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
- rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
- {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
-
- %% give the queue a second to receive the close_fds callback msg
- timer:sleep(1000),
-
- rabbit_amqqueue:delete(Q, false, false),
- passed.
-
-test_configurable_server_properties() ->
- %% List of the names of the built-in properties do we expect to find
- BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
- <<"copyright">>, <<"information">>],
-
- Protocol = rabbit_framing_amqp_0_9_1,
-
- %% Verify that the built-in properties are initially present
- ActualPropNames = [Key || {Key, longstr, _} <-
- rabbit_reader:server_properties(Protocol)],
- true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
- BuiltInPropNames),
-
- %% Get the initial server properties configured in the environment
- {ok, ServerProperties} = application:get_env(rabbit, server_properties),
-
- %% Helper functions
- ConsProp = fun (X) -> application:set_env(rabbit,
- server_properties,
- [X | ServerProperties]) end,
- IsPropPresent =
- fun (X) ->
- lists:member(X, rabbit_reader:server_properties(Protocol))
- end,
-
- %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
- NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
- ConsProp(NewSimplifiedProperty),
- %% Do we find hare soup, appropriately formatted in the generated properties?
- ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
- longstr,
- list_to_binary(NewHareVal)},
- true = IsPropPresent(ExpectedHareImage),
-
- %% Add a wholly new property of the {BinaryKey, Type, Value} form
- %% and check for it
- NewProperty = {<<"new-bin-key">>, signedint, -1},
- ConsProp(NewProperty),
- %% Do we find the new property?
- true = IsPropPresent(NewProperty),
-
- %% Add a property that clobbers a built-in, and verify correct clobbering
- {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
- {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
- list_to_binary(NewVerVal)},
- ConsProp(NewVersion),
- ClobberedServerProps = rabbit_reader:server_properties(Protocol),
- %% Is the clobbering insert present?
- true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
- %% Is the clobbering insert the only thing with the clobbering key?
- [{BinNewVerKey, longstr, BinNewVerVal}] =
- [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
-
- application:set_env(rabbit, server_properties, ServerProperties),
- passed.
-
-nop(_) -> ok.
-nop(_, _) -> ok.
diff --git a/src/rabbit_tests_event_receiver.erl b/src/rabbit_tests_event_receiver.erl
deleted file mode 100644
index 7b756cbc..00000000
--- a/src/rabbit_tests_event_receiver.erl
+++ /dev/null
@@ -1,58 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_tests_event_receiver).
-
--export([start/3, stop/0]).
-
--export([init/1, handle_call/2, handle_event/2, handle_info/2,
- terminate/2, code_change/3]).
-
--include("rabbit.hrl").
-
-start(Pid, Nodes, Types) ->
- Oks = [ok || _ <- Nodes],
- {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
- [rabbit_event, ?MODULE, [Pid, Types]]).
-
-stop() ->
- gen_event:delete_handler(rabbit_event, ?MODULE, []).
-
-%%----------------------------------------------------------------------------
-
-init([Pid, Types]) ->
- {ok, {Pid, Types}}.
-
-handle_call(_Request, State) ->
- {ok, not_understood, State}.
-
-handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
- case lists:member(Type, Types) of
- true -> Pid ! Event;
- false -> ok
- end,
- {ok, State}.
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Arg, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
diff --git a/src/rabbit_trace.erl b/src/rabbit_trace.erl
deleted file mode 100644
index d0dcaa71..00000000
--- a/src/rabbit_trace.erl
+++ /dev/null
@@ -1,119 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_trace).
-
--export([init/1, enabled/1, tap_in/2, tap_out/2, start/1, stop/1]).
-
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--define(TRACE_VHOSTS, trace_vhosts).
--define(XNAME, <<"amq.rabbitmq.trace">>).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(state() :: rabbit_types:exchange() | 'none').
-
--spec(init/1 :: (rabbit_types:vhost()) -> state()).
--spec(enabled/1 :: (rabbit_types:vhost()) -> boolean()).
--spec(tap_in/2 :: (rabbit_types:basic_message(), state()) -> 'ok').
--spec(tap_out/2 :: (rabbit_amqqueue:qmsg(), state()) -> 'ok').
-
--spec(start/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(stop/1 :: (rabbit_types:vhost()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-init(VHost) ->
- case enabled(VHost) of
- false -> none;
- true -> {ok, X} = rabbit_exchange:lookup(
- rabbit_misc:r(VHost, exchange, ?XNAME)),
- X
- end.
-
-enabled(VHost) ->
- {ok, VHosts} = application:get_env(rabbit, ?TRACE_VHOSTS),
- lists:member(VHost, VHosts).
-
-tap_in(_Msg, none) -> ok;
-tap_in(Msg = #basic_message{exchange_name = #resource{name = XName}}, TraceX) ->
- trace(TraceX, Msg, <<"publish">>, XName, []).
-
-tap_out(_Msg, none) -> ok;
-tap_out({#resource{name = QName}, _QPid, _QMsgId, Redelivered, Msg}, TraceX) ->
- RedeliveredNum = case Redelivered of true -> 1; false -> 0 end,
- trace(TraceX, Msg, <<"deliver">>, QName,
- [{<<"redelivered">>, signedint, RedeliveredNum}]).
-
-%%----------------------------------------------------------------------------
-
-start(VHost) ->
- rabbit_log:info("Enabling tracing for vhost '~s'~n", [VHost]),
- update_config(fun (VHosts) -> [VHost | VHosts -- [VHost]] end).
-
-stop(VHost) ->
- rabbit_log:info("Disabling tracing for vhost '~s'~n", [VHost]),
- update_config(fun (VHosts) -> VHosts -- [VHost] end).
-
-update_config(Fun) ->
- {ok, VHosts0} = application:get_env(rabbit, ?TRACE_VHOSTS),
- VHosts = Fun(VHosts0),
- application:set_env(rabbit, ?TRACE_VHOSTS, VHosts),
- rabbit_channel:refresh_config_local(),
- ok.
-
-%%----------------------------------------------------------------------------
-
-trace(#exchange{name = Name}, #basic_message{exchange_name = Name},
- _RKPrefix, _RKSuffix, _Extra) ->
- ok;
-trace(X, Msg = #basic_message{content = #content{payload_fragments_rev = PFR}},
- RKPrefix, RKSuffix, Extra) ->
- {ok, _, _} = rabbit_basic:publish(
- X, <<RKPrefix/binary, ".", RKSuffix/binary>>,
- #'P_basic'{headers = msg_to_table(Msg) ++ Extra}, PFR),
- ok.
-
-msg_to_table(#basic_message{exchange_name = #resource{name = XName},
- routing_keys = RoutingKeys,
- content = Content}) ->
- #content{properties = Props} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- {PropsTable, _Ix} =
- lists:foldl(fun (K, {L, Ix}) ->
- V = element(Ix, Props),
- NewL = case V of
- undefined -> L;
- _ -> [{a2b(K), type(V), V} | L]
- end,
- {NewL, Ix + 1}
- end, {[], 2}, record_info(fields, 'P_basic')),
- [{<<"exchange_name">>, longstr, XName},
- {<<"routing_keys">>, array, [{longstr, K} || K <- RoutingKeys]},
- {<<"properties">>, table, PropsTable},
- {<<"node">>, longstr, a2b(node())}].
-
-a2b(A) -> list_to_binary(atom_to_list(A)).
-
-type(V) when is_list(V) -> table;
-type(V) when is_integer(V) -> signedint;
-type(_V) -> longstr.
diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl
deleted file mode 100644
index a36613db..00000000
--- a/src/rabbit_types.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_types).
-
--include("rabbit.hrl").
-
--ifdef(use_specs).
-
--export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0,
- message/0, msg_id/0, basic_message/0,
- delivery/0, content/0, decoded_content/0, undecoded_content/0,
- unencoded_content/0, encoded_content/0, message_properties/0,
- vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0,
- binding/0, binding_source/0, binding_destination/0,
- amqqueue/0, exchange/0,
- connection/0, protocol/0, user/0, internal_user/0,
- username/0, password/0, password_hash/0,
- ok/1, error/1, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
- channel_exit/0, connection_exit/0, mfargs/0]).
-
--type(maybe(T) :: T | 'none').
--type(vhost() :: binary()).
--type(ctag() :: binary()).
-
-%% TODO: make this more precise by tying specific class_ids to
-%% specific properties
--type(undecoded_content() ::
- #content{class_id :: rabbit_framing:amqp_class_id(),
- properties :: 'none',
- properties_bin :: binary(),
- payload_fragments_rev :: [binary()]} |
- #content{class_id :: rabbit_framing:amqp_class_id(),
- properties :: rabbit_framing:amqp_property_record(),
- properties_bin :: 'none',
- payload_fragments_rev :: [binary()]}).
--type(unencoded_content() :: undecoded_content()).
--type(decoded_content() ::
- #content{class_id :: rabbit_framing:amqp_class_id(),
- properties :: rabbit_framing:amqp_property_record(),
- properties_bin :: maybe(binary()),
- payload_fragments_rev :: [binary()]}).
--type(encoded_content() ::
- #content{class_id :: rabbit_framing:amqp_class_id(),
- properties :: maybe(rabbit_framing:amqp_property_record()),
- properties_bin :: binary(),
- payload_fragments_rev :: [binary()]}).
--type(content() :: undecoded_content() | decoded_content()).
--type(msg_id() :: rabbit_guid:guid()).
--type(basic_message() ::
- #basic_message{exchange_name :: rabbit_exchange:name(),
- routing_keys :: [rabbit_router:routing_key()],
- content :: content(),
- id :: msg_id(),
- is_persistent :: boolean()}).
--type(message() :: basic_message()).
--type(delivery() ::
- #delivery{mandatory :: boolean(),
- sender :: pid(),
- message :: message()}).
--type(message_properties() ::
- #message_properties{expiry :: pos_integer() | 'undefined',
- needs_confirming :: boolean()}).
-
--type(info_key() :: atom()).
--type(info_keys() :: [info_key()]).
-
--type(info() :: {info_key(), any()}).
--type(infos() :: [info()]).
-
--type(amqp_error() ::
- #amqp_error{name :: rabbit_framing:amqp_exception(),
- explanation :: string(),
- method :: rabbit_framing:amqp_method_name()}).
-
--type(r(Kind) ::
- r2(vhost(), Kind)).
--type(r2(VirtualHost, Kind) ::
- r3(VirtualHost, Kind, rabbit_misc:resource_name())).
--type(r3(VirtualHost, Kind, Name) ::
- #resource{virtual_host :: VirtualHost,
- kind :: Kind,
- name :: Name}).
-
--type(listener() ::
- #listener{node :: node(),
- protocol :: atom(),
- host :: rabbit_networking:hostname(),
- port :: rabbit_networking:ip_port()}).
-
--type(binding_source() :: rabbit_exchange:name()).
--type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()).
-
--type(binding() ::
- #binding{source :: rabbit_exchange:name(),
- destination :: binding_destination(),
- key :: rabbit_binding:key(),
- args :: rabbit_framing:amqp_table()}).
-
--type(amqqueue() ::
- #amqqueue{name :: rabbit_amqqueue:name(),
- durable :: boolean(),
- auto_delete :: boolean(),
- exclusive_owner :: rabbit_types:maybe(pid()),
- arguments :: rabbit_framing:amqp_table(),
- pid :: rabbit_types:maybe(pid()),
- slave_pids :: [pid()]}).
-
--type(exchange() ::
- #exchange{name :: rabbit_exchange:name(),
- type :: rabbit_exchange:type(),
- durable :: boolean(),
- auto_delete :: boolean(),
- arguments :: rabbit_framing:amqp_table()}).
-
--type(connection() :: pid()).
-
--type(protocol() :: rabbit_framing:protocol()).
-
--type(user() ::
- #user{username :: username(),
- tags :: [atom()],
- auth_backend :: atom(),
- impl :: any()}).
-
--type(internal_user() ::
- #internal_user{username :: username(),
- password_hash :: password_hash(),
- tags :: [atom()]}).
-
--type(username() :: binary()).
--type(password() :: binary()).
--type(password_hash() :: binary()).
-
--type(ok(A) :: {'ok', A}).
--type(error(A) :: {'error', A}).
--type(ok_or_error(A) :: 'ok' | error(A)).
--type(ok_or_error2(A, B) :: ok(A) | error(B)).
--type(ok_pid_or_error() :: ok_or_error2(pid(), any())).
-
--type(channel_exit() :: no_return()).
--type(connection_exit() :: no_return()).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--endif. % use_specs
diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl
deleted file mode 100644
index 1047b823..00000000
--- a/src/rabbit_upgrade.erl
+++ /dev/null
@@ -1,281 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_upgrade).
-
--export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0]).
-
--include("rabbit.hrl").
-
--define(VERSION_FILENAME, "schema_version").
--define(LOCK_FILENAME, "schema_upgrade_lock").
-
-%% -------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(maybe_upgrade_mnesia/0 :: () -> 'ok').
--spec(maybe_upgrade_local/0 :: () -> 'ok' |
- 'version_not_available' |
- 'starting_from_scratch').
-
--endif.
-
-%% -------------------------------------------------------------------
-
-%% The upgrade logic is quite involved, due to the existence of
-%% clusters.
-%%
-%% Firstly, we have two different types of upgrades to do: Mnesia and
-%% everythinq else. Mnesia upgrades must only be done by one node in
-%% the cluster (we treat a non-clustered node as a single-node
-%% cluster). This is the primary upgrader. The other upgrades need to
-%% be done by all nodes.
-%%
-%% The primary upgrader has to start first (and do its Mnesia
-%% upgrades). Secondary upgraders need to reset their Mnesia database
-%% and then rejoin the cluster. They can't do the Mnesia upgrades as
-%% well and then merge databases since the cookie for each table will
-%% end up different and the merge will fail.
-%%
-%% This in turn means that we need to determine whether we are the
-%% primary or secondary upgrader *before* Mnesia comes up. If we
-%% didn't then the secondary upgrader would try to start Mnesia, and
-%% either hang waiting for a node which is not yet up, or fail since
-%% its schema differs from the other nodes in the cluster.
-%%
-%% Also, the primary upgrader needs to start Mnesia to do its
-%% upgrades, but needs to forcibly load tables rather than wait for
-%% them (in case it was not the last node to shut down, in which case
-%% it would wait forever).
-%%
-%% This in turn means that maybe_upgrade_mnesia/0 has to be patched
-%% into the boot process by prelaunch before the mnesia application is
-%% started. By the time Mnesia is started the upgrades have happened
-%% (on the primary), or Mnesia has been reset (on the secondary) and
-%% rabbit_mnesia:init_db_unchecked/2 can then make the node rejoin the cluster
-%% in the normal way.
-%%
-%% The non-mnesia upgrades are then triggered by
-%% rabbit_mnesia:init_db_unchecked/2. Of course, it's possible for a given
-%% upgrade process to only require Mnesia upgrades, or only require
-%% non-Mnesia upgrades. In the latter case no Mnesia resets and
-%% reclusterings occur.
-%%
-%% The primary upgrader needs to be a disc node. Ideally we would like
-%% it to be the last disc node to shut down (since otherwise there's a
-%% risk of data loss). On each node we therefore record the disc nodes
-%% that were still running when we shut down. A disc node that knows
-%% other nodes were up when it shut down, or a ram node, will refuse
-%% to be the primary upgrader, and will thus not start when upgrades
-%% are needed.
-%%
-%% However, this is racy if several nodes are shut down at once. Since
-%% rabbit records the running nodes, and shuts down before mnesia, the
-%% race manifests as all disc nodes thinking they are not the primary
-%% upgrader. Therefore the user can remove the record of the last disc
-%% node to shut down to get things going again. This may lose any
-%% mnesia changes that happened after the node chosen as the primary
-%% upgrader was shut down.
-
-%% -------------------------------------------------------------------
-
-ensure_backup_taken() ->
- case filelib:is_file(lock_filename()) of
- false -> case filelib:is_dir(backup_dir()) of
- false -> ok = take_backup();
- _ -> ok
- end;
- true -> throw({error, previous_upgrade_failed})
- end.
-
-take_backup() ->
- BackupDir = backup_dir(),
- case rabbit_mnesia:copy_db(BackupDir) of
- ok -> info("upgrades: Mnesia dir backed up to ~p~n",
- [BackupDir]);
- {error, E} -> throw({could_not_back_up_mnesia_dir, E})
- end.
-
-ensure_backup_removed() ->
- case filelib:is_dir(backup_dir()) of
- true -> ok = remove_backup();
- _ -> ok
- end.
-
-remove_backup() ->
- ok = rabbit_file:recursive_delete([backup_dir()]),
- info("upgrades: Mnesia backup removed~n", []).
-
-maybe_upgrade_mnesia() ->
- AllNodes = rabbit_mnesia:cluster_nodes(all),
- case rabbit_version:upgrades_required(mnesia) of
- {error, starting_from_scratch} ->
- ok;
- {error, version_not_available} ->
- case AllNodes of
- [] -> die("Cluster upgrade needed but upgrading from "
- "< 2.1.1.~nUnfortunately you will need to "
- "rebuild the cluster.", []);
- _ -> ok
- end;
- {error, _} = Err ->
- throw(Err);
- {ok, []} ->
- ok;
- {ok, Upgrades} ->
- ensure_backup_taken(),
- ok = case upgrade_mode(AllNodes) of
- primary -> primary_upgrade(Upgrades, AllNodes);
- secondary -> secondary_upgrade(AllNodes)
- end
- end.
-
-upgrade_mode(AllNodes) ->
- case nodes_running(AllNodes) of
- [] ->
- AfterUs = rabbit_mnesia:cluster_nodes(running) -- [node()],
- case {node_type_legacy(), AfterUs} of
- {disc, []} ->
- primary;
- {disc, _} ->
- Filename = rabbit_node_monitor:running_nodes_filename(),
- die("Cluster upgrade needed but other disc nodes shut "
- "down after this one.~nPlease first start the last "
- "disc node to shut down.~n~nNote: if several disc "
- "nodes were shut down simultaneously they may "
- "all~nshow this message. In which case, remove "
- "the lock file on one of them and~nstart that node. "
- "The lock file on this node is:~n~n ~s ", [Filename]);
- {ram, _} ->
- die("Cluster upgrade needed but this is a ram node.~n"
- "Please first start the last disc node to shut down.",
- [])
- end;
- [Another|_] ->
- MyVersion = rabbit_version:desired_for_scope(mnesia),
- ErrFun = fun (ClusterVersion) ->
- %% The other node(s) are running an
- %% unexpected version.
- die("Cluster upgrade needed but other nodes are "
- "running ~p~nand I want ~p",
- [ClusterVersion, MyVersion])
- end,
- case rpc:call(Another, rabbit_version, desired_for_scope,
- [mnesia]) of
- {badrpc, {'EXIT', {undef, _}}} -> ErrFun(unknown_old_version);
- {badrpc, Reason} -> ErrFun({unknown, Reason});
- CV -> case rabbit_version:matches(
- MyVersion, CV) of
- true -> secondary;
- false -> ErrFun(CV)
- end
- end
- end.
-
-die(Msg, Args) ->
- %% We don't throw or exit here since that gets thrown
- %% straight out into do_boot, generating an erl_crash.dump
- %% and displaying any error message in a confusing way.
- error_logger:error_msg(Msg, Args),
- io:format("~n~n****~n~n" ++ Msg ++ "~n~n****~n~n~n", Args),
- error_logger:logfile(close),
- halt(1).
-
-primary_upgrade(Upgrades, Nodes) ->
- Others = Nodes -- [node()],
- ok = apply_upgrades(
- mnesia,
- Upgrades,
- fun () ->
- rabbit_table:force_load(),
- case Others of
- [] -> ok;
- _ -> info("mnesia upgrades: Breaking cluster~n", []),
- [{atomic, ok} = mnesia:del_table_copy(schema, Node)
- || Node <- Others]
- end
- end),
- ok.
-
-secondary_upgrade(AllNodes) ->
- %% must do this before we wipe out schema
- NodeType = node_type_legacy(),
- rabbit_misc:ensure_ok(mnesia:delete_schema([node()]),
- cannot_delete_schema),
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
- ok = rabbit_mnesia:init_db_unchecked(AllNodes, NodeType),
- ok = rabbit_version:record_desired_for_scope(mnesia),
- ok.
-
-nodes_running(Nodes) ->
- [N || N <- Nodes, rabbit:is_running(N)].
-
-%% -------------------------------------------------------------------
-
-maybe_upgrade_local() ->
- case rabbit_version:upgrades_required(local) of
- {error, version_not_available} -> version_not_available;
- {error, starting_from_scratch} -> starting_from_scratch;
- {error, _} = Err -> throw(Err);
- {ok, []} -> ensure_backup_removed(),
- ok;
- {ok, Upgrades} -> mnesia:stop(),
- ensure_backup_taken(),
- ok = apply_upgrades(local, Upgrades,
- fun () -> ok end),
- ensure_backup_removed(),
- ok
- end.
-
-%% -------------------------------------------------------------------
-
-apply_upgrades(Scope, Upgrades, Fun) ->
- ok = rabbit_file:lock_file(lock_filename()),
- info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
- rabbit_misc:ensure_ok(mnesia:start(), cannot_start_mnesia),
- Fun(),
- [apply_upgrade(Scope, Upgrade) || Upgrade <- Upgrades],
- info("~s upgrades: All upgrades applied successfully~n", [Scope]),
- ok = rabbit_version:record_desired_for_scope(Scope),
- ok = file:delete(lock_filename()).
-
-apply_upgrade(Scope, {M, F}) ->
- info("~s upgrades: Applying ~w:~w~n", [Scope, M, F]),
- ok = apply(M, F, []).
-
-%% -------------------------------------------------------------------
-
-dir() -> rabbit_mnesia:dir().
-
-lock_filename() -> lock_filename(dir()).
-lock_filename(Dir) -> filename:join(Dir, ?LOCK_FILENAME).
-backup_dir() -> dir() ++ "-upgrade-backup".
-
-node_type_legacy() ->
- %% This is pretty ugly but we can't start Mnesia and ask it (will
- %% hang), we can't look at the config file (may not include us
- %% even if we're a disc node). We also can't use
- %% rabbit_mnesia:node_type/0 because that will give false
- %% postivies on Rabbit up to 2.5.1.
- case filelib:is_regular(filename:join(dir(), "rabbit_durable_exchange.DCD")) of
- true -> disc;
- false -> ram
- end.
-
-%% NB: we cannot use rabbit_log here since it may not have been
-%% started yet
-info(Msg, Args) -> error_logger:info_msg(Msg, Args).
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl
deleted file mode 100644
index 1613838c..00000000
--- a/src/rabbit_upgrade_functions.erl
+++ /dev/null
@@ -1,327 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_upgrade_functions).
-
-%% If you are tempted to add include("rabbit.hrl"). here, don't. Using record
-%% defs here leads to pain later.
-
--compile([export_all]).
-
--rabbit_upgrade({remove_user_scope, mnesia, []}).
--rabbit_upgrade({hash_passwords, mnesia, []}).
--rabbit_upgrade({add_ip_to_listener, mnesia, []}).
--rabbit_upgrade({internal_exchanges, mnesia, []}).
--rabbit_upgrade({user_to_internal_user, mnesia, [hash_passwords]}).
--rabbit_upgrade({topic_trie, mnesia, []}).
--rabbit_upgrade({semi_durable_route, mnesia, []}).
--rabbit_upgrade({exchange_event_serial, mnesia, []}).
--rabbit_upgrade({trace_exchanges, mnesia, [internal_exchanges]}).
--rabbit_upgrade({user_admin_to_tags, mnesia, [user_to_internal_user]}).
--rabbit_upgrade({ha_mirrors, mnesia, []}).
--rabbit_upgrade({gm, mnesia, []}).
--rabbit_upgrade({exchange_scratch, mnesia, [trace_exchanges]}).
--rabbit_upgrade({mirrored_supervisor, mnesia, []}).
--rabbit_upgrade({topic_trie_node, mnesia, []}).
--rabbit_upgrade({runtime_parameters, mnesia, []}).
--rabbit_upgrade({exchange_scratches, mnesia, [exchange_scratch]}).
--rabbit_upgrade({policy, mnesia,
- [exchange_scratches, ha_mirrors]}).
--rabbit_upgrade({sync_slave_pids, mnesia, [policy]}).
--rabbit_upgrade({no_mirror_nodes, mnesia, [sync_slave_pids]}).
--rabbit_upgrade({gm_pids, mnesia, [no_mirror_nodes]}).
--rabbit_upgrade({exchange_decorators, mnesia, [policy]}).
-
-%% -------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(remove_user_scope/0 :: () -> 'ok').
--spec(hash_passwords/0 :: () -> 'ok').
--spec(add_ip_to_listener/0 :: () -> 'ok').
--spec(internal_exchanges/0 :: () -> 'ok').
--spec(user_to_internal_user/0 :: () -> 'ok').
--spec(topic_trie/0 :: () -> 'ok').
--spec(semi_durable_route/0 :: () -> 'ok').
--spec(exchange_event_serial/0 :: () -> 'ok').
--spec(trace_exchanges/0 :: () -> 'ok').
--spec(user_admin_to_tags/0 :: () -> 'ok').
--spec(ha_mirrors/0 :: () -> 'ok').
--spec(gm/0 :: () -> 'ok').
--spec(exchange_scratch/0 :: () -> 'ok').
--spec(mirrored_supervisor/0 :: () -> 'ok').
--spec(topic_trie_node/0 :: () -> 'ok').
--spec(runtime_parameters/0 :: () -> 'ok').
--spec(policy/0 :: () -> 'ok').
--spec(sync_slave_pids/0 :: () -> 'ok').
--spec(no_mirror_nodes/0 :: () -> 'ok').
--spec(gm_pids/0 :: () -> 'ok').
--spec(exchange_decorators/0 :: () -> 'ok').
-
--endif.
-
-%%--------------------------------------------------------------------
-
-%% It's a bad idea to use records or record_info here, even for the
-%% destination form. Because in the future, the destination form of
-%% your current transform may not match the record any more, and it
-%% would be messy to have to go back and fix old transforms at that
-%% point.
-
-remove_user_scope() ->
- transform(
- rabbit_user_permission,
- fun ({user_permission, UV, {permission, _Scope, Conf, Write, Read}}) ->
- {user_permission, UV, {permission, Conf, Write, Read}}
- end,
- [user_vhost, permission]).
-
-hash_passwords() ->
- transform(
- rabbit_user,
- fun ({user, Username, Password, IsAdmin}) ->
- Hash = rabbit_auth_backend_internal:hash_password(Password),
- {user, Username, Hash, IsAdmin}
- end,
- [username, password_hash, is_admin]).
-
-add_ip_to_listener() ->
- transform(
- rabbit_listener,
- fun ({listener, Node, Protocol, Host, Port}) ->
- {listener, Node, Protocol, Host, {0,0,0,0}, Port}
- end,
- [node, protocol, host, ip_address, port]).
-
-internal_exchanges() ->
- Tables = [rabbit_exchange, rabbit_durable_exchange],
- AddInternalFun =
- fun ({exchange, Name, Type, Durable, AutoDelete, Args}) ->
- {exchange, Name, Type, Durable, AutoDelete, false, Args}
- end,
- [ ok = transform(T,
- AddInternalFun,
- [name, type, durable, auto_delete, internal, arguments])
- || T <- Tables ],
- ok.
-
-user_to_internal_user() ->
- transform(
- rabbit_user,
- fun({user, Username, PasswordHash, IsAdmin}) ->
- {internal_user, Username, PasswordHash, IsAdmin}
- end,
- [username, password_hash, is_admin], internal_user).
-
-topic_trie() ->
- create(rabbit_topic_trie_edge, [{record_name, topic_trie_edge},
- {attributes, [trie_edge, node_id]},
- {type, ordered_set}]),
- create(rabbit_topic_trie_binding, [{record_name, topic_trie_binding},
- {attributes, [trie_binding, value]},
- {type, ordered_set}]).
-
-semi_durable_route() ->
- create(rabbit_semi_durable_route, [{record_name, route},
- {attributes, [binding, value]}]).
-
-exchange_event_serial() ->
- create(rabbit_exchange_serial, [{record_name, exchange_serial},
- {attributes, [name, next]}]).
-
-trace_exchanges() ->
- [declare_exchange(
- rabbit_misc:r(VHost, exchange, <<"amq.rabbitmq.trace">>), topic) ||
- VHost <- rabbit_vhost:list()],
- ok.
-
-user_admin_to_tags() ->
- transform(
- rabbit_user,
- fun({internal_user, Username, PasswordHash, true}) ->
- {internal_user, Username, PasswordHash, [administrator]};
- ({internal_user, Username, PasswordHash, false}) ->
- {internal_user, Username, PasswordHash, [management]}
- end,
- [username, password_hash, tags], internal_user).
-
-ha_mirrors() ->
- Tables = [rabbit_queue, rabbit_durable_queue],
- AddMirrorPidsFun =
- fun ({amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid}) ->
- {amqqueue, Name, Durable, AutoDelete, Owner, Arguments, Pid,
- [], undefined}
- end,
- [ ok = transform(T,
- AddMirrorPidsFun,
- [name, durable, auto_delete, exclusive_owner, arguments,
- pid, slave_pids, mirror_nodes])
- || T <- Tables ],
- ok.
-
-gm() ->
- create(gm_group, [{record_name, gm_group},
- {attributes, [name, version, members]}]).
-
-exchange_scratch() ->
- ok = exchange_scratch(rabbit_exchange),
- ok = exchange_scratch(rabbit_durable_exchange).
-
-exchange_scratch(Table) ->
- transform(
- Table,
- fun ({exchange, Name, Type, Dur, AutoDel, Int, Args}) ->
- {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
- end,
- [name, type, durable, auto_delete, internal, arguments, scratch]).
-
-mirrored_supervisor() ->
- create(mirrored_sup_childspec,
- [{record_name, mirrored_sup_childspec},
- {attributes, [key, mirroring_pid, childspec]}]).
-
-topic_trie_node() ->
- create(rabbit_topic_trie_node,
- [{record_name, topic_trie_node},
- {attributes, [trie_node, edge_count, binding_count]},
- {type, ordered_set}]).
-
-runtime_parameters() ->
- create(rabbit_runtime_parameters,
- [{record_name, runtime_parameters},
- {attributes, [key, value]},
- {disc_copies, [node()]}]).
-
-exchange_scratches() ->
- ok = exchange_scratches(rabbit_exchange),
- ok = exchange_scratches(rabbit_durable_exchange).
-
-exchange_scratches(Table) ->
- transform(
- Table,
- fun ({exchange, Name, Type = <<"x-federation">>, Dur, AutoDel, Int, Args,
- Scratch}) ->
- Scratches = orddict:store(federation, Scratch, orddict:new()),
- {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches};
- %% We assert here that nothing else uses the scratch mechanism ATM
- ({exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}) ->
- {exchange, Name, Type, Dur, AutoDel, Int, Args, undefined}
- end,
- [name, type, durable, auto_delete, internal, arguments, scratches]).
-
-policy() ->
- ok = exchange_policy(rabbit_exchange),
- ok = exchange_policy(rabbit_durable_exchange),
- ok = queue_policy(rabbit_queue),
- ok = queue_policy(rabbit_durable_queue).
-
-exchange_policy(Table) ->
- transform(
- Table,
- fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches}) ->
- {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
- undefined}
- end,
- [name, type, durable, auto_delete, internal, arguments, scratches,
- policy]).
-
-queue_policy(Table) ->
- transform(
- Table,
- fun ({amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes}) ->
- {amqqueue, Name, Dur, AutoDel, Excl, Args, Pid, SPids, MNodes,
- undefined}
- end,
- [name, durable, auto_delete, exclusive_owner, arguments, pid,
- slave_pids, mirror_nodes, policy]).
-
-sync_slave_pids() ->
- Tables = [rabbit_queue, rabbit_durable_queue],
- AddSyncSlavesFun =
- fun ({amqqueue, N, D, AD, Excl, Args, Pid, SPids, MNodes, Pol}) ->
- {amqqueue, N, D, AD, Excl, Args, Pid, SPids, [], MNodes, Pol}
- end,
- [ok = transform(T, AddSyncSlavesFun,
- [name, durable, auto_delete, exclusive_owner, arguments,
- pid, slave_pids, sync_slave_pids, mirror_nodes, policy])
- || T <- Tables],
- ok.
-
-no_mirror_nodes() ->
- Tables = [rabbit_queue, rabbit_durable_queue],
- RemoveMirrorNodesFun =
- fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, _MNodes, Pol}) ->
- {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}
- end,
- [ok = transform(T, RemoveMirrorNodesFun,
- [name, durable, auto_delete, exclusive_owner, arguments,
- pid, slave_pids, sync_slave_pids, policy])
- || T <- Tables],
- ok.
-
-gm_pids() ->
- Tables = [rabbit_queue, rabbit_durable_queue],
- AddGMPidsFun =
- fun ({amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol}) ->
- {amqqueue, N, D, AD, O, A, Pid, SPids, SSPids, Pol, []}
- end,
- [ok = transform(T, AddGMPidsFun,
- [name, durable, auto_delete, exclusive_owner, arguments,
- pid, slave_pids, sync_slave_pids, policy, gm_pids])
- || T <- Tables],
- ok.
-
-exchange_decorators() ->
- ok = exchange_decorators(rabbit_exchange),
- ok = exchange_decorators(rabbit_durable_exchange).
-
-exchange_decorators(Table) ->
- transform(
- Table,
- fun ({exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches,
- Policy}) ->
- {exchange, Name, Type, Dur, AutoDel, Int, Args, Scratches, Policy,
- {[], []}}
- end,
- [name, type, durable, auto_delete, internal, arguments, scratches, policy,
- decorators]).
-
-
-%%--------------------------------------------------------------------
-
-transform(TableName, Fun, FieldList) ->
- rabbit_table:wait([TableName]),
- {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList),
- ok.
-
-transform(TableName, Fun, FieldList, NewRecordName) ->
- rabbit_table:wait([TableName]),
- {atomic, ok} = mnesia:transform_table(TableName, Fun, FieldList,
- NewRecordName),
- ok.
-
-create(Tab, TabDef) ->
- {atomic, ok} = mnesia:create_table(Tab, TabDef),
- ok.
-
-%% Dumb replacement for rabbit_exchange:declare that does not require
-%% the exchange type registry or worker pool to be running by dint of
-%% not validating anything and assuming the exchange type does not
-%% require serialisation.
-%% NB: this assumes the pre-exchange-scratch-space format
-declare_exchange(XName, Type) ->
- X = {exchange, XName, Type, true, false, false, []},
- ok = mnesia:dirty_write(rabbit_durable_exchange, X).
diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl
deleted file mode 100644
index ac2b9f52..00000000
--- a/src/rabbit_variable_queue.erl
+++ /dev/null
@@ -1,1792 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_variable_queue).
-
--export([init/3, terminate/2, delete_and_terminate/2, purge/1, purge_acks/1,
- publish/5, publish_delivered/4, discard/3, drain_confirmed/1,
- dropwhile/2, fetchwhile/4,
- fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3, len/1,
- is_empty/1, depth/1, set_ram_duration_target/2, ram_duration/1,
- needs_timeout/1, timeout/1, handle_pre_hibernate/1, status/1, invoke/3,
- is_duplicate/2, multiple_routing_keys/0]).
-
--export([start/1, stop/0]).
-
-%% exported for testing only
--export([start_msg_store/2, stop_msg_store/0, init/5]).
-
-%%----------------------------------------------------------------------------
-%% Definitions:
-
-%% alpha: this is a message where both the message itself, and its
-%% position within the queue are held in RAM
-%%
-%% beta: this is a message where the message itself is only held on
-%% disk, but its position within the queue is held in RAM.
-%%
-%% gamma: this is a message where the message itself is only held on
-%% disk, but its position is both in RAM and on disk.
-%%
-%% delta: this is a collection of messages, represented by a single
-%% term, where the messages and their position are only held on
-%% disk.
-%%
-%% Note that for persistent messages, the message and its position
-%% within the queue are always held on disk, *in addition* to being in
-%% one of the above classifications.
-%%
-%% Also note that within this code, the term gamma seldom
-%% appears. It's frequently the case that gammas are defined by betas
-%% who have had their queue position recorded on disk.
-%%
-%% In general, messages move q1 -> q2 -> delta -> q3 -> q4, though
-%% many of these steps are frequently skipped. q1 and q4 only hold
-%% alphas, q2 and q3 hold both betas and gammas. When a message
-%% arrives, its classification is determined. It is then added to the
-%% rightmost appropriate queue.
-%%
-%% If a new message is determined to be a beta or gamma, q1 is
-%% empty. If a new message is determined to be a delta, q1 and q2 are
-%% empty (and actually q4 too).
-%%
-%% When removing messages from a queue, if q4 is empty then q3 is read
-%% directly. If q3 becomes empty then the next segment's worth of
-%% messages from delta are read into q3, reducing the size of
-%% delta. If the queue is non empty, either q4 or q3 contain
-%% entries. It is never permitted for delta to hold all the messages
-%% in the queue.
-%%
-%% The duration indicated to us by the memory_monitor is used to
-%% calculate, given our current ingress and egress rates, how many
-%% messages we should hold in RAM (i.e. as alphas). We track the
-%% ingress and egress rates for both messages and pending acks and
-%% rates for both are considered when calculating the number of
-%% messages to hold in RAM. When we need to push alphas to betas or
-%% betas to gammas, we favour writing out messages that are further
-%% from the head of the queue. This minimises writes to disk, as the
-%% messages closer to the tail of the queue stay in the queue for
-%% longer, thus do not need to be replaced as quickly by sending other
-%% messages to disk.
-%%
-%% Whilst messages are pushed to disk and forgotten from RAM as soon
-%% as requested by a new setting of the queue RAM duration, the
-%% inverse is not true: we only load messages back into RAM as
-%% demanded as the queue is read from. Thus only publishes to the
-%% queue will take up available spare capacity.
-%%
-%% When we report our duration to the memory monitor, we calculate
-%% average ingress and egress rates over the last two samples, and
-%% then calculate our duration based on the sum of the ingress and
-%% egress rates. More than two samples could be used, but it's a
-%% balance between responding quickly enough to changes in
-%% producers/consumers versus ignoring temporary blips. The problem
-%% with temporary blips is that with just a few queues, they can have
-%% substantial impact on the calculation of the average duration and
-%% hence cause unnecessary I/O. Another alternative is to increase the
-%% amqqueue_process:RAM_DURATION_UPDATE_PERIOD to beyond 5
-%% seconds. However, that then runs the risk of being too slow to
-%% inform the memory monitor of changes. Thus a 5 second interval,
-%% plus a rolling average over the last two samples seems to work
-%% well in practice.
-%%
-%% The sum of the ingress and egress rates is used because the egress
-%% rate alone is not sufficient. Adding in the ingress rate means that
-%% queues which are being flooded by messages are given more memory,
-%% resulting in them being able to process the messages faster (by
-%% doing less I/O, or at least deferring it) and thus helping keep
-%% their mailboxes empty and thus the queue as a whole is more
-%% responsive. If such a queue also has fast but previously idle
-%% consumers, the consumer can then start to be driven as fast as it
-%% can go, whereas if only egress rate was being used, the incoming
-%% messages may have to be written to disk and then read back in,
-%% resulting in the hard disk being a bottleneck in driving the
-%% consumers. Generally, we want to give Rabbit every chance of
-%% getting rid of messages as fast as possible and remaining
-%% responsive, and using only the egress rate impacts that goal.
-%%
-%% Once the queue has more alphas than the target_ram_count, the
-%% surplus must be converted to betas, if not gammas, if not rolled
-%% into delta. The conditions under which these transitions occur
-%% reflect the conflicting goals of minimising RAM cost per msg, and
-%% minimising CPU cost per msg. Once the msg has become a beta, its
-%% payload is no longer in RAM, thus a read from the msg_store must
-%% occur before the msg can be delivered, but the RAM cost of a beta
-%% is the same as a gamma, so converting a beta to gamma will not free
-%% up any further RAM. To reduce the RAM cost further, the gamma must
-%% be rolled into delta. Whilst recovering a beta or a gamma to an
-%% alpha requires only one disk read (from the msg_store), recovering
-%% a msg from within delta will require two reads (queue_index and
-%% then msg_store). But delta has a near-0 per-msg RAM cost. So the
-%% conflict is between using delta more, which will free up more
-%% memory, but require additional CPU and disk ops, versus using delta
-%% less and gammas and betas more, which will cost more memory, but
-%% require fewer disk ops and less CPU overhead.
-%%
-%% In the case of a persistent msg published to a durable queue, the
-%% msg is immediately written to the msg_store and queue_index. If
-%% then additionally converted from an alpha, it'll immediately go to
-%% a gamma (as it's already in queue_index), and cannot exist as a
-%% beta. Thus a durable queue with a mixture of persistent and
-%% transient msgs in it which has more messages than permitted by the
-%% target_ram_count may contain an interspersed mixture of betas and
-%% gammas in q2 and q3.
-%%
-%% There is then a ratio that controls how many betas and gammas there
-%% can be. This is based on the target_ram_count and thus expresses
-%% the fact that as the number of permitted alphas in the queue falls,
-%% so should the number of betas and gammas fall (i.e. delta
-%% grows). If q2 and q3 contain more than the permitted number of
-%% betas and gammas, then the surplus are forcibly converted to gammas
-%% (as necessary) and then rolled into delta. The ratio is that
-%% delta/(betas+gammas+delta) equals
-%% (betas+gammas+delta)/(target_ram_count+betas+gammas+delta). I.e. as
-%% the target_ram_count shrinks to 0, so must betas and gammas.
-%%
-%% The conversion of betas to gammas is done in batches of exactly
-%% ?IO_BATCH_SIZE. This value should not be too small, otherwise the
-%% frequent operations on the queues of q2 and q3 will not be
-%% effectively amortised (switching the direction of queue access
-%% defeats amortisation), nor should it be too big, otherwise
-%% converting a batch stalls the queue for too long. Therefore, it
-%% must be just right.
-%%
-%% The conversion from alphas to betas is also chunked, but only to
-%% ensure no more than ?IO_BATCH_SIZE alphas are converted to betas at
-%% any one time. This further smooths the effects of changes to the
-%% target_ram_count and ensures the queue remains responsive
-%% even when there is a large amount of IO work to do. The
-%% timeout callback is utilised to ensure that conversions are
-%% done as promptly as possible whilst ensuring the queue remains
-%% responsive.
-%%
-%% In the queue we keep track of both messages that are pending
-%% delivery and messages that are pending acks. In the event of a
-%% queue purge, we only need to load qi segments if the queue has
-%% elements in deltas (i.e. it came under significant memory
-%% pressure). In the event of a queue deletion, in addition to the
-%% preceding, by keeping track of pending acks in RAM, we do not need
-%% to search through qi segments looking for messages that are yet to
-%% be acknowledged.
-%%
-%% Pending acks are recorded in memory by storing the message itself.
-%% If the message has been sent to disk, we do not store the message
-%% content. During memory reduction, pending acks containing message
-%% content have that content removed and the corresponding messages
-%% are pushed out to disk.
-%%
-%% Messages from pending acks are returned to q4, q3 and delta during
-%% requeue, based on the limits of seq_id contained in each. Requeued
-%% messages retain their original seq_id, maintaining order
-%% when requeued.
-%%
-%% The order in which alphas are pushed to betas and pending acks
-%% are pushed to disk is determined dynamically. We always prefer to
-%% push messages for the source (alphas or acks) that is growing the
-%% fastest (with growth measured as avg. ingress - avg. egress). In
-%% each round of memory reduction a chunk of messages at most
-%% ?IO_BATCH_SIZE in size is allocated to be pushed to disk. The
-%% fastest growing source will be reduced by as much of this chunk as
-%% possible. If there is any remaining allocation in the chunk after
-%% the first source has been reduced to zero, the second source will
-%% be reduced by as much of the remaining chunk as possible.
-%%
-%% Notes on Clean Shutdown
-%% (This documents behaviour in variable_queue, queue_index and
-%% msg_store.)
-%%
-%% In order to try to achieve as fast a start-up as possible, if a
-%% clean shutdown occurs, we try to save out state to disk to reduce
-%% work on startup. In the msg_store this takes the form of the
-%% index_module's state, plus the file_summary ets table, and client
-%% refs. In the VQ, this takes the form of the count of persistent
-%% messages in the queue and references into the msg_stores. The
-%% queue_index adds to these terms the details of its segments and
-%% stores the terms in the queue directory.
-%%
-%% Two message stores are used. One is created for persistent messages
-%% to durable queues that must survive restarts, and the other is used
-%% for all other messages that just happen to need to be written to
-%% disk. On start up we can therefore nuke the transient message
-%% store, and be sure that the messages in the persistent store are
-%% all that we need.
-%%
-%% The references to the msg_stores are there so that the msg_store
-%% knows to only trust its saved state if all of the queues it was
-%% previously talking to come up cleanly. Likewise, the queues
-%% themselves (esp queue_index) skips work in init if all the queues
-%% and msg_store were shutdown cleanly. This gives both good speed
-%% improvements and also robustness so that if anything possibly went
-%% wrong in shutdown (or there was subsequent manual tampering), all
-%% messages and queues that can be recovered are recovered, safely.
-%%
-%% To delete transient messages lazily, the variable_queue, on
-%% startup, stores the next_seq_id reported by the queue_index as the
-%% transient_threshold. From that point on, whenever it's reading a
-%% message off disk via the queue_index, if the seq_id is below this
-%% threshold and the message is transient then it drops the message
-%% (the message itself won't exist on disk because it would have been
-%% stored in the transient msg_store which would have had its saved
-%% state nuked on startup). This avoids the expensive operation of
-%% scanning the entire queue on startup in order to delete transient
-%% messages that were only pushed to disk to save memory.
-%%
-%%----------------------------------------------------------------------------
-
--behaviour(rabbit_backing_queue).
-
--record(vqstate,
- { q1,
- q2,
- delta,
- q3,
- q4,
- next_seq_id,
- ram_pending_ack,
- disk_pending_ack,
- index_state,
- msg_store_clients,
- durable,
- transient_threshold,
-
- len,
- persistent_count,
-
- target_ram_count,
- ram_msg_count,
- ram_msg_count_prev,
- ram_ack_count_prev,
- out_counter,
- in_counter,
- rates,
- msgs_on_disk,
- msg_indices_on_disk,
- unconfirmed,
- confirmed,
- ack_out_counter,
- ack_in_counter,
- ack_rates
- }).
-
--record(rates, { egress, ingress, avg_egress, avg_ingress, timestamp }).
-
--record(msg_status,
- { seq_id,
- msg_id,
- msg,
- is_persistent,
- is_delivered,
- msg_on_disk,
- index_on_disk,
- msg_props
- }).
-
--record(delta,
- { start_seq_id, %% start_seq_id is inclusive
- count,
- end_seq_id %% end_seq_id is exclusive
- }).
-
-%% When we discover, on publish, that we should write some indices to
-%% disk for some betas, the IO_BATCH_SIZE sets the number of betas
-%% that we must be due to write indices for before we do any work at
-%% all. This is both a minimum and a maximum - we don't write fewer
-%% than IO_BATCH_SIZE indices out in one go, and we don't write more -
-%% we can always come back on the next publish to do more.
--define(IO_BATCH_SIZE, 64).
--define(PERSISTENT_MSG_STORE, msg_store_persistent).
--define(TRANSIENT_MSG_STORE, msg_store_transient).
--define(QUEUE, lqueue).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--rabbit_upgrade({multiple_routing_keys, local, []}).
-
--ifdef(use_specs).
-
--type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
--type(seq_id() :: non_neg_integer()).
-
--type(rates() :: #rates { egress :: {timestamp(), non_neg_integer()},
- ingress :: {timestamp(), non_neg_integer()},
- avg_egress :: float(),
- avg_ingress :: float(),
- timestamp :: timestamp() }).
-
--type(delta() :: #delta { start_seq_id :: non_neg_integer(),
- count :: non_neg_integer(),
- end_seq_id :: non_neg_integer() }).
-
-%% The compiler (rightfully) complains that ack() and state() are
-%% unused. For this reason we duplicate a -spec from
-%% rabbit_backing_queue with the only intent being to remove
-%% warnings. The problem here is that we can't parameterise the BQ
-%% behaviour by these two types as we would like to. We still leave
-%% these here for documentation purposes.
--type(ack() :: seq_id()).
--type(state() :: #vqstate {
- q1 :: ?QUEUE:?QUEUE(),
- q2 :: ?QUEUE:?QUEUE(),
- delta :: delta(),
- q3 :: ?QUEUE:?QUEUE(),
- q4 :: ?QUEUE:?QUEUE(),
- next_seq_id :: seq_id(),
- ram_pending_ack :: gb_tree(),
- disk_pending_ack :: gb_tree(),
- index_state :: any(),
- msg_store_clients :: 'undefined' | {{any(), binary()},
- {any(), binary()}},
- durable :: boolean(),
- transient_threshold :: non_neg_integer(),
-
- len :: non_neg_integer(),
- persistent_count :: non_neg_integer(),
-
- target_ram_count :: non_neg_integer() | 'infinity',
- ram_msg_count :: non_neg_integer(),
- ram_msg_count_prev :: non_neg_integer(),
- out_counter :: non_neg_integer(),
- in_counter :: non_neg_integer(),
- rates :: rates(),
- msgs_on_disk :: gb_set(),
- msg_indices_on_disk :: gb_set(),
- unconfirmed :: gb_set(),
- confirmed :: gb_set(),
- ack_out_counter :: non_neg_integer(),
- ack_in_counter :: non_neg_integer(),
- ack_rates :: rates() }).
-%% Duplicated from rabbit_backing_queue
--spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
-
--spec(multiple_routing_keys/0 :: () -> 'ok').
-
--endif.
-
--define(BLANK_DELTA, #delta { start_seq_id = undefined,
- count = 0,
- end_seq_id = undefined }).
--define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
- count = 0,
- end_seq_id = Z }).
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-start(DurableQueues) ->
- {AllTerms, StartFunState} = rabbit_queue_index:recover(DurableQueues),
- start_msg_store(
- [Ref || Terms <- AllTerms,
- begin
- Ref = proplists:get_value(persistent_ref, Terms),
- Ref =/= undefined
- end],
- StartFunState).
-
-stop() -> stop_msg_store().
-
-start_msg_store(Refs, StartFunState) ->
- ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store,
- [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(),
- undefined, {fun (ok) -> finished end, ok}]),
- ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store,
- [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(),
- Refs, StartFunState]).
-
-stop_msg_store() ->
- ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
- ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
-
-init(Queue, Recover, AsyncCallback) ->
- init(Queue, Recover, AsyncCallback,
- fun (MsgIds, ActionTaken) ->
- msgs_written_to_disk(AsyncCallback, MsgIds, ActionTaken)
- end,
- fun (MsgIds) -> msg_indices_written_to_disk(AsyncCallback, MsgIds) end).
-
-init(#amqqueue { name = QueueName, durable = IsDurable }, false,
- AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
- IndexState = rabbit_queue_index:init(QueueName, MsgIdxOnDiskFun),
- init(IsDurable, IndexState, 0, [],
- case IsDurable of
- true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
- MsgOnDiskFun, AsyncCallback);
- false -> undefined
- end,
- msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
-
-init(#amqqueue { name = QueueName, durable = true }, true,
- AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun) ->
- Terms = rabbit_queue_index:shutdown_terms(QueueName),
- {PRef, Terms1} =
- case proplists:get_value(persistent_ref, Terms) of
- undefined -> {rabbit_guid:gen(), []};
- PRef1 -> {PRef1, Terms}
- end,
- PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
- MsgOnDiskFun, AsyncCallback),
- TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
- undefined, AsyncCallback),
- {DeltaCount, IndexState} =
- rabbit_queue_index:recover(
- QueueName, Terms1,
- rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
- fun (MsgId) ->
- rabbit_msg_store:contains(MsgId, PersistentClient)
- end,
- MsgIdxOnDiskFun),
- init(true, IndexState, DeltaCount, Terms1,
- PersistentClient, TransientClient).
-
-terminate(_Reason, State) ->
- State1 = #vqstate { persistent_count = PCount,
- index_state = IndexState,
- msg_store_clients = {MSCStateP, MSCStateT} } =
- purge_pending_ack(true, State),
- PRef = case MSCStateP of
- undefined -> undefined;
- _ -> ok = rabbit_msg_store:client_terminate(MSCStateP),
- rabbit_msg_store:client_ref(MSCStateP)
- end,
- ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
- Terms = [{persistent_ref, PRef}, {persistent_count, PCount}],
- a(State1 #vqstate { index_state = rabbit_queue_index:terminate(
- Terms, IndexState),
- msg_store_clients = undefined }).
-
-%% the only difference between purge and delete is that delete also
-%% needs to delete everything that's been delivered and not ack'd.
-delete_and_terminate(_Reason, State) ->
- %% TODO: there is no need to interact with qi at all - which we do
- %% as part of 'purge' and 'purge_pending_ack', other than
- %% deleting it.
- {_PurgeCount, State1} = purge(State),
- State2 = #vqstate { index_state = IndexState,
- msg_store_clients = {MSCStateP, MSCStateT} } =
- purge_pending_ack(false, State1),
- IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
- case MSCStateP of
- undefined -> ok;
- _ -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
- end,
- rabbit_msg_store:client_delete_and_terminate(MSCStateT),
- a(State2 #vqstate { index_state = IndexState1,
- msg_store_clients = undefined }).
-
-purge(State = #vqstate { q4 = Q4,
- index_state = IndexState,
- msg_store_clients = MSCState,
- len = Len,
- persistent_count = PCount }) ->
- %% TODO: when there are no pending acks, which is a common case,
- %% we could simply wipe the qi instead of issuing delivers and
- %% acks for all the messages.
- {LensByStore, IndexState1} = remove_queue_entries(
- fun ?QUEUE:foldl/3, Q4,
- orddict:new(), IndexState, MSCState),
- {LensByStore1, State1 = #vqstate { q1 = Q1,
- index_state = IndexState2,
- msg_store_clients = MSCState1 }} =
- purge_betas_and_deltas(LensByStore,
- State #vqstate { q4 = ?QUEUE:new(),
- index_state = IndexState1 }),
- {LensByStore2, IndexState3} = remove_queue_entries(
- fun ?QUEUE:foldl/3, Q1,
- LensByStore1, IndexState2, MSCState1),
- PCount1 = PCount - find_persistent_count(LensByStore2),
- {Len, a(State1 #vqstate { q1 = ?QUEUE:new(),
- index_state = IndexState3,
- len = 0,
- ram_msg_count = 0,
- persistent_count = PCount1 })}.
-
-purge_acks(State) -> a(purge_pending_ack(false, State)).
-
-publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
- MsgProps = #message_properties { needs_confirming = NeedsConfirming },
- IsDelivered, _ChPid, State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
- next_seq_id = SeqId,
- len = Len,
- in_counter = InCount,
- persistent_count = PCount,
- durable = IsDurable,
- unconfirmed = UC }) ->
- IsPersistent1 = IsDurable andalso IsPersistent,
- MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps),
- {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
- State2 = case ?QUEUE:is_empty(Q3) of
- false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
- true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
- end,
- PCount1 = PCount + one_if(IsPersistent1),
- UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
- a(reduce_memory_use(
- inc_ram_msg_count(State2 #vqstate { next_seq_id = SeqId + 1,
- len = Len + 1,
- in_counter = InCount + 1,
- persistent_count = PCount1,
- unconfirmed = UC1 }))).
-
-publish_delivered(Msg = #basic_message { is_persistent = IsPersistent,
- id = MsgId },
- MsgProps = #message_properties {
- needs_confirming = NeedsConfirming },
- _ChPid, State = #vqstate { next_seq_id = SeqId,
- out_counter = OutCount,
- in_counter = InCount,
- persistent_count = PCount,
- durable = IsDurable,
- unconfirmed = UC }) ->
- IsPersistent1 = IsDurable andalso IsPersistent,
- MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps),
- {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
- State2 = record_pending_ack(m(MsgStatus1), State1),
- PCount1 = PCount + one_if(IsPersistent1),
- UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
- {SeqId, a(reduce_memory_use(
- State2 #vqstate { next_seq_id = SeqId + 1,
- out_counter = OutCount + 1,
- in_counter = InCount + 1,
- persistent_count = PCount1,
- unconfirmed = UC1 }))}.
-
-discard(_MsgId, _ChPid, State) -> State.
-
-drain_confirmed(State = #vqstate { confirmed = C }) ->
- case gb_sets:is_empty(C) of
- true -> {[], State}; %% common case
- false -> {gb_sets:to_list(C), State #vqstate {
- confirmed = gb_sets:new() }}
- end.
-
-dropwhile(Pred, State) ->
- case queue_out(State) of
- {empty, State1} ->
- {undefined, a(State1)};
- {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
- case Pred(MsgProps) of
- true -> {_, State2} = remove(false, MsgStatus, State1),
- dropwhile(Pred, State2);
- false -> {MsgProps, a(in_r(MsgStatus, State1))}
- end
- end.
-
-fetchwhile(Pred, Fun, Acc, State) ->
- case queue_out(State) of
- {empty, State1} ->
- {undefined, Acc, a(State1)};
- {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
- case Pred(MsgProps) of
- true -> {Msg, State2} = read_msg(MsgStatus, State1),
- {AckTag, State3} = remove(true, MsgStatus, State2),
- fetchwhile(Pred, Fun, Fun(Msg, AckTag, Acc), State3);
- false -> {MsgProps, Acc, a(in_r(MsgStatus, State1))}
- end
- end.
-
-fetch(AckRequired, State) ->
- case queue_out(State) of
- {empty, State1} ->
- {empty, a(State1)};
- {{value, MsgStatus}, State1} ->
- %% it is possible that the message wasn't read from disk
- %% at this point, so read it in.
- {Msg, State2} = read_msg(MsgStatus, State1),
- {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
- {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
- end.
-
-drop(AckRequired, State) ->
- case queue_out(State) of
- {empty, State1} ->
- {empty, a(State1)};
- {{value, MsgStatus}, State1} ->
- {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
- {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
- end.
-
-ack([], State) ->
- {[], State};
-ack(AckTags, State) ->
- {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
- State1 = #vqstate { index_state = IndexState,
- msg_store_clients = MSCState,
- persistent_count = PCount,
- ack_out_counter = AckOutCount }} =
- lists:foldl(
- fun (SeqId, {Acc, State2}) ->
- {MsgStatus, State3} = remove_pending_ack(SeqId, State2),
- {accumulate_ack(MsgStatus, Acc), State3}
- end, {accumulate_ack_init(), State}, AckTags),
- IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
- [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
- || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
- PCount1 = PCount - find_persistent_count(sum_msg_ids_by_store_to_len(
- orddict:new(), MsgIdsByStore)),
- {lists:reverse(AllMsgIds),
- a(State1 #vqstate { index_state = IndexState1,
- persistent_count = PCount1,
- ack_out_counter = AckOutCount + length(AckTags) })}.
-
-requeue(AckTags, #vqstate { delta = Delta,
- q3 = Q3,
- q4 = Q4,
- in_counter = InCounter,
- len = Len } = State) ->
- {SeqIds, Q4a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q4, [],
- beta_limit(Q3),
- fun publish_alpha/2, State),
- {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
- delta_limit(Delta),
- fun publish_beta/2, State1),
- {Delta1, MsgIds2, State3} = delta_merge(SeqIds1, Delta, MsgIds1,
- State2),
- MsgCount = length(MsgIds2),
- {MsgIds2, a(reduce_memory_use(
- State3 #vqstate { delta = Delta1,
- q3 = Q3a,
- q4 = Q4a,
- in_counter = InCounter + MsgCount,
- len = Len + MsgCount }))}.
-
-ackfold(MsgFun, Acc, State, AckTags) ->
- {AccN, StateN} =
- lists:foldl(fun(SeqId, {Acc0, State0}) ->
- MsgStatus = lookup_pending_ack(SeqId, State0),
- {Msg, State1} = read_msg(MsgStatus, State0),
- {MsgFun(Msg, SeqId, Acc0), State1}
- end, {Acc, State}, AckTags),
- {AccN, a(StateN)}.
-
-fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
- {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
- [msg_iterator(State),
- disk_ack_iterator(State),
- ram_ack_iterator(State)]),
- ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
-
-len(#vqstate { len = Len }) -> Len.
-
-is_empty(State) -> 0 == len(State).
-
-depth(State = #vqstate { ram_pending_ack = RPA, disk_pending_ack = DPA }) ->
- len(State) + gb_trees:size(RPA) + gb_trees:size(DPA).
-
-set_ram_duration_target(
- DurationTarget, State = #vqstate {
- rates = #rates { avg_egress = AvgEgressRate,
- avg_ingress = AvgIngressRate },
- ack_rates = #rates { avg_egress = AvgAckEgressRate,
- avg_ingress = AvgAckIngressRate },
- target_ram_count = TargetRamCount }) ->
- Rate =
- AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
- TargetRamCount1 =
- case DurationTarget of
- infinity -> infinity;
- _ -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
- end,
- State1 = State #vqstate { target_ram_count = TargetRamCount1 },
- a(case TargetRamCount1 == infinity orelse
- (TargetRamCount =/= infinity andalso
- TargetRamCount1 >= TargetRamCount) of
- true -> State1;
- false -> reduce_memory_use(State1)
- end).
-
-ram_duration(State = #vqstate {
- rates = #rates { timestamp = Timestamp,
- egress = Egress,
- ingress = Ingress } = Rates,
- ack_rates = #rates { timestamp = AckTimestamp,
- egress = AckEgress,
- ingress = AckIngress } = ARates,
- in_counter = InCount,
- out_counter = OutCount,
- ack_in_counter = AckInCount,
- ack_out_counter = AckOutCount,
- ram_msg_count = RamMsgCount,
- ram_msg_count_prev = RamMsgCountPrev,
- ram_pending_ack = RPA,
- ram_ack_count_prev = RamAckCountPrev }) ->
- Now = now(),
- {AvgEgressRate, Egress1} = update_rate(Now, Timestamp, OutCount, Egress),
- {AvgIngressRate, Ingress1} = update_rate(Now, Timestamp, InCount, Ingress),
-
- {AvgAckEgressRate, AckEgress1} =
- update_rate(Now, AckTimestamp, AckOutCount, AckEgress),
- {AvgAckIngressRate, AckIngress1} =
- update_rate(Now, AckTimestamp, AckInCount, AckIngress),
-
- RamAckCount = gb_trees:size(RPA),
-
- Duration = %% msgs+acks / (msgs+acks/sec) == sec
- case (AvgEgressRate == 0 andalso AvgIngressRate == 0 andalso
- AvgAckEgressRate == 0 andalso AvgAckIngressRate == 0) of
- true -> infinity;
- false -> (RamMsgCountPrev + RamMsgCount +
- RamAckCount + RamAckCountPrev) /
- (4 * (AvgEgressRate + AvgIngressRate +
- AvgAckEgressRate + AvgAckIngressRate))
- end,
-
- {Duration, State #vqstate {
- rates = Rates #rates {
- egress = Egress1,
- ingress = Ingress1,
- avg_egress = AvgEgressRate,
- avg_ingress = AvgIngressRate,
- timestamp = Now },
- ack_rates = ARates #rates {
- egress = AckEgress1,
- ingress = AckIngress1,
- avg_egress = AvgAckEgressRate,
- avg_ingress = AvgAckIngressRate,
- timestamp = Now },
- in_counter = 0,
- out_counter = 0,
- ack_in_counter = 0,
- ack_out_counter = 0,
- ram_msg_count_prev = RamMsgCount,
- ram_ack_count_prev = RamAckCount }}.
-
-needs_timeout(State = #vqstate { index_state = IndexState,
- target_ram_count = TargetRamCount }) ->
- case rabbit_queue_index:needs_sync(IndexState) of
- confirms -> timed;
- other -> idle;
- false when TargetRamCount == infinity -> false;
- false -> case reduce_memory_use(
- fun (_Quota, State1) -> {0, State1} end,
- fun (_Quota, State1) -> State1 end,
- fun (_Quota, State1) -> {0, State1} end,
- State) of
- {true, _State} -> idle;
- {false, _State} -> false
- end
- end.
-
-timeout(State = #vqstate { index_state = IndexState }) ->
- IndexState1 = rabbit_queue_index:sync(IndexState),
- State1 = State #vqstate { index_state = IndexState1 },
- a(reduce_memory_use(State1)).
-
-handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
- State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
-
-status(#vqstate {
- q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
- len = Len,
- ram_pending_ack = RPA,
- disk_pending_ack = DPA,
- target_ram_count = TargetRamCount,
- ram_msg_count = RamMsgCount,
- next_seq_id = NextSeqId,
- persistent_count = PersistentCount,
- rates = #rates { avg_egress = AvgEgressRate,
- avg_ingress = AvgIngressRate },
- ack_rates = #rates { avg_egress = AvgAckEgressRate,
- avg_ingress = AvgAckIngressRate } }) ->
- [ {q1 , ?QUEUE:len(Q1)},
- {q2 , ?QUEUE:len(Q2)},
- {delta , Delta},
- {q3 , ?QUEUE:len(Q3)},
- {q4 , ?QUEUE:len(Q4)},
- {len , Len},
- {pending_acks , gb_trees:size(RPA) + gb_trees:size(DPA)},
- {target_ram_count , TargetRamCount},
- {ram_msg_count , RamMsgCount},
- {ram_ack_count , gb_trees:size(RPA)},
- {next_seq_id , NextSeqId},
- {persistent_count , PersistentCount},
- {avg_ingress_rate , AvgIngressRate},
- {avg_egress_rate , AvgEgressRate},
- {avg_ack_ingress_rate, AvgAckIngressRate},
- {avg_ack_egress_rate , AvgAckEgressRate} ].
-
-invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
-invoke( _, _, State) -> State.
-
-is_duplicate(_Msg, State) -> {false, State}.
-
-%%----------------------------------------------------------------------------
-%% Minor helpers
-%%----------------------------------------------------------------------------
-
-a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
- len = Len,
- persistent_count = PersistentCount,
- ram_msg_count = RamMsgCount }) ->
- E1 = ?QUEUE:is_empty(Q1),
- E2 = ?QUEUE:is_empty(Q2),
- ED = Delta#delta.count == 0,
- E3 = ?QUEUE:is_empty(Q3),
- E4 = ?QUEUE:is_empty(Q4),
- LZ = Len == 0,
-
- true = E1 or not E3,
- true = E2 or not ED,
- true = ED or not E3,
- true = LZ == (E3 and E4),
-
- true = Len >= 0,
- true = PersistentCount >= 0,
- true = RamMsgCount >= 0,
- true = RamMsgCount =< Len,
-
- State.
-
-d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
- when Start + Count =< End ->
- Delta.
-
-m(MsgStatus = #msg_status { msg = Msg,
- is_persistent = IsPersistent,
- msg_on_disk = MsgOnDisk,
- index_on_disk = IndexOnDisk }) ->
- true = (not IsPersistent) or IndexOnDisk,
- true = (not IndexOnDisk) or MsgOnDisk,
- true = (Msg =/= undefined) or MsgOnDisk,
-
- MsgStatus.
-
-one_if(true ) -> 1;
-one_if(false) -> 0.
-
-cons_if(true, E, L) -> [E | L];
-cons_if(false, _E, L) -> L.
-
-gb_sets_maybe_insert(false, _Val, Set) -> Set;
-gb_sets_maybe_insert(true, Val, Set) -> gb_sets:add(Val, Set).
-
-msg_status(IsPersistent, IsDelivered, SeqId,
- Msg = #basic_message {id = MsgId}, MsgProps) ->
- #msg_status{seq_id = SeqId,
- msg_id = MsgId,
- msg = Msg,
- is_persistent = IsPersistent,
- is_delivered = IsDelivered,
- msg_on_disk = false,
- index_on_disk = false,
- msg_props = MsgProps}.
-
-beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
- #msg_status{seq_id = SeqId,
- msg_id = MsgId,
- msg = undefined,
- is_persistent = IsPersistent,
- is_delivered = IsDelivered,
- msg_on_disk = true,
- index_on_disk = true,
- msg_props = MsgProps}.
-
-trim_msg_status(MsgStatus) -> MsgStatus #msg_status { msg = undefined }.
-
-with_msg_store_state({MSCStateP, MSCStateT}, true, Fun) ->
- {Result, MSCStateP1} = Fun(MSCStateP),
- {Result, {MSCStateP1, MSCStateT}};
-with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
- {Result, MSCStateT1} = Fun(MSCStateT),
- {Result, {MSCStateP, MSCStateT1}}.
-
-with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
- {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
- fun (MSCState1) ->
- {Fun(MSCState1), MSCState1}
- end),
- Res.
-
-msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) ->
- msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
- Callback).
-
-msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) ->
- CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
- rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun,
- fun () -> Callback(?MODULE, CloseFDsFun) end).
-
-msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
- with_immutable_msg_store_state(
- MSCState, IsPersistent,
- fun (MSCState1) ->
- rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
- end).
-
-msg_store_read(MSCState, IsPersistent, MsgId) ->
- with_msg_store_state(
- MSCState, IsPersistent,
- fun (MSCState1) ->
- rabbit_msg_store:read(MsgId, MSCState1)
- end).
-
-msg_store_remove(MSCState, IsPersistent, MsgIds) ->
- with_immutable_msg_store_state(
- MSCState, IsPersistent,
- fun (MCSState1) ->
- rabbit_msg_store:remove(MsgIds, MCSState1)
- end).
-
-msg_store_close_fds(MSCState, IsPersistent) ->
- with_msg_store_state(
- MSCState, IsPersistent,
- fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
-
-msg_store_close_fds_fun(IsPersistent) ->
- fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
- {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
- State #vqstate { msg_store_clients = MSCState1 }
- end.
-
-maybe_write_delivered(false, _SeqId, IndexState) ->
- IndexState;
-maybe_write_delivered(true, SeqId, IndexState) ->
- rabbit_queue_index:deliver([SeqId], IndexState).
-
-betas_from_index_entries(List, TransientThreshold, RPA, DPA, IndexState) ->
- {Filtered, Delivers, Acks} =
- lists:foldr(
- fun ({_MsgId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
- {Filtered1, Delivers1, Acks1} = Acc) ->
- case SeqId < TransientThreshold andalso not IsPersistent of
- true -> {Filtered1,
- cons_if(not IsDelivered, SeqId, Delivers1),
- [SeqId | Acks1]};
- false -> case (gb_trees:is_defined(SeqId, RPA) orelse
- gb_trees:is_defined(SeqId, DPA)) of
- false -> {?QUEUE:in_r(m(beta_msg_status(M)),
- Filtered1),
- Delivers1, Acks1};
- true -> Acc
- end
- end
- end, {?QUEUE:new(), [], []}, List),
- {Filtered, rabbit_queue_index:ack(
- Acks, rabbit_queue_index:deliver(Delivers, IndexState))}.
-
-expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
- d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
-expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
- count = Count } = Delta)
- when SeqId < StartSeqId ->
- d(Delta #delta { start_seq_id = SeqId, count = Count + 1 });
-expand_delta(SeqId, #delta { count = Count,
- end_seq_id = EndSeqId } = Delta)
- when SeqId >= EndSeqId ->
- d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1 });
-expand_delta(_SeqId, #delta { count = Count } = Delta) ->
- d(Delta #delta { count = Count + 1 }).
-
-update_rate(Now, Then, Count, {OThen, OCount}) ->
- %% avg over the current period and the previous
- {1000000.0 * (Count + OCount) / timer:now_diff(Now, OThen), {Then, Count}}.
-
-%%----------------------------------------------------------------------------
-%% Internal major helpers for Public API
-%%----------------------------------------------------------------------------
-
-init(IsDurable, IndexState, DeltaCount, Terms,
- PersistentClient, TransientClient) ->
- {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
-
- DeltaCount1 = proplists:get_value(persistent_count, Terms, DeltaCount),
- Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
- true -> ?BLANK_DELTA;
- false -> d(#delta { start_seq_id = LowSeqId,
- count = DeltaCount1,
- end_seq_id = NextSeqId })
- end,
- Now = now(),
- State = #vqstate {
- q1 = ?QUEUE:new(),
- q2 = ?QUEUE:new(),
- delta = Delta,
- q3 = ?QUEUE:new(),
- q4 = ?QUEUE:new(),
- next_seq_id = NextSeqId,
- ram_pending_ack = gb_trees:empty(),
- disk_pending_ack = gb_trees:empty(),
- index_state = IndexState1,
- msg_store_clients = {PersistentClient, TransientClient},
- durable = IsDurable,
- transient_threshold = NextSeqId,
-
- len = DeltaCount1,
- persistent_count = DeltaCount1,
-
- target_ram_count = infinity,
- ram_msg_count = 0,
- ram_msg_count_prev = 0,
- ram_ack_count_prev = 0,
- out_counter = 0,
- in_counter = 0,
- rates = blank_rate(Now, DeltaCount1),
- msgs_on_disk = gb_sets:new(),
- msg_indices_on_disk = gb_sets:new(),
- unconfirmed = gb_sets:new(),
- confirmed = gb_sets:new(),
- ack_out_counter = 0,
- ack_in_counter = 0,
- ack_rates = blank_rate(Now, 0) },
- a(maybe_deltas_to_betas(State)).
-
-blank_rate(Timestamp, IngressLength) ->
- #rates { egress = {Timestamp, 0},
- ingress = {Timestamp, IngressLength},
- avg_egress = 0.0,
- avg_ingress = 0.0,
- timestamp = Timestamp }.
-
-in_r(MsgStatus = #msg_status { msg = undefined },
- State = #vqstate { q3 = Q3, q4 = Q4 }) ->
- case ?QUEUE:is_empty(Q4) of
- true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
- false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
- read_msg(MsgStatus, State),
- inc_ram_msg_count(
- State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus#msg_status {
- msg = Msg }, Q4a) })
- end;
-in_r(MsgStatus, State = #vqstate { q4 = Q4 }) ->
- State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) }.
-
-queue_out(State = #vqstate { q4 = Q4 }) ->
- case ?QUEUE:out(Q4) of
- {empty, _Q4} ->
- case fetch_from_q3(State) of
- {empty, _State1} = Result -> Result;
- {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
- end;
- {{value, MsgStatus}, Q4a} ->
- {{value, MsgStatus}, State #vqstate { q4 = Q4a }}
- end.
-
-read_msg(#msg_status{msg = undefined,
- msg_id = MsgId,
- is_persistent = IsPersistent}, State) ->
- read_msg(MsgId, IsPersistent, State);
-read_msg(#msg_status{msg = Msg}, State) ->
- {Msg, State}.
-
-read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState}) ->
- {{ok, Msg = #basic_message {}}, MSCState1} =
- msg_store_read(MSCState, IsPersistent, MsgId),
- {Msg, State #vqstate {msg_store_clients = MSCState1}}.
-
-inc_ram_msg_count(State = #vqstate{ram_msg_count = RamMsgCount}) ->
- State#vqstate{ram_msg_count = RamMsgCount + 1}.
-
-remove(AckRequired, MsgStatus = #msg_status {
- seq_id = SeqId,
- msg_id = MsgId,
- msg = Msg,
- is_persistent = IsPersistent,
- is_delivered = IsDelivered,
- msg_on_disk = MsgOnDisk,
- index_on_disk = IndexOnDisk },
- State = #vqstate {ram_msg_count = RamMsgCount,
- out_counter = OutCount,
- index_state = IndexState,
- msg_store_clients = MSCState,
- len = Len,
- persistent_count = PCount}) ->
- %% 1. Mark it delivered if necessary
- IndexState1 = maybe_write_delivered(
- IndexOnDisk andalso not IsDelivered,
- SeqId, IndexState),
-
- %% 2. Remove from msg_store and queue index, if necessary
- Rem = fun () ->
- ok = msg_store_remove(MSCState, IsPersistent, [MsgId])
- end,
- Ack = fun () -> rabbit_queue_index:ack([SeqId], IndexState1) end,
- IndexState2 = case {AckRequired, MsgOnDisk, IndexOnDisk} of
- {false, true, false} -> Rem(), IndexState1;
- {false, true, true} -> Rem(), Ack();
- _ -> IndexState1
- end,
-
- %% 3. If an ack is required, add something sensible to PA
- {AckTag, State1} = case AckRequired of
- true -> StateN = record_pending_ack(
- MsgStatus #msg_status {
- is_delivered = true }, State),
- {SeqId, StateN};
- false -> {undefined, State}
- end,
-
- PCount1 = PCount - one_if(IsPersistent andalso not AckRequired),
- RamMsgCount1 = RamMsgCount - one_if(Msg =/= undefined),
-
- {AckTag, State1 #vqstate {ram_msg_count = RamMsgCount1,
- out_counter = OutCount + 1,
- index_state = IndexState2,
- len = Len - 1,
- persistent_count = PCount1}}.
-
-purge_betas_and_deltas(LensByStore,
- State = #vqstate { q3 = Q3,
- index_state = IndexState,
- msg_store_clients = MSCState }) ->
- case ?QUEUE:is_empty(Q3) of
- true -> {LensByStore, State};
- false -> {LensByStore1, IndexState1} =
- remove_queue_entries(fun ?QUEUE:foldl/3, Q3,
- LensByStore, IndexState, MSCState),
- purge_betas_and_deltas(LensByStore1,
- maybe_deltas_to_betas(
- State #vqstate {
- q3 = ?QUEUE:new(),
- index_state = IndexState1 }))
- end.
-
-remove_queue_entries(Fold, Q, LensByStore, IndexState, MSCState) ->
- {MsgIdsByStore, Delivers, Acks} =
- Fold(fun remove_queue_entries1/2, {orddict:new(), [], []}, Q),
- ok = orddict:fold(fun (IsPersistent, MsgIds, ok) ->
- msg_store_remove(MSCState, IsPersistent, MsgIds)
- end, ok, MsgIdsByStore),
- {sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore),
- rabbit_queue_index:ack(Acks,
- rabbit_queue_index:deliver(Delivers, IndexState))}.
-
-remove_queue_entries1(
- #msg_status { msg_id = MsgId, seq_id = SeqId,
- is_delivered = IsDelivered, msg_on_disk = MsgOnDisk,
- index_on_disk = IndexOnDisk, is_persistent = IsPersistent },
- {MsgIdsByStore, Delivers, Acks}) ->
- {case MsgOnDisk of
- true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
- false -> MsgIdsByStore
- end,
- cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
- cons_if(IndexOnDisk, SeqId, Acks)}.
-
-sum_msg_ids_by_store_to_len(LensByStore, MsgIdsByStore) ->
- orddict:fold(
- fun (IsPersistent, MsgIds, LensByStore1) ->
- orddict:update_counter(IsPersistent, length(MsgIds), LensByStore1)
- end, LensByStore, MsgIdsByStore).
-
-%%----------------------------------------------------------------------------
-%% Internal gubbins for publishing
-%%----------------------------------------------------------------------------
-
-maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
- msg_on_disk = true }, _MSCState) ->
- MsgStatus;
-maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
- msg = Msg, msg_id = MsgId,
- is_persistent = IsPersistent }, MSCState)
- when Force orelse IsPersistent ->
- Msg1 = Msg #basic_message {
- %% don't persist any recoverable decoded properties
- content = rabbit_binary_parser:clear_decoded_content(
- Msg #basic_message.content)},
- ok = msg_store_write(MSCState, IsPersistent, MsgId, Msg1),
- MsgStatus #msg_status { msg_on_disk = true };
-maybe_write_msg_to_disk(_Force, MsgStatus, _MSCState) ->
- MsgStatus.
-
-maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
- index_on_disk = true }, IndexState) ->
- true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
- {MsgStatus, IndexState};
-maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
- msg_id = MsgId,
- seq_id = SeqId,
- is_persistent = IsPersistent,
- is_delivered = IsDelivered,
- msg_props = MsgProps}, IndexState)
- when Force orelse IsPersistent ->
- true = MsgStatus #msg_status.msg_on_disk, %% ASSERTION
- IndexState1 = rabbit_queue_index:publish(
- MsgId, SeqId, MsgProps, IsPersistent, IndexState),
- {MsgStatus #msg_status { index_on_disk = true },
- maybe_write_delivered(IsDelivered, SeqId, IndexState1)};
-maybe_write_index_to_disk(_Force, MsgStatus, IndexState) ->
- {MsgStatus, IndexState}.
-
-maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus,
- State = #vqstate { index_state = IndexState,
- msg_store_clients = MSCState }) ->
- MsgStatus1 = maybe_write_msg_to_disk(ForceMsg, MsgStatus, MSCState),
- {MsgStatus2, IndexState1} =
- maybe_write_index_to_disk(ForceIndex, MsgStatus1, IndexState),
- {MsgStatus2, State #vqstate { index_state = IndexState1 }}.
-
-%%----------------------------------------------------------------------------
-%% Internal gubbins for acks
-%%----------------------------------------------------------------------------
-
-record_pending_ack(#msg_status { seq_id = SeqId, msg = Msg } = MsgStatus,
- State = #vqstate { ram_pending_ack = RPA,
- disk_pending_ack = DPA,
- ack_in_counter = AckInCount}) ->
- {RPA1, DPA1} =
- case Msg of
- undefined -> {RPA, gb_trees:insert(SeqId, MsgStatus, DPA)};
- _ -> {gb_trees:insert(SeqId, MsgStatus, RPA), DPA}
- end,
- State #vqstate { ram_pending_ack = RPA1,
- disk_pending_ack = DPA1,
- ack_in_counter = AckInCount + 1}.
-
-lookup_pending_ack(SeqId, #vqstate { ram_pending_ack = RPA,
- disk_pending_ack = DPA }) ->
- case gb_trees:lookup(SeqId, RPA) of
- {value, V} -> V;
- none -> gb_trees:get(SeqId, DPA)
- end.
-
-remove_pending_ack(SeqId, State = #vqstate { ram_pending_ack = RPA,
- disk_pending_ack = DPA }) ->
- case gb_trees:lookup(SeqId, RPA) of
- {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
- {V, State #vqstate { ram_pending_ack = RPA1 }};
- none -> DPA1 = gb_trees:delete(SeqId, DPA),
- {gb_trees:get(SeqId, DPA),
- State #vqstate { disk_pending_ack = DPA1 }}
- end.
-
-purge_pending_ack(KeepPersistent,
- State = #vqstate { ram_pending_ack = RPA,
- disk_pending_ack = DPA,
- index_state = IndexState,
- msg_store_clients = MSCState }) ->
- F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
- {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
- rabbit_misc:gb_trees_fold(
- F, rabbit_misc:gb_trees_fold(F, accumulate_ack_init(), RPA), DPA),
- State1 = State #vqstate { ram_pending_ack = gb_trees:empty(),
- disk_pending_ack = gb_trees:empty() },
-
- case KeepPersistent of
- true -> case orddict:find(false, MsgIdsByStore) of
- error -> State1;
- {ok, MsgIds} -> ok = msg_store_remove(MSCState, false,
- MsgIds),
- State1
- end;
- false -> IndexState1 =
- rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
- [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
- || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)],
- State1 #vqstate { index_state = IndexState1 }
- end.
-
-accumulate_ack_init() -> {[], orddict:new(), []}.
-
-accumulate_ack(#msg_status { seq_id = SeqId,
- msg_id = MsgId,
- is_persistent = IsPersistent,
- msg_on_disk = MsgOnDisk,
- index_on_disk = IndexOnDisk },
- {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
- {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
- case MsgOnDisk of
- true -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
- false -> MsgIdsByStore
- end,
- [MsgId | AllMsgIds]}.
-
-find_persistent_count(LensByStore) ->
- case orddict:find(true, LensByStore) of
- error -> 0;
- {ok, Len} -> Len
- end.
-
-%%----------------------------------------------------------------------------
-%% Internal plumbing for confirms (aka publisher acks)
-%%----------------------------------------------------------------------------
-
-record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk = MOD,
- msg_indices_on_disk = MIOD,
- unconfirmed = UC,
- confirmed = C }) ->
- State #vqstate {
- msgs_on_disk = rabbit_misc:gb_sets_difference(MOD, MsgIdSet),
- msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
- unconfirmed = rabbit_misc:gb_sets_difference(UC, MsgIdSet),
- confirmed = gb_sets:union(C, MsgIdSet) }.
-
-msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
- Callback(?MODULE,
- fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
-msgs_written_to_disk(Callback, MsgIdSet, written) ->
- Callback(?MODULE,
- fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
- msg_indices_on_disk = MIOD,
- unconfirmed = UC }) ->
- Confirmed = gb_sets:intersection(UC, MsgIdSet),
- record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
- State #vqstate {
- msgs_on_disk =
- gb_sets:union(MOD, Confirmed) })
- end).
-
-msg_indices_written_to_disk(Callback, MsgIdSet) ->
- Callback(?MODULE,
- fun (?MODULE, State = #vqstate { msgs_on_disk = MOD,
- msg_indices_on_disk = MIOD,
- unconfirmed = UC }) ->
- Confirmed = gb_sets:intersection(UC, MsgIdSet),
- record_confirms(gb_sets:intersection(MsgIdSet, MOD),
- State #vqstate {
- msg_indices_on_disk =
- gb_sets:union(MIOD, Confirmed) })
- end).
-
-%%----------------------------------------------------------------------------
-%% Internal plumbing for requeue
-%%----------------------------------------------------------------------------
-
-publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
- {Msg, State1} = read_msg(MsgStatus, State),
- {MsgStatus#msg_status { msg = Msg }, inc_ram_msg_count(State1)};
-publish_alpha(MsgStatus, State) ->
- {MsgStatus, inc_ram_msg_count(State)}.
-
-publish_beta(MsgStatus, State) ->
- {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
- {m(trim_msg_status(MsgStatus1)), State1}.
-
-%% Rebuild queue, inserting sequence ids to maintain ordering
-queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
- queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
- Limit, PubFun, State).
-
-queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
- Limit, PubFun, State)
- when Limit == undefined orelse SeqId < Limit ->
- case ?QUEUE:out(Q) of
- {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
- when SeqIdQ < SeqId ->
- %% enqueue from the remaining queue
- queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
- Limit, PubFun, State);
- {_, _Q1} ->
- %% enqueue from the remaining list of sequence ids
- {MsgStatus, State1} = msg_from_pending_ack(SeqId, State),
- {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
- PubFun(MsgStatus, State1),
- queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
- Limit, PubFun, State2)
- end;
-queue_merge(SeqIds, Q, Front, MsgIds,
- _Limit, _PubFun, State) ->
- {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
-
-delta_merge([], Delta, MsgIds, State) ->
- {Delta, MsgIds, State};
-delta_merge(SeqIds, Delta, MsgIds, State) ->
- lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0}) ->
- {#msg_status { msg_id = MsgId } = MsgStatus, State1} =
- msg_from_pending_ack(SeqId, State0),
- {_MsgStatus, State2} =
- maybe_write_to_disk(true, true, MsgStatus, State1),
- {expand_delta(SeqId, Delta0), [MsgId | MsgIds0], State2}
- end, {Delta, MsgIds, State}, SeqIds).
-
-%% Mostly opposite of record_pending_ack/2
-msg_from_pending_ack(SeqId, State) ->
- {#msg_status { msg_props = MsgProps } = MsgStatus, State1} =
- remove_pending_ack(SeqId, State),
- {MsgStatus #msg_status {
- msg_props = MsgProps #message_properties { needs_confirming = false } },
- State1}.
-
-beta_limit(Q) ->
- case ?QUEUE:peek(Q) of
- {value, #msg_status { seq_id = SeqId }} -> SeqId;
- empty -> undefined
- end.
-
-delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined;
-delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
-
-%%----------------------------------------------------------------------------
-%% Iterator
-%%----------------------------------------------------------------------------
-
-ram_ack_iterator(State) ->
- {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
-
-disk_ack_iterator(State) ->
- {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
-
-msg_iterator(State) -> istate(start, State).
-
-istate(start, State) -> {q4, State#vqstate.q4, State};
-istate(q4, State) -> {q3, State#vqstate.q3, State};
-istate(q3, State) -> {delta, State#vqstate.delta, State};
-istate(delta, State) -> {q2, State#vqstate.q2, State};
-istate(q2, State) -> {q1, State#vqstate.q1, State};
-istate(q1, _State) -> done.
-
-next({ack, It}, IndexState) ->
- case gb_trees:next(It) of
- none -> {empty, IndexState};
- {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
- {value, MsgStatus, true, Next, IndexState}
- end;
-next(done, IndexState) -> {empty, IndexState};
-next({delta, #delta{start_seq_id = SeqId,
- end_seq_id = SeqId}, State}, IndexState) ->
- next(istate(delta, State), IndexState);
-next({delta, #delta{start_seq_id = SeqId,
- end_seq_id = SeqIdEnd} = Delta, State}, IndexState) ->
- SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
- SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
- {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
- next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
-next({delta, Delta, [], State}, IndexState) ->
- next({delta, Delta, State}, IndexState);
-next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
- case (gb_trees:is_defined(SeqId, State#vqstate.ram_pending_ack) orelse
- gb_trees:is_defined(SeqId, State#vqstate.disk_pending_ack)) of
- false -> Next = {delta, Delta, Rest, State},
- {value, beta_msg_status(M), false, Next, IndexState};
- true -> next({delta, Delta, Rest, State}, IndexState)
- end;
-next({Key, Q, State}, IndexState) ->
- case ?QUEUE:out(Q) of
- {empty, _Q} -> next(istate(Key, State), IndexState);
- {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
- {value, MsgStatus, false, Next, IndexState}
- end.
-
-inext(It, {Its, IndexState}) ->
- case next(It, IndexState) of
- {empty, IndexState1} ->
- {Its, IndexState1};
- {value, MsgStatus1, Unacked, It1, IndexState1} ->
- {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
- end.
-
-ifold(_Fun, Acc, [], State) ->
- {Acc, State};
-ifold(Fun, Acc, Its, State) ->
- [{MsgStatus, Unacked, It} | Rest] =
- lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
- {#msg_status{seq_id = SeqId2}, _, _}) ->
- SeqId1 =< SeqId2
- end, Its),
- {Msg, State1} = read_msg(MsgStatus, State),
- case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
- {stop, Acc1} ->
- {Acc1, State};
- {cont, Acc1} ->
- {Its1, IndexState1} = inext(It, {Rest, State1#vqstate.index_state}),
- ifold(Fun, Acc1, Its1, State1#vqstate{index_state = IndexState1})
- end.
-
-%%----------------------------------------------------------------------------
-%% Phase changes
-%%----------------------------------------------------------------------------
-
-%% Determine whether a reduction in memory use is necessary, and call
-%% functions to perform the required phase changes. The function can
-%% also be used to just do the former, by passing in dummy phase
-%% change functions.
-%%
-%% The function does not report on any needed beta->delta conversions,
-%% though the conversion function for that is called as necessary. The
-%% reason is twofold. Firstly, this is safe because the conversion is
-%% only ever necessary just after a transition to a
-%% target_ram_count of zero or after an incremental alpha->beta
-%% conversion. In the former case the conversion is performed straight
-%% away (i.e. any betas present at the time are converted to deltas),
-%% and in the latter case the need for a conversion is flagged up
-%% anyway. Secondly, this is necessary because we do not have a
-%% precise and cheap predicate for determining whether a beta->delta
-%% conversion is necessary - due to the complexities of retaining up
-%% one segment's worth of messages in q3 - and thus would risk
-%% perpetually reporting the need for a conversion when no such
-%% conversion is needed. That in turn could cause an infinite loop.
-reduce_memory_use(AlphaBetaFun, BetaDeltaFun, AckFun,
- State = #vqstate {
- ram_pending_ack = RPA,
- ram_msg_count = RamMsgCount,
- target_ram_count = TargetRamCount,
- rates = #rates { avg_ingress = AvgIngress,
- avg_egress = AvgEgress },
- ack_rates = #rates { avg_ingress = AvgAckIngress,
- avg_egress = AvgAckEgress }
- }) ->
-
- {Reduce, State1 = #vqstate { q2 = Q2, q3 = Q3 }} =
- case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
- 0 -> {false, State};
- %% Reduce memory of pending acks and alphas. The order is
- %% determined based on which is growing faster. Whichever
- %% comes second may very well get a quota of 0 if the
- %% first manages to push out the max number of messages.
- S1 -> Funs = case ((AvgAckIngress - AvgAckEgress) >
- (AvgIngress - AvgEgress)) of
- true -> [AckFun, AlphaBetaFun];
- false -> [AlphaBetaFun, AckFun]
- end,
- {_, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
- ReduceFun(QuotaN, StateN)
- end, {S1, State}, Funs),
- {true, State2}
- end,
-
- case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
- permitted_beta_count(State1)) of
- ?IO_BATCH_SIZE = S2 -> {true, BetaDeltaFun(S2, State1)};
- _ -> {Reduce, State1}
- end.
-
-limit_ram_acks(0, State) ->
- {0, State};
-limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
- disk_pending_ack = DPA }) ->
- case gb_trees:is_empty(RPA) of
- true ->
- {Quota, State};
- false ->
- {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
- {MsgStatus1, State1} =
- maybe_write_to_disk(true, false, MsgStatus, State),
- DPA1 = gb_trees:insert(SeqId, m(trim_msg_status(MsgStatus1)), DPA),
- limit_ram_acks(Quota - 1,
- State1 #vqstate { ram_pending_ack = RPA1,
- disk_pending_ack = DPA1 })
- end.
-
-reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
- State;
-reduce_memory_use(State) ->
- {_, State1} = reduce_memory_use(fun push_alphas_to_betas/2,
- fun push_betas_to_deltas/2,
- fun limit_ram_acks/2,
- State),
- State1.
-
-permitted_beta_count(#vqstate { len = 0 }) ->
- infinity;
-permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
- lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
-permitted_beta_count(#vqstate { q1 = Q1,
- q4 = Q4,
- target_ram_count = TargetRamCount,
- len = Len }) ->
- BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
- lists:max([rabbit_queue_index:next_segment_boundary(0),
- BetaDelta - ((BetaDelta * BetaDelta) div
- (BetaDelta + TargetRamCount))]).
-
-chunk_size(Current, Permitted)
- when Permitted =:= infinity orelse Permitted >= Current ->
- 0;
-chunk_size(Current, Permitted) ->
- lists:min([Current - Permitted, ?IO_BATCH_SIZE]).
-
-fetch_from_q3(State = #vqstate { q1 = Q1,
- q2 = Q2,
- delta = #delta { count = DeltaCount },
- q3 = Q3,
- q4 = Q4 }) ->
- case ?QUEUE:out(Q3) of
- {empty, _Q3} ->
- {empty, State};
- {{value, MsgStatus}, Q3a} ->
- State1 = State #vqstate { q3 = Q3a },
- State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
- {true, true} ->
- %% q3 is now empty, it wasn't before;
- %% delta is still empty. So q2 must be
- %% empty, and we know q4 is empty
- %% otherwise we wouldn't be loading from
- %% q3. As such, we can just set q4 to Q1.
- true = ?QUEUE:is_empty(Q2), %% ASSERTION
- true = ?QUEUE:is_empty(Q4), %% ASSERTION
- State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
- {true, false} ->
- maybe_deltas_to_betas(State1);
- {false, _} ->
- %% q3 still isn't empty, we've not
- %% touched delta, so the invariants
- %% between q1, q2, delta and q3 are
- %% maintained
- State1
- end,
- {loaded, {MsgStatus, State2}}
- end.
-
-maybe_deltas_to_betas(State = #vqstate { delta = ?BLANK_DELTA_PATTERN(X) }) ->
- State;
-maybe_deltas_to_betas(State = #vqstate {
- q2 = Q2,
- delta = Delta,
- q3 = Q3,
- index_state = IndexState,
- ram_pending_ack = RPA,
- disk_pending_ack = DPA,
- transient_threshold = TransientThreshold }) ->
- #delta { start_seq_id = DeltaSeqId,
- count = DeltaCount,
- end_seq_id = DeltaSeqIdEnd } = Delta,
- DeltaSeqId1 =
- lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
- DeltaSeqIdEnd]),
- {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
- IndexState),
- {Q3a, IndexState2} = betas_from_index_entries(List, TransientThreshold,
- RPA, DPA, IndexState1),
- State1 = State #vqstate { index_state = IndexState2 },
- case ?QUEUE:len(Q3a) of
- 0 ->
- %% we ignored every message in the segment due to it being
- %% transient and below the threshold
- maybe_deltas_to_betas(
- State1 #vqstate {
- delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
- Q3aLen ->
- Q3b = ?QUEUE:join(Q3, Q3a),
- case DeltaCount - Q3aLen of
- 0 ->
- %% delta is now empty, but it wasn't before, so
- %% can now join q2 onto q3
- State1 #vqstate { q2 = ?QUEUE:new(),
- delta = ?BLANK_DELTA,
- q3 = ?QUEUE:join(Q3b, Q2) };
- N when N > 0 ->
- Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
- count = N,
- end_seq_id = DeltaSeqIdEnd }),
- State1 #vqstate { delta = Delta1,
- q3 = Q3b }
- end
- end.
-
-push_alphas_to_betas(Quota, State) ->
- {Quota1, State1} =
- push_alphas_to_betas(
- fun ?QUEUE:out/1,
- fun (MsgStatus, Q1a,
- State0 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) ->
- State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
- (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
- State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
- end, Quota, State #vqstate.q1, State),
- {Quota2, State2} =
- push_alphas_to_betas(
- fun ?QUEUE:out_r/1,
- fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
- State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
- end, Quota1, State1 #vqstate.q4, State1),
- {Quota2, State2}.
-
-push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
- State = #vqstate { ram_msg_count = RamMsgCount,
- target_ram_count = TargetRamCount })
- when Quota =:= 0 orelse
- TargetRamCount =:= infinity orelse
- TargetRamCount >= RamMsgCount ->
- {Quota, State};
-push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
- case Generator(Q) of
- {empty, _Q} ->
- {Quota, State};
- {{value, MsgStatus}, Qa} ->
- {MsgStatus1 = #msg_status { msg_on_disk = true },
- State1 = #vqstate { ram_msg_count = RamMsgCount }} =
- maybe_write_to_disk(true, false, MsgStatus, State),
- MsgStatus2 = m(trim_msg_status(MsgStatus1)),
- State2 = State1 #vqstate { ram_msg_count = RamMsgCount - 1 },
- push_alphas_to_betas(Generator, Consumer, Quota - 1, Qa,
- Consumer(MsgStatus2, Qa, State2))
- end.
-
-push_betas_to_deltas(Quota, State = #vqstate { q2 = Q2,
- delta = Delta,
- q3 = Q3,
- index_state = IndexState }) ->
- PushState = {Quota, Delta, IndexState},
- {Q3a, PushState1} = push_betas_to_deltas(
- fun ?QUEUE:out_r/1,
- fun rabbit_queue_index:next_segment_boundary/1,
- Q3, PushState),
- {Q2a, PushState2} = push_betas_to_deltas(
- fun ?QUEUE:out/1,
- fun (Q2MinSeqId) -> Q2MinSeqId end,
- Q2, PushState1),
- {_, Delta1, IndexState1} = PushState2,
- State #vqstate { q2 = Q2a,
- delta = Delta1,
- q3 = Q3a,
- index_state = IndexState1 }.
-
-push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
- case ?QUEUE:is_empty(Q) of
- true ->
- {Q, PushState};
- false ->
- {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
- {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
- Limit = LimitFun(MinSeqId),
- case MaxSeqId < Limit of
- true -> {Q, PushState};
- false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
- end
- end.
-
-push_betas_to_deltas1(_Generator, _Limit, Q,
- {0, _Delta, _IndexState} = PushState) ->
- {Q, PushState};
-push_betas_to_deltas1(Generator, Limit, Q,
- {Quota, Delta, IndexState} = PushState) ->
- case Generator(Q) of
- {empty, _Q} ->
- {Q, PushState};
- {{value, #msg_status { seq_id = SeqId }}, _Qa}
- when SeqId < Limit ->
- {Q, PushState};
- {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
- {#msg_status { index_on_disk = true }, IndexState1} =
- maybe_write_index_to_disk(true, MsgStatus, IndexState),
- Delta1 = expand_delta(SeqId, Delta),
- push_betas_to_deltas1(Generator, Limit, Qa,
- {Quota - 1, Delta1, IndexState1})
- end.
-
-%%----------------------------------------------------------------------------
-%% Upgrading
-%%----------------------------------------------------------------------------
-
-multiple_routing_keys() ->
- transform_storage(
- fun ({basic_message, ExchangeName, Routing_Key, Content,
- MsgId, Persistent}) ->
- {ok, {basic_message, ExchangeName, [Routing_Key], Content,
- MsgId, Persistent}};
- (_) -> {error, corrupt_message}
- end),
- ok.
-
-
-%% Assumes message store is not running
-transform_storage(TransformFun) ->
- transform_store(?PERSISTENT_MSG_STORE, TransformFun),
- transform_store(?TRANSIENT_MSG_STORE, TransformFun).
-
-transform_store(Store, TransformFun) ->
- rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
- rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl
deleted file mode 100644
index c629180e..00000000
--- a/src/rabbit_version.erl
+++ /dev/null
@@ -1,175 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_version).
-
--export([recorded/0, matches/2, desired/0, desired_for_scope/1,
- record_desired/0, record_desired_for_scope/1,
- upgrades_required/1]).
-
-%% -------------------------------------------------------------------
--ifdef(use_specs).
-
--export_type([scope/0, step/0]).
-
--type(scope() :: atom()).
--type(scope_version() :: [atom()]).
--type(step() :: {atom(), atom()}).
-
--type(version() :: [atom()]).
-
--spec(recorded/0 :: () -> rabbit_types:ok_or_error2(version(), any())).
--spec(matches/2 :: ([A], [A]) -> boolean()).
--spec(desired/0 :: () -> version()).
--spec(desired_for_scope/1 :: (scope()) -> scope_version()).
--spec(record_desired/0 :: () -> 'ok').
--spec(record_desired_for_scope/1 ::
- (scope()) -> rabbit_types:ok_or_error(any())).
--spec(upgrades_required/1 ::
- (scope()) -> rabbit_types:ok_or_error2([step()], any())).
-
--endif.
-%% -------------------------------------------------------------------
-
--define(VERSION_FILENAME, "schema_version").
--define(SCOPES, [mnesia, local]).
-
-%% -------------------------------------------------------------------
-
-recorded() -> case rabbit_file:read_term_file(schema_filename()) of
- {ok, [V]} -> {ok, V};
- {error, _} = Err -> Err
- end.
-
-record(V) -> ok = rabbit_file:write_term_file(schema_filename(), [V]).
-
-recorded_for_scope(Scope) ->
- case recorded() of
- {error, _} = Err ->
- Err;
- {ok, Version} ->
- {ok, case lists:keysearch(Scope, 1, categorise_by_scope(Version)) of
- false -> [];
- {value, {Scope, SV1}} -> SV1
- end}
- end.
-
-record_for_scope(Scope, ScopeVersion) ->
- case recorded() of
- {error, _} = Err ->
- Err;
- {ok, Version} ->
- Version1 = lists:keystore(Scope, 1, categorise_by_scope(Version),
- {Scope, ScopeVersion}),
- ok = record([Name || {_Scope, Names} <- Version1, Name <- Names])
- end.
-
-%% -------------------------------------------------------------------
-
-matches(VerA, VerB) ->
- lists:usort(VerA) =:= lists:usort(VerB).
-
-%% -------------------------------------------------------------------
-
-desired() -> [Name || Scope <- ?SCOPES, Name <- desired_for_scope(Scope)].
-
-desired_for_scope(Scope) -> with_upgrade_graph(fun heads/1, Scope).
-
-record_desired() -> record(desired()).
-
-record_desired_for_scope(Scope) ->
- record_for_scope(Scope, desired_for_scope(Scope)).
-
-upgrades_required(Scope) ->
- case recorded_for_scope(Scope) of
- {error, enoent} ->
- case filelib:is_file(rabbit_guid:filename()) of
- false -> {error, starting_from_scratch};
- true -> {error, version_not_available}
- end;
- {ok, CurrentHeads} ->
- with_upgrade_graph(
- fun (G) ->
- case unknown_heads(CurrentHeads, G) of
- [] -> {ok, upgrades_to_apply(CurrentHeads, G)};
- Unknown -> {error, {future_upgrades_found, Unknown}}
- end
- end, Scope)
- end.
-
-%% -------------------------------------------------------------------
-
-with_upgrade_graph(Fun, Scope) ->
- case rabbit_misc:build_acyclic_graph(
- fun (Module, Steps) -> vertices(Module, Steps, Scope) end,
- fun (Module, Steps) -> edges(Module, Steps, Scope) end,
- rabbit_misc:all_module_attributes(rabbit_upgrade)) of
- {ok, G} -> try
- Fun(G)
- after
- true = digraph:delete(G)
- end;
- {error, {vertex, duplicate, StepName}} ->
- throw({error, {duplicate_upgrade_step, StepName}});
- {error, {edge, {bad_vertex, StepName}, _From, _To}} ->
- throw({error, {dependency_on_unknown_upgrade_step, StepName}});
- {error, {edge, {bad_edge, StepNames}, _From, _To}} ->
- throw({error, {cycle_in_upgrade_steps, StepNames}})
- end.
-
-vertices(Module, Steps, Scope0) ->
- [{StepName, {Module, StepName}} || {StepName, Scope1, _Reqs} <- Steps,
- Scope0 == Scope1].
-
-edges(_Module, Steps, Scope0) ->
- [{Require, StepName} || {StepName, Scope1, Requires} <- Steps,
- Require <- Requires,
- Scope0 == Scope1].
-unknown_heads(Heads, G) ->
- [H || H <- Heads, digraph:vertex(G, H) =:= false].
-
-upgrades_to_apply(Heads, G) ->
- %% Take all the vertices which can reach the known heads. That's
- %% everything we've already applied. Subtract that from all
- %% vertices: that's what we have to apply.
- Unsorted = sets:to_list(
- sets:subtract(
- sets:from_list(digraph:vertices(G)),
- sets:from_list(digraph_utils:reaching(Heads, G)))),
- %% Form a subgraph from that list and find a topological ordering
- %% so we can invoke them in order.
- [element(2, digraph:vertex(G, StepName)) ||
- StepName <- digraph_utils:topsort(digraph_utils:subgraph(G, Unsorted))].
-
-heads(G) ->
- lists:sort([V || V <- digraph:vertices(G), digraph:out_degree(G, V) =:= 0]).
-
-%% -------------------------------------------------------------------
-
-categorise_by_scope(Version) when is_list(Version) ->
- Categorised =
- [{Scope, Name} || {_Module, Attributes} <-
- rabbit_misc:all_module_attributes(rabbit_upgrade),
- {Name, Scope, _Requires} <- Attributes,
- lists:member(Name, Version)],
- orddict:to_list(
- lists:foldl(fun ({Scope, Name}, CatVersion) ->
- rabbit_misc:orddict_cons(Scope, Name, CatVersion)
- end, orddict:new(), Categorised)).
-
-dir() -> rabbit_mnesia:dir().
-
-schema_filename() -> filename:join(dir(), ?VERSION_FILENAME).
diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl
deleted file mode 100644
index 8d013d43..00000000
--- a/src/rabbit_vhost.erl
+++ /dev/null
@@ -1,142 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_vhost).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--export([add/1, delete/1, exists/1, list/0, with/2, assert/1]).
--export([info/1, info/2, info_all/0, info_all/1]).
-
--ifdef(use_specs).
-
--spec(add/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(delete/1 :: (rabbit_types:vhost()) -> 'ok').
--spec(exists/1 :: (rabbit_types:vhost()) -> boolean()).
--spec(list/0 :: () -> [rabbit_types:vhost()]).
--spec(with/2 :: (rabbit_types:vhost(), rabbit_misc:thunk(A)) -> A).
--spec(assert/1 :: (rabbit_types:vhost()) -> 'ok').
-
--spec(info/1 :: (rabbit_types:vhost()) -> rabbit_types:infos()).
--spec(info/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(info_all/0 :: () -> [rabbit_types:infos()]).
--spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(INFO_KEYS, [name, tracing]).
-
-add(VHostPath) ->
- rabbit_log:info("Adding vhost '~s'~n", [VHostPath]),
- R = rabbit_misc:execute_mnesia_transaction(
- fun () ->
- case mnesia:wread({rabbit_vhost, VHostPath}) of
- [] -> ok = mnesia:write(rabbit_vhost,
- #vhost{virtual_host = VHostPath},
- write);
- [_] -> mnesia:abort({vhost_already_exists, VHostPath})
- end
- end,
- fun (ok, true) ->
- ok;
- (ok, false) ->
- [rabbit_exchange:declare(
- rabbit_misc:r(VHostPath, exchange, Name),
- Type, true, false, false, []) ||
- {Name,Type} <-
- [{<<"">>, direct},
- {<<"amq.direct">>, direct},
- {<<"amq.topic">>, topic},
- {<<"amq.match">>, headers}, %% per 0-9-1 pdf
- {<<"amq.headers">>, headers}, %% per 0-9-1 xml
- {<<"amq.fanout">>, fanout},
- {<<"amq.rabbitmq.trace">>, topic}]],
- ok
- end),
- rabbit_event:notify(vhost_created, info(VHostPath)),
- R.
-
-delete(VHostPath) ->
- %% FIXME: We are forced to delete the queues and exchanges outside
- %% the TX below. Queue deletion involves sending messages to the queue
- %% process, which in turn results in further mnesia actions and
- %% eventually the termination of that process. Exchange deletion causes
- %% notifications which must be sent outside the TX
- rabbit_log:info("Deleting vhost '~s'~n", [VHostPath]),
- [{ok,_} = rabbit_amqqueue:delete(Q, false, false) ||
- Q <- rabbit_amqqueue:list(VHostPath)],
- [ok = rabbit_exchange:delete(Name, false) ||
- #exchange{name = Name} <- rabbit_exchange:list(VHostPath)],
- R = rabbit_misc:execute_mnesia_transaction(
- with(VHostPath, fun () ->
- ok = internal_delete(VHostPath)
- end)),
- ok = rabbit_event:notify(vhost_deleted, [{name, VHostPath}]),
- R.
-
-internal_delete(VHostPath) ->
- [ok = rabbit_auth_backend_internal:clear_permissions(
- proplists:get_value(user, Info), VHostPath)
- || Info <- rabbit_auth_backend_internal:list_vhost_permissions(VHostPath)],
- [ok = rabbit_runtime_parameters:clear(VHostPath,
- proplists:get_value(component, Info),
- proplists:get_value(name, Info))
- || Info <- rabbit_runtime_parameters:list(VHostPath)],
- [ok = rabbit_policy:delete(VHostPath, proplists:get_value(name, Info))
- || Info <- rabbit_policy:list(VHostPath)],
- ok = mnesia:delete({rabbit_vhost, VHostPath}),
- ok.
-
-exists(VHostPath) ->
- mnesia:dirty_read({rabbit_vhost, VHostPath}) /= [].
-
-list() ->
- mnesia:dirty_all_keys(rabbit_vhost).
-
-with(VHostPath, Thunk) ->
- fun () ->
- case mnesia:read({rabbit_vhost, VHostPath}) of
- [] ->
- mnesia:abort({no_such_vhost, VHostPath});
- [_V] ->
- Thunk()
- end
- end.
-
-%% Like with/2 but outside an Mnesia tx
-assert(VHostPath) -> case exists(VHostPath) of
- true -> ok;
- false -> throw({error, {no_such_vhost, VHostPath}})
- end.
-
-%%----------------------------------------------------------------------------
-
-infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
-
-i(name, VHost) -> VHost;
-i(tracing, VHost) -> rabbit_trace:enabled(VHost);
-i(Item, _) -> throw({bad_argument, Item}).
-
-info(VHost) -> infos(?INFO_KEYS, VHost).
-info(VHost, Items) -> infos(Items, VHost).
-
-info_all() -> info_all(?INFO_KEYS).
-info_all(Items) -> [info(VHost, Items) || VHost <- list()].
diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl
deleted file mode 100644
index 597f9094..00000000
--- a/src/rabbit_vm.erl
+++ /dev/null
@@ -1,228 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_vm).
-
--export([memory/0]).
-
--define(MAGIC_PLUGINS, ["mochiweb", "webmachine", "cowboy", "sockjs",
- "rfc4627_jsonrpc"]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(memory/0 :: () -> rabbit_types:infos()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% Like erlang:memory(), but with awareness of rabbit-y things
-memory() ->
- ConnProcs = [rabbit_tcp_client_sup, ssl_connection_sup, amqp_sup],
- QProcs = [rabbit_amqqueue_sup, rabbit_mirror_queue_slave_sup],
- MsgIndexProcs = [msg_store_transient, msg_store_persistent],
- MgmtDbProcs = [rabbit_mgmt_sup],
- PluginProcs = plugin_sups(),
-
- All = [ConnProcs, QProcs, MsgIndexProcs, MgmtDbProcs, PluginProcs],
-
- {Sums, _Other} = sum_processes(lists:append(All), [memory]),
-
- [Conns, Qs, MsgIndexProc, MgmtDbProc, AllPlugins] =
- [aggregate_memory(Names, Sums) || Names <- All],
-
- Mnesia = mnesia_memory(),
- MsgIndexETS = ets_memory(rabbit_msg_store_ets_index),
- MgmtDbETS = ets_memory(rabbit_mgmt_db),
- Plugins = AllPlugins - MgmtDbProc,
-
- [{total, Total},
- {processes, Processes},
- {ets, ETS},
- {atom, Atom},
- {binary, Bin},
- {code, Code},
- {system, System}] =
- erlang:memory([total, processes, ets, atom, binary, code, system]),
-
- OtherProc = Processes - Conns - Qs - MsgIndexProc - AllPlugins,
-
- [{total, Total},
- {connection_procs, Conns},
- {queue_procs, Qs},
- {plugins, Plugins},
- {other_proc, lists:max([0, OtherProc])}, %% [1]
- {mnesia, Mnesia},
- {mgmt_db, MgmtDbETS + MgmtDbProc},
- {msg_index, MsgIndexETS + MsgIndexProc},
- {other_ets, ETS - Mnesia - MsgIndexETS - MgmtDbETS},
- {binary, Bin},
- {code, Code},
- {atom, Atom},
- {other_system, System - ETS - Atom - Bin - Code}].
-
-%% [1] - erlang:memory(processes) can be less than the sum of its
-%% parts. Rather than display something nonsensical, just silence any
-%% claims about negative memory. See
-%% http://erlang.org/pipermail/erlang-questions/2012-September/069320.html
-
-%%----------------------------------------------------------------------------
-
-mnesia_memory() ->
- case mnesia:system_info(is_running) of
- yes -> lists:sum([bytes(mnesia:table_info(Tab, memory)) ||
- Tab <- mnesia:system_info(tables)]);
- no -> 0
- end.
-
-ets_memory(Name) ->
- lists:sum([bytes(ets:info(T, memory)) || T <- ets:all(),
- N <- [ets:info(T, name)],
- N =:= Name]).
-
-bytes(Words) -> Words * erlang:system_info(wordsize).
-
-plugin_sups() ->
- lists:append([plugin_sup(App) ||
- {App, _, _} <- rabbit_misc:which_applications(),
- is_plugin(atom_to_list(App))]).
-
-plugin_sup(App) ->
- case application_controller:get_master(App) of
- undefined -> [];
- Master -> case application_master:get_child(Master) of
- {Pid, _} when is_pid(Pid) -> [process_name(Pid)];
- Pid when is_pid(Pid) -> [process_name(Pid)];
- _ -> []
- end
- end.
-
-process_name(Pid) ->
- case process_info(Pid, registered_name) of
- {registered_name, Name} -> Name;
- _ -> Pid
- end.
-
-is_plugin("rabbitmq_" ++ _) -> true;
-is_plugin(App) -> lists:member(App, ?MAGIC_PLUGINS).
-
-aggregate_memory(Names, Sums) ->
- lists:sum([extract_memory(Name, Sums) || Name <- Names]).
-
-extract_memory(Name, Sums) ->
- {value, {_, Accs}} = lists:keysearch(Name, 1, Sums),
- {value, {memory, V}} = lists:keysearch(memory, 1, Accs),
- V.
-
-%%----------------------------------------------------------------------------
-
-%% NB: this code is non-rabbit specific.
-
--ifdef(use_specs).
--type(process() :: pid() | atom()).
--type(info_key() :: atom()).
--type(info_value() :: any()).
--type(info_item() :: {info_key(), info_value()}).
--type(accumulate() :: fun ((info_key(), info_value(), info_value()) ->
- info_value())).
--spec(sum_processes/2 :: ([process()], [info_key()]) ->
- {[{process(), [info_item()]}], [info_item()]}).
--spec(sum_processes/3 :: ([process()], accumulate(), [info_item()]) ->
- {[{process(), [info_item()]}], [info_item()]}).
--endif.
-
-sum_processes(Names, Items) ->
- sum_processes(Names, fun (_, X, Y) -> X + Y end,
- [{Item, 0} || Item <- Items]).
-
-%% summarize the process_info of all processes based on their
-%% '$ancestor' hierarchy, recorded in their process dictionary.
-%%
-%% The function takes
-%%
-%% 1) a list of names/pids of processes that are accumulation points
-%% in the hierarchy.
-%%
-%% 2) a function that aggregates individual info items -taking the
-%% info item key, value and accumulated value as the input and
-%% producing a new accumulated value.
-%%
-%% 3) a list of info item key / initial accumulator value pairs.
-%%
-%% The process_info of a process is accumulated at the nearest of its
-%% ancestors that is mentioned in the first argument, or, if no such
-%% ancestor exists or the ancestor information is absent, in a special
-%% 'other' bucket.
-%%
-%% The result is a pair consisting of
-%%
-%% 1) a k/v list, containing for each of the accumulation names/pids a
-%% list of info items, containing the accumulated data, and
-%%
-%% 2) the 'other' bucket - a list of info items containing the
-%% accumulated data of all processes with no matching ancestors
-%%
-%% Note that this function operates on names as well as pids, but
-%% these must match whatever is contained in the '$ancestor' process
-%% dictionary entry. Generally that means for all registered processes
-%% the name should be used.
-sum_processes(Names, Fun, Acc0) ->
- Items = [Item || {Item, _Val0} <- Acc0],
- Acc0Dict = orddict:from_list(Acc0),
- NameAccs0 = orddict:from_list([{Name, Acc0Dict} || Name <- Names]),
- {NameAccs, OtherAcc} =
- lists:foldl(
- fun (Pid, Acc) ->
- InfoItems = [registered_name, dictionary | Items],
- case process_info(Pid, InfoItems) of
- undefined ->
- Acc;
- [{registered_name, RegName}, {dictionary, D} | Vals] ->
- %% see docs for process_info/2 for the
- %% special handling of 'registered_name'
- %% info items
- Extra = case RegName of
- [] -> [];
- N -> [N]
- end,
- accumulate(find_ancestor(Extra, D, Names), Fun,
- orddict:from_list(Vals), Acc)
- end
- end, {NameAccs0, Acc0Dict}, processes()),
- %% these conversions aren't strictly necessary; we do them simply
- %% for the sake of encapsulating the representation.
- {[{Name, orddict:to_list(Accs)} ||
- {Name, Accs} <- orddict:to_list(NameAccs)],
- orddict:to_list(OtherAcc)}.
-
-find_ancestor(Extra, D, Names) ->
- Ancestors = case lists:keysearch('$ancestors', 1, D) of
- {value, {_, Ancs}} -> Ancs;
- false -> []
- end,
- case lists:splitwith(fun (A) -> not lists:member(A, Names) end,
- Extra ++ Ancestors) of
- {_, []} -> undefined;
- {_, [Name | _]} -> Name
- end.
-
-accumulate(undefined, Fun, ValsDict, {NameAccs, OtherAcc}) ->
- {NameAccs, orddict:merge(Fun, ValsDict, OtherAcc)};
-accumulate(Name, Fun, ValsDict, {NameAccs, OtherAcc}) ->
- F = fun (NameAcc) -> orddict:merge(Fun, ValsDict, NameAcc) end,
- {orddict:update(Name, F, NameAccs), OtherAcc}.
diff --git a/src/rabbit_writer.erl b/src/rabbit_writer.erl
deleted file mode 100644
index c0b1f8e4..00000000
--- a/src/rabbit_writer.erl
+++ /dev/null
@@ -1,296 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(rabbit_writer).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([start/5, start_link/5, start/6, start_link/6]).
--export([send_command/2, send_command/3,
- send_command_sync/2, send_command_sync/3,
- send_command_and_notify/4, send_command_and_notify/5,
- flush/1]).
--export([internal_send_command/4, internal_send_command/6]).
-
-%% internal
--export([mainloop/1, mainloop1/1]).
-
--record(wstate, {sock, channel, frame_max, protocol, reader,
- stats_timer, pending}).
-
--define(HIBERNATE_AFTER, 5000).
-
-%%---------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/5 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid())
- -> rabbit_types:ok(pid())).
--spec(start_link/5 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid())
- -> rabbit_types:ok(pid())).
--spec(start/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
- -> rabbit_types:ok(pid())).
--spec(start_link/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(), boolean())
- -> rabbit_types:ok(pid())).
--spec(send_command/2 ::
- (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command/3 ::
- (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
- -> 'ok').
--spec(send_command_sync/2 ::
- (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_sync/3 ::
- (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
- -> 'ok').
--spec(send_command_and_notify/4 ::
- (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
- -> 'ok').
--spec(send_command_and_notify/5 ::
- (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:content())
- -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(internal_send_command/4 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record(), rabbit_types:protocol())
- -> 'ok').
--spec(internal_send_command/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record(), rabbit_types:content(),
- non_neg_integer(), rabbit_types:protocol())
- -> 'ok').
-
--endif.
-
-%%---------------------------------------------------------------------------
-
-start(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
- start(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
-
-start_link(Sock, Channel, FrameMax, Protocol, ReaderPid) ->
- start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, false).
-
-start(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
- State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
- ReaderWantsStats),
- {ok, proc_lib:spawn(?MODULE, mainloop, [State])}.
-
-start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
- State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
- ReaderWantsStats),
- {ok, proc_lib:spawn_link(?MODULE, mainloop, [State])}.
-
-initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
- (case ReaderWantsStats of
- true -> fun rabbit_event:init_stats_timer/2;
- false -> fun rabbit_event:init_disabled_stats_timer/2
- end)(#wstate{sock = Sock,
- channel = Channel,
- frame_max = FrameMax,
- protocol = Protocol,
- reader = ReaderPid,
- pending = []},
- #wstate.stats_timer).
-
-mainloop(State) ->
- try
- mainloop1(State)
- catch
- exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
- ReaderPid ! {channel_exit, Channel, Error}
- end,
- done.
-
-mainloop1(State = #wstate{pending = []}) ->
- receive
- Message -> ?MODULE:mainloop1(handle_message(Message, State))
- after ?HIBERNATE_AFTER ->
- erlang:hibernate(?MODULE, mainloop, [State])
- end;
-mainloop1(State) ->
- receive
- Message -> ?MODULE:mainloop1(handle_message(Message, State))
- after 0 ->
- ?MODULE:mainloop1(internal_flush(State))
- end.
-
-handle_message({send_command, MethodRecord}, State) ->
- internal_send_command_async(MethodRecord, State);
-handle_message({send_command, MethodRecord, Content}, State) ->
- internal_send_command_async(MethodRecord, Content, State);
-handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
- State1 = internal_flush(
- internal_send_command_async(MethodRecord, State)),
- gen_server:reply(From, ok),
- State1;
-handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
- State) ->
- State1 = internal_flush(
- internal_send_command_async(MethodRecord, Content, State)),
- gen_server:reply(From, ok),
- State1;
-handle_message({'$gen_call', From, flush}, State) ->
- State1 = internal_flush(State),
- gen_server:reply(From, ok),
- State1;
-handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
- State1 = internal_send_command_async(MethodRecord, State),
- rabbit_amqqueue:notify_sent(QPid, ChPid),
- State1;
-handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
- State) ->
- State1 = internal_send_command_async(MethodRecord, Content, State),
- rabbit_amqqueue:notify_sent(QPid, ChPid),
- State1;
-handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
- rabbit_amqqueue:notify_sent_queue_down(QPid),
- State;
-handle_message({inet_reply, _, ok}, State) ->
- rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
-handle_message({inet_reply, _, Status}, _State) ->
- exit({writer, send_failed, Status});
-handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
- ReaderPid ! ensure_stats,
- rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
-handle_message(Message, _State) ->
- exit({writer, message_not_understood, Message}).
-
-%%---------------------------------------------------------------------------
-
-send_command(W, MethodRecord) ->
- W ! {send_command, MethodRecord},
- ok.
-
-send_command(W, MethodRecord, Content) ->
- W ! {send_command, MethodRecord, Content},
- ok.
-
-send_command_sync(W, MethodRecord) ->
- call(W, {send_command_sync, MethodRecord}).
-
-send_command_sync(W, MethodRecord, Content) ->
- call(W, {send_command_sync, MethodRecord, Content}).
-
-send_command_and_notify(W, Q, ChPid, MethodRecord) ->
- W ! {send_command_and_notify, Q, ChPid, MethodRecord},
- ok.
-
-send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
- W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
- ok.
-
-flush(W) -> call(W, flush).
-
-%%---------------------------------------------------------------------------
-
-call(Pid, Msg) ->
- {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
- Res.
-
-%%---------------------------------------------------------------------------
-
-assemble_frame(Channel, MethodRecord, Protocol) ->
- rabbit_binary_generator:build_simple_method_frame(
- Channel, MethodRecord, Protocol).
-
-assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) ->
- MethodName = rabbit_misc:method_record_type(MethodRecord),
- true = Protocol:method_has_content(MethodName), % assertion
- MethodFrame = rabbit_binary_generator:build_simple_method_frame(
- Channel, MethodRecord, Protocol),
- ContentFrames = rabbit_binary_generator:build_simple_content_frames(
- Channel, Content, FrameMax, Protocol),
- [MethodFrame | ContentFrames].
-
-tcp_send(Sock, Data) ->
- rabbit_misc:throw_on_error(inet_error,
- fun () -> rabbit_net:send(Sock, Data) end).
-
-internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
- ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
-
-internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
- Protocol) ->
- ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
- (_Frame, Other) -> Other
- end, ok, assemble_frames(Channel, MethodRecord,
- Content, FrameMax, Protocol)).
-
-internal_send_command_async(MethodRecord,
- State = #wstate{channel = Channel,
- protocol = Protocol,
- pending = Pending}) ->
- Frame = assemble_frame(Channel, MethodRecord, Protocol),
- maybe_flush(State#wstate{pending = [Frame | Pending]}).
-
-internal_send_command_async(MethodRecord, Content,
- State = #wstate{channel = Channel,
- frame_max = FrameMax,
- protocol = Protocol,
- pending = Pending}) ->
- Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
- Protocol),
- maybe_flush(State#wstate{pending = [Frames | Pending]}).
-
-%% This magic number is the tcp-over-ethernet MSS (1460) minus the
-%% minimum size of a AMQP basic.deliver method frame (24) plus basic
-%% content header (22). The idea is that we want to flush just before
-%% exceeding the MSS.
--define(FLUSH_THRESHOLD, 1414).
-
-maybe_flush(State = #wstate{pending = Pending}) ->
- case iolist_size(Pending) >= ?FLUSH_THRESHOLD of
- true -> internal_flush(State);
- false -> State
- end.
-
-internal_flush(State = #wstate{pending = []}) ->
- State;
-internal_flush(State = #wstate{sock = Sock, pending = Pending}) ->
- ok = port_cmd(Sock, lists:reverse(Pending)),
- State#wstate{pending = []}.
-
-%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
-%% Status} to obtain the result. That is bad when it is called from
-%% the writer since it requires scanning of the writers possibly quite
-%% large message queue.
-%%
-%% So instead we lift the code from prim_inet:send/2, which is what
-%% gen_tcp:send/2 calls, do the first half here and then just process
-%% the result code in handle_message/2 as and when it arrives.
-%%
-%% This means we may end up happily sending data down a closed/broken
-%% socket, but that's ok since a) data in the buffers will be lost in
-%% any case (so qualitatively we are no worse off than if we used
-%% gen_tcp:send/2), and b) we do detect the changed socket status
-%% eventually, i.e. when we get round to handling the result code.
-%%
-%% Also note that the port has bounded buffers and port_command blocks
-%% when these are full. So the fact that we process the result
-%% asynchronously does not impact flow control.
-port_cmd(Sock, Data) ->
- true = try rabbit_net:port_command(Sock, Data)
- catch error:Error -> exit({writer, send_failed, Error})
- end,
- ok.
diff --git a/src/supervised_lifecycle.erl b/src/supervised_lifecycle.erl
deleted file mode 100644
index 8b306f6f..00000000
--- a/src/supervised_lifecycle.erl
+++ /dev/null
@@ -1,68 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% Invoke callbacks on startup and termination.
-%%
-%% Simply hook this process into a supervision hierarchy, to have the
-%% callbacks invoked at a precise point during the establishment and
-%% teardown of that hierarchy, respectively.
-%%
-%% Or launch the process independently, and link to it, to have the
-%% callbacks invoked on startup and when the linked process
-%% terminates, respectively.
-
--module(supervised_lifecycle).
-
--behavior(gen_server).
-
--export([start_link/3]).
-
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/3 :: (atom(), rabbit_types:mfargs(), rabbit_types:mfargs()) ->
- rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Name, StartMFA, StopMFA) ->
- gen_server:start_link({local, Name}, ?MODULE, [StartMFA, StopMFA], []).
-
-%%----------------------------------------------------------------------------
-
-init([{M, F, A}, StopMFA]) ->
- process_flag(trap_exit, true),
- apply(M, F, A),
- {ok, StopMFA}.
-
-handle_call(_Request, _From, State) -> {noreply, State}.
-
-handle_cast(_Msg, State) -> {noreply, State}.
-
-handle_info(_Info, State) -> {noreply, State}.
-
-terminate(_Reason, {M, F, A}) ->
- apply(M, F, A),
- ok.
-
-code_change(_OldVsn, State, _Extra) -> {ok, State}.
diff --git a/src/supervisor2.erl b/src/supervisor2.erl
deleted file mode 100644
index 23bfe7f1..00000000
--- a/src/supervisor2.erl
+++ /dev/null
@@ -1,1232 +0,0 @@
-%% This file is a copy of supervisor.erl from the R13B-3 Erlang/OTP
-%% distribution, with the following modifications:
-%%
-%% 1) the module name is supervisor2
-%%
-%% 2) there is a new strategy called
-%% simple_one_for_one_terminate. This is exactly the same as for
-%% simple_one_for_one, except that children *are* explicitly
-%% terminated as per the shutdown component of the child_spec.
-%%
-%% 3) child specifications can contain, as the restart type, a tuple
-%% {permanent, Delay} | {transient, Delay} | {intrinsic, Delay}
-%% where Delay >= 0 (see point (4) below for intrinsic). The delay,
-%% in seconds, indicates what should happen if a child, upon being
-%% restarted, exceeds the MaxT and MaxR parameters. Thus, if a
-%% child exits, it is restarted as normal. If it exits sufficiently
-%% quickly and often to exceed the boundaries set by the MaxT and
-%% MaxR parameters, and a Delay is specified, then rather than
-%% stopping the supervisor, the supervisor instead continues and
-%% tries to start up the child again, Delay seconds later.
-%%
-%% Note that you can never restart more frequently than the MaxT
-%% and MaxR parameters allow: i.e. you must wait until *both* the
-%% Delay has passed *and* the MaxT and MaxR parameters allow the
-%% child to be restarted.
-%%
-%% Also note that the Delay is a *minimum*. There is no guarantee
-%% that the child will be restarted within that time, especially if
-%% other processes are dying and being restarted at the same time -
-%% essentially we have to wait for the delay to have passed and for
-%% the MaxT and MaxR parameters to permit the child to be
-%% restarted. This may require waiting for longer than Delay.
-%%
-%% Sometimes, you may wish for a transient or intrinsic child to
-%% exit abnormally so that it gets restarted, but still log
-%% nothing. gen_server will log any exit reason other than
-%% 'normal', 'shutdown' or {'shutdown', _}. Thus the exit reason of
-%% {'shutdown', 'restart'} is interpreted to mean you wish the
-%% child to be restarted according to the delay parameters, but
-%% gen_server will not log the error. Thus from gen_server's
-%% perspective it's a normal exit, whilst from supervisor's
-%% perspective, it's an abnormal exit.
-%%
-%% 4) Added an 'intrinsic' restart type. Like the transient type, this
-%% type means the child should only be restarted if the child exits
-%% abnormally. Unlike the transient type, if the child exits
-%% normally, the supervisor itself also exits normally. If the
-%% child is a supervisor and it exits normally (i.e. with reason of
-%% 'shutdown') then the child's parent also exits normally.
-%%
-%% 5) normal, and {shutdown, _} exit reasons are all treated the same
-%% (i.e. are regarded as normal exits)
-%%
-%% All modifications are (C) 2010-2013 GoPivotal, Inc.
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(supervisor2).
-
--behaviour(gen_server).
-
-%% External exports
--export([start_link/2,start_link/3,
- start_child/2, restart_child/2,
- delete_child/2, terminate_child/2,
- which_children/1, find_child/2,
- check_childspecs/1]).
-
-%% Internal exports
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3]).
--export([handle_cast/2]).
-
--define(DICT, dict).
-
--record(state, {name,
- strategy,
- children = [],
- dynamics = ?DICT:new(),
- intensity,
- period,
- restarts = [],
- module,
- args}).
-
--record(child, {pid = undefined, % pid is undefined when child is not running
- name,
- mfa,
- restart_type,
- shutdown,
- child_type,
- modules = []}).
-
--define(is_simple(State), State#state.strategy =:= simple_one_for_one orelse
- State#state.strategy =:= simple_one_for_one_terminate).
--define(is_terminate_simple(State),
- State#state.strategy =:= simple_one_for_one_terminate).
-
--ifdef(use_specs).
-
-%%--------------------------------------------------------------------------
-%% Types
-%%--------------------------------------------------------------------------
-
--export_type([child_spec/0, startchild_ret/0, strategy/0, sup_name/0]).
-
--type child() :: 'undefined' | pid().
--type child_id() :: term().
--type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
--type modules() :: [module()] | 'dynamic'.
--type delay() :: non_neg_integer().
--type restart() :: 'permanent' | 'transient' | 'temporary' | 'intrinsic'
- | {'permanent', delay()} | {'transient', delay()}
- | {'intrinsic', delay()}.
--type shutdown() :: 'brutal_kill' | timeout().
--type worker() :: 'worker' | 'supervisor'.
--type sup_name() :: {'local', Name :: atom()} | {'global', Name :: atom()}.
--type sup_ref() :: (Name :: atom())
- | {Name :: atom(), Node :: node()}
- | {'global', Name :: atom()}
- | pid().
--type child_spec() :: {Id :: child_id(),
- StartFunc :: mfargs(),
- Restart :: restart(),
- Shutdown :: shutdown(),
- Type :: worker(),
- Modules :: modules()}.
-
-
--type strategy() :: 'one_for_all' | 'one_for_one'
- | 'rest_for_one' | 'simple_one_for_one'
- | 'simple_one_for_one_terminate'.
-
--type child_rec() :: #child{pid :: child() | {restarting,pid()} | [pid()],
- name :: child_id(),
- mfa :: mfargs(),
- restart_type :: restart(),
- shutdown :: shutdown(),
- child_type :: worker(),
- modules :: modules()}.
-
--type state() :: #state{strategy :: strategy(),
- children :: [child_rec()],
- dynamics :: ?DICT(),
- intensity :: non_neg_integer(),
- period :: pos_integer()}.
-
-%%--------------------------------------------------------------------------
-%% Callback behaviour
-%%--------------------------------------------------------------------------
-
--callback init(Args :: term()) ->
- {ok, {{RestartStrategy :: strategy(),
- MaxR :: non_neg_integer(),
- MaxT :: non_neg_integer()},
- [ChildSpec :: child_spec()]}}
- | ignore.
-
-%%--------------------------------------------------------------------------
-%% Specs
-%%--------------------------------------------------------------------------
-
--type startchild_err() :: 'already_present'
- | {'already_started', Child :: child()} | term().
--type startchild_ret() :: {'ok', Child :: child()}
- | {'ok', Child :: child(), Info :: term()}
- | {'error', startchild_err()}.
-
--spec start_child(SupRef, ChildSpec) -> startchild_ret() when
- SupRef :: sup_ref(),
- ChildSpec :: child_spec() | (List :: [term()]).
-
--spec restart_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: child_id(),
- Result :: {'ok', Child :: child()}
- | {'ok', Child :: child(), Info :: term()}
- | {'error', Error},
- Error :: 'running' | 'not_found' | 'simple_one_for_one' | term().
-
--spec delete_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: child_id(),
- Result :: 'ok' | {'error', Error},
- Error :: 'running' | 'not_found' | 'simple_one_for_one'.
-
--spec terminate_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: pid() | child_id(),
- Result :: 'ok' | {'error', Error},
- Error :: 'not_found' | 'simple_one_for_one'.
-
--spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
- SupRef :: sup_ref(),
- Id :: child_id() | 'undefined',
- Child :: child(),
- Type :: worker(),
- Modules :: modules().
-
--spec check_childspecs(ChildSpecs) -> Result when
- ChildSpecs :: [child_spec()],
- Result :: 'ok' | {'error', Error :: term()}.
-
--type init_sup_name() :: sup_name() | 'self'.
-
--type stop_rsn() :: 'shutdown' | {'bad_return', {module(),'init', term()}}
- | {'bad_start_spec', term()} | {'start_spec', term()}
- | {'supervisor_data', term()}.
-
--spec init({init_sup_name(), module(), [term()]}) ->
- {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
-
--type call() :: 'which_children'.
--spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.
-
--spec handle_cast('null', state()) -> {'noreply', state()}.
-
--spec handle_info(term(), state()) ->
- {'noreply', state()} | {'stop', 'shutdown', state()}.
-
--spec terminate(term(), state()) -> 'ok'.
-
--spec code_change(term(), state(), term()) ->
- {'ok', state()} | {'error', term()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{init,1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%% ---------------------------------------------------
-%%% This is a general process supervisor built upon gen_server.erl.
-%%% Servers/processes should/could also be built using gen_server.erl.
-%%% SupName = {local, atom()} | {global, atom()}.
-%%% ---------------------------------------------------
-start_link(Mod, Args) ->
- gen_server:start_link(?MODULE, {self, Mod, Args}, []).
-
-start_link(SupName, Mod, Args) ->
- gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
-
-%%% ---------------------------------------------------
-%%% Interface functions.
-%%% ---------------------------------------------------
-start_child(Supervisor, ChildSpec) ->
- call(Supervisor, {start_child, ChildSpec}).
-
-restart_child(Supervisor, Name) ->
- call(Supervisor, {restart_child, Name}).
-
-delete_child(Supervisor, Name) ->
- call(Supervisor, {delete_child, Name}).
-
-%%-----------------------------------------------------------------
-%% Func: terminate_child/2
-%% Returns: ok | {error, Reason}
-%% Note that the child is *always* terminated in some
-%% way (maybe killed).
-%%-----------------------------------------------------------------
-terminate_child(Supervisor, Name) ->
- call(Supervisor, {terminate_child, Name}).
-
-which_children(Supervisor) ->
- call(Supervisor, which_children).
-
-find_child(Supervisor, Name) ->
- [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
- Name1 =:= Name].
-
-call(Supervisor, Req) ->
- gen_server:call(Supervisor, Req, infinity).
-
-check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
- case check_startspec(ChildSpecs) of
- {ok, _} -> ok;
- Error -> {error, Error}
- end;
-check_childspecs(X) -> {error, {badarg, X}}.
-
-%%% ---------------------------------------------------
-%%%
-%%% Initialize the supervisor.
-%%%
-%%% ---------------------------------------------------
-init({SupName, Mod, Args}) ->
- process_flag(trap_exit, true),
- case Mod:init(Args) of
- {ok, {SupFlags, StartSpec}} ->
- case init_state(SupName, SupFlags, Mod, Args) of
- {ok, State} when ?is_simple(State) ->
- init_dynamic(State, StartSpec);
- {ok, State} ->
- init_children(State, StartSpec);
- Error ->
- {stop, {supervisor_data, Error}}
- end;
- ignore ->
- ignore;
- Error ->
- {stop, {bad_return, {Mod, init, Error}}}
- end.
-
-init_children(State, StartSpec) ->
- SupName = State#state.name,
- case check_startspec(StartSpec) of
- {ok, Children} ->
- case start_children(Children, SupName) of
- {ok, NChildren} ->
- {ok, State#state{children = NChildren}};
- {error, NChildren} ->
- terminate_children(NChildren, SupName),
- {stop, shutdown}
- end;
- Error ->
- {stop, {start_spec, Error}}
- end.
-
-init_dynamic(State, [StartSpec]) ->
- case check_startspec([StartSpec]) of
- {ok, Children} ->
- {ok, State#state{children = Children}};
- Error ->
- {stop, {start_spec, Error}}
- end;
-init_dynamic(_State, StartSpec) ->
- {stop, {bad_start_spec, StartSpec}}.
-
-%%-----------------------------------------------------------------
-%% Func: start_children/2
-%% Args: Children = [#child] in start order
-%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
-%% Purpose: Start all children. The new list contains #child's
-%% with pids.
-%% Returns: {ok, NChildren} | {error, NChildren}
-%% NChildren = [#child] in termination order (reversed
-%% start order)
-%%-----------------------------------------------------------------
-start_children(Children, SupName) -> start_children(Children, [], SupName).
-
-start_children([Child|Chs], NChildren, SupName) ->
- case do_start_child(SupName, Child) of
- {ok, Pid} ->
- start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName);
- {ok, Pid, _Extra} ->
- start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName);
- {error, Reason} ->
- report_error(start_error, Reason, Child, SupName),
- {error, lists:reverse(Chs) ++ [Child | NChildren]}
- end;
-start_children([], NChildren, _SupName) ->
- {ok, NChildren}.
-
-do_start_child(SupName, Child) ->
- #child{mfa = {M, F, A}} = Child,
- case catch apply(M, F, A) of
- {ok, Pid} when is_pid(Pid) ->
- NChild = Child#child{pid = Pid},
- report_progress(NChild, SupName),
- {ok, Pid};
- {ok, Pid, Extra} when is_pid(Pid) ->
- NChild = Child#child{pid = Pid},
- report_progress(NChild, SupName),
- {ok, Pid, Extra};
- ignore ->
- {ok, undefined};
- {error, What} -> {error, What};
- What -> {error, What}
- end.
-
-do_start_child_i(M, F, A) ->
- case catch apply(M, F, A) of
- {ok, Pid} when is_pid(Pid) ->
- {ok, Pid};
- {ok, Pid, Extra} when is_pid(Pid) ->
- {ok, Pid, Extra};
- ignore ->
- {ok, undefined};
- {error, Error} ->
- {error, Error};
- What ->
- {error, What}
- end.
-
-
-%%% ---------------------------------------------------
-%%%
-%%% Callback functions.
-%%%
-%%% ---------------------------------------------------
-handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
- #child{mfa = {M, F, A}} = hd(State#state.children),
- Args = A ++ EArgs,
- case do_start_child_i(M, F, Args) of
- {ok, undefined} ->
- {reply, {ok, undefined}, State};
- {ok, Pid} ->
- NState = State#state{dynamics =
- ?DICT:store(Pid, Args, State#state.dynamics)},
- {reply, {ok, Pid}, NState};
- {ok, Pid, Extra} ->
- NState = State#state{dynamics =
- ?DICT:store(Pid, Args, State#state.dynamics)},
- {reply, {ok, Pid, Extra}, NState};
- What ->
- {reply, What, State}
- end;
-
-%%% The requests terminate_child, delete_child and restart_child are
-%%% invalid for simple_one_for_one and simple_one_for_one_terminate
-%%% supervisors.
-handle_call({_Req, _Data}, _From, State) when ?is_simple(State) ->
- {reply, {error, State#state.strategy}, State};
-
-handle_call({start_child, ChildSpec}, _From, State) ->
- case check_childspec(ChildSpec) of
- {ok, Child} ->
- {Resp, NState} = handle_start_child(Child, State),
- {reply, Resp, NState};
- What ->
- {reply, {error, What}, State}
- end;
-
-handle_call({restart_child, Name}, _From, State) ->
- case get_child(Name, State) of
- {value, Child} when Child#child.pid =:= undefined ->
- case do_start_child(State#state.name, Child) of
- {ok, Pid} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {reply, {ok, Pid}, NState};
- {ok, Pid, Extra} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {reply, {ok, Pid, Extra}, NState};
- Error ->
- {reply, Error, State}
- end;
- {value, _} ->
- {reply, {error, running}, State};
- _ ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call({delete_child, Name}, _From, State) ->
- case get_child(Name, State) of
- {value, Child} when Child#child.pid =:= undefined ->
- NState = remove_child(Child, State),
- {reply, ok, NState};
- {value, _} ->
- {reply, {error, running}, State};
- _ ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call({terminate_child, Name}, _From, State) ->
- case get_child(Name, State) of
- {value, Child} ->
- NChild = do_terminate(Child, State#state.name),
- {reply, ok, replace_child(NChild, State)};
- _ ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call(which_children, _From, State) when ?is_simple(State) ->
- [#child{child_type = CT, modules = Mods}] = State#state.children,
- Reply = lists:map(fun ({Pid, _}) -> {undefined, Pid, CT, Mods} end,
- ?DICT:to_list(State#state.dynamics)),
- {reply, Reply, State};
-
-handle_call(which_children, _From, State) ->
- Resp =
- lists:map(fun (#child{pid = Pid, name = Name,
- child_type = ChildType, modules = Mods}) ->
- {Name, Pid, ChildType, Mods}
- end,
- State#state.children),
- {reply, Resp, State}.
-
-%%% Hopefully cause a function-clause as there is no API function
-%%% that utilizes cast.
-handle_cast(null, State) ->
- error_logger:error_msg("ERROR: Supervisor received cast-message 'null'~n",
- []),
-
- {noreply, State}.
-
-handle_info({delayed_restart, {RestartType, Reason, Child}}, State)
- when ?is_simple(State) ->
- {ok, NState} = do_restart(RestartType, Reason, Child, State),
- {noreply, NState};
-handle_info({delayed_restart, {RestartType, Reason, Child}}, State) ->
- case get_child(Child#child.name, State) of
- {value, Child1} ->
- {ok, NState} = do_restart(RestartType, Reason, Child1, State),
- {noreply, NState};
- _ ->
- {noreply, State}
- end;
-
-%%
-%% Take care of terminated children.
-%%
-handle_info({'EXIT', Pid, Reason}, State) ->
- case restart_child(Pid, Reason, State) of
- {ok, State1} ->
- {noreply, State1};
- {shutdown, State1} ->
- {stop, shutdown, State1}
- end;
-
-handle_info(Msg, State) ->
- error_logger:error_msg("Supervisor received unexpected message: ~p~n",
- [Msg]),
- {noreply, State}.
-%%
-%% Terminate this server.
-%%
-terminate(_Reason, State) when ?is_terminate_simple(State) ->
- terminate_simple_children(
- hd(State#state.children), State#state.dynamics, State#state.name),
- ok;
-terminate(_Reason, State) ->
- terminate_children(State#state.children, State#state.name),
- ok.
-
-%%
-%% Change code for the supervisor.
-%% Call the new call-back module and fetch the new start specification.
-%% Combine the new spec. with the old. If the new start spec. is
-%% not valid the code change will not succeed.
-%% Use the old Args as argument to Module:init/1.
-%% NOTE: This requires that the init function of the call-back module
-%% does not have any side effects.
-%%
-code_change(_, State, _) ->
- case (State#state.module):init(State#state.args) of
- {ok, {SupFlags, StartSpec}} ->
- case catch check_flags(SupFlags) of
- ok ->
- {Strategy, MaxIntensity, Period} = SupFlags,
- update_childspec(State#state{strategy = Strategy,
- intensity = MaxIntensity,
- period = Period},
- StartSpec);
- Error ->
- {error, Error}
- end;
- ignore ->
- {ok, State};
- Error ->
- Error
- end.
-
-check_flags({Strategy, MaxIntensity, Period}) ->
- validStrategy(Strategy),
- validIntensity(MaxIntensity),
- validPeriod(Period),
- ok;
-check_flags(What) ->
- {bad_flags, What}.
-
-update_childspec(State, StartSpec) when ?is_simple(State) ->
- case check_startspec(StartSpec) of
- {ok, [Child]} ->
- {ok, State#state{children = [Child]}};
- Error ->
- {error, Error}
- end;
-
-update_childspec(State, StartSpec) ->
- case check_startspec(StartSpec) of
- {ok, Children} ->
- OldC = State#state.children, % In reverse start order !
- NewC = update_childspec1(OldC, Children, []),
- {ok, State#state{children = NewC}};
- Error ->
- {error, Error}
- end.
-
-update_childspec1([Child|OldC], Children, KeepOld) ->
- case update_chsp(Child, Children) of
- {ok,NewChildren} ->
- update_childspec1(OldC, NewChildren, KeepOld);
- false ->
- update_childspec1(OldC, Children, [Child|KeepOld])
- end;
-update_childspec1([], Children, KeepOld) ->
- % Return them in (keeped) reverse start order.
- lists:reverse(Children ++ KeepOld).
-
-update_chsp(OldCh, Children) ->
- case lists:map(fun (Ch) when OldCh#child.name =:= Ch#child.name ->
- Ch#child{pid = OldCh#child.pid};
- (Ch) ->
- Ch
- end,
- Children) of
- Children ->
- false; % OldCh not found in new spec.
- NewC ->
- {ok, NewC}
- end.
-
-%%% ---------------------------------------------------
-%%% Start a new child.
-%%% ---------------------------------------------------
-
-handle_start_child(Child, State) ->
- case get_child(Child#child.name, State) of
- false ->
- case do_start_child(State#state.name, Child) of
- {ok, Pid} ->
- Children = State#state.children,
- {{ok, Pid},
- State#state{children =
- [Child#child{pid = Pid}|Children]}};
- {ok, Pid, Extra} ->
- Children = State#state.children,
- {{ok, Pid, Extra},
- State#state{children =
- [Child#child{pid = Pid}|Children]}};
- {error, What} ->
- {{error, {What, Child}}, State}
- end;
- {value, OldChild} when OldChild#child.pid =/= undefined ->
- {{error, {already_started, OldChild#child.pid}}, State};
- {value, _OldChild} ->
- {{error, already_present}, State}
- end.
-
-%%% ---------------------------------------------------
-%%% Restart. A process has terminated.
-%%% Returns: {ok, #state} | {shutdown, #state}
-%%% ---------------------------------------------------
-
-restart_child(Pid, Reason, State) when ?is_simple(State) ->
- case ?DICT:find(Pid, State#state.dynamics) of
- {ok, Args} ->
- [Child] = State#state.children,
- RestartType = Child#child.restart_type,
- {M, F, _} = Child#child.mfa,
- NChild = Child#child{pid = Pid, mfa = {M, F, Args}},
- do_restart(RestartType, Reason, NChild, State);
- error ->
- {ok, State}
- end;
-restart_child(Pid, Reason, State) ->
- Children = State#state.children,
- case lists:keysearch(Pid, #child.pid, Children) of
- {value, Child} ->
- RestartType = Child#child.restart_type,
- do_restart(RestartType, Reason, Child, State);
- _ ->
- {ok, State}
- end.
-
-do_restart({permanent = RestartType, Delay}, Reason, Child, State) ->
- do_restart_delay({RestartType, Delay}, Reason, Child, State);
-do_restart(permanent, Reason, Child, State) ->
- report_error(child_terminated, Reason, Child, State#state.name),
- restart(Child, State);
-do_restart(Type, normal, Child, State) ->
- del_child_and_maybe_shutdown(Type, Child, State);
-do_restart({RestartType, Delay}, {shutdown, restart} = Reason, Child, State)
- when RestartType =:= transient orelse RestartType =:= intrinsic ->
- do_restart_delay({RestartType, Delay}, Reason, Child, State);
-do_restart(Type, {shutdown, _}, Child, State) ->
- del_child_and_maybe_shutdown(Type, Child, State);
-do_restart(Type, shutdown, Child = #child{child_type = supervisor}, State) ->
- del_child_and_maybe_shutdown(Type, Child, State);
-do_restart({RestartType, Delay}, Reason, Child, State)
- when RestartType =:= transient orelse RestartType =:= intrinsic ->
- do_restart_delay({RestartType, Delay}, Reason, Child, State);
-do_restart(Type, Reason, Child, State) when Type =:= transient orelse
- Type =:= intrinsic ->
- report_error(child_terminated, Reason, Child, State#state.name),
- restart(Child, State);
-do_restart(temporary, Reason, Child, State) ->
- report_error(child_terminated, Reason, Child, State#state.name),
- NState = state_del_child(Child, State),
- {ok, NState}.
-
-do_restart_delay({RestartType, Delay}, Reason, Child, State) ->
- case restart1(Child, State) of
- {ok, NState} ->
- {ok, NState};
- {terminate, NState} ->
- _TRef = erlang:send_after(trunc(Delay*1000), self(),
- {delayed_restart,
- {{RestartType, Delay}, Reason, Child}}),
- {ok, state_del_child(Child, NState)}
- end.
-
-del_child_and_maybe_shutdown(intrinsic, Child, State) ->
- {shutdown, state_del_child(Child, State)};
-del_child_and_maybe_shutdown({intrinsic, _Delay}, Child, State) ->
- {shutdown, state_del_child(Child, State)};
-del_child_and_maybe_shutdown(_, Child, State) ->
- {ok, state_del_child(Child, State)}.
-
-restart(Child, State) ->
- case add_restart(State) of
- {ok, NState} ->
- restart(NState#state.strategy, Child, NState, fun restart/2);
- {terminate, NState} ->
- report_error(shutdown, reached_max_restart_intensity,
- Child, State#state.name),
- {shutdown, state_del_child(Child, NState)}
- end.
-
-restart1(Child, State) ->
- case add_restart(State) of
- {ok, NState} ->
- restart(NState#state.strategy, Child, NState, fun restart1/2);
- {terminate, _NState} ->
- %% we've reached the max restart intensity, but the
- %% add_restart will have added to the restarts
- %% field. Given we don't want to die here, we need to go
- %% back to the old restarts field otherwise we'll never
- %% attempt to restart later.
- {terminate, State}
- end.
-
-restart(Strategy, Child, State, Restart)
- when Strategy =:= simple_one_for_one orelse
- Strategy =:= simple_one_for_one_terminate ->
- #child{mfa = {M, F, A}} = Child,
- Dynamics = ?DICT:erase(Child#child.pid, State#state.dynamics),
- case do_start_child_i(M, F, A) of
- {ok, undefined} ->
- {ok, State};
- {ok, Pid} ->
- NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
- {ok, NState};
- {ok, Pid, _Extra} ->
- NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
- {ok, NState};
- {error, Error} ->
- report_error(start_error, Error, Child, State#state.name),
- Restart(Child, State)
- end;
-restart(one_for_one, Child, State, Restart) ->
- case do_start_child(State#state.name, Child) of
- {ok, Pid} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {ok, NState};
- {ok, Pid, _Extra} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {ok, NState};
- {error, Reason} ->
- report_error(start_error, Reason, Child, State#state.name),
- Restart(Child, State)
- end;
-restart(rest_for_one, Child, State, Restart) ->
- {ChAfter, ChBefore} = split_child(Child#child.pid, State#state.children),
- ChAfter2 = terminate_children(ChAfter, State#state.name),
- case start_children(ChAfter2, State#state.name) of
- {ok, ChAfter3} ->
- {ok, State#state{children = ChAfter3 ++ ChBefore}};
- {error, ChAfter3} ->
- Restart(Child, State#state{children = ChAfter3 ++ ChBefore})
- end;
-restart(one_for_all, Child, State, Restart) ->
- Children1 = del_child(Child#child.pid, State#state.children),
- Children2 = terminate_children(Children1, State#state.name),
- case start_children(Children2, State#state.name) of
- {ok, NChs} ->
- {ok, State#state{children = NChs}};
- {error, NChs} ->
- Restart(Child, State#state{children = NChs})
- end.
-
-%%-----------------------------------------------------------------
-%% Func: terminate_children/2
-%% Args: Children = [#child] in termination order
-%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
-%% Returns: NChildren = [#child] in
-%% startup order (reversed termination order)
-%%-----------------------------------------------------------------
-terminate_children(Children, SupName) ->
- terminate_children(Children, SupName, []).
-
-terminate_children([Child | Children], SupName, Res) ->
- NChild = do_terminate(Child, SupName),
- terminate_children(Children, SupName, [NChild | Res]);
-terminate_children([], _SupName, Res) ->
- Res.
-
-terminate_simple_children(Child, Dynamics, SupName) ->
- Pids = dict:fold(fun (Pid, _Args, Pids) ->
- erlang:monitor(process, Pid),
- unlink(Pid),
- exit(Pid, child_exit_reason(Child)),
- [Pid | Pids]
- end, [], Dynamics),
- TimeoutMsg = {timeout, make_ref()},
- TRef = timeout_start(Child, TimeoutMsg),
- {Replies, Timedout} =
- lists:foldl(
- fun (_Pid, {Replies, Timedout}) ->
- {Pid1, Reason1, Timedout1} =
- receive
- TimeoutMsg ->
- Remaining = Pids -- [P || {P, _} <- Replies],
- [exit(P, kill) || P <- Remaining],
- receive
- {'DOWN', _MRef, process, Pid, Reason} ->
- {Pid, Reason, true}
- end;
- {'DOWN', _MRef, process, Pid, Reason} ->
- {Pid, Reason, Timedout}
- end,
- {[{Pid1, child_res(Child, Reason1, Timedout1)} | Replies],
- Timedout1}
- end, {[], false}, Pids),
- timeout_stop(Child, TRef, TimeoutMsg, Timedout),
- ReportError = shutdown_error_reporter(SupName),
- Report = fun(_, ok) -> ok;
- (Pid, {error, R}) -> ReportError(R, Child#child{pid = Pid})
- end,
- [receive
- {'EXIT', Pid, Reason} ->
- Report(Pid, child_res(Child, Reason, Timedout))
- after
- 0 -> Report(Pid, Reply)
- end || {Pid, Reply} <- Replies],
- ok.
-
-child_exit_reason(#child{shutdown = brutal_kill}) -> kill;
-child_exit_reason(#child{}) -> shutdown.
-
-child_res(#child{shutdown=brutal_kill}, killed, false) -> ok;
-child_res(#child{}, shutdown, false) -> ok;
-child_res(#child{restart_type=permanent}, normal, false) -> {error, normal};
-child_res(#child{restart_type={permanent,_}},normal, false) -> {error, normal};
-child_res(#child{}, normal, false) -> ok;
-child_res(#child{}, R, _) -> {error, R}.
-
-timeout_start(#child{shutdown = Time}, Msg) when is_integer(Time) ->
- erlang:send_after(Time, self(), Msg);
-timeout_start(#child{}, _Msg) ->
- ok.
-
-timeout_stop(#child{shutdown = Time}, TRef, Msg, false) when is_integer(Time) ->
- erlang:cancel_timer(TRef),
- receive
- Msg -> ok
- after
- 0 -> ok
- end;
-timeout_stop(#child{}, _TRef, _Msg, _Timedout) ->
- ok.
-
-do_terminate(Child, SupName) when Child#child.pid =/= undefined ->
- ReportError = shutdown_error_reporter(SupName),
- case shutdown(Child#child.pid, Child#child.shutdown) of
- ok ->
- ok;
- {error, normal} ->
- case Child#child.restart_type of
- permanent -> ReportError(normal, Child);
- {permanent, _Delay} -> ReportError(normal, Child);
- _ -> ok
- end;
- {error, OtherReason} ->
- ReportError(OtherReason, Child)
- end,
- Child#child{pid = undefined};
-do_terminate(Child, _SupName) ->
- Child.
-
-%%-----------------------------------------------------------------
-%% Shutdowns a child. We must check the EXIT value
-%% of the child, because it might have died with another reason than
-%% the wanted. In that case we want to report the error. We put a
-%% monitor on the child an check for the 'DOWN' message instead of
-%% checking for the 'EXIT' message, because if we check the 'EXIT'
-%% message a "naughty" child, who does unlink(Sup), could hang the
-%% supervisor.
-%% Returns: ok | {error, OtherReason} (this should be reported)
-%%-----------------------------------------------------------------
-shutdown(Pid, brutal_kill) ->
-
- case monitor_child(Pid) of
- ok ->
- exit(Pid, kill),
- receive
- {'DOWN', _MRef, process, Pid, killed} ->
- ok;
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- end;
- {error, Reason} ->
- {error, Reason}
- end;
-
-shutdown(Pid, Time) ->
-
- case monitor_child(Pid) of
- ok ->
- exit(Pid, shutdown), %% Try to shutdown gracefully
- receive
- {'DOWN', _MRef, process, Pid, shutdown} ->
- ok;
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- after Time ->
- exit(Pid, kill), %% Force termination.
- receive
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- end
- end;
- {error, Reason} ->
- {error, Reason}
- end.
-
-%% Help function to shutdown/2 switches from link to monitor approach
-monitor_child(Pid) ->
-
- %% Do the monitor operation first so that if the child dies
- %% before the monitoring is done causing a 'DOWN'-message with
- %% reason noproc, we will get the real reason in the 'EXIT'-message
- %% unless a naughty child has already done unlink...
- erlang:monitor(process, Pid),
- unlink(Pid),
-
- receive
- %% If the child dies before the unlik we must empty
- %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
- {'EXIT', Pid, Reason} ->
- receive
- {'DOWN', _, process, Pid, _} ->
- {error, Reason}
- end
- after 0 ->
- %% If a naughty child did unlink and the child dies before
- %% monitor the result will be that shutdown/2 receives a
- %% 'DOWN'-message with reason noproc.
- %% If the child should die after the unlink there
- %% will be a 'DOWN'-message with a correct reason
- %% that will be handled in shutdown/2.
- ok
- end.
-
-
-%%-----------------------------------------------------------------
-%% Child/State manipulating functions.
-%%-----------------------------------------------------------------
-state_del_child(#child{pid = Pid}, State) when ?is_simple(State) ->
- NDynamics = ?DICT:erase(Pid, State#state.dynamics),
- State#state{dynamics = NDynamics};
-state_del_child(Child, State) ->
- NChildren = del_child(Child#child.name, State#state.children),
- State#state{children = NChildren}.
-
-del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name ->
- [Ch#child{pid = undefined} | Chs];
-del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid ->
- [Ch#child{pid = undefined} | Chs];
-del_child(Name, [Ch|Chs]) ->
- [Ch|del_child(Name, Chs)];
-del_child(_, []) ->
- [].
-
-%% Chs = [S4, S3, Ch, S1, S0]
-%% Ret: {[S4, S3, Ch], [S1, S0]}
-split_child(Name, Chs) ->
- split_child(Name, Chs, []).
-
-split_child(Name, [Ch|Chs], After) when Ch#child.name =:= Name ->
- {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
-split_child(Pid, [Ch|Chs], After) when Ch#child.pid =:= Pid ->
- {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
-split_child(Name, [Ch|Chs], After) ->
- split_child(Name, Chs, [Ch | After]);
-split_child(_, [], After) ->
- {lists:reverse(After), []}.
-
-get_child(Name, State) ->
- lists:keysearch(Name, #child.name, State#state.children).
-replace_child(Child, State) ->
- Chs = do_replace_child(Child, State#state.children),
- State#state{children = Chs}.
-
-do_replace_child(Child, [Ch|Chs]) when Ch#child.name =:= Child#child.name ->
- [Child | Chs];
-do_replace_child(Child, [Ch|Chs]) ->
- [Ch|do_replace_child(Child, Chs)].
-
-remove_child(Child, State) ->
- Chs = lists:keydelete(Child#child.name, #child.name, State#state.children),
- State#state{children = Chs}.
-
-%%-----------------------------------------------------------------
-%% Func: init_state/4
-%% Args: SupName = {local, atom()} | {global, atom()} | self
-%% Type = {Strategy, MaxIntensity, Period}
-%% Strategy = one_for_one | one_for_all | simple_one_for_one |
-%% rest_for_one
-%% MaxIntensity = integer()
-%% Period = integer()
-%% Mod :== atom()
-%% Arsg :== term()
-%% Purpose: Check that Type is of correct type (!)
-%% Returns: {ok, #state} | Error
-%%-----------------------------------------------------------------
-init_state(SupName, Type, Mod, Args) ->
- case catch init_state1(SupName, Type, Mod, Args) of
- {ok, State} ->
- {ok, State};
- Error ->
- Error
- end.
-
-init_state1(SupName, {Strategy, MaxIntensity, Period}, Mod, Args) ->
- validStrategy(Strategy),
- validIntensity(MaxIntensity),
- validPeriod(Period),
- {ok, #state{name = supname(SupName,Mod),
- strategy = Strategy,
- intensity = MaxIntensity,
- period = Period,
- module = Mod,
- args = Args}};
-init_state1(_SupName, Type, _, _) ->
- {invalid_type, Type}.
-
-validStrategy(simple_one_for_one_terminate) -> true;
-validStrategy(simple_one_for_one) -> true;
-validStrategy(one_for_one) -> true;
-validStrategy(one_for_all) -> true;
-validStrategy(rest_for_one) -> true;
-validStrategy(What) -> throw({invalid_strategy, What}).
-
-validIntensity(Max) when is_integer(Max),
- Max >= 0 -> true;
-validIntensity(What) -> throw({invalid_intensity, What}).
-
-validPeriod(Period) when is_integer(Period),
- Period > 0 -> true;
-validPeriod(What) -> throw({invalid_period, What}).
-
-supname(self,Mod) -> {self(),Mod};
-supname(N,_) -> N.
-
-%%% ------------------------------------------------------
-%%% Check that the children start specification is valid.
-%%% Shall be a six (6) tuple
-%%% {Name, Func, RestartType, Shutdown, ChildType, Modules}
-%%% where Name is an atom
-%%% Func is {Mod, Fun, Args} == {atom, atom, list}
-%%% RestartType is permanent | temporary | transient |
-%%% intrinsic | {permanent, Delay} |
-%%% {transient, Delay} | {intrinsic, Delay}
-%% where Delay >= 0
-%%% Shutdown = integer() | infinity | brutal_kill
-%%% ChildType = supervisor | worker
-%%% Modules = [atom()] | dynamic
-%%% Returns: {ok, [#child]} | Error
-%%% ------------------------------------------------------
-
-check_startspec(Children) -> check_startspec(Children, []).
-
-check_startspec([ChildSpec|T], Res) ->
- case check_childspec(ChildSpec) of
- {ok, Child} ->
- case lists:keymember(Child#child.name, #child.name, Res) of
- true -> {duplicate_child_name, Child#child.name};
- false -> check_startspec(T, [Child | Res])
- end;
- Error -> Error
- end;
-check_startspec([], Res) ->
- {ok, lists:reverse(Res)}.
-
-check_childspec({Name, Func, RestartType, Shutdown, ChildType, Mods}) ->
- catch check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods);
-check_childspec(X) -> {invalid_child_spec, X}.
-
-check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods) ->
- validName(Name),
- validFunc(Func),
- validRestartType(RestartType),
- validChildType(ChildType),
- validShutdown(Shutdown, ChildType),
- validMods(Mods),
- {ok, #child{name = Name, mfa = Func, restart_type = RestartType,
- shutdown = Shutdown, child_type = ChildType, modules = Mods}}.
-
-validChildType(supervisor) -> true;
-validChildType(worker) -> true;
-validChildType(What) -> throw({invalid_child_type, What}).
-
-validName(_Name) -> true.
-
-validFunc({M, F, A}) when is_atom(M),
- is_atom(F),
- is_list(A) -> true;
-validFunc(Func) -> throw({invalid_mfa, Func}).
-
-validRestartType(permanent) -> true;
-validRestartType(temporary) -> true;
-validRestartType(transient) -> true;
-validRestartType(intrinsic) -> true;
-validRestartType({permanent, Delay}) -> validDelay(Delay);
-validRestartType({intrinsic, Delay}) -> validDelay(Delay);
-validRestartType({transient, Delay}) -> validDelay(Delay);
-validRestartType(RestartType) -> throw({invalid_restart_type,
- RestartType}).
-
-validDelay(Delay) when is_number(Delay),
- Delay >= 0 -> true;
-validDelay(What) -> throw({invalid_delay, What}).
-
-validShutdown(Shutdown, _)
- when is_integer(Shutdown), Shutdown > 0 -> true;
-validShutdown(infinity, supervisor) -> true;
-validShutdown(brutal_kill, _) -> true;
-validShutdown(Shutdown, _) -> throw({invalid_shutdown, Shutdown}).
-
-validMods(dynamic) -> true;
-validMods(Mods) when is_list(Mods) ->
- lists:foreach(fun (Mod) ->
- if
- is_atom(Mod) -> ok;
- true -> throw({invalid_module, Mod})
- end
- end,
- Mods);
-validMods(Mods) -> throw({invalid_modules, Mods}).
-
-%%% ------------------------------------------------------
-%%% Add a new restart and calculate if the max restart
-%%% intensity has been reached (in that case the supervisor
-%%% shall terminate).
-%%% All restarts accured inside the period amount of seconds
-%%% are kept in the #state.restarts list.
-%%% Returns: {ok, State'} | {terminate, State'}
-%%% ------------------------------------------------------
-
-add_restart(State) ->
- I = State#state.intensity,
- P = State#state.period,
- R = State#state.restarts,
- Now = erlang:now(),
- R1 = add_restart([Now|R], Now, P),
- State1 = State#state{restarts = R1},
- case length(R1) of
- CurI when CurI =< I ->
- {ok, State1};
- _ ->
- {terminate, State1}
- end.
-
-add_restart([R|Restarts], Now, Period) ->
- case inPeriod(R, Now, Period) of
- true ->
- [R|add_restart(Restarts, Now, Period)];
- _ ->
- []
- end;
-add_restart([], _, _) ->
- [].
-
-inPeriod(Time, Now, Period) ->
- case difference(Time, Now) of
- T when T > Period ->
- false;
- _ ->
- true
- end.
-
-%%
-%% Time = {MegaSecs, Secs, MicroSecs} (NOTE: MicroSecs is ignored)
-%% Calculate the time elapsed in seconds between two timestamps.
-%% If MegaSecs is equal just subtract Secs.
-%% Else calculate the Mega difference and add the Secs difference,
-%% note that Secs difference can be negative, e.g.
-%% {827, 999999, 676} diff {828, 1, 653753} == > 2 secs.
-%%
-difference({TimeM, TimeS, _}, {CurM, CurS, _}) when CurM > TimeM ->
- ((CurM - TimeM) * 1000000) + (CurS - TimeS);
-difference({_, TimeS, _}, {_, CurS, _}) ->
- CurS - TimeS.
-
-%%% ------------------------------------------------------
-%%% Error and progress reporting.
-%%% ------------------------------------------------------
-
-report_error(Error, Reason, Child, SupName) ->
- ErrorMsg = [{supervisor, SupName},
- {errorContext, Error},
- {reason, Reason},
- {offender, extract_child(Child)}],
- error_logger:error_report(supervisor_report, ErrorMsg).
-
-shutdown_error_reporter(SupName) ->
- fun(Reason, Child) ->
- report_error(shutdown_error, Reason, Child, SupName)
- end.
-
-extract_child(Child) ->
- [{pid, Child#child.pid},
- {name, Child#child.name},
- {mfa, Child#child.mfa},
- {restart_type, Child#child.restart_type},
- {shutdown, Child#child.shutdown},
- {child_type, Child#child.child_type}].
-
-report_progress(Child, SupName) ->
- Progress = [{supervisor, SupName},
- {started, extract_child(Child)}],
- error_logger:info_report(progress, Progress).
diff --git a/src/supervisor2_tests.erl b/src/supervisor2_tests.erl
deleted file mode 100644
index a841b1f0..00000000
--- a/src/supervisor2_tests.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(supervisor2_tests).
--behaviour(supervisor2).
-
--export([test_all/0, start_link/0]).
--export([init/1]).
-
-test_all() ->
- ok = check_shutdown(stop, 200, 200, 2000),
- ok = check_shutdown(ignored, 1, 2, 2000).
-
-check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
- {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]),
- Res = lists:foldl(
- fun (I, ok) ->
- TestSupPid = erlang:whereis(?MODULE),
- ChildPids =
- [begin
- {ok, ChildPid} =
- supervisor2:start_child(TestSupPid, []),
- ChildPid
- end || _ <- lists:seq(1, ChildCount)],
- MRef = erlang:monitor(process, TestSupPid),
- [P ! SigStop || P <- ChildPids],
- ok = supervisor2:terminate_child(Sup, test_sup),
- {ok, _} = supervisor2:restart_child(Sup, test_sup),
- receive
- {'DOWN', MRef, process, TestSupPid, shutdown} ->
- ok;
- {'DOWN', MRef, process, TestSupPid, Reason} ->
- {error, {I, Reason}}
- end;
- (_, R) ->
- R
- end, ok, lists:seq(1, Iterations)),
- unlink(Sup),
- exit(Sup, shutdown),
- Res.
-
-start_link() ->
- Pid = spawn_link(fun () ->
- process_flag(trap_exit, true),
- receive stop -> ok end
- end),
- {ok, Pid}.
-
-init([Timeout]) ->
- {ok, {{one_for_one, 0, 1},
- [{test_sup, {supervisor2, start_link,
- [{local, ?MODULE}, ?MODULE, []]},
- transient, Timeout, supervisor, [?MODULE]}]}};
-init([]) ->
- {ok, {{simple_one_for_one_terminate, 0, 1},
- [{test_worker, {?MODULE, start_link, []},
- temporary, 1000, worker, [?MODULE]}]}}.
diff --git a/src/tcp_acceptor.erl b/src/tcp_acceptor.erl
deleted file mode 100644
index 267ce4f1..00000000
--- a/src/tcp_acceptor.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(tcp_acceptor).
-
--behaviour(gen_server).
-
--export([start_link/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(state, {callback, sock, ref}).
-
-%%--------------------------------------------------------------------
-
-start_link(Callback, LSock) ->
- gen_server:start_link(?MODULE, {Callback, LSock}, []).
-
-%%--------------------------------------------------------------------
-
-init({Callback, LSock}) ->
- gen_server:cast(self(), accept),
- {ok, #state{callback=Callback, sock=LSock}}.
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(accept, State) ->
- ok = file_handle_cache:obtain(),
- accept(State);
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({inet_async, LSock, Ref, {ok, Sock}},
- State = #state{callback={M,F,A}, sock=LSock, ref=Ref}) ->
-
- %% patch up the socket so it looks like one we got from
- %% gen_tcp:accept/1
- {ok, Mod} = inet_db:lookup_socket(LSock),
- inet_db:register_socket(Sock, Mod),
-
- %% handle
- case tune_buffer_size(Sock) of
- ok -> file_handle_cache:transfer(
- apply(M, F, A ++ [Sock])),
- ok = file_handle_cache:obtain();
- {error, enotconn} -> catch port_close(Sock);
- {error, Err} -> {ok, {IPAddress, Port}} = inet:sockname(LSock),
- error_logger:error_msg(
- "failed to tune buffer size of "
- "connection accepted on ~s:~p - ~p (~s)~n",
- [rabbit_misc:ntoab(IPAddress), Port,
- Err, rabbit_misc:format_inet_error(Err)]),
- catch port_close(Sock)
- end,
-
- %% accept more
- accept(State);
-
-handle_info({inet_async, LSock, Ref, {error, Reason}},
- State=#state{sock=LSock, ref=Ref}) ->
- case Reason of
- closed -> {stop, normal, State}; %% listening socket closed
- econnaborted -> accept(State); %% client sent RST before we accepted
- _ -> {stop, {accept_failed, Reason}, State}
- end;
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%--------------------------------------------------------------------
-
-accept(State = #state{sock=LSock}) ->
- case prim_inet:async_accept(LSock, -1) of
- {ok, Ref} -> {noreply, State#state{ref=Ref}};
- Error -> {stop, {cannot_accept, Error}, State}
- end.
-
-tune_buffer_size(Sock) ->
- case inet:getopts(Sock, [sndbuf, recbuf, buffer]) of
- {ok, BufSizes} -> BufSz = lists:max([Sz || {_Opt, Sz} <- BufSizes]),
- inet:setopts(Sock, [{buffer, BufSz}]);
- Error -> Error
- end.
diff --git a/src/tcp_acceptor_sup.erl b/src/tcp_acceptor_sup.erl
deleted file mode 100644
index 3619875f..00000000
--- a/src/tcp_acceptor_sup.erl
+++ /dev/null
@@ -1,43 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(tcp_acceptor_sup).
-
--behaviour(supervisor).
-
--export([start_link/2]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/2 :: (atom(), mfargs()) -> rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Name, Callback) ->
- supervisor:start_link({local,Name}, ?MODULE, Callback).
-
-init(Callback) ->
- {ok, {{simple_one_for_one, 10, 10},
- [{tcp_acceptor, {tcp_acceptor, start_link, [Callback]},
- transient, brutal_kill, worker, [tcp_acceptor]}]}}.
diff --git a/src/tcp_listener.erl b/src/tcp_listener.erl
deleted file mode 100644
index 4b4a31b5..00000000
--- a/src/tcp_listener.erl
+++ /dev/null
@@ -1,98 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(tcp_listener).
-
--behaviour(gen_server).
-
--export([start_link/8]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(state, {sock, on_startup, on_shutdown, label}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/8 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- integer(), atom(), mfargs(), mfargs(), string()) ->
- rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%--------------------------------------------------------------------
-
-start_link(IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
- OnStartup, OnShutdown, Label) ->
- gen_server:start_link(
- ?MODULE, {IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
- OnStartup, OnShutdown, Label}, []).
-
-%%--------------------------------------------------------------------
-
-init({IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
- {M,F,A} = OnStartup, OnShutdown, Label}) ->
- process_flag(trap_exit, true),
- case gen_tcp:listen(Port, SocketOpts ++ [{ip, IPAddress},
- {active, false}]) of
- {ok, LSock} ->
- lists:foreach(fun (_) ->
- {ok, _APid} = supervisor:start_child(
- AcceptorSup, [LSock])
- end,
- lists:duplicate(ConcurrentAcceptorCount, dummy)),
- {ok, {LIPAddress, LPort}} = inet:sockname(LSock),
- error_logger:info_msg(
- "started ~s on ~s:~p~n",
- [Label, rabbit_misc:ntoab(LIPAddress), LPort]),
- apply(M, F, A ++ [IPAddress, Port]),
- {ok, #state{sock = LSock,
- on_startup = OnStartup, on_shutdown = OnShutdown,
- label = Label}};
- {error, Reason} ->
- error_logger:error_msg(
- "failed to start ~s on ~s:~p - ~p (~s)~n",
- [Label, rabbit_misc:ntoab(IPAddress), Port,
- Reason, inet:format_error(Reason)]),
- {stop, {cannot_listen, IPAddress, Port, Reason}}
- end.
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, #state{sock=LSock, on_shutdown = {M,F,A}, label=Label}) ->
- {ok, {IPAddress, Port}} = inet:sockname(LSock),
- gen_tcp:close(LSock),
- error_logger:info_msg("stopped ~s on ~s:~p~n",
- [Label, rabbit_misc:ntoab(IPAddress), Port]),
- apply(M, F, A ++ [IPAddress, Port]).
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/tcp_listener_sup.erl b/src/tcp_listener_sup.erl
deleted file mode 100644
index 2a65cc17..00000000
--- a/src/tcp_listener_sup.erl
+++ /dev/null
@@ -1,70 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(tcp_listener_sup).
-
--behaviour(supervisor).
-
--export([start_link/7, start_link/8]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/7 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- mfargs(), mfargs(), mfargs(), string()) ->
- rabbit_types:ok_pid_or_error()).
--spec(start_link/8 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- mfargs(), mfargs(), mfargs(), integer(), string()) ->
- rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, Label) ->
- start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, 1, Label).
-
-start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label) ->
- supervisor:start_link(
- ?MODULE, {IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label}).
-
-init({IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label}) ->
- %% This is gross. The tcp_listener needs to know about the
- %% tcp_acceptor_sup, and the only way I can think of accomplishing
- %% that without jumping through hoops is to register the
- %% tcp_acceptor_sup.
- Name = rabbit_misc:tcp_name(tcp_acceptor_sup, IPAddress, Port),
- {ok, {{one_for_all, 10, 10},
- [{tcp_acceptor_sup, {tcp_acceptor_sup, start_link,
- [Name, AcceptCallback]},
- transient, infinity, supervisor, [tcp_acceptor_sup]},
- {tcp_listener, {tcp_listener, start_link,
- [IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, Name,
- OnStartup, OnShutdown, Label]},
- transient, 16#ffffffff, worker, [tcp_listener]}]}}.
diff --git a/src/test_sup.erl b/src/test_sup.erl
deleted file mode 100644
index 51ff7b4e..00000000
--- a/src/test_sup.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(test_sup).
-
--behaviour(supervisor2).
-
--export([test_supervisor_delayed_restart/0,
- init/1, start_child/0]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(test_supervisor_delayed_restart/0 :: () -> 'passed').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-test_supervisor_delayed_restart() ->
- passed = with_sup(simple_one_for_one_terminate,
- fun (SupPid) ->
- {ok, _ChildPid} =
- supervisor2:start_child(SupPid, []),
- test_supervisor_delayed_restart(SupPid)
- end),
- passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
-
-test_supervisor_delayed_restart(SupPid) ->
- ok = ping_child(SupPid),
- ok = exit_child(SupPid),
- timer:sleep(100),
- ok = ping_child(SupPid),
- ok = exit_child(SupPid),
- timer:sleep(100),
- timeout = ping_child(SupPid),
- timer:sleep(1010),
- ok = ping_child(SupPid),
- passed.
-
-with_sup(RestartStrategy, Fun) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
- Res = Fun(SupPid),
- unlink(SupPid),
- exit(SupPid, shutdown),
- Res.
-
-init([RestartStrategy]) ->
- {ok, {{RestartStrategy, 1, 1},
- [{test, {test_sup, start_child, []}, {permanent, 1},
- 16#ffffffff, worker, [test_sup]}]}}.
-
-start_child() ->
- {ok, proc_lib:spawn_link(fun run_child/0)}.
-
-ping_child(SupPid) ->
- Ref = make_ref(),
- with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
- receive {pong, Ref} -> ok
- after 1000 -> timeout
- end.
-
-exit_child(SupPid) ->
- with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
- ok.
-
-with_child_pid(SupPid, Fun) ->
- case supervisor2:which_children(SupPid) of
- [{_Id, undefined, worker, [test_sup]}] -> ok;
- [{_Id, ChildPid, worker, [test_sup]}] -> Fun(ChildPid);
- [] -> ok
- end.
-
-run_child() ->
- receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
- run_child()
- end.
diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl
deleted file mode 100644
index d60b7fec..00000000
--- a/src/vm_memory_monitor.erl
+++ /dev/null
@@ -1,382 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
-%% In practice Erlang shouldn't be allowed to grow to more than a half
-%% of available memory. The pessimistic scenario is when the Erlang VM
-%% has a single process that's consuming all memory. In such a case,
-%% during garbage collection, Erlang tries to allocate a huge chunk of
-%% continuous memory, which can result in a crash or heavy swapping.
-%%
-%% This module tries to warn Rabbit before such situations occur, so
-%% that it has a higher chance to avoid running out of memory.
-
--module(vm_memory_monitor).
-
--behaviour(gen_server).
-
--export([start_link/1, start_link/3]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--export([get_total_memory/0, get_vm_limit/0,
- get_check_interval/0, set_check_interval/1,
- get_vm_memory_high_watermark/0, set_vm_memory_high_watermark/1,
- get_memory_limit/0]).
-
-
--define(SERVER, ?MODULE).
--define(DEFAULT_MEMORY_CHECK_INTERVAL, 1000).
--define(ONE_MB, 1048576).
-
-%% For an unknown OS, we assume that we have 1GB of memory. It'll be
-%% wrong. Scale by vm_memory_high_watermark in configuration to get a
-%% sensible value.
--define(MEMORY_SIZE_FOR_UNKNOWN_OS, 1073741824).
-
--record(state, {total_memory,
- memory_limit,
- memory_fraction,
- timeout,
- timer,
- alarmed,
- alarm_funs
- }).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
--spec(start_link/3 :: (float(), fun ((any()) -> 'ok'),
- fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()).
--spec(get_total_memory/0 :: () -> (non_neg_integer() | 'unknown')).
--spec(get_vm_limit/0 :: () -> non_neg_integer()).
--spec(get_check_interval/0 :: () -> non_neg_integer()).
--spec(set_check_interval/1 :: (non_neg_integer()) -> 'ok').
--spec(get_vm_memory_high_watermark/0 :: () -> float()).
--spec(set_vm_memory_high_watermark/1 :: (float()) -> 'ok').
--spec(get_memory_limit/0 :: () -> non_neg_integer()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-get_total_memory() -> get_total_memory(os:type()).
-
-get_vm_limit() -> get_vm_limit(os:type()).
-
-get_check_interval() ->
- gen_server:call(?MODULE, get_check_interval, infinity).
-
-set_check_interval(Fraction) ->
- gen_server:call(?MODULE, {set_check_interval, Fraction}, infinity).
-
-get_vm_memory_high_watermark() ->
- gen_server:call(?MODULE, get_vm_memory_high_watermark, infinity).
-
-set_vm_memory_high_watermark(Fraction) ->
- gen_server:call(?MODULE, {set_vm_memory_high_watermark, Fraction},
- infinity).
-
-get_memory_limit() ->
- gen_server:call(?MODULE, get_memory_limit, infinity).
-
-%%----------------------------------------------------------------------------
-%% gen_server callbacks
-%%----------------------------------------------------------------------------
-
-start_link(MemFraction) ->
- start_link(MemFraction,
- fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
-
-start_link(MemFraction, AlarmSet, AlarmClear) ->
- gen_server:start_link({local, ?SERVER}, ?MODULE,
- [MemFraction, {AlarmSet, AlarmClear}], []).
-
-init([MemFraction, AlarmFuns]) ->
- TRef = start_timer(?DEFAULT_MEMORY_CHECK_INTERVAL),
- State = #state { timeout = ?DEFAULT_MEMORY_CHECK_INTERVAL,
- timer = TRef,
- alarmed = false,
- alarm_funs = AlarmFuns },
- {ok, set_mem_limits(State, MemFraction)}.
-
-handle_call(get_vm_memory_high_watermark, _From, State) ->
- {reply, State#state.memory_fraction, State};
-
-handle_call({set_vm_memory_high_watermark, MemFraction}, _From, State) ->
- {reply, ok, set_mem_limits(State, MemFraction)};
-
-handle_call(get_check_interval, _From, State) ->
- {reply, State#state.timeout, State};
-
-handle_call({set_check_interval, Timeout}, _From, State) ->
- {ok, cancel} = timer:cancel(State#state.timer),
- {reply, ok, State#state{timeout = Timeout, timer = start_timer(Timeout)}};
-
-handle_call(get_memory_limit, _From, State) ->
- {reply, State#state.memory_limit, State};
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(_Request, State) ->
- {noreply, State}.
-
-handle_info(update, State) ->
- {noreply, internal_update(State)};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-%% Server Internals
-%%----------------------------------------------------------------------------
-
-set_mem_limits(State, MemFraction) ->
- TotalMemory =
- case get_total_memory() of
- unknown ->
- case State of
- #state { total_memory = undefined,
- memory_limit = undefined } ->
- error_logger:warning_msg(
- "Unknown total memory size for your OS ~p. "
- "Assuming memory size is ~pMB.~n",
- [os:type(),
- trunc(?MEMORY_SIZE_FOR_UNKNOWN_OS/?ONE_MB)]);
- _ ->
- ok
- end,
- ?MEMORY_SIZE_FOR_UNKNOWN_OS;
- M -> M
- end,
- UsableMemory = case get_vm_limit() of
- Limit when Limit < TotalMemory ->
- error_logger:warning_msg(
- "Only ~pMB of ~pMB memory usable due to "
- "limited address space.~n",
- [trunc(V/?ONE_MB) || V <- [Limit, TotalMemory]]),
- Limit;
- _ ->
- TotalMemory
- end,
- MemLim = trunc(MemFraction * UsableMemory),
- error_logger:info_msg("Memory limit set to ~pMB of ~pMB total.~n",
- [trunc(MemLim/?ONE_MB), trunc(TotalMemory/?ONE_MB)]),
- internal_update(State #state { total_memory = TotalMemory,
- memory_limit = MemLim,
- memory_fraction = MemFraction}).
-
-internal_update(State = #state { memory_limit = MemLimit,
- alarmed = Alarmed,
- alarm_funs = {AlarmSet, AlarmClear} }) ->
- MemUsed = erlang:memory(total),
- NewAlarmed = MemUsed > MemLimit,
- case {Alarmed, NewAlarmed} of
- {false, true} -> emit_update_info(set, MemUsed, MemLimit),
- AlarmSet({{resource_limit, memory, node()}, []});
- {true, false} -> emit_update_info(clear, MemUsed, MemLimit),
- AlarmClear({resource_limit, memory, node()});
- _ -> ok
- end,
- State #state {alarmed = NewAlarmed}.
-
-emit_update_info(AlarmState, MemUsed, MemLimit) ->
- error_logger:info_msg(
- "vm_memory_high_watermark ~p. Memory used:~p allowed:~p~n",
- [AlarmState, MemUsed, MemLimit]).
-
-start_timer(Timeout) ->
- {ok, TRef} = timer:send_interval(Timeout, update),
- TRef.
-
-%% According to http://msdn.microsoft.com/en-us/library/aa366778(VS.85).aspx
-%% Windows has 2GB and 8TB of address space for 32 and 64 bit accordingly.
-get_vm_limit({win32,_OSname}) ->
- case erlang:system_info(wordsize) of
- 4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
- 8 -> 8*1024*1024*1024*1024 %% 8 TB for 64 bits 2^42
- end;
-
-%% On a 32-bit machine, if you're using more than 4 gigs of RAM you're
-%% in big trouble anyway.
-get_vm_limit(_OsType) ->
- case erlang:system_info(wordsize) of
- 4 -> 4*1024*1024*1024; %% 4 GB for 32 bits 2^32
- 8 -> 256*1024*1024*1024*1024 %% 256 TB for 64 bits 2^48
- %%http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
- end.
-
-%%----------------------------------------------------------------------------
-%% Internal Helpers
-%%----------------------------------------------------------------------------
-cmd(Command) ->
- Exec = hd(string:tokens(Command, " ")),
- case os:find_executable(Exec) of
- false -> throw({command_not_found, Exec});
- _ -> os:cmd(Command)
- end.
-
-%% get_total_memory(OS) -> Total
-%% Windows and Freebsd code based on: memsup:get_memory_usage/1
-%% Original code was part of OTP and released under "Erlang Public License".
-
-get_total_memory({unix,darwin}) ->
- File = cmd("/usr/bin/vm_stat"),
- Lines = string:tokens(File, "\n"),
- Dict = dict:from_list(lists:map(fun parse_line_mach/1, Lines)),
- [PageSize, Inactive, Active, Free, Wired] =
- [dict:fetch(Key, Dict) ||
- Key <- [page_size, 'Pages inactive', 'Pages active', 'Pages free',
- 'Pages wired down']],
- PageSize * (Inactive + Active + Free + Wired);
-
-get_total_memory({unix,freebsd}) ->
- PageSize = sysctl("vm.stats.vm.v_page_size"),
- PageCount = sysctl("vm.stats.vm.v_page_count"),
- PageCount * PageSize;
-
-get_total_memory({unix,openbsd}) ->
- sysctl("hw.usermem");
-
-get_total_memory({win32,_OSname}) ->
- %% Due to the Erlang print format bug, on Windows boxes the memory
- %% size is broken. For example Windows 7 64 bit with 4Gigs of RAM
- %% we get negative memory size:
- %% > os_mon_sysinfo:get_mem_info().
- %% ["76 -1658880 1016913920 -1 -1021628416 2147352576 2134794240\n"]
- %% Due to this bug, we don't actually know anything. Even if the
- %% number is postive we can't be sure if it's correct. This only
- %% affects us on os_mon versions prior to 2.2.1.
- case application:get_key(os_mon, vsn) of
- undefined ->
- unknown;
- {ok, Version} ->
- case rabbit_misc:version_compare(Version, "2.2.1", lt) of
- true -> %% os_mon is < 2.2.1, so we know nothing
- unknown;
- false ->
- [Result|_] = os_mon_sysinfo:get_mem_info(),
- {ok, [_MemLoad, TotPhys, _AvailPhys,
- _TotPage, _AvailPage, _TotV, _AvailV], _RestStr} =
- io_lib:fread("~d~d~d~d~d~d~d", Result),
- TotPhys
- end
- end;
-
-get_total_memory({unix, linux}) ->
- File = read_proc_file("/proc/meminfo"),
- Lines = string:tokens(File, "\n"),
- Dict = dict:from_list(lists:map(fun parse_line_linux/1, Lines)),
- dict:fetch('MemTotal', Dict);
-
-get_total_memory({unix, sunos}) ->
- File = cmd("/usr/sbin/prtconf"),
- Lines = string:tokens(File, "\n"),
- Dict = dict:from_list(lists:map(fun parse_line_sunos/1, Lines)),
- dict:fetch('Memory size', Dict);
-
-get_total_memory({unix, aix}) ->
- File = cmd("/usr/bin/vmstat -v"),
- Lines = string:tokens(File, "\n"),
- Dict = dict:from_list(lists:map(fun parse_line_aix/1, Lines)),
- dict:fetch('memory pages', Dict) * 4096;
-
-get_total_memory(_OsType) ->
- unknown.
-
-%% A line looks like "Foo bar: 123456."
-parse_line_mach(Line) ->
- [Name, RHS | _Rest] = string:tokens(Line, ":"),
- case Name of
- "Mach Virtual Memory Statistics" ->
- ["(page", "size", "of", PageSize, "bytes)"] =
- string:tokens(RHS, " "),
- {page_size, list_to_integer(PageSize)};
- _ ->
- [Value | _Rest1] = string:tokens(RHS, " ."),
- {list_to_atom(Name), list_to_integer(Value)}
- end.
-
-%% A line looks like "FooBar: 123456 kB"
-parse_line_linux(Line) ->
- [Name, RHS | _Rest] = string:tokens(Line, ":"),
- [Value | UnitsRest] = string:tokens(RHS, " "),
- Value1 = case UnitsRest of
- [] -> list_to_integer(Value); %% no units
- ["kB"] -> list_to_integer(Value) * 1024
- end,
- {list_to_atom(Name), Value1}.
-
-%% A line looks like "Memory size: 1024 Megabytes"
-parse_line_sunos(Line) ->
- case string:tokens(Line, ":") of
- [Name, RHS | _Rest] ->
- [Value1 | UnitsRest] = string:tokens(RHS, " "),
- Value2 = case UnitsRest of
- ["Gigabytes"] ->
- list_to_integer(Value1) * ?ONE_MB * 1024;
- ["Megabytes"] ->
- list_to_integer(Value1) * ?ONE_MB;
- ["Kilobytes"] ->
- list_to_integer(Value1) * 1024;
- _ ->
- Value1 ++ UnitsRest %% no known units
- end,
- {list_to_atom(Name), Value2};
- [Name] -> {list_to_atom(Name), none}
- end.
-
-%% Lines look like " 12345 memory pages"
-%% or " 80.1 maxpin percentage"
-parse_line_aix(Line) ->
- [Value | NameWords] = string:tokens(Line, " "),
- Name = string:join(NameWords, " "),
- {list_to_atom(Name),
- case lists:member($., Value) of
- true -> trunc(list_to_float(Value));
- false -> list_to_integer(Value)
- end}.
-
-sysctl(Def) ->
- list_to_integer(cmd("/sbin/sysctl -n " ++ Def) -- "\n").
-
-%% file:read_file does not work on files in /proc as it seems to get
-%% the size of the file first and then read that many bytes. But files
-%% in /proc always have length 0, we just have to read until we get
-%% eof.
-read_proc_file(File) ->
- {ok, IoDevice} = file:open(File, [read, raw]),
- Res = read_proc_file(IoDevice, []),
- file:close(IoDevice),
- lists:flatten(lists:reverse(Res)).
-
--define(BUFFER_SIZE, 1024).
-read_proc_file(IoDevice, Acc) ->
- case file:read(IoDevice, ?BUFFER_SIZE) of
- {ok, Res} -> read_proc_file(IoDevice, [Res | Acc]);
- eof -> Acc
- end.
diff --git a/src/worker_pool.erl b/src/worker_pool.erl
deleted file mode 100644
index 488db5ec..00000000
--- a/src/worker_pool.erl
+++ /dev/null
@@ -1,142 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(worker_pool).
-
-%% Generic worker pool manager.
-%%
-%% Supports nested submission of jobs (nested jobs always run
-%% immediately in current worker process).
-%%
-%% Possible future enhancements:
-%%
-%% 1. Allow priorities (basically, change the pending queue to a
-%% priority_queue).
-
--behaviour(gen_server2).
-
--export([start_link/0, submit/1, submit_async/1, idle/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
--spec(submit/1 :: (fun (() -> A) | mfargs()) -> A).
--spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok').
--spec(idle/1 :: (any()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(SERVER, ?MODULE).
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
-
--record(state, { available, pending }).
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_server2:start_link({local, ?SERVER}, ?MODULE, [],
- [{timeout, infinity}]).
-
-submit(Fun) ->
- case get(worker_pool_worker) of
- true -> worker_pool_worker:run(Fun);
- _ -> Pid = gen_server2:call(?SERVER, next_free, infinity),
- worker_pool_worker:submit(Pid, Fun)
- end.
-
-submit_async(Fun) ->
- gen_server2:cast(?SERVER, {run_async, Fun}).
-
-idle(WId) ->
- gen_server2:cast(?SERVER, {idle, WId}).
-
-%%----------------------------------------------------------------------------
-
-init([]) ->
- {ok, #state { pending = queue:new(), available = queue:new() }, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-handle_call(next_free, From, State = #state { available = Avail,
- pending = Pending }) ->
- case queue:out(Avail) of
- {empty, _Avail} ->
- {noreply,
- State #state { pending = queue:in({next_free, From}, Pending) },
- hibernate};
- {{value, WId}, Avail1} ->
- {reply, get_worker_pid(WId), State #state { available = Avail1 },
- hibernate}
- end;
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-handle_cast({idle, WId}, State = #state { available = Avail,
- pending = Pending }) ->
- {noreply, case queue:out(Pending) of
- {empty, _Pending} ->
- State #state { available = queue:in(WId, Avail) };
- {{value, {next_free, From}}, Pending1} ->
- gen_server2:reply(From, get_worker_pid(WId)),
- State #state { pending = Pending1 };
- {{value, {run_async, Fun}}, Pending1} ->
- worker_pool_worker:submit_async(get_worker_pid(WId), Fun),
- State #state { pending = Pending1 }
- end, hibernate};
-
-handle_cast({run_async, Fun}, State = #state { available = Avail,
- pending = Pending }) ->
- {noreply,
- case queue:out(Avail) of
- {empty, _Avail} ->
- State #state { pending = queue:in({run_async, Fun}, Pending)};
- {{value, WId}, Avail1} ->
- worker_pool_worker:submit_async(get_worker_pid(WId), Fun),
- State #state { available = Avail1 }
- end, hibernate};
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_cast, Msg}, State}.
-
-handle_info(Msg, State) ->
- {stop, {unexpected_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, State) ->
- State.
-
-%%----------------------------------------------------------------------------
-
-get_worker_pid(WId) ->
- [{WId, Pid, _Type, _Modules} | _] =
- lists:dropwhile(fun ({Id, _Pid, _Type, _Modules})
- when Id =:= WId -> false;
- (_) -> true
- end,
- supervisor:which_children(worker_pool_sup)),
- Pid.
diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl
deleted file mode 100644
index 24bc375c..00000000
--- a/src/worker_pool_sup.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(worker_pool_sup).
-
--behaviour(supervisor).
-
--export([start_link/0, start_link/1]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- start_link(erlang:system_info(schedulers)).
-
-start_link(WCount) ->
- supervisor:start_link({local, ?SERVER}, ?MODULE, [WCount]).
-
-%%----------------------------------------------------------------------------
-
-init([WCount]) ->
- {ok, {{one_for_one, 10, 10},
- [{worker_pool, {worker_pool, start_link, []}, transient,
- 16#ffffffff, worker, [worker_pool]} |
- [{N, {worker_pool_worker, start_link, [N]}, transient, 16#ffffffff,
- worker, [worker_pool_worker]} || N <- lists:seq(1, WCount)]]}}.
diff --git a/src/worker_pool_worker.erl b/src/worker_pool_worker.erl
deleted file mode 100644
index a976503f..00000000
--- a/src/worker_pool_worker.erl
+++ /dev/null
@@ -1,106 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
-%%
-
--module(worker_pool_worker).
-
--behaviour(gen_server2).
-
--export([start_link/1, submit/2, submit_async/2, run/1]).
-
--export([set_maximum_since_use/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3, prioritise_cast/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/1 :: (any()) -> {'ok', pid()} | {'error', any()}).
--spec(submit/2 :: (pid(), fun (() -> A) | mfargs()) -> A).
--spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok').
--spec(run/1 :: (fun (() -> A)) -> A; (mfargs()) -> any()).
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
-
-%%----------------------------------------------------------------------------
-
-start_link(WId) ->
- gen_server2:start_link(?MODULE, [WId], [{timeout, infinity}]).
-
-submit(Pid, Fun) ->
- gen_server2:call(Pid, {submit, Fun}, infinity).
-
-submit_async(Pid, Fun) ->
- gen_server2:cast(Pid, {submit_async, Fun}).
-
-set_maximum_since_use(Pid, Age) ->
- gen_server2:cast(Pid, {set_maximum_since_use, Age}).
-
-run({M, F, A}) ->
- apply(M, F, A);
-run(Fun) ->
- Fun().
-
-%%----------------------------------------------------------------------------
-
-init([WId]) ->
- ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
- [self()]),
- ok = worker_pool:idle(WId),
- put(worker_pool_worker, true),
- {ok, WId, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
-prioritise_cast(_Msg, _Len, _State) -> 0.
-
-handle_call({submit, Fun}, From, WId) ->
- gen_server2:reply(From, run(Fun)),
- ok = worker_pool:idle(WId),
- {noreply, WId, hibernate};
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-handle_cast({submit_async, Fun}, WId) ->
- run(Fun),
- ok = worker_pool:idle(WId),
- {noreply, WId, hibernate};
-
-handle_cast({set_maximum_since_use, Age}, WId) ->
- ok = file_handle_cache:set_maximum_since_use(Age),
- {noreply, WId, hibernate};
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_cast, Msg}, State}.
-
-handle_info(Msg, State) ->
- {stop, {unexpected_info, Msg}, State}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-terminate(_Reason, State) ->
- State.
diff --git a/version.mk b/version.mk
deleted file mode 100644
index 5683af4a..00000000
--- a/version.mk
+++ /dev/null
@@ -1 +0,0 @@
-VERSION?=0.0.0