From 7997a199acada382b0c61dc84aa956e4c65de986 Mon Sep 17 00:00:00 2001 From: Daniel Bengtsson Date: Tue, 5 Nov 2024 12:14:17 +0100 Subject: [PATCH] Remove deprecated AMQP1 driver The AMQP1 driver has been deprecated for several years, since 14.4.0 and is no longer maintained or widely used. This commit removes the AMQP1 driver from the code base. References: - Original deprecation commit: https://opendev.org/openstack/oslo.messaging/commit/0f63c227f5425995ae8c61f1d40ec85e7728528a Change-Id: Iaeb52791008a7a6736c99459f66d2bdbb2dea17b --- .zuul.yaml | 20 +- bindep.txt | 9 - doc/source/admin/AMQP1.0.rst | 639 ----- doc/source/admin/index.rst | 1 - doc/source/admin/kafka.rst | 17 +- doc/source/reference/transport.rst | 1 - .../_drivers/amqp1_driver/__init__.py | 0 .../_drivers/amqp1_driver/addressing.py | 294 --- .../_drivers/amqp1_driver/controller.py | 1336 ---------- .../_drivers/amqp1_driver/eventloop.py | 404 --- oslo_messaging/_drivers/amqp1_driver/opts.py | 270 -- .../oslo_messaging_amqp_driver_overview.rst | 1144 -------- oslo_messaging/_drivers/impl_amqp1.py | 449 ---- oslo_messaging/conffixture.py | 3 - oslo_messaging/opts.py | 2 - .../tests/drivers/test_amqp_driver.py | 2324 ----------------- oslo_messaging/tests/test_opts.py | 3 +- .../notes/remove-amqp1-c924ea548dadffaa.yaml | 6 + setup.cfg | 5 +- tools/test-setup.sh | 20 - 20 files changed, 16 insertions(+), 6931 deletions(-) delete mode 100644 doc/source/admin/AMQP1.0.rst delete mode 100644 oslo_messaging/_drivers/amqp1_driver/__init__.py delete mode 100644 oslo_messaging/_drivers/amqp1_driver/addressing.py delete mode 100644 oslo_messaging/_drivers/amqp1_driver/controller.py delete mode 100644 oslo_messaging/_drivers/amqp1_driver/eventloop.py delete mode 100644 oslo_messaging/_drivers/amqp1_driver/opts.py delete mode 100644 oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst delete mode 100644 oslo_messaging/_drivers/impl_amqp1.py delete mode 100644 
oslo_messaging/tests/drivers/test_amqp_driver.py create mode 100644 releasenotes/notes/remove-amqp1-c924ea548dadffaa.yaml delete mode 100755 tools/test-setup.sh diff --git a/.zuul.yaml b/.zuul.yaml index 56618a6ee..1c4f5363c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -10,7 +10,7 @@ parent: openstack-tox-py310 vars: tox_envlist: py310-func-scenario02 - bindep_profile: rabbit kafka amqp1 + bindep_profile: rabbit kafka # Begin v3 native jobs # See https://docs.openstack.org/devstack/latest/ @@ -34,22 +34,6 @@ Run full tempest tests against rabbitmq parent: oslo.messaging-devstack-tempest-full-base -- job: - name: oslo.messaging-src-dsvm-full-amqp1-hybrid - description: | - Run the full tempest tests using the AMQP 1.0 driver for RPC and - RabbitMQ for Notifications. - parent: oslo.messaging-devstack-tempest-full-base - required-projects: - - openstack/devstack-plugin-amqp1 - vars: - devstack_localrc: - AMQP1_SERVICE: qpid-hybrid - devstack_plugins: - devstack-plugin-amqp1: https://opendev.org/openstack/devstack-plugin-amqp1 - zuul_copy_output: - '{{ devstack_log_dir }}/qdrouterd.log': logs - - job: name: oslo.messaging-src-dsvm-full-kafka-hybrid description: | @@ -109,8 +93,6 @@ - oslo.messaging-tox-py310-func-scenario02: voting: false - oslo.messaging-src-dsvm-full-rabbit - - oslo.messaging-src-dsvm-full-amqp1-hybrid: - voting: false - oslo.messaging-src-dsvm-full-kafka-hybrid: voting: false - oslo.messaging-grenade: diff --git a/bindep.txt b/bindep.txt index df62eb0b5..16d0c3c75 100644 --- a/bindep.txt +++ b/bindep.txt @@ -14,15 +14,6 @@ libffi-devel [platform:rpm] rabbitmq-server [platform:dpkg rabbit] rabbitmq-server [platform:rpm rabbit] -# AMQP1 dpkg -# This needs qpid/testing, will be installed by tools/test-setup.sh -# qdrouterd [platform:dpkg amqp1 test] -sasl2-bin [platform:dpkg amqp1 test] -uuid-dev [platform:dpkg amqp1 test] -swig [platform:dpkg amqp1 test] -libsasl2-modules [platform:dpkg amqp1 test] -default-jdk [platform:dpkg amqp1 test 
!platform:debian] - # kafka dpkg default-jdk [platform:dpkg kafka] librdkafka1 [platform:dpkg kafka] diff --git a/doc/source/admin/AMQP1.0.rst b/doc/source/admin/AMQP1.0.rst deleted file mode 100644 index aa2e32038..000000000 --- a/doc/source/admin/AMQP1.0.rst +++ /dev/null @@ -1,639 +0,0 @@ -========================================= -AMQP 1.0 Protocol Driver Deployment Guide -========================================= - -.. currentmodule:: oslo_messaging - -Introduction ------------- - -The AMQP 1.0 Protocol Driver is a messaging transport backend -supported in oslo.messaging. The driver maps the base *oslo.messaging* -capabilities for RPC and Notification message exchange onto version -1.0 of the Advanced Message Queuing Protocol (AMQP 1.0, ISO/IEC -19464). The driver is intended to support any messaging intermediary -(e.g. broker or router) that implements version 1.0 of the AMQP -protocol. - -More detail regarding the AMQP 1.0 Protocol is available from the -`AMQP specification`__. - -More detail regarding the driver's implementation is available from -the `oslo specification`__. - -__ http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-overview-v1.0-os.html -__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst - - -Abstract --------- - -The AMQP 1.0 driver is one of a family of *oslo.messaging* backend -drivers. It currently supports two types of message intermediaries. -The first type is an AMQP 1.0 messaging broker and the second type is -an AMQP 1.0 message router. The driver should support additional -intermediary types in the future but may require additions to driver -configuration parameters in order to do so. 
- -+--------------+-----------+------------+------------+-----------+ -| Intermediary | RPC | Notify | Message | Topology | -| Type | Pattern | Pattern | Treatment | | -+--------------+-----------+------------+------------+-----------+ -| Message | Yes | `Limited`_ | Direct | Single or | -| Router | | | Messaging | Mesh | -+--------------+-----------+------------+------------+-----------+ -| Message | Yes | Yes | Store and | Single or | -| Broker | | | Forward | Cluster | -+--------------+-----------+------------+------------+-----------+ - -Direct Messaging -~~~~~~~~~~~~~~~~ - -The RPC messaging pattern is a synchronous exchange between -client and server that is temporally bracketed. The direct messaging -capabilities provided by the message router are optimal for the -RPC messaging pattern. - -The driver can readily scale operation from working with a single -instances of a message router to working with a large scale routed -mesh interconnect topology. - -Store and Forward -~~~~~~~~~~~~~~~~~ - -The Notification messaging pattern is an asynchronous exchange from -a notifier to a listener (e.g. consumer). The listener need not be -present when the notification is sent. Thus, the store and forwarding -capabilities provided by the message broker are required for the -Notification messaging pattern. - -This driver is able to work with a single instance of a message broker -or a clustered broker deployment. - -.. _Limited: - -It is recommended that the message router intermediary not be used -for the Notification messaging pattern due to the consideration that -notification messages will be dropped when there is no active -consumer. The message router does not provide durability or -store-and-forward capabilities for notification messages. - -Hybrid Messaging Backends -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Oslo.messaging provides a mechanism to configure separate backends for -RPC and Notification communications. 
This is supported through the -specification of separate RPC and Notification `transport urls`_ in the -service configuration. This capability enables the optimal alignment -of messaging patterns to messaging backend and allows for different -messaging backend types to be deployed. - -This document provides deployment and configuration information for use -of this driver in hybrid messaging configurations. - -Addressing -~~~~~~~~~~ - -A new address syntax was added to the driver to support efficient -direct message routing. This new syntax will also work with a broker -intermediary backend but is not compatible with the address syntax -previously used by the driver. In order to allow backward compatibility, -the driver will attempt to identify the intermediary type for the -backend in use and will automatically select the 'legacy' syntax for -broker-based backends or the new 'routable' syntax for router-based -backends. An `address mode`_ configuration option is provided to -override this dynamic behavior and force the use of either the legacy -or routable address syntax. - -Message Acknowledgement -~~~~~~~~~~~~~~~~~~~~~~~ - -A primary functional difference between a router and a -broker intermediary type is when message acknowledgement occurs. - -The router does not "store" the message hence it does not generate an -acknowledgement. Instead the consuming endpoint is responsible for message -acknowledgement and the router forwards the acknowledgement back to -the sender. This is known as 'end-to-end' acknowledgement. In contrast, a -broker stores then forwards the message so that message acknowledgement is -performed in two stages. In the first stage, a message -acknowledgement occurs between the broker and the Sender. In the -second stage, an acknowledgement occurs between the Server and -the broker. - -This difference affects how long the Sender waits for the message -transfer to complete. 
- -:: - - +dispatch+ - | (3) | - | | - | v - +--------------+ (1) +----------+ (2) +--------------+ - | Client |---------->| Router |----------->| Server | - | (Sender) |<----------| (Direct) |<-----------| (Listener) | - +--------------+ (5) +----------+ (4) +--------------+ - - -For example when a router intermediary is used, the following sequence -occurs: - -1. The message is sent to the router -2. The router forwards the message to the Server -3. The Server dispatches the message to the application -4. The Server indicates the acknowledgement via the router -5. The router forwards the acknowledgement to the Sender - -In this sequence, a Sender waits for the message acknowledgement until -step (5) occurs. - - -:: - - +dispatch+ - | (4) | - | | - | v - +--------------+ (1) +----------+ (3) +--------------+ - | Client |---------->| Broker |----------->| Server | - | (Sender) |<----------| (Queue) |<-----------| (Listener) | - +--------------+ (2) +----------+ (5) +--------------+ - - -And when a broker intermediary is used, the following sequence occurs: - -1. The message is sent to the broker -2. The broker stores the message and acknowledges the message to the - Sender -3. The broker sends the message to the Server -4. The Server dispatches the message to the application -5. The Server indicates the acknowledgement to the broker - -In this sequence, a Sender waits for the message acknowledgement until -step (2) occurs. - -Therefore the broker-based Sender receives the acknowledgement -earlier in the transfer than the routed case. However in the brokered -case receipt of the acknowledgement does not signify that the message -has been (or will ever be) received by the Server. 
- -Batched Notifications **Note Well** -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -While the use of a router intermediary for oslo.messaging Notification is -currently not recommended, it should be noted that the use of a router -intermediary with batched notifications may exacerbate the acknowledgement -wait time for a Sender. - -For example, when a batched notification configuration is used where -batch size is set to 100, the Server will wait until 100 notification -messages are buffered (or timeout occurs) before dispatching the -notifications to the application for message acknowledgement. Since -each notifier client can have at most one message outstanding -(e.g. pending acknowledgement), then if the total number of notifying -clients are less than 100 the batch limit will never be met. This will -effectively pause all notifying clients until the batch timeout expires. - - -Prerequisites -------------- - -Protocol Engine -~~~~~~~~~~~~~~~ - -This driver uses the Apache QPID `Proton`__ AMQP 1.0 protocol engine. -This engine consists of a platform specific library and a python -binding. The driver does not directly interface with the engine API, -as the API is a very low-level interface to the AMQP protocol. -Instead, the driver uses the pure python `Pyngus`__ client API, which -is layered on top of the protocol engine. - -In order to run the driver the Proton Python bindings, Proton -library, Proton header files, and Pyngus must be installed. - -__ http://qpid.apache.org/proton/index.html -__ https://github.com/kgiusti/pyngus - -Source packages for the `Pyngus client API`__ are available via PyPI. - -__ https://pypi.org/project/pyngus - -Pyngus depends on the Proton Python bindings. Source packages for the -`Proton Python bindings`__ are also available via PyPI. - -__ https://pypi.org/project/python-qpid-proton - -Since the AMQP 1.0 driver is an optional extension to Oslo.Messaging -these packages are not installed by default. 
Use the 'amqp1' extras -tag when installing Oslo.Messaging in order to pull in these extra -packages: - -.. code-block:: shell - - $ python -m pip install oslo.messaging[amqp1] - -The Proton package includes a C extension that links to the Proton -library. The C extension is built locally when the Proton source -packages are install from PyPI. In order to build the Proton C source -locally, there are a number of tools and libraries that need to be -present on the system: - -* The tools and library necessary for Python C development -* The `SWIG`__ wrapper generator -* The `OpenSSL`__ development libraries and headers -* The `Cyrus SASL`__ development libraries and headers - -**Note well**: Currently the Proton PyPI package only supports building -the C extension on Linux systems. - -Pre-built packages for both Pyngus and Proton engine are available for -various Linux distributions (see `packages`_ below). It is recommended -to use the pre-built packages if they are available for your platform. - -__ http://www.swig.org/index.php -__ https://www.openssl.org -__ https://cyrusimap.org - -Router Intermediary -~~~~~~~~~~~~~~~~~~~ - -This driver supports a *router* intermediary that supports version 1.0 -of the AMQP protocol. The direct messaging capabilities provided by -this intermediary type are recommended for oslo.messaging RPC. - -The driver has been tested with `qpid-dispatch-router`__ router in a -`devstack`_ environment. The version of qpid-dispatch-router -**must** be at least 0.7.0. The qpid-dispatch-router also uses the -Proton engine for its AMQP 1.0 support, so the Proton library must be -installed on the system hosting the qpid-dispatch-router daemon. - -Pre-built packages for the router are available. See `packages`_ below. - -__ http://qpid.apache.org/components/dispatch-router/ - -Broker Intermediary -~~~~~~~~~~~~~~~~~~~ - -This driver supports a *broker* intermediary that supports version 1.0 -of the AMQP protocol. 
The store and forward capabilities provided by -this intermediary type are recommended for *oslo.messaging* Notifications. - -The driver has been tested with the `qpidd`__ broker in a `devstack`_ -environment. The version of qpidd **must** be at least -0.34. qpidd also uses the Proton engine for its AMQP 1.0 support, so -the Proton library must be installed on the system hosting the qpidd -daemon. - -Pre-built packages for the broker are available. See `packages`_ below. - -See the `oslo specification`__ for additional information regarding testing -done on the driver. - -__ http://qpid.apache.org/components/cpp-broker/index.html -__ https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst - - -Configuration -------------- - -.. _transport urls: - -Transport URL Enable -~~~~~~~~~~~~~~~~~~~~ - -In oslo.messaging, the transport_url parameters define the OpenStack service -backends for RPC and Notify. The url is of the form:: - - transport://user:pass@host1:port[,hostN:portN]/virtual_host - -Where the transport value specifies the rpc or notification backend as -one of **amqp**, rabbit, kafka, etc. - -To specify and enable the AMQP 1.0 driver for RPC, in the ``[DEFAULT]`` -section of the service configuration file, specify the -``transport_url`` parameter: - -.. code-block:: ini - - [DEFAULT] - transport_url = amqp://username:password@routerhostname:5672 - -To specify and enable the AMQP 1.0 driver for Notify, in the -``[NOTIFICATIONS]`` section of the service configuration file, specify the -``transport_url`` parameter: - -:: - - [NOTIFICATIONS] - transport_url = amqp://username:password@brokerhostname:5672 - -Note, that if a 'transport_url' parameter is not specified in the -[NOTIFICATIONS] section, the [DEFAULT] transport_url will be used -for both RPC and Notify backends. - -Driver Options -~~~~~~~~~~~~~~ - -It is recommended that the default configuration options provided by -the AMQP 1.0 driver be used. 
The configuration options can be modified -in the :oslo.config:group:`oslo_messaging_amqp` section of the service -configuration file. - -Connection Options -^^^^^^^^^^^^^^^^^^ - -- :oslo.config:option:`oslo_messaging_amqp.idle_timeout` -- :oslo.config:option:`oslo_messaging_amqp.connection_retry_interval` -- :oslo.config:option:`oslo_messaging_amqp.connection_retry_backoff` -- :oslo.config:option:`oslo_messaging_amqp.connection_retry_interval_max` - -Message Send Options -^^^^^^^^^^^^^^^^^^^^ - -- :oslo.config:option:`oslo_messaging_amqp.pre_settled` -- :oslo.config:option:`oslo_messaging_amqp.link_retry_delay` -- :oslo.config:option:`oslo_messaging_amqp.default_reply_timeout` -- :oslo.config:option:`oslo_messaging_amqp.default_send_timeout` -- :oslo.config:option:`oslo_messaging_amqp.default_notify_timeout` - -.. _address mode: - -Addressing Options -^^^^^^^^^^^^^^^^^^ - -- :oslo.config:option:`oslo_messaging_amqp.addressing_mode` -- :oslo.config:option:`oslo_messaging_amqp.server_request_prefix` -- :oslo.config:option:`oslo_messaging_amqp.broadcast_prefix` -- :oslo.config:option:`oslo_messaging_amqp.group_request_prefix` -- :oslo.config:option:`oslo_messaging_amqp.rpc_address_prefix` -- :oslo.config:option:`oslo_messaging_amqp.notify_address_prefix` -- :oslo.config:option:`oslo_messaging_amqp.multicast_address` -- :oslo.config:option:`oslo_messaging_amqp.unicast_address` -- :oslo.config:option:`oslo_messaging_amqp.anycast_address` -- :oslo.config:option:`oslo_messaging_amqp.default_notification_exchange` -- :oslo.config:option:`oslo_messaging_amqp.default_rpc_exchange` - -SSL Options -^^^^^^^^^^^ - -- :oslo.config:option:`oslo_messaging_amqp.ssl` -- :oslo.config:option:`oslo_messaging_amqp.ssl_ca_file` -- :oslo.config:option:`oslo_messaging_amqp.ssl_cert_file` -- :oslo.config:option:`oslo_messaging_amqp.ssl_key_file` -- :oslo.config:option:`oslo_messaging_amqp.ssl_key_password` - -SASL Options -^^^^^^^^^^^^ - -- 
:oslo.config:option:`oslo_messaging_amqp.sasl_mechanisms` -- :oslo.config:option:`oslo_messaging_amqp.sasl_config_dir` -- :oslo.config:option:`oslo_messaging_amqp.sasl_config_name` -- :oslo.config:option:`oslo_messaging_amqp.sasl_default_realm` - -AMQP Generic Options (**Note Well**) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The AMQP 1.0 driver currently does **not** support the generic *amqp* -options used by pre-1.0 drivers such as *amqp_durable_queues* or -*amqp_auto_delete*. - -qpid-dispatch-router -~~~~~~~~~~~~~~~~~~~~ - -First, verify that the Proton library has been installed and is -imported by the ``qpid-dispatch-router`` intermediary. This can be checked -by running: - -:: - - $ qdrouterd --help - -and looking for references to ``qpid-dispatch`` include and config path -options in the help text. If no ``qpid-dispatch`` information is listed, -verify that the Proton libraries are installed and that the version of -``qdrouterd`` is greater than or equal to 0.6.0. - -Second, configure the address patterns used by the driver. This is -done by adding the following to ``/etc/qpid-dispatch/qdrouterd.conf``. 
- -If the legacy syntax for the addressing mode is required, include the -following: - -:: - - address { - prefix: unicast - distribution: closest - } - - address { - prefix: exclusive - distribution: closest - } - - address { - prefix: broadcast - distribution: multicast - } - -For the routable syntax addressing mode, include the following: - -:: - - address { - prefix: openstack.org/om/rpc/multicast - distribution: multicast - } - - address { - prefix: openstack.org/om/rpc/unicast - distribution: closest - } - - address { - prefix: openstack.org/om/rpc/anycast - distribution: balanced - } - - address { - prefix: openstack.org/om/notify/multicast - distribution: multicast - } - - address { - prefix: openstack.org/om/notify/unicast - distribution: closest - } - - address { - prefix: openstack.org/om/notify/anycast - distribution: balanced - } - - -**Note well**: For any customization of the `address mode`_ and syntax used, -it is required that the address entity configurations in the -`1`/etc/qpid-dispatch/qdrouterd.conf`` be updated. - -qpidd -~~~~~ - -First, verify that the Proton library has been installed and is -imported by the qpidd broker. This can checked by running: - -.. code-block:: shell - - $ qpidd --help - -and looking for the AMQP 1.0 options in the help text. If no AMQP 1.0 -options are listed, verify that the Proton libraries are installed and -that the version of qpidd is greater than or equal to 0.34. - -Second, configure the default address patterns used by the -driver for a broker-based backend. This is done by adding the -following to ``/etc/qpid/qpidd.conf``: - -.. code-block:: ini - - queue-patterns=exclusive - queue-patterns=unicast - topic-patterns=broadcast - -These patterns, *exclusive*, *unicast*, and *broadcast* are the -legacy addressing values used by the driver. These can be overridden via the -driver configuration options if desired (see above). If manually overridden, -update the ``qpidd.conf`` values to match. - - -.. 
_devstack: - -DevStack Support ----------------- - -The plugin for the AMQP 1.0 oslo.messaging driver is supported by -DevStack. The plugin supports the deployment of several different -message bus configurations. - -In the ``[localrc]`` section of ``local.conf``, the `devstack-plugin-amqp1`__ -plugin repository must be enabled. For example: - -.. code-block:: ini - - [[local|localrc]] - enable_plugin amqp1 https://opendev.org/openstack/devstack-plugin-amqp1 - -Set the username and password variables if needed for the -configuration: - -.. code-block:: shell - - AMQP1_USERNAME=queueuser - AMQP1_PASSWORD=queuepassword - -The AMQP1_SERVICE variable identifies the message bus configuration that -will be used. In addition to the AMQP 1.0 driver being used for both the -RPC and Notification messaging communications, a hybrid configuration is -supported in the plugin that will deploy AMQP 1.0 for the RPC backend and -the oslo_messaging rabbit driver for the Notification backend. Additionally, -the plugin supports a setting for a pre-provisioned messaging bus that -prevents the plugin from creating the messaging bus. 
The setting of the -AMQP1_SERVICE variable will select which messaging intermediary will be used -for the RPC and Notification messaging backends: - -+---------------+------------------+------------------+ -| AMQP1_SERVICE | RPC Backend | Notify Backend | -+---------------+------------------+------------------+ -| | | | -| qpid | qpidd broker | qpidd broker | -| | | | -+---------------+------------------+------------------+ -| | | | -| qpid-dual | qdrouterd router | qpidd broker | -| | | | -+---------------+------------------+------------------+ -| | | | -| qpid-hybrid | qdrouterd router | rabbitmq broker | -| | | | -+---------------+------------------+------------------+ -| | | | -| external | pre-provisioned | pre-provisioned | -| | message bus | message bus | -| | | | -+---------------+------------------+------------------+ - -__ https://github.com/openstack/devstack-plugin-amqp1.git - - -.. _packages: - -Platforms and Packages ----------------------- - -PyPi -~~~~ - -Packages for `Pyngus`__ and the `Proton`__ engine are available on PyPI. - -__ https://pypi.org/project/pyngus -__ https://pypi.org/project/python-qpid-proton - -RHEL and Fedora -~~~~~~~~~~~~~~~ - -Packages exist in EPEL for RHEL/Centos 7 and 8, and Fedora 26+. - -The following packages must be installed on the system running the -``qdrouterd`` daemon: - -- ``qpid-dispatch-router`` -- ``python-qpid-proton`` - -The following packages must be installed on the system running the -``qpidd`` daemon: - -- ``qpid-cpp-server`` (version 0.26+) -- ``qpid-proton-c`` - -The following packages must be installed on the systems running the -services that use the new driver: - -- Proton libraries: ``qpid-proton-c-devel`` -- Proton python bindings: ``python-qpid-proton`` -- ``pyngus`` (via PyPI) - -Debian and Ubuntu -~~~~~~~~~~~~~~~~~ - -.. todo:: Is this still true? - -Packages for the Proton library, headers, and Python bindings are -available in the Debian/Testing repository. 
Proton packages are not -yet available in the Ubuntu repository. The version of qpidd on both -platforms is too old and does not support AMQP 1.0. - -Until the proper package version arrive the latest packages can be -pulled from the `Apache Qpid PPA`__ on Launchpad: - -.. code-block:: shell - - $ sudo add-apt-repository ppa:qpid/released - -The following packages must be installed on the system running the -``qdrouterd`` daemon: - -- ``qdrouterd`` (version 0.8.0+) - -The following packages must be installed on the system running the -``qpidd`` daemon: - -- ``qpidd`` (version 0.34+) - -The following packages must be installed on the systems running the -services that use the new driver: - -- Proton libraries: ``libqpid-proton2-dev`` -- Proton python bindings: ``python-qpid-proton`` -- ``pyngus`` (via Pypi) - -__ https://launchpad.net/~qpid/+archive/ubuntu/released diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index 778997571..a554d2b8c 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -6,6 +6,5 @@ Deployment Guide :maxdepth: 2 drivers - AMQP1.0 kafka rabbit diff --git a/doc/source/admin/kafka.rst b/doc/source/admin/kafka.rst index c581fb0ad..f13fca004 100644 --- a/doc/source/admin/kafka.rst +++ b/doc/source/admin/kafka.rst @@ -45,12 +45,11 @@ through the definition of separate RPC and notification `transport urls`__ in the service configuration. When the Kafka driver is deployed for *oslo.messaging* notifications, a separate driver and messaging backend must be deployed for RPC communications. For these -hybrid messaging configurations, either the `rabbit`__ or `amqp`__ -drivers can be deployed for *oslo.messaging* RPC. +hybrid messaging configurations, the `rabbit`__ drivers can be deployed for +*oslo.messaging* RPC. 
__ https://docs.openstack.org/oslo.messaging/latest/reference/transport.html __ https://docs.openstack.org/oslo.messaging/latest/admin/drivers.html#rabbit -__ https://docs.openstack.org/oslo.messaging/latest/admin/AMQP1.0.html Topics and vhost Support ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -131,7 +130,7 @@ service backends for RPC and Notify. The URL is of the form:: transport://user:pass@host1:port[,hostN:portN]/virtual_host Where the transport value specifies the RPC or notification backend as -one of ``amqp``, ``rabbit``, ``kafka``, etc. To specify and enable the +one of ``rabbit``, ``kafka``, etc. To specify and enable the Kafka driver for notifications, in the section ``[oslo_messaging_notifications]`` of the service configuration file, specify the ``transport_url`` parameter:: @@ -207,10 +206,10 @@ the configuration The ``RPC_`` and ``NOTIFY_`` variables will define the message bus configuration that will be used. The hybrid configurations will allow -for the *rabbit* and *amqp* drivers to be used for the RPC transports -while the *kafka* driver will be used for the notification transport. The -setting of the service variables will select which messaging -intermediary is enabled for the configuration: +for the *rabbit* drivers to be used for the RPC transports while the +*kafka* driver will be used for the notification transport. 
The setting +of the service variables will select which messaging intermediary is +enabled for the configuration: +------------+--------------------+--------------------+ | | RPC | NOTIFY | @@ -219,7 +218,5 @@ intermediary is enabled for the configuration: +------------+-----------+--------+-----------+--------+ | Config 1 | rabbit | 5672 | kafka | 9092 | +------------+-----------+--------+-----------+--------+ -| Config 1 | amqp | 5672 | kafka | 9092 | -+------------+-----------+--------+-----------+--------+ __ https://github.com/openstack/devstack-plugin-kafka.git diff --git a/doc/source/reference/transport.rst b/doc/source/reference/transport.rst index 7dfca9756..1b5d96115 100644 --- a/doc/source/reference/transport.rst +++ b/doc/source/reference/transport.rst @@ -23,4 +23,3 @@ different 3rd party libraries that don't ensure that. In certain cases, with some drivers, it does work: * rabbit: works only if no connection have already been established. -* amqp1: works diff --git a/oslo_messaging/_drivers/amqp1_driver/__init__.py b/oslo_messaging/_drivers/amqp1_driver/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/oslo_messaging/_drivers/amqp1_driver/addressing.py b/oslo_messaging/_drivers/amqp1_driver/addressing.py deleted file mode 100644 index 79396e794..000000000 --- a/oslo_messaging/_drivers/amqp1_driver/addressing.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright 2016, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Utilities that map from a Target address to a proper AMQP 1.0 address. - -This module defines a utility class that translates a high level oslo.messaging - address (Target) into the message-level address used on the message bus. This - translation may be statically configured or determined when the connection to - the message bus is made. - -The Target members that are used to generate the address are: - - * exchange - * topic - * server flag - * fanout flag - -In addition a 'service' tag is associated with the address. This tag determines -the service associated with an address (e.g. rpc or notification) so -that traffic can be partitioned based on its use. -""" - -import abc -import logging - -from oslo_messaging.target import Target - -__all__ = [ - "keyify", - "AddresserFactory", - "SERVICE_RPC", - "SERVICE_NOTIFY" -] - -SERVICE_RPC = 0 -SERVICE_NOTIFY = 1 - -LOG = logging.getLogger(__name__) - - -def keyify(address, service=SERVICE_RPC): - """Create a hashable key from a Target and service that will uniquely - identify the generated address. This key is used to map the abstract - oslo.messaging address to its corresponding AMQP link(s). This mapping may - be done before the connection is established. - """ - if isinstance(address, Target): - # service is important because the resolved address may be - # different based on whether or not this Target is used for - # notifications or RPC - return ("Target:{t={%s} e={%s} s={%s} f={%s} service={%s}}" % - (address.topic, address.exchange, address.server, - address.fanout, service)) - else: - # absolute address can be used without modification - return "String:{%s}" % address - - -class Addresser: - """Base class message bus address generator. Used to convert an - oslo.messaging address into an AMQP 1.0 address string used over the - connection to the message bus. 
- """ - def __init__(self, default_exchange): - self._default_exchange = default_exchange - - def resolve(self, target, service): - if not isinstance(target, Target): - # an already resolved address - return target - # Return a link address for a given target - if target.fanout: - return self.multicast_address(target, service) - elif target.server: - return self.unicast_address(target, service) - else: - return self.anycast_address(target, service) - - @abc.abstractmethod - def multicast_address(self, target, service): - """Address used to broadcast to all subscribers - """ - - @abc.abstractmethod - def unicast_address(self, target, service): - """Address used to target a specific subscriber (direct) - """ - - @abc.abstractmethod - def anycast_address(self, target, service): - """Address used for shared subscribers (competing consumers) - """ - - def _concat(self, sep, items): - return sep.join(filter(bool, items)) - - -class LegacyAddresser(Addresser): - """Legacy addresses are in the following format: - - multicast: '$broadcast_prefix[.$vhost].$exchange.$topic.all' - unicast: '$server_prefix[.$vhost].$exchange.$topic.$server' - anycast: '$group_prefix[.$vhost].$exchange.$topic' - - Legacy addresses do not distinguish RPC traffic from Notification traffic - """ - def __init__(self, default_exchange, server_prefix, broadcast_prefix, - group_prefix, vhost): - super().__init__(default_exchange) - self._server_prefix = server_prefix - self._broadcast_prefix = broadcast_prefix - self._group_prefix = group_prefix - self._vhost = vhost - - def multicast_address(self, target, service): - return self._concat(".", - [self._broadcast_prefix, - self._vhost, - target.exchange or self._default_exchange, - target.topic, - "all"]) - - def unicast_address(self, target, service=SERVICE_RPC): - return self._concat(".", - [self._server_prefix, - self._vhost, - target.exchange or self._default_exchange, - target.topic, - target.server]) - - def anycast_address(self, target, 
service=SERVICE_RPC): - return self._concat(".", - [self._group_prefix, - self._vhost, - target.exchange or self._default_exchange, - target.topic]) - - # for debug: - def _is_multicast(self, address): - return address.startswith(self._broadcast_prefix) - - def _is_unicast(self, address): - return address.startswith(self._server_prefix) - - def _is_anycast(self, address): - return address.startswith(self._group_prefix) - - def _is_service(self, address, service): - # legacy addresses are the same for RPC or Notifications - return True - - -class RoutableAddresser(Addresser): - """Routable addresses have different formats based their use. It starts - with a prefix that is determined by the type of traffic (RPC or - Notifications). The prefix is followed by a description of messaging - delivery semantics. The delivery may be one of: 'multicast', 'unicast', or - 'anycast'. The delivery semantics are followed by information pulled from - the Target. The template is: - - $prefix/$semantics[/$vhost]/$exchange/$topic[/$server] - - Examples based on the default prefix and semantic values: - - rpc-unicast: "openstack.org/om/rpc/unicast/my-exchange/my-topic/my-server" - notify-anycast: "openstack.org/om/notify/anycast/my-vhost/exchange/topic" - """ - - def __init__(self, default_exchange, rpc_exchange, rpc_prefix, - notify_exchange, notify_prefix, unicast_tag, multicast_tag, - anycast_tag, vhost): - super().__init__(default_exchange) - if not self._default_exchange: - self._default_exchange = "openstack" - - # templates for address generation: - - self._vhost = vhost - - _rpc = rpc_prefix + "/" - self._rpc_prefix = _rpc - self._rpc_unicast = _rpc + unicast_tag - self._rpc_multicast = _rpc + multicast_tag - self._rpc_anycast = _rpc + anycast_tag - - _notify = notify_prefix + "/" - self._notify_prefix = _notify - self._notify_unicast = _notify + unicast_tag - self._notify_multicast = _notify + multicast_tag - self._notify_anycast = _notify + anycast_tag - - self._exchange = [ 
- # SERVICE_RPC: - rpc_exchange or self._default_exchange or 'rpc', - # SERVICE_NOTIFY: - notify_exchange or self._default_exchange or 'notify' - ] - - def multicast_address(self, target, service=SERVICE_RPC): - if service == SERVICE_RPC: - prefix = self._rpc_multicast - else: - prefix = self._notify_multicast - return self._concat("/", - [prefix, - self._vhost, - target.exchange or self._exchange[service], - target.topic]) - - def unicast_address(self, target, service=SERVICE_RPC): - if service == SERVICE_RPC: - prefix = self._rpc_unicast - else: - prefix = self._notify_unicast - return self._concat("/", - [prefix, - self._vhost, - target.exchange or self._exchange[service], - target.topic, - target.server]) - - def anycast_address(self, target, service=SERVICE_RPC): - if service == SERVICE_RPC: - prefix = self._rpc_anycast - else: - prefix = self._notify_anycast - return self._concat("/", - [prefix, - self._vhost, - target.exchange or self._exchange[service], - target.topic]) - - # for debug: - def _is_multicast(self, address): - return (address.startswith(self._rpc_multicast) or - address.startswith(self._notify_multicast)) - - def _is_unicast(self, address): - return (address.startswith(self._rpc_unicast) or - address.startswith(self._notify_unicast)) - - def _is_anycast(self, address): - return (address.startswith(self._rpc_anycast) or - address.startswith(self._notify_anycast)) - - def _is_service(self, address, service): - return address.startswith(self._rpc_prefix if service == SERVICE_RPC - else self._notify_prefix) - - -class AddresserFactory: - """Generates the proper Addresser based on configuration and the type of - message bus the driver is connected to. 
- """ - def __init__(self, default_exchange, mode, **kwargs): - self._default_exchange = default_exchange - self._mode = mode - self._kwargs = kwargs - - def __call__(self, remote_properties, vhost=None): - # for backwards compatibility use legacy if dynamic and we're connected - # to qpidd or we cannot identify the message bus. This can be - # overridden via the configuration. - product = remote_properties.get('product', 'qpid-cpp') - - if self._mode == 'legacy' or (self._mode == 'dynamic' and - product == 'qpid-cpp'): - return LegacyAddresser(self._default_exchange, - self._kwargs['legacy_server_prefix'], - self._kwargs['legacy_broadcast_prefix'], - self._kwargs['legacy_group_prefix'], - vhost) - else: - return RoutableAddresser(self._default_exchange, - self._kwargs.get("rpc_exchange"), - self._kwargs["rpc_prefix"], - self._kwargs.get("notify_exchange"), - self._kwargs["notify_prefix"], - self._kwargs["unicast"], - self._kwargs["multicast"], - self._kwargs["anycast"], - vhost) diff --git a/oslo_messaging/_drivers/amqp1_driver/controller.py b/oslo_messaging/_drivers/amqp1_driver/controller.py deleted file mode 100644 index fcabe057d..000000000 --- a/oslo_messaging/_drivers/amqp1_driver/controller.py +++ /dev/null @@ -1,1336 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Controller that manages the interface between the driver and the messaging -service. 
- -This module defines a Controller class that is responsible for performing -messaging-related operations (Tasks) requested by the driver, and for managing -the connection to the messaging service. The Controller creates a background -thread which performs all messaging operations and socket I/O. The -Controller's messaging logic is executed in the background thread via lambda -functions scheduled by the Controller. -""" - -import abc -import collections -import logging -import os -import platform -import queue -import random -import sys -import threading -import time -import uuid - -from oslo_utils import eventletutils -import proton -import pyngus - -from oslo_messaging._drivers.amqp1_driver.addressing import AddresserFactory -from oslo_messaging._drivers.amqp1_driver.addressing import keyify -from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_NOTIFY -from oslo_messaging._drivers.amqp1_driver.addressing import SERVICE_RPC -from oslo_messaging._drivers.amqp1_driver import eventloop -from oslo_messaging import exceptions -from oslo_messaging.target import Target -from oslo_messaging import transport - -LOG = logging.getLogger(__name__) - - -class Task: - """Run a command on the eventloop thread, wait until it completes - """ - - @abc.abstractmethod - def wait(self): - """Called by the client thread to wait for the operation to - complete. The implementation may optionally return a value. - """ - - @abc.abstractmethod - def _execute(self, controller): - """This method will be run on the eventloop thread to perform the - messaging operation. - """ - - -class SubscribeTask(Task): - """A task that creates a subscription to the given target. Messages - arriving from the target are given to the listener. 
- """ - def __init__(self, target, listener, notifications=False): - super().__init__() - self._target = target() # mutable - need a copy - self._subscriber_id = listener.id - self._in_queue = listener.incoming - self._service = SERVICE_NOTIFY if notifications else SERVICE_RPC - self._wakeup = eventletutils.Event() - - def wait(self): - self._wakeup.wait() - - def _execute(self, controller): - controller.subscribe(self) - self._wakeup.set() - - -class SendTask(Task): - """This is the class used by the Controller to send messages to a given - destination. - """ - def __init__(self, name, message, target, deadline, retry, - wait_for_ack, notification=False): - super().__init__() - self.name = name - # note: target can be either a Target class or a string - # target is mutable - make copy - self.target = target() if isinstance(target, Target) else target - self.message = message - self.deadline = deadline - self.wait_for_ack = wait_for_ack - self.service = SERVICE_NOTIFY if notification else SERVICE_RPC - self.timer = None - self._retry = None if retry is None or retry < 0 else retry - self._wakeup = eventletutils.Event() - self._error = None - self._sender = None - - def wait(self): - self._wakeup.wait() - return self._error - - def _execute(self, controller): - if self.deadline: - # time out the send - self.timer = controller.processor.alarm(self._on_timeout, - self.deadline) - controller.send(self) - - def _prepare(self, sender): - """Called immediately before the message is handed off to the i/o - system. This implies that the sender link is up. - """ - self._sender = sender - - def _on_ack(self, state, info): - """If wait_for_ack is True, this is called by the eventloop thread when - the ack/nack is received from the peer. If wait_for_ack is False this - is called by the eventloop right after the message is written to the - link. In the last case state will always be set to ACCEPTED. 
- """ - if state != pyngus.SenderLink.ACCEPTED: - msg = ("{name} message send to {target} failed: remote" - " disposition: {disp}, info:" - "{info}".format(name=self.name, - target=self.target, - disp=state, - info=info)) - self._error = exceptions.MessageDeliveryFailure(msg) - LOG.warning("%s", msg) - self._cleanup() - self._wakeup.set() - - def _on_timeout(self): - """Invoked by the eventloop when our timer expires - """ - self.timer = None - self._sender and self._sender.cancel_send(self) - msg = ("{name} message sent to {target} failed: timed" - " out".format(name=self.name, target=self.target)) - LOG.warning("%s", msg) - # Only raise a MessagingTimeout if the caller has explicitly specified - # a timeout. - self._error = exceptions.MessagingTimeout(msg) \ - if self.message.ttl else \ - exceptions.MessageDeliveryFailure(msg) - self._cleanup() - self._wakeup.set() - - def _on_error(self, description): - """Invoked by the eventloop if the send operation fails for reasons - other than timeout and nack. - """ - msg = ("{name} message sent to {target} failed:" - " {reason}".format(name=self.name, - target=self.target, - reason=description)) - LOG.warning("%s", msg) - self._error = exceptions.MessageDeliveryFailure(msg) - self._cleanup() - self._wakeup.set() - - def _cleanup(self): - self._sender = None - if self.timer: - self.timer.cancel() - self.timer = None - - @property - def _can_retry(self): - # has the retry count expired? - if self._retry is not None: - self._retry -= 1 - if self._retry < 0: - return False - return True - - -class RPCCallTask(SendTask): - """Performs an RPC Call. Sends the request and waits for a response from - the destination. 
- """ - def __init__(self, target, message, deadline, retry, wait_for_ack): - super().__init__("RPC Call", message, target, - deadline, retry, wait_for_ack) - self._reply_link = None - self._reply_msg = None - self._msg_id = None - - def wait(self): - error = super().wait() - return error or self._reply_msg - - def _prepare(self, sender): - super()._prepare(sender) - # reserve a message id for mapping the received response - if self._msg_id: - # already set so this is a re-transmit. To be safe cancel the old - # msg_id and allocate a fresh one. - self._reply_link.cancel_response(self._msg_id) - self._reply_link = sender._reply_link - rl = self._reply_link - self._msg_id = rl.prepare_for_response(self.message, self._on_reply) - - def _on_reply(self, message): - # called if/when the reply message arrives - self._reply_msg = message - self._cleanup() - self._wakeup.set() - - def _on_ack(self, state, info): - if state != pyngus.SenderLink.ACCEPTED: - super()._on_ack(state, info) - # must wait for reply if ACCEPTED - - def _cleanup(self): - if self._msg_id: - self._reply_link.cancel_response(self._msg_id) - self._msg_id = None - self._reply_link = None - super()._cleanup() - - -class RPCMonitoredCallTask(RPCCallTask): - """An RPC call which expects a periodic heartbeat until the response is - received. 
There are two timeouts: - deadline - overall hard timeout, implemented in RPCCallTask - call_monitor_timeout - keep alive timeout, reset when heartbeat arrives - """ - def __init__(self, target, message, deadline, call_monitor_timeout, - retry, wait_for_ack): - super().__init__(target, message, deadline, - retry, wait_for_ack) - assert call_monitor_timeout is not None # nosec - self._monitor_timeout = call_monitor_timeout - self._monitor_timer = None - self._set_alarm = None - - def _execute(self, controller): - self._set_alarm = controller.processor.defer - self._monitor_timer = self._set_alarm(self._call_timeout, - self._monitor_timeout) - super()._execute(controller) - - def _call_timeout(self): - # monitor_timeout expired - self._monitor_timer = None - self._sender and self._sender.cancel_send(self) - msg = ("{name} message sent to {target} failed: call monitor timed" - " out".format(name=self.name, target=self.target)) - LOG.warning("%s", msg) - self._error = exceptions.MessagingTimeout(msg) - self._cleanup() - self._wakeup.set() - - def _on_reply(self, message): - # if reply is null, then this is the call monitor heartbeat - if message.body is None: - self._monitor_timer.cancel() - self._monitor_timer = self._set_alarm(self._call_timeout, - self._monitor_timeout) - else: - super()._on_reply(message) - - def _cleanup(self): - self._set_alarm = None - if self._monitor_timer: - self._monitor_timer.cancel() - self._monitor_timer = None - super()._cleanup() - - -class MessageDispositionTask(Task): - """A task that updates the message disposition as accepted or released - for a Server - """ - def __init__(self, disposition, released=False): - super().__init__() - self._disposition = disposition - self._released = released - - def wait(self): - # disposition update does not have to block the sender since there is - # no result to pend for. 
This avoids a thread context switch with - # every RPC call - pass - - def _execute(self, controller): - try: - self._disposition(self._released) - except Exception as e: - # there's really nothing we can do about a failed disposition. - LOG.exception("Message acknowledgment failed: %s", e) - - -class Sender(pyngus.SenderEventHandler): - """A link for sending to a particular destination on the message bus. - """ - def __init__(self, destination, scheduler, delay, service): - super().__init__() - self._destination = destination - self._service = service - self._address = None - self._link = None - self._scheduler = scheduler - self._delay = delay # for re-connecting/re-transmitting - # holds all pending SendTasks - self._pending_sends = collections.deque() - # holds all messages sent but not yet acked - self._unacked = set() - self._reply_link = None - self._connection = None - self._resend_timer = None - - @property - def pending_messages(self): - return len(self._pending_sends) - - @property - def unacked_messages(self): - return len(self._unacked) - - def attach(self, connection, reply_link, addresser): - """Open the link. Called by the Controller when the AMQP connection - becomes active. - """ - self._connection = connection - self._reply_link = reply_link - self._address = addresser.resolve(self._destination, self._service) - LOG.debug("Sender %s attached", self._address) - self._link = self._open_link() - - def detach(self): - """Close the link. Called by the controller when shutting down or in - response to a close requested by the remote. May be re-attached later - (after a reset is done) - """ - LOG.debug("Sender %s detached", self._address) - self._connection = None - self._reply_link = None - if self._resend_timer: - self._resend_timer.cancel() - self._resend_timer = None - if self._link: - self._link.close() - - def reset(self, reason="Link reset"): - """Called by the controller on connection failover. 
Release all link - resources, abort any in-flight messages, and check the retry limit on - all pending send requests. - """ - self._address = None - self._connection = None - self._reply_link = None - if self._link: - self._link.destroy() - self._link = None - self._abort_unacked(reason) - self._check_retry_limit(reason) - - def destroy(self, reason="Link destroyed"): - """Destroy the sender and all pending messages. Called on driver - shutdown. - """ - LOG.debug("Sender %s destroyed", self._address) - self.reset(reason) - self._abort_pending(reason) - - def send_message(self, send_task): - """Send a message out the link. - """ - if not self._can_send or self._pending_sends: - self._pending_sends.append(send_task) - else: - self._send(send_task) - - def cancel_send(self, send_task): - """Attempts to cancel a send request. It is possible that the send has - already completed, so this is best-effort. - """ - # may be in either list, or none - self._unacked.discard(send_task) - try: - self._pending_sends.remove(send_task) - except ValueError: - pass - - # Pyngus callbacks: - - def sender_active(self, sender_link): - LOG.debug("Sender %s active", self._address) - self._send_pending() - - def credit_granted(self, sender_link): - pass - - def sender_remote_closed(self, sender_link, pn_condition): - # The remote has initiated a close. 
This could happen when the message - # bus is shutting down, or it detected an error - LOG.warning("Sender %(addr)s failed due to remote initiated close:" - " condition=%(cond)s", - {'addr': self._address, 'cond': pn_condition}) - self._link.close() - # sender_closed() will be called once the link completes closing - - def sender_closed(self, sender_link): - self._handle_sender_closed() - - def sender_failed(self, sender_link, error): - """Protocol error occurred.""" - LOG.warning("Sender %(addr)s failed error=%(error)s", - {'addr': self._address, 'error': error}) - self._handle_sender_closed(str(error)) - - # end Pyngus callbacks - - def _handle_sender_closed(self, reason="Sender closed"): - self._abort_unacked(reason) - if self._connection: - # still attached, so attempt to restart the link - self._check_retry_limit(reason) - self._scheduler.defer(self._reopen_link, self._delay) - - def _check_retry_limit(self, reason): - # Called on recoverable connection or link failure. Remove any pending - # sends that have exhausted their retry count: - expired = set() - for send_task in self._pending_sends: - if not send_task._can_retry: - expired.add(send_task) - send_task._on_error("Message send failed: %s" % reason) - while expired: - self._pending_sends.remove(expired.pop()) - - def _abort_unacked(self, error): - # fail all messages that have been sent to the message bus and have not - # been acked yet - while self._unacked: - send_task = self._unacked.pop() - send_task._on_error("Message send failed: %s" % error) - - def _abort_pending(self, error): - # fail all messages that have yet to be sent to the message bus - while self._pending_sends: - send_task = self._pending_sends.popleft() - send_task._on_error("Message send failed: %s" % error) - - @property - def _can_send(self): - return self._link and self._link.active - - # acknowledge status - _TIMED_OUT = pyngus.SenderLink.TIMED_OUT - _ACCEPTED = pyngus.SenderLink.ACCEPTED - _RELEASED = pyngus.SenderLink.RELEASED - 
_MODIFIED = pyngus.SenderLink.MODIFIED - - def _send(self, send_task): - send_task._prepare(self) - send_task.message.address = self._address - if send_task.wait_for_ack: - self._unacked.add(send_task) - - def pyngus_callback(link, handle, state, info): - # invoked when the message bus (n)acks this message - if state == Sender._TIMED_OUT: - # ignore pyngus timeout - we maintain our own timer - # which will properly deal with this case - return - self._unacked.discard(send_task) - if state == Sender._ACCEPTED: - send_task._on_ack(Sender._ACCEPTED, info) - elif (state == Sender._RELEASED or - (state == Sender._MODIFIED and - # assuming delivery-failed means in-doubt: - not info.get("delivery-failed") and - not info.get("undeliverable-here"))): - # These states indicate that the message was never - # forwarded beyond the next hop so they can be - # re-transmitted without risk of duplication - self._resend(send_task) - else: - # some error - let task figure it out... - send_task._on_ack(state, info) - - self._link.send(send_task.message, - delivery_callback=pyngus_callback, - handle=self, - deadline=send_task.deadline) - else: # do not wait for ack - self._link.send(send_task.message, - delivery_callback=None, - handle=self, - deadline=send_task.deadline) - send_task._on_ack(pyngus.SenderLink.ACCEPTED, {}) - - def _resend(self, send_task): - # the message bus returned the message without forwarding it. Wait a - # bit for other outstanding sends to finish - most likely ending up - # here since they are all going to the same destination - then resend - # this message - if send_task._can_retry: - # note well: once there is something on the pending list no further - # messages will be sent (they will all queue up behind this one). 
- self._pending_sends.append(send_task) - if self._resend_timer is None: - sched = self._scheduler - # this will get the pending sends going again - self._resend_timer = sched.defer(self._resend_pending, - self._delay) - else: - send_task._on_error("Send retries exhausted") - - def _resend_pending(self): - # run from the _resend_timer, attempt to resend pending messages - self._resend_timer = None - self._send_pending() - - def _send_pending(self): - # flush all pending messages out - if self._can_send: - while self._pending_sends: - self._send(self._pending_sends.popleft()) - - def _open_link(self): - name = "openstack.org/om/sender/[{}]/{}".format(self._address, - uuid.uuid4().hex) - link = self._connection.create_sender(name=name, - source_address=self._address, - target_address=self._address, - event_handler=self) - link.open() - return link - - def _reopen_link(self): - if self._connection: - if self._link: - self._link.destroy() - self._link = self._open_link() - - -class Replies(pyngus.ReceiverEventHandler): - """This is the receiving link for all RPC reply messages. Messages are - routed to the proper incoming queue using the correlation-id header in the - message. - """ - def __init__(self, connection, on_ready, on_down, capacity): - self._correlation = {} # map of correlation-id to response queue - self._on_ready = on_ready - self._on_down = on_down - rname = ("openstack.org/om/receiver/[rpc-response]/%s" - % uuid.uuid4().hex) - self._receiver = connection.create_receiver("rpc-response", - event_handler=self, - name=rname) - - # capacity determines the maximum number of reply messages this link is - # willing to receive. As messages are received and capacity is - # consumed, this driver will 'top up' the capacity back to max - # capacity. This number should be large enough to avoid needlessly - # flow-controlling the replies. 
- self._capacity = capacity - self._capacity_low = (capacity + 1) / 2 - self._receiver.open() - - def detach(self): - # close the link - if self._receiver: - self._receiver.close() - - def destroy(self): - self._correlation.clear() - if self._receiver: - self._receiver.destroy() - self._receiver = None - - def prepare_for_response(self, request, callback): - """Apply a unique message identifier to this request message. This will - be used to identify messages received in reply. The identifier is - placed in the 'id' field of the request message. It is expected that - the identifier will appear in the 'correlation-id' field of the - corresponding response message. - - When the caller is done receiving replies, it must call cancel_response - """ - request.id = uuid.uuid4().hex - # reply is placed on reply_queue - self._correlation[request.id] = callback - request.reply_to = self._receiver.source_address - return request.id - - def cancel_response(self, msg_id): - """Abort waiting for the response message corresponding to msg_id. - This can be used if the request fails and no reply is expected. - """ - try: - del self._correlation[msg_id] - except KeyError: - pass - - @property - def active(self): - return self._receiver and self._receiver.active - - # Pyngus ReceiverLink event callbacks: - - def receiver_active(self, receiver_link): - """This is a Pyngus callback, invoked by Pyngus when the receiver_link - has transitioned to the open state and is able to receive incoming - messages. - """ - LOG.debug("Replies link active src=%s", self._receiver.source_address) - receiver_link.add_capacity(self._capacity) - self._on_ready() - - def receiver_remote_closed(self, receiver, pn_condition): - """This is a Pyngus callback, invoked by Pyngus when the peer of this - receiver link has initiated closing the connection. 
- """ - if pn_condition: - LOG.error("Reply subscription closed by peer: %s", - pn_condition) - receiver.close() - - def receiver_failed(self, receiver_link, error): - """Protocol error occurred.""" - LOG.error("Link to reply queue failed. error=%(error)s", - {"error": error}) - self._on_down() - - def receiver_closed(self, receiver_link): - self._on_down() - - def message_received(self, receiver, message, handle): - """This is a Pyngus callback, invoked by Pyngus when a new message - arrives on this receiver link from the peer. - """ - key = message.correlation_id - try: - self._correlation[key](message) - receiver.message_accepted(handle) - except KeyError: - LOG.warning("Can't find receiver for response msg id=%s, " - "dropping!", key) - receiver.message_modified(handle, True, True, None) - # ensure we have enough credit - if receiver.capacity <= self._capacity_low: - receiver.add_capacity(self._capacity - receiver.capacity) - - -class Server(pyngus.ReceiverEventHandler): - """A group of links that receive messages from a set of addresses derived - from a given target. Messages arriving on the links are placed on the - 'incoming' queue. - """ - def __init__(self, target, incoming, scheduler, delay, capacity): - self._target = target - self._incoming = incoming - self._addresses = [] - self._capacity = capacity # credit per each link - self._capacity_low = (capacity + 1) / 2 - self._receivers = [] - self._scheduler = scheduler - self._delay = delay # for link re-attach - self._connection = None - self._reopen_scheduled = False - - def attach(self, connection): - """Create receiver links over the given connection for all the - configured addresses. 
- """ - self._connection = connection - for a in self._addresses: - name = "openstack.org/om/receiver/[{}]/{}".format( - a, uuid.uuid4().hex) - r = self._open_link(a, name) - self._receivers.append(r) - - def detach(self): - """Attempt a clean shutdown of the links""" - self._connection = None - self._addresses = [] - for receiver in self._receivers: - receiver.close() - - def reset(self): - # destroy the links, but keep the addresses around since we may be - # failing over. Since links are destroyed, this cannot be called from - # any of the following ReceiverLink callbacks. - self._connection = None - self._addresses = [] - self._reopen_scheduled = False - for r in self._receivers: - r.destroy() - self._receivers = [] - - # Pyngus ReceiverLink event callbacks. Note that all of the Server's links - # share this handler - - def receiver_remote_closed(self, receiver, pn_condition): - """This is a Pyngus callback, invoked by Pyngus when the peer of this - receiver link has initiated closing the connection. - """ - LOG.debug("Server subscription to %s remote detach", - receiver.source_address) - if pn_condition: - vals = { - "addr": receiver.source_address or receiver.target_address, - "err_msg": pn_condition - } - LOG.error("Server subscription %(addr)s closed " - "by peer: %(err_msg)s", vals) - receiver.close() - - def receiver_failed(self, receiver_link, error): - """Protocol error occurred.""" - LOG.error("Listener link queue failed. 
error=%(error)s", - {"error": error}) - self.receiver_closed(receiver_link) - - def receiver_closed(self, receiver_link): - LOG.debug("Server subscription to %s closed", - receiver_link.source_address) - # If still attached, attempt to re-start link - if self._connection and not self._reopen_scheduled: - LOG.debug("Server subscription reopen scheduled") - self._reopen_scheduled = True - self._scheduler.defer(self._reopen_links, self._delay) - - def message_received(self, receiver, message, handle): - """This is a Pyngus callback, invoked by Pyngus when a new message - arrives on this receiver link from the peer. - """ - def message_disposition(released=False): - if receiver in self._receivers and not receiver.closed: - if released: - receiver.message_released(handle) - else: - receiver.message_accepted(handle) - if receiver.capacity <= self._capacity_low: - receiver.add_capacity(self._capacity - receiver.capacity) - else: - LOG.debug("Can't find receiver for settlement") - - qentry = {"message": message, "disposition": message_disposition} - self._incoming.put(qentry) - - def _open_link(self, address, name): - props = {"snd-settle-mode": "mixed"} - r = self._connection.create_receiver(source_address=address, - target_address=address, - event_handler=self, - name=name, - properties=props) - r.add_capacity(self._capacity) - r.open() - return r - - def _reopen_links(self): - # attempt to re-establish any closed links - LOG.debug("Server subscription reopening") - self._reopen_scheduled = False - if self._connection: - for i in range(len(self._receivers)): - link = self._receivers[i] - if link.closed: - addr = link.target_address - name = link.name - link.destroy() - self._receivers[i] = self._open_link(addr, name) - - -class RPCServer(Server): - """Subscribes to RPC addresses""" - def __init__(self, target, incoming, scheduler, delay, capacity): - super().__init__(target, incoming, scheduler, delay, - capacity) - - def attach(self, connection, addresser): - # Generate 
the AMQP 1.0 addresses for the base class - self._addresses = [ - addresser.unicast_address(self._target, SERVICE_RPC), - addresser.multicast_address(self._target, SERVICE_RPC), - addresser.anycast_address(self._target, SERVICE_RPC) - ] - # now invoke the base class with the generated addresses - super().attach(connection) - - -class NotificationServer(Server): - """Subscribes to Notification addresses""" - def __init__(self, target, incoming, scheduler, delay, capacity): - super().__init__(target, incoming, scheduler, - delay, capacity) - - def attach(self, connection, addresser): - # Generate the AMQP 1.0 addresses for the base class - self._addresses = [ - addresser.anycast_address(self._target, SERVICE_NOTIFY) - ] - # now invoke the base class with the generated addresses - super().attach(connection) - - -class Hosts: - """An order list of TransportHost addresses. Connection failover progresses - from one host to the next. The default realm comes from the configuration - and is only used if no realm is present in the URL. 
- """ - def __init__(self, url, default_realm=None): - self.virtual_host = url.virtual_host - if url.hosts: - self._entries = url.hosts[:] - else: - self._entries = [transport.TransportHost(hostname="localhost", - port=5672)] - for entry in self._entries: - entry.port = entry.port or 5672 - entry.username = entry.username - entry.password = entry.password - if default_realm and entry.username and '@' not in entry.username: - entry.username = entry.username + '@' + default_realm - self._current = random.randint(0, len(self._entries) - 1) # nosec - - @property - def current(self): - return self._entries[self._current] - - def next(self): - if len(self._entries) > 1: - self._current = (self._current + 1) % len(self._entries) - return self.current - - def __repr__(self): - return '' - - def __str__(self): - r = ', vhost=%s' % self.virtual_host if self.virtual_host else '' - return ", ".join(["%r" % th for th in self._entries]) + r - - -class Controller(pyngus.ConnectionEventHandler): - """Controls the connection to the AMQP messaging service. This object is - the 'brains' of the driver. It maintains the logic for addressing, sending - and receiving messages, and managing the connection. All messaging and I/O - work is done on the Eventloop thread, allowing the driver to run - asynchronously from the messaging clients. - """ - def __init__(self, url, default_exchange, config): - self.processor = None - self._socket_connection = None - self._node = platform.node() or "" - self._command = os.path.basename(sys.argv[0]) - self._pid = os.getpid() - # queue of drivertask objects to execute on the eventloop thread - self._tasks = queue.Queue(maxsize=500) - # limit the number of Task()'s to execute per call to _process_tasks(). 
- # This allows the eventloop main thread to return to servicing socket - # I/O in a timely manner - self._max_task_batch = 50 - # cache of all Sender links indexed by address: - self._all_senders = {} - # active Sender links indexed by address: - self._active_senders = set() - # closing Sender links indexed by address: - self._purged_senders = [] - # Servers indexed by target. Each entry is a map indexed by the - # specific ProtonListener's identifier: - self._servers = {} - - self._container_name = config.oslo_messaging_amqp.container_name - self.idle_timeout = config.oslo_messaging_amqp.idle_timeout - self.trace_protocol = config.oslo_messaging_amqp.trace - self.ssl = config.oslo_messaging_amqp.ssl - self.ssl_ca_file = config.oslo_messaging_amqp.ssl_ca_file - self.ssl_cert_file = config.oslo_messaging_amqp.ssl_cert_file - self.ssl_key_file = config.oslo_messaging_amqp.ssl_key_file - self.ssl_key_password = config.oslo_messaging_amqp.ssl_key_password - self.ssl_verify_vhost = config.oslo_messaging_amqp.ssl_verify_vhost - self.pseudo_vhost = config.oslo_messaging_amqp.pseudo_vhost - self.sasl_mechanisms = config.oslo_messaging_amqp.sasl_mechanisms - self.sasl_config_dir = config.oslo_messaging_amqp.sasl_config_dir - self.sasl_config_name = config.oslo_messaging_amqp.sasl_config_name - self.hosts = Hosts(url, config.oslo_messaging_amqp.sasl_default_realm) - self.conn_retry_interval = \ - config.oslo_messaging_amqp.connection_retry_interval - self.conn_retry_backoff = \ - config.oslo_messaging_amqp.connection_retry_backoff - self.conn_retry_interval_max = \ - config.oslo_messaging_amqp.connection_retry_interval_max - self.link_retry_delay = config.oslo_messaging_amqp.link_retry_delay - - _opts = config.oslo_messaging_amqp - factory_args = {"legacy_server_prefix": _opts.server_request_prefix, - "legacy_broadcast_prefix": _opts.broadcast_prefix, - "legacy_group_prefix": _opts.group_request_prefix, - "rpc_prefix": _opts.rpc_address_prefix, - "notify_prefix": 
_opts.notify_address_prefix, - "multicast": _opts.multicast_address, - "unicast": _opts.unicast_address, - "anycast": _opts.anycast_address, - "notify_exchange": _opts.default_notification_exchange, - "rpc_exchange": _opts.default_rpc_exchange} - - self.addresser_factory = AddresserFactory(default_exchange, - _opts.addressing_mode, - **factory_args) - self.addresser = None - - # cannot send an RPC request until the replies link is active, as we - # need the peer assigned address, so need to delay sending any RPC - # requests until this link is active: - self.reply_link = None - # Set True when the driver is shutting down - self._closing = False - # only schedule one outstanding reconnect attempt at a time - self._reconnecting = False - self._delay = self.conn_retry_interval # seconds between retries - # prevent queuing up multiple requests to run _process_tasks() - self._process_tasks_scheduled = False - self._process_tasks_lock = threading.Lock() - # credit levels for incoming links - self._reply_credit = _opts.reply_link_credit - self._rpc_credit = _opts.rpc_server_credit - self._notify_credit = _opts.notify_server_credit - # sender link maintenance timer and interval - self._link_maint_timer = None - self._link_maint_timeout = _opts.default_sender_link_timeout - - def connect(self): - """Connect to the messaging service.""" - self.processor = eventloop.Thread(self._container_name, self._node, - self._command, self._pid) - self.processor.wakeup(lambda: self._do_connect()) - - def add_task(self, task): - """Add a Task for execution on processor thread.""" - self._tasks.put(task) - self._schedule_task_processing() - - def shutdown(self, timeout=30): - """Shutdown the messaging service.""" - LOG.info("Shutting down the AMQP 1.0 connection") - if self.processor: - self.processor.wakeup(self._start_shutdown) - LOG.debug("Waiting for eventloop to exit") - self.processor.join(timeout) - self._hard_reset("Shutting down") - for sender in self._all_senders.values(): - 
sender.destroy() - self._all_senders.clear() - self._servers.clear() - self.processor.destroy() - self.processor = None - LOG.debug("Eventloop exited, driver shut down") - - # The remaining methods are reserved to run from the eventloop thread only! - # They must not be invoked directly! - - # methods executed by Tasks created by the driver: - - def send(self, send_task): - if send_task.deadline and send_task.deadline <= time.monotonic(): - send_task._on_timeout() - return - key = keyify(send_task.target, send_task.service) - sender = self._all_senders.get(key) - if not sender: - sender = Sender(send_task.target, self.processor, - self.link_retry_delay, send_task.service) - self._all_senders[key] = sender - if self.reply_link and self.reply_link.active: - sender.attach(self._socket_connection.pyngus_conn, - self.reply_link, self.addresser) - self._active_senders.add(key) - sender.send_message(send_task) - - def subscribe(self, subscribe_task): - """Subscribe to a given target""" - if subscribe_task._service == SERVICE_NOTIFY: - t = "notification" - server = NotificationServer(subscribe_task._target, - subscribe_task._in_queue, - self.processor, - self.link_retry_delay, - self._notify_credit) - else: - t = "RPC" - server = RPCServer(subscribe_task._target, - subscribe_task._in_queue, - self.processor, - self.link_retry_delay, - self._rpc_credit) - - LOG.debug("Subscribing to %(type)s target %(target)s", - {'type': t, 'target': subscribe_task._target}) - key = keyify(subscribe_task._target, subscribe_task._service) - servers = self._servers.get(key) - if servers is None: - servers = {} - self._servers[key] = servers - servers[subscribe_task._subscriber_id] = server - if self._active: - server.attach(self._socket_connection.pyngus_conn, - self.addresser) - - # commands executed on the processor (eventloop) via 'wakeup()': - - def _do_connect(self): - """Establish connection and reply subscription on processor thread.""" - host = self.hosts.current - conn_props = 
{'properties': {'process': self._command, - 'pid': self._pid, - 'node': self._node}} - # only set hostname in the AMQP 1.0 Open performative if the message - # bus can interpret it as the virtual host. We leave it unspecified - # since apparently noone can agree on how it should be used otherwise! - if self.hosts.virtual_host and not self.pseudo_vhost: - conn_props['hostname'] = self.hosts.virtual_host - if self.idle_timeout: - conn_props["idle-time-out"] = float(self.idle_timeout) - if self.trace_protocol: - conn_props["x-trace-protocol"] = self.trace_protocol - - # SSL configuration - ssl_enabled = False - if self.ssl: - ssl_enabled = True - conn_props["x-ssl"] = self.ssl - if self.ssl_ca_file: - conn_props["x-ssl-ca-file"] = self.ssl_ca_file - ssl_enabled = True - if self.ssl_cert_file: - ssl_enabled = True - conn_props["x-ssl-identity"] = (self.ssl_cert_file, - self.ssl_key_file, - self.ssl_key_password) - if ssl_enabled: - # Set the identity of the remote server for SSL to use when - # verifying the received certificate. Typically this is the DNS - # name used to set up the TCP connections. However some servers - # may provide a certificate for the virtual host instead. If that - # is the case we need to use the virtual hostname instead. 
- # Refer to SSL Server Name Indication (SNI) for the entire story: - # https://tools.ietf.org/html/rfc6066 - if self.ssl_verify_vhost: - if self.hosts.virtual_host: - conn_props['x-ssl-peer-name'] = self.hosts.virtual_host - else: - conn_props['x-ssl-peer-name'] = host.hostname - - # SASL configuration: - if self.sasl_mechanisms: - conn_props["x-sasl-mechs"] = self.sasl_mechanisms - if self.sasl_config_dir: - conn_props["x-sasl-config-dir"] = self.sasl_config_dir - if self.sasl_config_name: - conn_props["x-sasl-config-name"] = self.sasl_config_name - - self._socket_connection = self.processor.connect(host, - handler=self, - properties=conn_props) - LOG.debug("Connection initiated") - - def _process_tasks(self): - """Execute Task objects in the context of the processor thread.""" - with self._process_tasks_lock: - self._process_tasks_scheduled = False - count = 0 - while (not self._tasks.empty() and - count < self._max_task_batch): - try: - self._tasks.get(False)._execute(self) - except Exception as e: - LOG.exception("Error processing task: %s", e) - count += 1 - - # if we hit _max_task_batch, resume task processing later: - if not self._tasks.empty(): - self._schedule_task_processing() - - def _schedule_task_processing(self): - """_process_tasks() helper: prevent queuing up multiple requests for - task processing. This method is called both by the application thread - and the processing thread. - """ - if self.processor: - with self._process_tasks_lock: - already_scheduled = self._process_tasks_scheduled - self._process_tasks_scheduled = True - if not already_scheduled: - self.processor.wakeup(lambda: self._process_tasks()) - - def _start_shutdown(self): - """Called when the application is closing the transport. - Attempt to cleanly flush/close all links. 
- """ - self._closing = True - if self._active: - # try a clean shutdown - self._detach_senders() - self._detach_servers() - self.reply_link.detach() - self._socket_connection.pyngus_conn.close() - else: - # don't wait for a close from the remote, may never happen - self.processor.shutdown() - - # reply link callbacks: - - def _reply_link_ready(self): - """Invoked when the Replies reply link has become active. At this - point, we are ready to receive messages, so start all pending RPC - requests. - """ - LOG.info("Messaging is active (%(hostname)s:%(port)s%(vhost)s)", - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port, - 'vhost': ("/" + self.hosts.virtual_host - if self.hosts.virtual_host else "")}) - - for sender in self._all_senders.values(): - sender.attach(self._socket_connection.pyngus_conn, - self.reply_link, self.addresser) - - def _reply_link_down(self): - # Treat it as a recoverable failure because the RPC reply address is - # now invalid for all in-flight RPC requests. - if not self._closing: - self._detach_senders() - self._detach_servers() - self._socket_connection.pyngus_conn.close() - # once closed, _handle_connection_loss() will initiate reconnect - - # callback from eventloop on socket error - - def socket_error(self, error): - """Called by eventloop when a socket error occurs.""" - LOG.error("Socket failure: %s", error) - self._handle_connection_loss(str(error)) - - # Pyngus connection event callbacks (and their helpers), all invoked from - # the eventloop thread: - - def connection_failed(self, connection, error): - """This is a Pyngus callback, invoked by Pyngus when a non-recoverable - error occurs on the connection. 
- """ - if connection is not self._socket_connection.pyngus_conn: - # pyngus bug: ignore failure callback on destroyed connections - return - LOG.debug("AMQP Connection failure: %s", error) - self._handle_connection_loss(str(error)) - - def connection_active(self, connection): - """This is a Pyngus callback, invoked by Pyngus when the connection to - the peer is up. At this point, the driver will activate all subscriber - links (server) and the reply link. - """ - LOG.debug("Connection active (%(hostname)s:%(port)s), subscribing...", - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port}) - # allocate an addresser based on the advertised properties of the - # message bus - props = connection.remote_properties or {} - self.addresser = self.addresser_factory(props, - self.hosts.virtual_host - if self.pseudo_vhost else None) - for servers in self._servers.values(): - for server in servers.values(): - server.attach(self._socket_connection.pyngus_conn, - self.addresser) - self.reply_link = Replies(self._socket_connection.pyngus_conn, - self._reply_link_ready, - self._reply_link_down, - self._reply_credit) - self._delay = self.conn_retry_interval # reset - # schedule periodic maintenance of sender links - self._link_maint_timer = self.processor.defer(self._purge_sender_links, - self._link_maint_timeout) - - def connection_closed(self, connection): - """This is a Pyngus callback, invoked by Pyngus when the connection has - cleanly closed. This occurs after the driver closes the connection - locally, and the peer has acknowledged the close. At this point, the - shutdown of the driver's connection is complete. - """ - LOG.debug("AMQP connection closed.") - # if the driver isn't being shutdown, failover and reconnect - self._handle_connection_loss("AMQP connection closed.") - - def connection_remote_closed(self, connection, reason): - """This is a Pyngus callback, invoked by Pyngus when the peer has - requested that the connection be closed. 
- """ - # The messaging service/broker is trying to shut down the - # connection. Acknowledge the close, and try to reconnect/failover - # later once the connection has closed (connection_closed is called). - if reason: - LOG.info("Connection closed by peer: %s", reason) - self._detach_senders() - self._detach_servers() - self.reply_link.detach() - self._socket_connection.pyngus_conn.close() - - def sasl_done(self, connection, pn_sasl, outcome): - """This is a Pyngus callback invoked when the SASL handshake - has completed. The outcome of the handshake is passed in the outcome - argument. - """ - if outcome == proton.SASL.OK: - return - LOG.error("AUTHENTICATION FAILURE: Cannot connect to " - "%(hostname)s:%(port)s as user %(username)s", - {'hostname': self.hosts.current.hostname, - 'port': self.hosts.current.port, - 'username': self.hosts.current.username}) - # pyngus will invoke connection_failed() eventually - - def _handle_connection_loss(self, reason): - """The connection to the messaging service has been lost. Try to - reestablish the connection/failover if not shutting down the driver. - """ - self.addresser = None - self._socket_connection.close() - if self._closing: - # we're in the middle of shutting down the driver anyways, - # just consider it done: - self.processor.shutdown() - else: - # for some reason, we've lost the connection to the messaging - # service. 
Try to re-establish the connection: - if not self._reconnecting: - self._reconnecting = True - self.processor.wakeup(lambda: self._hard_reset(reason)) - LOG.info("Delaying reconnect attempt for %d seconds", - self._delay) - self.processor.defer(lambda: self._do_reconnect(reason), - self._delay) - self._delay = min(self._delay * self.conn_retry_backoff, - self.conn_retry_interval_max) - if self._link_maint_timer: - self._link_maint_timer.cancel() - self._link_maint_timer = None - - def _do_reconnect(self, reason): - """Invoked on connection/socket failure, failover and re-connect to the - messaging service. - """ - self._reconnecting = False - if not self._closing: - host = self.hosts.next() - LOG.info("Reconnecting to: %(hostname)s:%(port)s", - {'hostname': host.hostname, 'port': host.port}) - self.processor.wakeup(lambda: self._do_connect()) - - def _hard_reset(self, reason): - """Reset the controller to its pre-connection state""" - # note well: since this method destroys the connection, it cannot be - # invoked directly from a pyngus callback. Use processor.defer() to - # run this method on the main loop instead. 
- for sender in self._purged_senders: - sender.destroy(reason) - del self._purged_senders[:] - self._active_senders.clear() - unused = [] - for key, sender in self._all_senders.items(): - # clean up any sender links that no longer have messages to send - if sender.pending_messages == 0: - unused.append(key) - else: - sender.reset(reason) - self._active_senders.add(key) - for key in unused: - self._all_senders[key].destroy(reason) - del self._all_senders[key] - for servers in self._servers.values(): - for server in servers.values(): - server.reset() - if self.reply_link: - self.reply_link.destroy() - self.reply_link = None - if self._socket_connection: - self._socket_connection.reset() - - def _detach_senders(self): - """Close all sender links""" - for sender in self._all_senders.values(): - sender.detach() - - def _detach_servers(self): - """Close all listener links""" - for servers in self._servers.values(): - for server in servers.values(): - server.detach() - - def _purge_sender_links(self): - """Purge inactive sender links""" - if not self._closing: - # destroy links that have already been closed - for sender in self._purged_senders: - sender.destroy("Idle link purged") - del self._purged_senders[:] - - # determine next set to purge - purge = set(self._all_senders.keys()) - self._active_senders - for key in purge: - sender = self._all_senders[key] - if not sender.pending_messages and not sender.unacked_messages: - sender.detach() - self._purged_senders.append(self._all_senders.pop(key)) - self._active_senders.clear() - self._link_maint_timer = \ - self.processor.defer(self._purge_sender_links, - self._link_maint_timeout) - - @property - def _active(self): - # Is the connection up - return (self._socket_connection and - self._socket_connection.pyngus_conn and - self._socket_connection.pyngus_conn.active) diff --git a/oslo_messaging/_drivers/amqp1_driver/eventloop.py b/oslo_messaging/_drivers/amqp1_driver/eventloop.py deleted file mode 100644 index 
37ccfeb0a..000000000 --- a/oslo_messaging/_drivers/amqp1_driver/eventloop.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A thread that performs all messaging I/O and protocol event handling. - -This module provides a background thread that handles messaging operations -scheduled via the Controller, and performs blocking socket I/O and timer -processing. This thread is designed to be as simple as possible - all the -protocol specific intelligence is provided by the Controller and executed on -the background thread via callables. -""" - -import collections -import errno -import heapq -import logging -import math -import os -import pyngus -import select -import socket -import threading -import time -import uuid - -LOG = logging.getLogger(__name__) - - -def compute_timeout(offset): - # minimize the timer granularity to one second so we don't have to track - # too many timers - return math.ceil(time.monotonic() + offset) - - -class _SocketConnection: - """Associates a pyngus Connection with a python network socket, - and handles all connection-related I/O and timer events. - """ - - def __init__(self, name, container, properties, handler): - self.name = name - self.socket = None - self.pyngus_conn = None - self._properties = properties - # The handler is a pyngus ConnectionEventHandler, which is invoked by - # pyngus on connection-related events (active, closed, error, etc). 
- # Currently it is the Controller object. - self._handler = handler - self._container = container - - def fileno(self): - """Allows use of a _SocketConnection in a select() call. - """ - return self.socket.fileno() - - def read_socket(self): - """Called to read from the socket.""" - if self.socket: - try: - pyngus.read_socket_input(self.pyngus_conn, self.socket) - self.pyngus_conn.process(time.monotonic()) - except (socket.timeout, OSError) as e: - # pyngus handles EAGAIN/EWOULDBLOCK and EINTER - self.pyngus_conn.close_input() - self.pyngus_conn.close_output() - self._handler.socket_error(str(e)) - - def write_socket(self): - """Called to write to the socket.""" - if self.socket: - try: - pyngus.write_socket_output(self.pyngus_conn, self.socket) - self.pyngus_conn.process(time.monotonic()) - except (socket.timeout, OSError) as e: - # pyngus handles EAGAIN/EWOULDBLOCK and EINTER - self.pyngus_conn.close_output() - self.pyngus_conn.close_input() - self._handler.socket_error(str(e)) - - def connect(self, host): - """Connect to host and start the AMQP protocol.""" - addr = socket.getaddrinfo(host.hostname, host.port, socket.AF_UNSPEC, - socket.SOCK_STREAM) - if not addr: - key = "%s:%i" % (host.hostname, host.port) - error = "Invalid peer address '%s'" % key - LOG.error("Invalid peer address '%s'", key) - self._handler.socket_error(error) - return - my_socket = socket.socket(addr[0][0], addr[0][1], addr[0][2]) - my_socket.setblocking(0) # 0=non-blocking - my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - try: - my_socket.connect(addr[0][4]) - except OSError as e: - if e.errno != errno.EINPROGRESS: - error = "Socket connect failure '%s'" % str(e) - LOG.error("Socket connect failure '%s'", str(e)) - self._handler.socket_error(error) - return - self.socket = my_socket - - props = self._properties.copy() - if pyngus.VERSION >= (2, 0, 0): - # configure client authentication - # - props['x-server'] = False - if host.username: - props['x-username'] = 
host.username - props['x-password'] = host.password or "" - - self.pyngus_conn = self._container.create_connection(self.name, - self._handler, - props) - self.pyngus_conn.user_context = self - - if pyngus.VERSION < (2, 0, 0): - # older versions of pyngus requires manual SASL configuration: - # determine the proper SASL mechanism: PLAIN if a username/password - # is present, else ANONYMOUS - pn_sasl = self.pyngus_conn.pn_sasl - if host.username: - password = host.password if host.password else "" - pn_sasl.plain(host.username, password) - else: - pn_sasl.mechanisms("ANONYMOUS") - pn_sasl.client() - - self.pyngus_conn.open() - - def reset(self, name=None): - """Clean up the current state, expect 'connect()' to be recalled - later. - """ - # note well: since destroy() is called on the connection, do not invoke - # this method from a pyngus callback! - if self.pyngus_conn: - self.pyngus_conn.destroy() - self.pyngus_conn = None - self.close() - if name: - self.name = name - - def close(self): - if self.socket: - self.socket.close() - self.socket = None - - -class Scheduler: - """Schedule callables to be run in the future. 
- """ - class Event: - # simply hold a reference to a callback that can be set to None if the - # alarm is canceled - def __init__(self, callback): - self.callback = callback - - def cancel(self): - # quicker than rebalancing the tree - self.callback = None - - def __init__(self): - self._callbacks = {} - self._deadlines = [] - - def alarm(self, request, deadline): - """Request a callable be executed at a specific time - """ - try: - callbacks = self._callbacks[deadline] - except KeyError: - callbacks = list() - self._callbacks[deadline] = callbacks - heapq.heappush(self._deadlines, deadline) - entry = Scheduler.Event(request) - callbacks.append(entry) - return entry - - def defer(self, request, delay): - """Request a callable be executed after delay seconds - """ - return self.alarm(request, compute_timeout(delay)) - - @property - def _next_deadline(self): - """The timestamp of the next expiring event or None - """ - return self._deadlines[0] if self._deadlines else None - - def _get_delay(self, max_delay=None): - """Get the delay in milliseconds until the next callable needs to be - run, or 'max_delay' if no outstanding callables or the delay to the - next callable is > 'max_delay'. - """ - due = self._deadlines[0] if self._deadlines else None - if due is None: - return max_delay - _now = time.monotonic() - if due <= _now: - return 0 - else: - return min(due - _now, max_delay) if max_delay else due - _now - - def _process(self): - """Invoke all expired callables.""" - if self._deadlines: - _now = time.monotonic() - try: - while self._deadlines[0] <= _now: - deadline = heapq.heappop(self._deadlines) - callbacks = self._callbacks[deadline] - del self._callbacks[deadline] - for cb in callbacks: - cb.callback and cb.callback() - except IndexError: - pass - - -class Requests: - """A queue of callables to execute from the eventloop thread's main - loop. 
- """ - def __init__(self): - self._requests = collections.deque() - self._wakeup_pipe = os.pipe() - self._pipe_ready = False # prevents blocking on an empty pipe - self._pipe_lock = threading.Lock() - - def wakeup(self, request=None): - """Enqueue a callable to be executed by the eventloop, and force the - eventloop thread to wake up from select(). - """ - with self._pipe_lock: - if request: - self._requests.append(request) - if not self._pipe_ready: - self._pipe_ready = True - os.write(self._wakeup_pipe[1], b'!') - - def fileno(self): - """Allows this request queue to be used by select().""" - return self._wakeup_pipe[0] - - def process_requests(self): - """Invoked by the eventloop thread, execute each queued callable.""" - with self._pipe_lock: - if not self._pipe_ready: - return - self._pipe_ready = False - os.read(self._wakeup_pipe[0], 512) - requests = self._requests - self._requests = collections.deque() - - for r in requests: - r() - - -class Thread(threading.Thread): - """Manages socket I/O and executes callables queued up by external - threads. - """ - def __init__(self, container_name, node, command, pid): - super().__init__() - - # callables from other threads: - self._requests = Requests() - # delayed callables (only used on this thread for now): - self._scheduler = Scheduler() - - self._connection = None - - # Configure a container - if container_name is None: - container_name = ("openstack.org/om/container/%s/%s/%s/%s" % - (node, command, pid, uuid.uuid4().hex)) - self._container = pyngus.Container(container_name) - - self.name = "Thread for Proton container: %s" % self._container.name - self._shutdown = False - self.daemon = True - self.start() - - def wakeup(self, request=None): - """Wake up the eventloop thread, Optionally providing a callable to run - when the eventloop wakes up. Thread safe. - """ - self._requests.wakeup(request) - - def shutdown(self): - """Shutdown the eventloop thread. Thread safe. 
- """ - LOG.debug("eventloop shutdown requested") - self._shutdown = True - self.wakeup() - - def destroy(self): - # release the container. This can only be called after the eventloop - # thread exited - self._container.destroy() - self._container = None - - # the following methods are not thread safe - they must be run from the - # eventloop thread - - def defer(self, request, delay): - """Invoke request after delay seconds.""" - return self._scheduler.defer(request, delay) - - def alarm(self, request, deadline): - """Invoke request at a particular time""" - return self._scheduler.alarm(request, deadline) - - def connect(self, host, handler, properties): - """Get a _SocketConnection to a peer represented by url.""" - key = "openstack.org/om/connection/{}:{}/".format( - host.hostname, host.port) - # return pre-existing - conn = self._container.get_connection(key) - if conn: - return conn.user_context - - # create a new connection - this will be stored in the - # container, using the specified name as the lookup key, or if - # no name was provided, the host:port combination - sc = _SocketConnection(key, self._container, - properties, handler=handler) - sc.connect(host) - self._connection = sc - return sc - - def run(self): - """Run the proton event/timer loop.""" - LOG.debug("Starting Proton thread, container=%s", - self._container.name) - try: - self._main_loop() - except Exception: - # unknown error - fatal - LOG.exception("Fatal unhandled event loop error!") - raise - - def _main_loop(self): - # Main event loop - while not self._shutdown: - - readfds = [self._requests] - writefds = [] - deadline = self._scheduler._next_deadline - - pyngus_conn = self._connection and self._connection.pyngus_conn - if pyngus_conn and self._connection.socket: - if pyngus_conn.needs_input: - readfds.append(self._connection) - if pyngus_conn.has_output: - writefds.append(self._connection) - if pyngus_conn.deadline: - deadline = (pyngus_conn.deadline if not deadline else - 
min(deadline, pyngus_conn.deadline)) - - # force select to return in time to service the next expiring timer - if deadline: - _now = time.monotonic() - timeout = 0 if deadline <= _now else (deadline - _now) - else: - timeout = None - - # and now we wait... - try: - select.select(readfds, writefds, [], timeout) - except OSError as serror: - if serror[0] == errno.EINTR: - LOG.warning("ignoring interrupt from select(): %s", - str(serror)) - continue - raise # assuming fatal... - - # Ignore the select return value - simply poll the socket for I/O. - # Testing shows that polling improves latency over checking the - # lists returned by select() - self._requests.process_requests() - self._connection.read_socket() - if pyngus_conn and pyngus_conn.deadline: - _now = time.monotonic() - if pyngus_conn.deadline <= _now: - pyngus_conn.process(_now) - self._connection.write_socket() - - self._scheduler._process() # run any deferred requests - - LOG.info("eventloop thread exiting, container=%s", - self._container.name) diff --git a/oslo_messaging/_drivers/amqp1_driver/opts.py b/oslo_messaging/_drivers/amqp1_driver/opts.py deleted file mode 100644 index 91655242b..000000000 --- a/oslo_messaging/_drivers/amqp1_driver/opts.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - - -amqp1_opts = [ - cfg.StrOpt('container_name', - deprecated_group='amqp1', - help='Name for the AMQP container. 
must be globally unique.' - ' Defaults to a generated UUID'), - - cfg.IntOpt('idle_timeout', - default=0, # disabled - deprecated_group='amqp1', - help='Timeout for inactive connections (in seconds)'), - - cfg.BoolOpt('trace', - default=False, - deprecated_group='amqp1', - help='Debug: dump AMQP frames to stdout'), - - cfg.BoolOpt('ssl', - default=False, - help=("Attempt to connect via SSL. If no other ssl-related " - "parameters are given, it will use the system's " - "CA-bundle to verify the server's certificate.")), - - cfg.StrOpt('ssl_ca_file', - default='', - deprecated_group='amqp1', - help="CA certificate PEM file used to verify the server's" - ' certificate'), - - cfg.StrOpt('ssl_cert_file', - default='', - deprecated_group='amqp1', - help='Self-identifying certificate PEM file' - ' for client authentication'), - - cfg.StrOpt('ssl_key_file', - default='', - deprecated_group='amqp1', - help='Private key PEM file used to sign ssl_cert_file' - ' certificate (optional)'), - - cfg.StrOpt('ssl_key_password', - deprecated_group='amqp1', - secret=True, - help='Password for decrypting ssl_key_file (if encrypted)'), - - cfg.BoolOpt('ssl_verify_vhost', - default=False, - help="By default SSL checks that the name in the server's" - " certificate matches the hostname in the transport_url. In" - " some configurations it may be preferable to use the virtual" - " hostname instead, for example if the server uses the Server" - " Name Indication TLS extension (rfc6066) to provide a" - " certificate per virtual host. 
Set ssl_verify_vhost to True" - " if the server's SSL certificate uses the virtual host name" - " instead of the DNS name."), - - cfg.StrOpt('sasl_mechanisms', - default='', - deprecated_group='amqp1', - help='Space separated list of acceptable SASL mechanisms'), - - cfg.StrOpt('sasl_config_dir', - default='', - deprecated_group='amqp1', - help='Path to directory that contains the SASL configuration'), - - cfg.StrOpt('sasl_config_name', - default='', - deprecated_group='amqp1', - help='Name of configuration file (without .conf suffix)'), - - cfg.StrOpt('sasl_default_realm', - default='', - help='SASL realm to use if no realm present in username'), - - # Network connection failure retry options - - cfg.IntOpt('connection_retry_interval', - default=1, - min=1, - help='Seconds to pause before attempting to re-connect.'), - - cfg.IntOpt('connection_retry_backoff', - default=2, - min=0, - help='Increase the connection_retry_interval by this many' - ' seconds after each unsuccessful failover attempt.'), - - cfg.IntOpt('connection_retry_interval_max', - default=30, - min=1, - help='Maximum limit for connection_retry_interval' - ' + connection_retry_backoff'), - - # Message send retry and timeout options - - cfg.IntOpt('link_retry_delay', - default=10, - min=1, - help='Time to pause between re-connecting an AMQP 1.0 link that' - ' failed due to a recoverable error.'), - - cfg.IntOpt('default_reply_retry', - default=0, - min=-1, - help='The maximum number of attempts to re-send a reply message' - ' which failed due to a recoverable error.'), - - cfg.IntOpt('default_reply_timeout', - default=30, - min=5, - help='The deadline for an rpc reply message delivery.'), - - cfg.IntOpt('default_send_timeout', - default=30, - min=5, - help='The deadline for an rpc cast or call message delivery.' - ' Only used when caller does not provide a timeout expiry.'), - - cfg.IntOpt('default_notify_timeout', - default=30, - min=5, - help='The deadline for a sent notification message delivery.' 
- ' Only used when caller does not provide a timeout expiry.'), - - # Sender link cache maintenance: - cfg.IntOpt('default_sender_link_timeout', - default=600, - min=1, - help='The duration to schedule a purge of idle sender links.' - ' Detach link after expiry.'), - - # Addressing: - - cfg.StrOpt('addressing_mode', - default='dynamic', - help="Indicates the addressing mode used by the driver.\n" - "Permitted values:\n" - "'legacy' - use legacy non-routable addressing\n" - "'routable' - use routable addresses\n" - "'dynamic' - use legacy addresses if the message bus does not" - " support routing otherwise use routable addressing"), - - cfg.BoolOpt('pseudo_vhost', - default=True, - help="Enable virtual host support for those message buses" - " that do not natively support virtual hosting (such as" - " qpidd). When set to true the virtual host name will be" - " added to all message bus addresses, effectively creating" - " a private 'subnet' per virtual host. Set to False if the" - " message bus supports virtual hosting using the 'hostname'" - " field in the AMQP 1.0 Open performative as the name of the" - " virtual host."), - - # Legacy addressing customization: - - cfg.StrOpt('server_request_prefix', - default='exclusive', - deprecated_group='amqp1', - help="address prefix used when sending to a specific server"), - - cfg.StrOpt('broadcast_prefix', - default='broadcast', - deprecated_group='amqp1', - help="address prefix used when broadcasting to all servers"), - - cfg.StrOpt('group_request_prefix', - default='unicast', - deprecated_group='amqp1', - help="address prefix when sending to any server in group"), - - # Routable addressing customization: - # - # Addresses a composed of the following string values using a template in - # the form of: - # $(address_prefix)/$(*cast)/$(exchange)/$(topic)[/$(server-name)] - # where *cast is one of the multicast/unicast/anycast values used to - # identify the delivery pattern used for the addressed message - - 
cfg.StrOpt('rpc_address_prefix', - default='openstack.org/om/rpc', - help="Address prefix for all generated RPC addresses"), - - cfg.StrOpt('notify_address_prefix', - default='openstack.org/om/notify', - help="Address prefix for all generated Notification addresses"), - - cfg.StrOpt('multicast_address', - default='multicast', - help="Appended to the address prefix when sending a fanout" - " message. Used by the message bus to identify fanout" - " messages."), - - cfg.StrOpt('unicast_address', - default='unicast', - help="Appended to the address prefix when sending to a" - " particular RPC/Notification server. Used by the message bus" - " to identify messages sent to a single destination."), - - cfg.StrOpt('anycast_address', - default='anycast', - help="Appended to the address prefix when sending to a group of" - " consumers. Used by the message bus to identify messages that" - " should be delivered in a round-robin fashion across" - " consumers."), - - cfg.StrOpt('default_notification_exchange', - help="Exchange name used in notification addresses.\n" - "Exchange name resolution precedence:\n" - "Target.exchange if set\n" - "else default_notification_exchange if set\n" - "else control_exchange if set\n" - "else 'notify'"), - - cfg.StrOpt('default_rpc_exchange', - help="Exchange name used in RPC addresses.\n" - "Exchange name resolution precedence:\n" - "Target.exchange if set\n" - "else default_rpc_exchange if set\n" - "else control_exchange if set\n" - "else 'rpc'"), - - # Message Credit Levels - - cfg.IntOpt('reply_link_credit', - default=200, - min=1, - help='Window size for incoming RPC Reply messages.'), - - cfg.IntOpt('rpc_server_credit', - default=100, - min=1, - help='Window size for incoming RPC Request messages'), - - cfg.IntOpt('notify_server_credit', - default=100, - min=1, - help='Window size for incoming Notification messages'), - - # Settlement control - - cfg.MultiStrOpt('pre_settled', - default=['rpc-cast', 'rpc-reply'], - help="Send messages of 
this type pre-settled.\n" - "Pre-settled messages will not receive acknowledgement\n" - "from the peer. Note well: pre-settled messages may be\n" - "silently discarded if the delivery fails.\n" - "Permitted values:\n" - "'rpc-call' - send RPC Calls pre-settled\n" - "'rpc-reply'- send RPC Replies pre-settled\n" - "'rpc-cast' - Send RPC Casts pre-settled\n" - "'notify' - Send Notifications pre-settled\n") -] diff --git a/oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst b/oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst deleted file mode 100644 index 9680cd65d..000000000 --- a/oslo_messaging/_drivers/amqp1_driver/oslo_messaging_amqp_driver_overview.rst +++ /dev/null @@ -1,1144 +0,0 @@ -############################## -Oslo.messaging AMQP 1.0 Driver -############################## -:Date: $Date: 2016-08-02 $ -:Revision: $Revision: 0.04 $ - -Introduction -============ - -This document describes the architecture and implementation of the -oslo.messaging AMQP 1.0 driver. The AMQP 1.0 driver provides an -implementation of the oslo.messaging base driver service interfaces -that map client application RPC and Notify methods "onto" the -operation of an AMQP 1.0 protocol messaging bus. The blueprint for the original -driver can be found here [1]_ and the original implementation is described in -[2]_. The feature specification for the updates to the AMQP 1.0 driver for the -OpenStack Newton release can be found here [3]_ - -The driver effectively hides the details of the AMQP 1.0 protocol transport and -message processing from the client applications. The Pyngus messaging -framework [4]_ built on the QPID Proton engine [5]_ provides a -callback-based API for message passing. The driver implementation is -comprised of the callback "handlers" that drive the messaging APIs to -connect to the message bus, subscribe servers, send and receive messages. 
- -:: - - +------------+ +------------+ +-------------+ +-------------+ - | | | | | | | | OpenStack - | RPC Client | | RPC Server | | Notify | | Notify | Application - | | | | | Client | | Server | - +------------+ +------------+ +-------------+ +-------------+ - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - +-----------------------------------------------------------+ - | Oslo.Messaging "Base Driver Interface" | Oslo Messaging - +-----------------------------------------------------------+ Driver - | Oslo.Messaging AMQP 1.0 Driver | - +-----------------------------------------------------------+ - XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - +-----------------------------------------------------------+ - | Pyngus Messaging Framework | - +-----------------------------------------------------------+ - | QPID Proton Library | AMQP 1.0 - +-----------------------------------------------------------+ Protocol - | AMQP 1.0 Protocol | Exchange - +-----------------------------------------------------------+ - | TCP/IP Network Layer | - +-----------------------------------------------------------+ - - -Development View -================ - -Code Base ---------- - -The AMQP 1.0 driver source code is maintained in the OpenStack -oslo.messaging repository [7]_. The driver implementation, tests and -user guide are located in the sub-directories of the repository. 
- -:: - - ├── doc - │ └── source - │ ├── AMQP1.0.rst - ├── oslo_messaging - ├── _drivers - │ ├── amqp1_driver - │ │ ├── addressing.py - │ │ ├── controller.py - │ │ ├── eventloop.py - │ │ ├── opts.py - │ ├── impl_amqp1.py - ├── tests - ├── drivers - ├── test_amqp_driver.py - - - - -+-----------------+----------------------------------------------------+ -|File | Content | -+=================+====================================================+ -|doc/ |The AMQP 1.0 driver user guide details | -|source/ |prerequisite, configuration and platform deployment | -|AMQP1.0.rst |considerations. | -| | | -+-----------------+----------------------------------------------------+ -|_drivers/ |This file provides the oslo.messaging driver entry | -|impl_amqp1.py |points for the AMQP 1.0 driver. The file provides | -| |implementations for the base.RpcIncomingMessage, | -| |base.PollStyleListener and base.BaseDriver oslo | -| |messaging entities. | -+-----------------+----------------------------------------------------+ -|_drivers/ |This file provides a set of utilities that translate| -|amqp1_driver/ |a target address to a well-formed AMQP 1.0 address. | -|addressing.py | | -| | | -+-----------------+----------------------------------------------------+ -|_drivers/ |The controller manages the interface between the | -|amqp1_driver/ |driver and the messaging service protocol exchange. | -|controller.py | | -| | | -+-----------------+----------------------------------------------------+ -|_drivers/ |This module provides a background thread that | -|amqp1_driver/ |handles scheduled messaging operations. All | -|eventloop.py |protocol specific exchanges are executed on this | -| |background thread. | -+-----------------+----------------------------------------------------+ -|_drivers/ |This file manages the AMQP 1.0 driver configuration | -|amqp1_driver/ |options (oslo_messaging_amqp). 
| -|opts.py | | -| | | -+-----------------+----------------------------------------------------+ -|tests/ |This file contains a set of functional tests that | -|drivers/ |target the capabilities of the driver. A message | -|test_amqp_driver |intermediary is included to emulate the full | -| |messaging protocol exchanges. | -+-----------------+----------------------------------------------------+ - -Deployment -========== - -The Oslo Messaging AMQP 1.0 driver is deployed on each node of the -OpenStack infrastructure where one or more OpenStack services will be deployed. - -:: - - Node Node - +--------------------------------+ +-----------------------------------+ - | +-------------+ | | +--------------+ +--------------+ | - | | | | | | | | | | - | | OpenStack | | | | OpenStack | | OpenStack | | - | | Service | | | | Service | | Service | | - | | | | | | | | | | - | +-------------+ | | +--------------+ +--------------+ | - | | Oslo | | | | Oslo | | Oslo | | - | | Messaging | | | | Messaging | | Messaging | | - | +------------+ +-------------+ | | +--------------+ +--------------+ | - | | AMQP 1.0 | | AMQP 1.0 | | | | AMQP 1.0 | | AMQP 1.0 | | - | |Intermediary| | Driver | | | | Driver | | Driver | | - | +------------+ +-------------+ | | +--------------+ +--------------+ | - | +----------------------------+ | | +-------------------------------+ | - | | TCP/IP | | | | TCP/IP | | - | | Stack | | | | Stack | | - | +----------------------------+ | | +-------------------------------+ | - +--------------------------------+ +-----------------------------------+ - ^ ^ ^ ^ - | | | | - | | Public Network | | - +----------------------v-----------------------------------------v------------+ - v Internal Network v - +-----------------------------------------------------------------------------+ - -The configuration of each OpenStack service must provide the -transport information that indicates to the oslo messaging layer that the AMQP -1.0 driver is to be instantiated for the 
back-end. During instantiation of -the driver, a connection is established from the driver to an AMQP 1.0 -intermediary that provides the messaging bus capabilities. The -intermediary can be co-located on nodes that are running OpenStack -services or can be located on separate stand-alone nodes in the -control plane. - -The driver architecture is intended to support any messaging -intermediary (e.g. broker or router) that implements version 1.0 of the -AMQP protocol. Support for additional classes of intermediaries might -require changes to driver configuration parameters and addressing syntax -but should not otherwise require changes to the driver architecture. - -Driver Structure -================ - -The functionality of the AMQP 1.0 driver is implemented across a number of -components that encapsulate the mapping of the driver activities onto the AMQP -protocol exchange. The *Controller* implements the primary functional logic for -the driver and serves as the interface between the driver entry points ( *Proton -Driver* ) and the I/O operations associated with sending and receiving messages -on links attached to the message bus. Each sending or receiving link is -associated with a specific driver activity such as sending an RPC Call/Cast or -Notify message, receiving an RPC reply message, or receiving an RPC or Notify -server request. 
- -:: - - _______________________ - / / - / Application / - / (OpenStack) / - /______________________/ - | - XXXXXXXXXXXXXXXXXXXXXXX|XXXXXXXXXXXXXXXXXXXXXXXXXXXXX - | - +----------+ - +-----------| Proton | - V | Driver | - +-------+ +----------+ - | Tasks | | - +-------+ +------------+ - +--------->| Controller | - ----| |---- - / +------------+ \ - / | \ - / | \ - +---------+ +---------+ +---------+ - | Sender |<--| Replies | | Server | - | | | | | | - +---------+ +---------+ +---------+ - | | | - | +---------+ +---------+ - | | Proton | | Proton | - | |Listener | |Listener | - | +---------+ +---------+ - | | | - XXXXXXXXX|XXXXXXXXXXXXX|XXXXXXXXXXXXXXX|XXXXXXXXXXXXX - | | | - +--------+ +--------+ +--------+ - | Send | | Receive| | Receive| - | Link | | Link | | Link | - +--------+ +--------+ +--------+ - - -Task Orchestration ------------------- - -The AMQP 1.0 driver maintains a thread for processing protocol events -and timers. Therefore, the driver must orchestrate and synchronize -requests from the client applications with this internal thread. The -*Proton Driver* will act as a proxy for each client request and -constructs a task request object on the caller's thread via the -*Controller*. The task request object contains the necessary information -to execute the desired method on the driver invocation thread of -control. This method is executed synchronously - the client thread -pends until the driver thread completes processing the task. 
The unique -task objects provided for driver thread invocation include: - -* Subscribe Task -* Send Task (for RPC Cast or Notify) -* RPC Call Task -* RPC Reply Task -* Message Disposition Task - -:: - - +------------------------+ +-------------------------------+ - | Client Thread | | Driver Thread | - | +--------+ +---------+ | | +------+ +--------+ +-------+ | - | |Proton | |Control | | | |Event | |Control | |Pyngus | | - | |Driver | |(-ler) | | | |Loop | |(-ler) | |Frmwrk | | - | +---+----+ +----+----+ | | +---+--+ +---+----+ +---+---+ | - | |create | | | | | | | - | |task() | | | | | | | - | |---------->| | | | | | | - | |add | | | | | | | - | |task() | | Request | | | | | - | |---------->| | Queue | | | | | - | | | enq | +------+ | deq | | | | - | | |------|---> |||||+--|---->| exec() | | | - | | | | +------+ | |------->| | | - | | | | | | |----------|-+ | - | | wait() | | | | | Protocol | | | - | #-----------|------|------+ | | | Exchange | | | - | # | | V | | | | | | - | # | | +-----+ | | set() |<---------|-+ | - | # | | |Event|<--------|--------| | | - | # | | | | | | | | | - | # | | +-----+ | | | | | - | # | | | | | | | | - | #<----------|------|------+ | | | | | - | | | | | | | | | - | + + | | + + + | - | | | | - | | | | - +------------------------+ +-------------------------------+ - - -Scheduling - Execution -^^^^^^^^^^^^^^^^^^^^^^ - -Following the method task construction, the task is added to the *Controller* -queue of requests for execution. Following the placement of the task on this -queue, the caller will wait for the execution to complete (or possibly timeout -or raise an exception). - -The eventloop running in its own thread will dequeue the task request and invoke -the corresponding method on the *Controller* servant using the -information stored in the task request object retrieved. The calls -executed on this eventloop thread via the *Controller* perform all the -protocol specific intelligence required for the pyngus framework. 
In -addition to the target method invocation, the eventloop may call on -the request object for message communication state changes or other -indications from the peer. - -:: - - Request - +--------------------------------------------+ +----------+ Tasks - |Client Thread /\ | | | - | * * * * * * / v | | + V + - listen() | * * * * * * | | |---| - -------->| * Init *-->* Schedule *-->* Wait * | | |---| - | * * * * * * | | |---| - | * * * * * * | | +_|_+ - | * * * *\ * * | | V - | +------------------|-->| +--------------+ - +--------------------------------------------+ | | Eventloop | - | | * * | - +--------------------------------------------+ | | * * | - |Client Thread /\ | | | * Execute * | - | * * * * * * / v | | | * * | - call() | * * * * * * | | | * * | - -------->| * Init *-->* Schedule *-->* Wait * | | | ^ * * \ | - | * * * * * * | | | / \ | - | * * * * * * | | | / / | - | * * * *\ * * | | | \ / | - | +------------------|-->| | \ * *v | - +--------------------------------------------+ | | * * | - o | | * Protocol * | - o | | * Exchange * | - o | | * * | - +--------------------------------------------+ | | * * | - |Client Thread /\ | | +--------------+ - | * * * * * * / v | | - cast() | * * * * * * | | - -------->| * Init *-->* Schedule *-->* Wait * | | - | * * * * * * | | - | * * * * * * | | - | * * * *\ * * | | - | +------------------|--> - +--------------------------------------------+ - - -Completion -^^^^^^^^^^ - -After carrying out the messaging protocol exchange for the requested -task or upon a timeout/exception condition, the eventloop thread will -wake-up the callers thread to indicate the task completion. - -Use Scenarios -============= - -The primary use scenarios for the AMQP 1.0 Driver correspond to the activities -supported by the oslo messaging base driver interface. 
These activities include -the ability to subscribe RPC and Notify servers (referred to as -"Servers" in the graphics) as well the ability to send RPC (cast and -call) messages and Notification messages into the control plane -infrastructure. Following RPC and Notify server processing -(e.g. dispatch to the application) the ability to indicate the final -disposition of the message is supported and mapped onto the message -delivery and settlement capabilities of the AMQP messaging bus. The -composition of the AMQP driver and its dynamic behaviors is defined by -the support of these primary activities. - -Load Driver ------------ - -The operational life-cycle of the AMQP 1.0 driver begins when the oslo messaging -loads and instantiates the driver instance for use by an application. To -complete this activity, the driver will retrieve the oslo_messaging_amqp -configuration options in order to define the driver's run time behaviors. The -transport URL specifier provided will be used by the driver to create a -connection to the AMQP 1.0 messaging bus. The transport URL is of the form - - amqp://user:pass@host1:port[,hostN:portN] - -Where the transport scheme specifies **amqp** as the back-end. It -should be noted that oslo.messaging is deprecating the discrete host, -port and auth configuration options [6]_. - -The driver provides the capability to transform the "Target" provided -by an application to an addressing format that can be associated to the -sender and receive links that take part in the AMQP protocol exchange. 
- -:: - - load()---+ - \ ----------- - \ +--- Transport - > * * | ----------- - * *<---+ - * Prepare * - * Driver * - * * - * * - ---------- | - Cfg Opts | - ----------\ | - \ v - v * * - * * - * Retrieve * - * Config * - * * - * * - | - | - v - * * - * Start * - * Protocol * - * Thread * - * * - * * - | - | - v - * * +--------------+ - * Connect* | AMQP | - * to *<----------->| Protocol | - * Message * | Exchange | - * Bus * +--------------+ - * * \ - | \ - | \ ------------ - v +-----> Connection --+ - * * ------------ | - * * | - * Address *<--------------------+ - * Factory * - * * - * * - -When the AMQP 1.0 driver connects to the messaging bus, it will -identify the intermediary that it is connected to (e.g. broker or -router). Based on the intermediary type, the driver will dynamically -select an addressing syntax that is optimal for operation in a router -mesh or a syntax that is appropriate for broker backed queues or topics. - - -Subscribe Server ----------------- - -The AMQP 1.0 driver maintains a set of (RPC or Notification) servers that are -created via the subscribe server activity. For each server, the driver will -create and attach a set of addresses for the target that corresponds to the -server endpoint for an AMQP protocol exchange. A unique *ProtonListener* (e.g. -AMQP 1.0 Receiver Link) is instantiated for each server subscription and the -driver will attach event handlers to perform message transport -performatives for the link. The driver maintains a single incoming -queue that messages from all attached links will be placed upon. 
- -:: - - listen() - + - \ - \ * * - \ * * - +> * Create * - * Listener* - * * - * * \ ---------- - -------- | +-------> Incoming - Target -+ | / ---------- - -------- \ | +----+ - \ v / - v * * v - * * - * Create * - * Server * - * *\ - * * \ - ---------- | \ ----------- - Connection | +------> Addresses - ----------\ | /----------- - \ v / - v * * / - * *<------+ - * Attach * - * Links * - * * - * * - | - | - v - +--------------+ - | AMQP | - | Protocol | - | Exchange | - +--------------+ - - -Send Message ------------- - -The AMQP 1.0 driver provides the ability to send messages (e.g. RPC Call/Cast -or Notify) to a target specified by a client application. The driver -maintains a cache of senders corresponding to each unique target that is -referenced across the driver life-cycle. The driver maintains a single -receiver link that will be the incoming link for all RPC reply -messages received by the driver. Prior to sending an RPC call message -that expects a reply, the driver will allocate a unique correlation -identifier for inclusion in the call message. The driver will also set -the message's reply-to field to the address of the RPC reply -link. This correlation identifier will appear in the RPC reply message -and is used to deliver the reply to the proper client. - -Prior to sending the message, the AMQP 1.0 driver will determine if the sender -link is active and has enough credits for the transfer to proceed. If -there are not enough credits to send the message, the driver will -retain the pending message until it can be sent or times out. If there are -credits to send a message, the driver will first check if there are -any messages from a previous request pending to be sent. The driver -will service these pending requests in FIFO order and may defer -sending the current message request if credits to send run out. - -The AMQP 1.0 driver tracks the settlement status of all request -messages sent to the messaging bus. 
For each message sent, the driver -will maintain a count of the number of retry attempts made on the -message. The driver will re-send a message that is not acknowledged up -until the retry limit is reached or a send timeout deadline is reached. - -:: - - send() - + -------- - \ +--- Target - \ * * | -------- - \ * *<---+ - +> * Prepare * - * Request *---+ ------------- - /* * +----> Request Msg <-----+ - / * * ------------- | - ------- <-+ | | - Sender | | - ------- | | - v | - * * ------------ | - * *---------> Correlation | - * Prepare * ------------ | - * Response * | - * * | - * * | - | | - | | - v --------- | - * * +---------> Pending | - * */ --------- | - * Send * | - * Message *\ --------- | - * * +-----> Unacked <---+ | - * * --------- | | - | | | - | | + - v | / - +--------------+ * * v - | AMQP | * * - | Protocol |-----------> * Settle * - | Exchange | * Message * - +--------------+ * * - * * - -Server Receive --------------- - -The AMQP 1.0 driver (via subscribe) maintains a groups of links that -receive messages from a set of addresses derived from the Targets -associated with a Server instantiation. Messages arriving from these -links are placed on the Listener's incoming queue via the Server's -incoming message handler. The Listener's poll method will return the -message to the application for subsequent application service dispatching. - -:: - - +--------------+ - | AMQP | - | Protocol | - | Exchange | - +--------------+ - | ^ - -------- V | --------- - Receiver-+ * * +------- Address - -------- \ * * --------- - v* Message * - * Received* - * * - * * \ - \ ----------------- - +------> Incoming Message --+ - * * ----------------- | - * * | - * Poll *<--+ | - * * | | - * * | | - * * +-------------------------+ - - - -RPC Reply Receive ------------------ - -The AMQP 1.0 driver instantiates a single receiving link for the -reception of all RPC reply messages. 
Messages received on this -receiving link are routed to the originating caller using the -correlation-id embedded in the header of the message itself. To -ensure the responsiveness and throughput on the shared RPC receiving -link, the AMQP 1.0 driver will immediately update the link transfer -credits and will acknowledge the successful receipt of the RPC reply. - -:: - - +--------------+ - | AMQP | - | Protocol | - | Exchange | - +--------------+ - | ----------------- - V + ------ Incoming Message - * * / ----------------- - * *v - * Message * - * Received*<---+ - * * | - * * \ | ------------- - | \ +---- Correlation - V \ ------------- - * * \ - * * \ --------------- - * Update * +------> Reply Message - * Credit * --------------- - * * - * * - | - V - * * - * * - * Accept * - * Message * - * * - * * - | - V - +--------------+ - | AMQP | - | Protocol | - | Exchange | - +--------------+ - - -Disposition ------------ - -For each incoming message provided by the AMQP 1.0 driver to a server -application (e.g. RPC or Notify), the delivery disposition of the -incoming message can be indicated to the driver. The disposition can -either be to acknowledge the message indicating the message was -accepted by the application or to requeue the message indicating that -application processing could not successfully take place. The driver -will initiate the appropriate settlement of the message through an -AMQP protocol exchange over the message bus. - -:: - - acknowledge()--------+ requeue() --------+ - | | - v v - * * * * - * * * * - * Ack * * Requeue * - * Message *\ ----* Message * - * * \ / * * - * * \ / * * - | v ------------- v | - | Incoming Msg | - | / ------------- | - | / | - v v | - +--------------+ | - | AMQP |<----------------------------+ - | Protocol | - | Exchange | - +--------------+ - - -Driver Components -================= - -This section describes the components of the AMQP 1.0 driver -implementation. 
For each component, its primary responsibilities and -the relationships to other components are included. These -relationships are derived from service requests placed upon the other -components. Architectural or system-level constraints on the component -(e.g. multiplicity, concurrency, parameterization) that change the -depiction of the architecture are included. Additionally, any list of issues -waiting resolution are described. - -Controller ----------- -+-----------------+----------------------------------------------------+ -|Component | *Controller* | -+=================+====================================================+ -|Responsibilities | Responsible for performing messaging-related | -| | operations requested by the driver (tasks) | -| | and for managing the connection to the messaging | -| | service provided by the AMQP 1.0 intermediaries. | -| | | -| | This component provides the logic for addressing, | -| | sending and receiving messages as well as managing | -| | the messaging bus connection life-cycle. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Sender (pyngus.SenderEventHandler) | -| | Server (pyngus.ReceiverEventHandler) | -| | Replies (pyngus.ReceiverEventHandler) | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created whenever the driver is instantiated | -| | in a client application process. The component | -| | will terminate the driver operation when the client| -| | initiates a shutdown of the driver. | -| | | -| | All AMQP 1.0 protocol exchanges (e.g. messaging | -| | and I/O work) are done on the Eventloop driver | -| | thread. This allows the driver to run | -| | asynchronously from the messaging clients. 
| -| | | -| | The component supports addressing modes defined | -| | by the driver configuration and through dynamic | -| | inspection of the connection to the messaging | -| | intermediary. | -+-----------------+----------------------------------------------------+ -|Issues | A cache of sender links indexed by address is | -| | maintained. Currently, removal from the cache is | -| | is not implemented. | -+-----------------+----------------------------------------------------+ - -Sender ------- -+-----------------+----------------------------------------------------+ -|Component | *Sender* (pyngus.SenderEventHander) | -+=================+====================================================+ -|Responsibilities | Responsible for managing a sender link life-cycle | -| | and queueing/tracking the message delivery. | -| | (implementation of Pyngus.SenderEventHandle) | -| | | -| | Provides the capabilities for sending to a | -| | particular address on the message bus. | -| | | -| | Provides the capability to queue (pending) | -| | *SendTask* when link not active or insufficient | -| | link credit capacity. | -| | | -| | Provides the capability to retry send following a | -| | recoverable connection or link failure. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Addresser | -| | Connection | -| | Pyngus.SenderLink | -| | SendTask | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created by the *Controller* on a client | -| | caller thread and retained in a *Sender* cache. 
| -+-----------------+----------------------------------------------------+ -|Issues | Sender cache aging (see above) | -+-----------------+----------------------------------------------------+ - -Server ------- -+-----------------+----------------------------------------------------+ -|Component | *Server* (pyngus.ReceiverEventHander) | -+=================+====================================================+ -|Responsibilities | Responsible for operations for the lifecycle of an | -| | incoming queue that is used for messages received | -| | from a set of target addresses. | -| | | -+-----------------+----------------------------------------------------+ -|Collaborators | Connection | -| | Pyngus.ReceiverLink | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created whenever a client application | -| | subscribes a RPC or Notification server to the | -| | messaging bus. When the client application closes | -| | the transport, this component and its associated | -| | links will be detached/closed. | -| | | -| | Individual receiver links are created over the | -| | message bus connection for all the addresses | -| | generated for the server target. | -| | | -| | All the receiver links share a single event | -| | callback handler. | -+-----------------+----------------------------------------------------+ -|Issues | The credit per link is presently hard-coded. A | -| | mechanism to monitor for a back-up of inbound | -| | messages to back-pressure the sender is proposed. 
| -+-----------------+----------------------------------------------------+ - -Replies -------- -+-----------------+----------------------------------------------------+ -|Component | *Replies* (pyngus.ReceiverEventHander) | -+=================+====================================================+ -|Responsibilities | Responsible for the operations and managing | -| | the life-cycle of the receiver link for all RPC | -| | reply messages. A single instance of an RPC reply | -| | link is maintained for the driver. | -+-----------------+----------------------------------------------------+ -|Collaborators | Connection | -| | Pyngus.ReceiverLink | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | The reply link is created when the connection to | -| | the messaging bus is activated. | -| | | -| | The origination of RPC calls is inhibited until | -| | the replies link is active. | -| | | -| | Message are routed to the originator's incoming | -| | queue using the correlation-id header that is | -| | contained in the response message. | -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -ProtonDriver ------------- -+-----------------+----------------------------------------------------+ -|Component | *ProtonDriver* | -+=================+====================================================+ -|Responsibilities | Responsible for providing the oslo.Messaging | -| | BaseDriver implementation. | -| | | -| | Provides the capabilities to send RPC and | -| | Notification messages and create subscriptions for | -| | the application. | -| | | -| | Each operation generates a task that is scheduled | -| | for execution on the *Controller* eventloop | -| | thread. | -| | | -| | The calling thread blocks until execution completes| -| | or timeout. 
| -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Controller | -| | RPCCallTask | -| | SendTask | -| | SubscribeTask | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created whenever the oslo.messaging AMQP 1.0 | -| | driver is loaded by an application (process). | -| | | -| | The component manages the life-cycle of the | -| | *Controller* component. Tasks may be created but | -| | will not be processed until the Controller | -| | connection to the messaging service completes. | -| | | -| | There are separate timeout values for RPC Send, | -| | Notify Send, and RPC Call Reply. | -+-----------------+----------------------------------------------------+ -|Issues | | -| | The unmarshalling of an RPC response could cause | -| | an exception/failure and should be optimally | -| | communicated back up to the caller. | -+-----------------+----------------------------------------------------+ - -ProtonIncomingMessage ---------------------- -+-----------------+----------------------------------------------------+ -|Component | *ProtonIncomingMessage* | -+=================+====================================================+ -|Responsibilities | Responsible for managing the life-cycle of an | -| | incoming message received on a RPC or notification | -| | Server link. | -| | | -| | Provides the capability to set the disposition of | -| | the incoming message as acknowledge (e.g. settled) | -| | or requeue. | -| | | -| | Provides the capability to marshal and send the | -| | reply to an RPC Call message. 
| -| | | -+-----------------+----------------------------------------------------+ -|Collaborators | Controller | -| | ProtonListener | -| | MessageDispositionTask | -| | SendTask | -| | | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | A ProtonListener returns this component from the | -| | poll of the incoming queue. | -| | | -| | The message reply_to and id fields of the incoming | -| | message are used to generate the target for the | -| | RPC reply message. | -| | | -| | The RPC reply and message disposition operations | -| | are scheduled for execution on the Controller | -| | eventoloop thread. The caller on the component is | -| | blocked until task completion (or timeout). | -+-----------------+----------------------------------------------------+ -|Issues | The ProtonIncomingMessage is used for both RPC | -| | and Notification Server instances. Conceptually, | -| | a Notification Server should not schedule a reply | -| | and a RPC Server should not schedule a message | -| | requeue. Subclassing base.IncomingMessage for | -| | Notifications and base.RpcIncomingMessage for RPC | -| | could be a consideration. | -+-----------------+----------------------------------------------------+ - -ProtonListener --------------- -+-----------------+----------------------------------------------------+ -|Component | *ProtonListener* | -+=================+====================================================+ -|Responsibilities | Responsible for providing the oslo.Messaging | -| | base.PollStyleListener implementation. 
| -| | | -| | Provides the capabilities to manage the queue of | -| | incoming messages received from the messaging links| -| | | -| | Returns instance of ProtonIncomingMessage to | -| | to Servers | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Queue | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | An instance is created for each subscription | -| | request (e.g. RPC or Notification Server). | -| | | -| | The Controller maintains a map of Servers indexed | -| | by each specific ProtonListener identifier (target)| -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -SubscribeTask -------------- -+-----------------+----------------------------------------------------+ -|Component | *SubscribeTask* | -+=================+====================================================+ -|Responsibilities | Responsible for orchestrating a subscription to a | -| | given target. | -| | | -| | Provides the capability to prepare and schedule | -| | the subscription call on the Controller eventloop | -| | thread. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Controller | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created for each ProtonDriver subscription | -| | request (e.g. listen or listen_for_notifications). | -| | | -| | The task is prepared and scheduled on the caller's | -| | thread. The subscribe operation is executed on the | -| | Controller's eventloop thread. The task completes | -| | once the subscription has been established on the | -| | message bus. 
| -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -SendTask --------- -+-----------------+----------------------------------------------------+ -|Component | *SendTask* | -+=================+====================================================+ -|Responsibilities | Responsible for sending a message to a given | -| | target. | -| | | -| | Provides the capability to prepare and schedule | -| | the send call on the Controller eventloop thread. | -| | | -| | Provides the ability to be called by Controller | -| | eventloop thread to indicate the settlement of the | -| | message (e.g. acknowledge or nack). | -| | | -| | Provides the ability to be called by Controller | -| | eventloop thread upon expiry of send timeout | -| | duration or general message delivery failure. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Controller | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created for each ProtonDriver "RPC Cast" or | -| | "Notify" send request. The component is destroyed | -| | when the message transfer has reached a terminal | -| | state (e.g. settled). | -| | | -| | The task is prepared and scheduled on the caller's | -| | thread. The send operation is executed on the | -| | Controller's eventloop thread. | -| | | -| | All retry, timeout and acknowledge operations are | -| | performed on Controller eventloop thread and | -| | indicated back to the caller thread. 
| -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -RPCCallTask ------------ -+-----------------+----------------------------------------------------+ -|Component | *RPCCallTask* | -+=================+====================================================+ -|Responsibilities | Responsible for sending an RPC Call message to a | -| | given target. | -| | | -| | Provides all the capabilities derived from the | -| | parent SendTask component. | -| | | -| | Provides the additional capability to prepare for | -| | the RPC Call response message that will be returned| -| | on the senders reply link. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Controller | -| | Sender | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created for each ProtonDriver "RPC Call" | -| | send request. It is destroyed once the RPC | -| | exchanged has reached its terminal state. | -| | | -| | The task is prepared and scheduled on the caller's | -| | thread. The send operation is executed on the | -| | Controller's eventloop thread. | -| | | -| | The Controller manages a single receiving link for | -| | all RPC reply messages. Message are routed | -| | using the correlation-id header in the response | -| | message. | -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -MessageDispositionTasks ------------------------ -+-----------------+----------------------------------------------------+ -|Component | *MessageDispositionTask* | -+=================+====================================================+ -|Responsibilities | Responsible for updating the message disposition | -| | for ProtonIncomingMessage. 
| -| | | -| | Provides the ability to acknowledge or requeue the | -| | message according to application determination. | -+-----------------+----------------------------------------------------+ -|Collaborators | | -| | Controller | -| | ProtonIncomingMessage | -| | Server | -+-----------------+----------------------------------------------------+ -|Notes | The component is dynamically created and destroyed.| -| | It is created by ProtonIncomingMessage settlement | -| | calls (acknowledge or requeue). It is destroyed | -| | once the disposition is updated in the Proton | -| | protocol engine. | -| | | -| | the task is prepared and scheduled on the caller's | -| | thread. The disposition operation is a function | -| | closure on the target server, receiver link and | -| | delivery handle for the message received on the | -| | Server receiver call back. The closure is executed | -| | on the Controller's eventloop thread. | -| | | -| | The settlement of RPC responses is automatic and | -| | not under application control. | -+-----------------+----------------------------------------------------+ -|Issues | | -+-----------------+----------------------------------------------------+ - -Service and Operational Qualities -================================= - -This section describes the primary service and operational qualities -that are relevant to the driver architecture and implementation. These -non-functional factors define the behavior of the driver implementation -(e.g. limits and capacities). These behaviors can be generally -categorized as being due to a design time (e.g. limit enforced by -implementation) or a run time (e.g. limit due to environment, -resources, etc.) constraint. The full detail and measures for these -qualities is outside the scope of this document but should be included -in any performance and scalability analysis of the driver implementation. 
- -+-------------+--------------------------------------------+------------+ -| Quality | Description | Limit | -+-------------+--------------------------------------------+------------+ -| Servers | The number of RPC or Notify servers that | Environment| -| | the driver will concurrently subscribe to | | -| | the messaging bus (e.g. Listeners) | | -+-------------+--------------------------------------------+------------+ -| Subscription| The maximum rate at which servers can be | Environment| -| Rate | subscribed and attached to the message bus | | -+-------------+--------------------------------------------+------------+ -| Senders | The number of unique Targets that can | Environment| -| | be concurrently defined for the destination| | -| | of RPC or Notify message transfer | | -+-------------+--------------------------------------------+------------+ -| Pending | The number of messages that the driver | Environment| -| Sends | will queue while waiting for link | | -| | availability or flow credit | | -+-------------+--------------------------------------------+------------+ -| Sends | The number of concurrent unacked messages | Environment| -| Outstanding | the driver will send | | -| | | | -+-------------+--------------------------------------------+------------+ -| Server Link | The number of message credits an RPC or | Design | -| Credits | Notification server will issue | | -| | | | -+-------------+--------------------------------------------+------------+ -| RPC Reply | The number of RPC reply message credits | Design | -| Link Credits| the driver will issue | | -| | | | -+-------------+--------------------------------------------+------------+ -| Message | The rate that the driver will transfer | Environment| -| Transfer | requests to the message bus | | -| Rate | | | -+-------------+--------------------------------------------+------------+ -| Message | The rate of transfer for the message | Environment| -| Data | body "payload" | | -| Throughput | | | 
-+-------------+--------------------------------------------+------------+ -| Tasks | The number of concurrent client requests | Design | -| Outstanding | that can be queued for driver thread | | -| | processing. | | -+-------------+--------------------------------------------+------------+ -| Message | The number of attempts the driver will | Design | -| Retries | make to send a message | | -| | | | -+-------------+--------------------------------------------+------------+ -| Transport | The number of Transport Hosts that can | Environment| -| Hosts | be specified for connection management | | -| | (e.g. selection and failover) | | -+-------------+--------------------------------------------+------------+ - -References -========== - -.. [1] https://blueprints.launchpad.net/oslo.messaging/+spec/amqp10-driver-implementation -.. [2] https://opendev.org/openstack/oslo-specs/src/branch/master/specs/juno/amqp10-driver-implementation.rst -.. [3] https://review.opendev.org/#/c/314603/ -.. [4] https://github.com/kgiusti/pyngus -.. [5] https://github.com/apache/qpid-proton -.. [6] https://review.opendev.org/#/c/317285/ -.. [7] https://opendev.org/openstack/oslo.messaging diff --git a/oslo_messaging/_drivers/impl_amqp1.py b/oslo_messaging/_drivers/impl_amqp1.py deleted file mode 100644 index bba8cebf4..000000000 --- a/oslo_messaging/_drivers/impl_amqp1.py +++ /dev/null @@ -1,449 +0,0 @@ -# Copyright 2014, Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Driver for the 'amqp' transport. - -This module provides a transport driver that speaks version 1.0 of the AMQP -messaging protocol. The driver sends messages and creates subscriptions via -'tasks' that are performed on its behalf via the controller module. -""" - -import collections -import logging -import os -import threading -import uuid -import warnings - -from debtcollector import removals -from oslo_config import cfg -from oslo_messaging.target import Target -from oslo_serialization import jsonutils -from oslo_utils import importutils -from oslo_utils import timeutils - -from oslo_messaging._drivers.amqp1_driver.eventloop import compute_timeout -from oslo_messaging._drivers.amqp1_driver import opts -from oslo_messaging._drivers import base -from oslo_messaging._drivers import common - - -warnings.simplefilter('always') -proton = importutils.try_import('proton') -controller = importutils.try_import( - 'oslo_messaging._drivers.amqp1_driver.controller' -) -LOG = logging.getLogger(__name__) - -# Build/Decode RPC Response messages -# Body Format - json string containing a map with keys: -# 'failure' - (optional) serialized exception from remote -# 'response' - (if no failure provided) data returned by call - - -def marshal_response(reply, failure): - # TODO(grs): do replies have a context? - # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't - # have support for vbin8. - msg = proton.Message(inferred=True) - if failure: - failure = common.serialize_remote_exception(failure) - data = {"failure": failure} - else: - data = {"response": reply} - msg.body = jsonutils.dumps(data) - return msg - - -def unmarshal_response(message, allowed): - # TODO(kgiusti) This may fail to unpack and raise an exception. Need to - # communicate this to the caller! 
- data = jsonutils.loads(message.body) - failure = data.get('failure') - if failure is not None: - raise common.deserialize_remote_exception(failure, allowed) - return data.get("response") - - -# Build/Decode RPC Request and Notification messages -# Body Format: json string containing a map with keys: -# 'request' - possibly serialized application data -# 'context' - context provided by the application -# 'call_monitor_timeout' - optional time in seconds for RPC call monitoring - -def marshal_request(request, context, envelope=False, - call_monitor_timeout=None): - # NOTE(flaper87): Set inferred to True since rabbitmq-amqp-1.0 doesn't - # have support for vbin8. - msg = proton.Message(inferred=True) - if envelope: - request = common.serialize_msg(request) - data = { - "request": request, - "context": context - } - if call_monitor_timeout is not None: - data["call_monitor_timeout"] = call_monitor_timeout - msg.body = jsonutils.dumps(data) - return msg - - -def unmarshal_request(message): - data = jsonutils.loads(message.body) - msg = common.deserialize_msg(data.get("request")) - return (msg, data.get("context"), data.get("call_monitor_timeout")) - - -@removals.removed_class("ProtonIncomingMessage") -class ProtonIncomingMessage(base.RpcIncomingMessage): - def __init__(self, listener, message, disposition): - request, ctxt, client_timeout = unmarshal_request(message) - super().__init__(ctxt, request) - self.listener = listener - self.client_timeout = client_timeout - self._reply_to = message.reply_to - self._correlation_id = message.id - self._disposition = disposition - - def heartbeat(self): - # heartbeats are sent "worst effort": non-blocking, no retries, - # pre-settled (no blocking for acks). We don't want the server thread - # being blocked because it is unable to send a heartbeat. - if not self._reply_to: - LOG.warning("Cannot send RPC heartbeat: no reply-to provided") - return - # send a null msg (no body). 
This will cause the client to simply reset - # its timeout (the null message is dropped). Use time-to-live to - # prevent stale heartbeats from building up on the message bus - msg = proton.Message() - msg.correlation_id = self._correlation_id - msg.ttl = self.client_timeout - task = controller.SendTask("RPC KeepAlive", msg, self._reply_to, - deadline=None, retry=0, wait_for_ack=False) - self.listener.driver._ctrl.add_task(task) - task.wait() - - def reply(self, reply=None, failure=None): - """Schedule an RPCReplyTask to send the reply.""" - if self._reply_to: - response = marshal_response(reply, failure) - response.correlation_id = self._correlation_id - driver = self.listener.driver - deadline = compute_timeout(driver._default_reply_timeout) - ack = not driver._pre_settle_reply - task = controller.SendTask("RPC Reply", response, self._reply_to, - # analogous to kombu missing dest t/o: - deadline, - retry=driver._default_reply_retry, - wait_for_ack=ack) - driver._ctrl.add_task(task) - rc = task.wait() - if rc: - # something failed. 
Not much we can do at this point but log - LOG.debug("RPC Reply failed to send: %s", str(rc)) - else: - LOG.debug("Ignoring reply as no reply address available") - - def acknowledge(self): - """Schedule a MessageDispositionTask to send the settlement.""" - task = controller.MessageDispositionTask(self._disposition, - released=False) - self.listener.driver._ctrl.add_task(task) - - def requeue(self): - """Schedule a MessageDispositionTask to release the message""" - task = controller.MessageDispositionTask(self._disposition, - released=True) - self.listener.driver._ctrl.add_task(task) - - -@removals.removed_class("Queue") -class Queue: - def __init__(self): - self._queue = collections.deque() - self._lock = threading.Lock() - self._pop_wake_condition = threading.Condition(self._lock) - self._started = True - - def put(self, item): - with self._lock: - self._queue.appendleft(item) - self._pop_wake_condition.notify() - - def pop(self, timeout): - with timeutils.StopWatch(timeout) as stop_watcher: - with self._lock: - while len(self._queue) == 0: - if stop_watcher.expired() or not self._started: - return None - self._pop_wake_condition.wait( - stop_watcher.leftover(return_none=True) - ) - return self._queue.pop() - - def stop(self): - with self._lock: - self._started = False - self._pop_wake_condition.notify_all() - - -@removals.removed_class("ProtonListener") -class ProtonListener(base.PollStyleListener): - def __init__(self, driver): - super().__init__(driver.prefetch_size) - self.driver = driver - self.incoming = Queue() - self.id = uuid.uuid4().hex - - def stop(self): - self.incoming.stop() - - @base.batch_poll_helper - def poll(self, timeout=None): - qentry = self.incoming.pop(timeout) - if qentry is None: - return None - return ProtonIncomingMessage(self, - qentry['message'], - qentry['disposition']) - - -@removals.removed_class("ProtonDriver") -class ProtonDriver(base.BaseDriver): - """AMQP 1.0 Driver - - See :doc:`AMQP1.0` for details. 
- """ - - def __init__(self, conf, url, - default_exchange=None, allowed_remote_exmods=[]): - if proton is None or controller is None: - raise NotImplementedError("Proton AMQP C libraries not installed") - - super().__init__(conf, url, default_exchange, - allowed_remote_exmods) - - opt_group = cfg.OptGroup(name='oslo_messaging_amqp', - title='AMQP 1.0 driver options') - conf.register_group(opt_group) - conf.register_opts(opts.amqp1_opts, group=opt_group) - conf = common.ConfigOptsProxy(conf, url, opt_group.name) - - self._conf = conf - self._default_exchange = default_exchange - - # lazy connection setup - don't create the controller until - # after the first messaging request: - self._ctrl = None - self._pid = None - self._lock = threading.Lock() - - # timeout for message acknowledgement - opt_name = conf.oslo_messaging_amqp - self._default_reply_timeout = opt_name.default_reply_timeout - self._default_send_timeout = opt_name.default_send_timeout - self._default_notify_timeout = opt_name.default_notify_timeout - self._default_reply_retry = opt_name.default_reply_retry - - # which message types should be sent pre-settled? - ps = [s.lower() for s in opt_name.pre_settled] - self._pre_settle_call = 'rpc-call' in ps - self._pre_settle_reply = 'rpc-reply' in ps - self._pre_settle_cast = 'rpc-cast' in ps - self._pre_settle_notify = 'notify' in ps - bad_opts = set(ps).difference(['rpc-call', 'rpc-reply', - 'rpc-cast', 'notify']) - if bad_opts: - LOG.warning("Ignoring unrecognized pre_settle value(s): %s", - " ".join(bad_opts)) - - def _ensure_connect_called(func): - """Causes a new controller to be created when the messaging service is - first used by the current process. It is safe to push tasks to it - whether connected or not, but those tasks won't be processed until - connection completes. - """ - def wrap(self, *args, **kws): - with self._lock: - # check to see if a fork was done after the Controller and its - # I/O thread was spawned. 
old_pid will be None the first time - # this is called which will cause the Controller to be created. - old_pid = self._pid - self._pid = os.getpid() - - if old_pid != self._pid: - if self._ctrl is not None: - # fork was called after the Controller was created, and - # we are now executing as the child process. Do not - # touch the existing Controller - it is owned by the - # parent. Best we can do here is simply drop it and - # hope we get lucky. - LOG.warning("Process forked after connection " - "established!") - self._ctrl = None - # Create a Controller that connects to the messaging - # service: - self._ctrl = controller.Controller(self._url, - self._default_exchange, - self._conf) - self._ctrl.connect() - return func(self, *args, **kws) - return wrap - - @_ensure_connect_called - def send(self, target, ctxt, message, - wait_for_reply=False, - timeout=None, call_monitor_timeout=None, - retry=None, transport_options=None): - """Send a message to the given target. - - :param target: destination for message - :type target: oslo_messaging.Target - :param ctxt: message context - :type ctxt: dict - :param message: message payload - :type message: dict - :param wait_for_reply: expects a reply message, wait for it - :type wait_for_reply: bool - :param timeout: raise exception if send does not complete within - timeout seconds. None == no timeout. - :type timeout: float - :param call_monitor_timeout: Maximum time the client will wait for the - call to complete or receive a message heartbeat indicating the - remote side is still executing. 
- :type call_monitor_timeout: float - :param retry: (optional) maximum re-send attempts on recoverable error - None or -1 means to retry forever - 0 means no retry - N means N retries - :type retry: int - :param transport_options: transport-specific options to apply to the - sending of the message (TBD) - :type transport_options: dictionary - """ - request = marshal_request(message, ctxt, None, - call_monitor_timeout) - if timeout: - expire = compute_timeout(timeout) - request.ttl = timeout - request.expiry_time = compute_timeout(timeout) - else: - # no timeout provided by application. If the backend is queueless - # this could lead to a hang - provide a default to prevent this - # TODO(kgiusti) only do this if brokerless backend - expire = compute_timeout(self._default_send_timeout) - if wait_for_reply: - ack = not self._pre_settle_call - if call_monitor_timeout is None: - task = controller.RPCCallTask(target, request, expire, retry, - wait_for_ack=ack) - else: - task = controller.RPCMonitoredCallTask(target, request, expire, - call_monitor_timeout, - retry, wait_for_ack=ack) - else: - ack = not self._pre_settle_cast - task = controller.SendTask("RPC Cast", request, target, expire, - retry, wait_for_ack=ack) - self._ctrl.add_task(task) - - reply = task.wait() - if isinstance(reply, Exception): - raise reply - if reply: - # TODO(kgiusti) how to handle failure to un-marshal? - # Must log, and determine best way to communicate this failure - # back up to the caller - reply = unmarshal_response(reply, self._allowed_remote_exmods) - return reply - - @_ensure_connect_called - def send_notification(self, target, ctxt, message, version, - retry=None): - """Send a notification message to the given target. 
- - :param target: destination for message - :type target: oslo_messaging.Target - :param ctxt: message context - :type ctxt: dict - :param message: message payload - :type message: dict - :param version: message envelope version - :type version: float - :param retry: (optional) maximum re-send attempts on recoverable error - None or -1 means to retry forever - 0 means no retry - N means N retries - :type retry: int - """ - request = marshal_request(message, ctxt, envelope=(version == 2.0)) - # no timeout is applied to notifications, however if the backend is - # queueless this could lead to a hang - provide a default to prevent - # this - # TODO(kgiusti) should raise NotImplemented if not broker backend - deadline = compute_timeout(self._default_notify_timeout) - ack = not self._pre_settle_notify - task = controller.SendTask("Notify", request, target, - deadline, retry, wait_for_ack=ack, - notification=True) - self._ctrl.add_task(task) - rc = task.wait() - if isinstance(rc, Exception): - raise rc - - @_ensure_connect_called - def listen(self, target, batch_size, batch_timeout): - """Construct a Listener for the given target.""" - LOG.debug("Listen to %s", target) - listener = ProtonListener(self) - task = controller.SubscribeTask(target, listener) - self._ctrl.add_task(task) - task.wait() - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - @_ensure_connect_called - def listen_for_notifications(self, targets_and_priorities, pool, - batch_size, batch_timeout): - """Construct a Listener for notifications on the given target and - priority. 
- """ - # TODO(kgiusti) should raise NotImplemented if not broker backend - LOG.debug("Listen for notifications %s", targets_and_priorities) - if pool: - raise NotImplementedError('"pool" not implemented by ' - 'this transport driver') - listener = ProtonListener(self) - # this is how the destination target is created by the notifier, - # see MessagingDriver.notify in oslo_messaging/notify/messaging.py - for target, priority in targets_and_priorities: - topic = '{}.{}'.format(target.topic, priority) - # Sooo... the exchange is simply discarded? (see above comment) - task = controller.SubscribeTask(Target(topic=topic), - listener, notifications=True) - self._ctrl.add_task(task) - task.wait() - return base.PollStyleListenerAdapter(listener, batch_size, - batch_timeout) - - def cleanup(self): - """Release all resources.""" - if self._ctrl: - self._ctrl.shutdown() - self._ctrl = None - LOG.info("AMQP 1.0 messaging driver shutdown") - - def require_features(self, requeue=True): - pass diff --git a/oslo_messaging/conffixture.py b/oslo_messaging/conffixture.py index 090538714..b7eb57bc7 100644 --- a/oslo_messaging/conffixture.py +++ b/oslo_messaging/conffixture.py @@ -55,9 +55,6 @@ class ConfFixture(fixtures.Fixture): _import_opts(self.conf, 'oslo_messaging._drivers.amqp', 'amqp_opts', 'oslo_messaging_rabbit') - _import_opts(self.conf, - 'oslo_messaging._drivers.amqp1_driver.opts', - 'amqp1_opts', 'oslo_messaging_amqp') _import_opts(self.conf, 'oslo_messaging.rpc.client', '_client_opts') _import_opts(self.conf, 'oslo_messaging.transport', '_transport_opts') _import_opts(self.conf, 'oslo_messaging.rpc.dispatcher', diff --git a/oslo_messaging/opts.py b/oslo_messaging/opts.py index f3dd59cc0..f981d5f40 100644 --- a/oslo_messaging/opts.py +++ b/oslo_messaging/opts.py @@ -16,7 +16,6 @@ import copy import itertools from oslo_messaging._drivers import amqp -from oslo_messaging._drivers.amqp1_driver import opts as amqp_opts from oslo_messaging._drivers import base as drivers_base 
from oslo_messaging._drivers import impl_rabbit from oslo_messaging._drivers.kafka_driver import kafka_options @@ -40,7 +39,6 @@ _global_opt_lists = [ _opts = [ (None, list(itertools.chain(*_global_opt_lists))), - ('oslo_messaging_amqp', amqp_opts.amqp1_opts), ('oslo_messaging_notifications', notifier._notifier_opts), ('oslo_messaging_rabbit', list( itertools.chain(amqp.amqp_opts, impl_rabbit.rabbit_opts))), diff --git a/oslo_messaging/tests/drivers/test_amqp_driver.py b/oslo_messaging/tests/drivers/test_amqp_driver.py deleted file mode 100644 index 968281555..000000000 --- a/oslo_messaging/tests/drivers/test_amqp_driver.py +++ /dev/null @@ -1,2324 +0,0 @@ -# Copyright (C) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import logging -import os -import queue -import select -import shlex -import shutil -import socket -import subprocess -import sys -import tempfile -import threading -import time -from unittest import mock -import uuid - -from oslo_utils import eventletutils -from oslo_utils import importutils -from string import Template -import testtools - -import oslo_messaging -from oslo_messaging.tests import utils as test_utils - -# TODO(kgiusti) Conditionally run these tests only if the necessary -# dependencies are installed. This should be removed once the proton libraries -# are available in the base repos for all supported platforms. 
-pyngus = importutils.try_import("pyngus") -if pyngus: - from oslo_messaging._drivers.amqp1_driver.addressing \ - import AddresserFactory - from oslo_messaging._drivers.amqp1_driver.addressing \ - import LegacyAddresser - from oslo_messaging._drivers.amqp1_driver.addressing \ - import RoutableAddresser - import oslo_messaging._drivers.impl_amqp1 as amqp_driver - -# The Cyrus-based SASL tests can only be run if the installed version of proton -# has been built with Cyrus SASL support. -_proton = importutils.try_import("proton") -CYRUS_ENABLED = (pyngus and pyngus.VERSION >= (2, 0, 0) and _proton and - getattr(_proton.SASL, "extended", lambda: False)()) -# same with SSL -SSL_ENABLED = (_proton and getattr(_proton.SSL, "present", lambda: False)()) - -LOG = logging.getLogger(__name__) - - -def _wait_until(predicate, timeout): - deadline = timeout + time.time() - while not predicate() and deadline > time.time(): - time.sleep(0.1) - - -class _ListenerThread(threading.Thread): - """Run a blocking listener in a thread.""" - def __init__(self, listener, msg_count, msg_ack=True): - super().__init__() - self.listener = listener - self.msg_count = msg_count - self._msg_ack = msg_ack - self.messages = queue.Queue() - self.daemon = True - self.started = eventletutils.Event() - self._done = eventletutils.Event() - self.start() - self.started.wait() - - def run(self): - LOG.debug("Listener started") - self.started.set() - while not self._done.is_set(): - for in_msg in self.listener.poll(timeout=0.5): - self.messages.put(in_msg) - self.msg_count -= 1 - self.msg_count == 0 and self._done.set() - if self._msg_ack: - in_msg.acknowledge() - if in_msg.message.get('method') == 'echo': - in_msg.reply(reply={'correlation-id': - in_msg.message.get('id')}) - else: - in_msg.requeue() - - LOG.debug("Listener stopped") - - def get_messages(self): - """Returns a list of all received messages.""" - msgs = [] - try: - while True: - m = self.messages.get(False) - msgs.append(m) - except 
queue.Empty: - pass - return msgs - - def kill(self, timeout=30): - self._done.set() - self.join(timeout) - - -class _SlowResponder(_ListenerThread): - # an RPC listener that pauses delay seconds before replying - def __init__(self, listener, delay, msg_count=1): - self._delay = delay - super().__init__(listener, msg_count) - - def run(self): - LOG.debug("_SlowResponder started") - self.started.set() - while not self._done.is_set(): - for in_msg in self.listener.poll(timeout=0.5): - time.sleep(self._delay) - in_msg.acknowledge() - in_msg.reply(reply={'correlation-id': - in_msg.message.get('id')}) - self.messages.put(in_msg) - self.msg_count -= 1 - self.msg_count == 0 and self._done.set() - - -class _CallMonitor(_ListenerThread): - # an RPC listener that generates heartbeats before - # replying. - def __init__(self, listener, delay, hb_count, msg_count=1): - self._delay = delay - self._hb_count = hb_count - super().__init__(listener, msg_count) - - def run(self): - LOG.debug("_CallMonitor started") - self.started.set() - while not self._done.is_set(): - for in_msg in self.listener.poll(timeout=0.5): - hb_rate = in_msg.client_timeout / 2.0 - deadline = time.time() + self._delay - while deadline > time.time(): - if self._done.wait(hb_rate): - return - if self._hb_count > 0: - in_msg.heartbeat() - self._hb_count -= 1 - in_msg.acknowledge() - in_msg.reply(reply={'correlation-id': - in_msg.message.get('id')}) - self.messages.put(in_msg) - self.msg_count -= 1 - self.msg_count == 0 and self._done.set() - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestProtonDriverLoad(test_utils.BaseTestCase): - - def setUp(self): - super().setUp() - self.messaging_conf.transport_url = 'amqp://' - - def test_driver_load(self): - transport = oslo_messaging.get_transport(self.conf) - self.assertIsInstance(transport._driver, - amqp_driver.ProtonDriver) - - -class _AmqpBrokerTestCase(test_utils.BaseTestCase): - """Creates a single FakeBroker for use by the tests""" - 
@testtools.skipUnless(pyngus, "proton modules not present") - def setUp(self): - super().setUp() - self._broker = FakeBroker(self.conf.oslo_messaging_amqp) - self._broker_addr = "amqp://%s:%d" % (self._broker.host, - self._broker.port) - self._broker_url = oslo_messaging.TransportURL.parse( - self.conf, self._broker_addr) - - def tearDown(self): - super().tearDown() - if self._broker: - self._broker.stop() - - -class _AmqpBrokerTestCaseAuto(_AmqpBrokerTestCase): - """Like _AmqpBrokerTestCase, but starts the broker""" - @testtools.skipUnless(pyngus, "proton modules not present") - def setUp(self): - super().setUp() - self._broker.start() - - -class TestAmqpSend(_AmqpBrokerTestCaseAuto): - """Test sending and receiving messages.""" - - def test_driver_unconnected_cleanup(self): - """Verify the driver can cleanly shutdown even if never connected.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - driver.cleanup() - - def test_listener_cleanup(self): - """Verify unused listener can cleanly shutdown.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = driver.listen(target, None, None)._poll_style_listener - self.assertIsInstance(listener, amqp_driver.ProtonListener) - driver.cleanup() - - def test_send_no_reply(self): - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"msg": "value"}, wait_for_reply=False) - self.assertIsNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - self.assertEqual({"msg": "value"}, listener.messages.get().message) - - predicate = lambda: (self._broker.sender_link_ack_count == 1) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.cleanup() - - def test_send_exchange_with_reply(self): - driver = 
amqp_driver.ProtonDriver(self.conf, self._broker_url) - target1 = oslo_messaging.Target(topic="test-topic", exchange="e1") - listener1 = _ListenerThread( - driver.listen(target1, None, None)._poll_style_listener, 1) - target2 = oslo_messaging.Target(topic="test-topic", exchange="e2") - listener2 = _ListenerThread( - driver.listen(target2, None, None)._poll_style_listener, 1) - - rc = driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "e1"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('e1', rc.get('correlation-id')) - - rc = driver.send(target2, {"context": "whatever"}, - {"method": "echo", "id": "e2"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('e2', rc.get('correlation-id')) - - listener1.join(timeout=30) - self.assertFalse(listener1.is_alive()) - listener2.join(timeout=30) - self.assertFalse(listener2.is_alive()) - driver.cleanup() - - def test_messaging_patterns(self): - """Verify the direct, shared, and fanout message patterns work.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target1 = oslo_messaging.Target(topic="test-topic", server="server1") - listener1 = _ListenerThread( - driver.listen(target1, None, None)._poll_style_listener, 4) - target2 = oslo_messaging.Target(topic="test-topic", server="server2") - listener2 = _ListenerThread( - driver.listen(target2, None, None)._poll_style_listener, 3) - - shared_target = oslo_messaging.Target(topic="test-topic") - fanout_target = oslo_messaging.Target(topic="test-topic", - fanout=True) - # this should go to only one server: - driver.send(shared_target, {"context": "whatever"}, - {"method": "echo", "id": "either-1"}, - wait_for_reply=True) - self.assertEqual(1, self._broker.topic_count) - self.assertEqual(1, self._broker.direct_count) # reply - - # this should go to the other server: - driver.send(shared_target, {"context": "whatever"}, - {"method": "echo", "id": "either-2"}, - 
wait_for_reply=True) - self.assertEqual(2, self._broker.topic_count) - self.assertEqual(2, self._broker.direct_count) # reply - - # these should only go to listener1: - driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "server1-1"}, - wait_for_reply=True) - - driver.send(target1, {"context": "whatever"}, - {"method": "echo", "id": "server1-2"}, - wait_for_reply=True) - self.assertEqual(6, self._broker.direct_count) # 2X(send+reply) - - # this should only go to listener2: - driver.send(target2, {"context": "whatever"}, - {"method": "echo", "id": "server2"}, - wait_for_reply=True) - self.assertEqual(8, self._broker.direct_count) - - # both listeners should get a copy: - driver.send(fanout_target, {"context": "whatever"}, - {"method": "echo", "id": "fanout"}) - - listener1.join(timeout=30) - self.assertFalse(listener1.is_alive()) - listener2.join(timeout=30) - self.assertFalse(listener2.is_alive()) - self.assertEqual(1, self._broker.fanout_count) - - listener1_ids = [x.message.get('id') for x in listener1.get_messages()] - listener2_ids = [x.message.get('id') for x in listener2.get_messages()] - - self.assertTrue('fanout' in listener1_ids and - 'fanout' in listener2_ids) - self.assertTrue('server1-1' in listener1_ids and - 'server1-1' not in listener2_ids) - self.assertTrue('server1-2' in listener1_ids and - 'server1-2' not in listener2_ids) - self.assertTrue('server2' in listener2_ids and - 'server2' not in listener1_ids) - if 'either-1' in listener1_ids: - self.assertTrue('either-2' in listener2_ids and - 'either-2' not in listener1_ids and - 'either-1' not in listener2_ids) - else: - self.assertTrue('either-2' in listener1_ids and - 'either-2' not in listener2_ids and - 'either-1' in listener2_ids) - - predicate = lambda: (self._broker.sender_link_ack_count == 12) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.cleanup() - - def test_send_timeout(self): - """Verify send timeout - no reply sent.""" - driver = 
amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - - # the listener will drop this message: - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, target, - {"context": "whatever"}, - {"method": "drop"}, - wait_for_reply=True, - timeout=1.0) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_released_send(self): - """Verify exception thrown if send Nacked.""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="no listener") - - # the broker will send a nack (released) since there is no active - # listener for the target: - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send, target, - {"context": "whatever"}, - {"method": "drop"}, - wait_for_reply=True, - retry=0, - timeout=1.0) - driver.cleanup() - - def test_send_not_acked(self): - """Verify exception thrown ack dropped.""" - self.config(pre_settled=[], - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - # set this directly so we can use a value < minimum allowed - driver._default_send_timeout = 2 - target = oslo_messaging.Target(topic="!no-ack!") - - # the broker will silently discard: - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send, target, - {"context": "whatever"}, - {"method": "drop"}, - retry=0, - wait_for_reply=True) - driver.cleanup() - - def test_no_ack_cast(self): - """Verify no exception is thrown if acks are turned off""" - # set casts to ignore ack - self.config(pre_settled=['rpc-cast'], - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - # set this directly so we can use a value < minimum allowed - driver._default_send_timeout = 2 - target = oslo_messaging.Target(topic="!no-ack!") - - # the broker will 
silently discard this cast, but since ack'ing is - # disabled the send does not fail - driver.send(target, {"context": "whatever"}, - {"method": "drop"}, - wait_for_reply=False) - driver.cleanup() - - def test_call_late_reply(self): - """What happens if reply arrives after timeout?""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _SlowResponder( - driver.listen(target, None, None)._poll_style_listener, - delay=3) - - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, target, - {"context": "whatever"}, - {"method": "echo", "id": "???"}, - wait_for_reply=True, - timeout=1.0) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - - predicate = lambda: (self._broker.sender_link_ack_count == 1) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.cleanup() - - def test_call_failed_reply(self): - """Send back an exception generated at the listener""" - class _FailedResponder(_ListenerThread): - def __init__(self, listener): - super().__init__(listener, 1) - - def run(self): - self.started.set() - while not self._done.is_set(): - for in_msg in self.listener.poll(timeout=0.5): - try: - raise RuntimeError("Oopsie!") - except RuntimeError: - in_msg.reply(reply=None, - failure=sys.exc_info()) - self._done.set() - - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _FailedResponder( - driver.listen(target, None, None)._poll_style_listener) - - self.assertRaises(RuntimeError, - driver.send, target, - {"context": "whatever"}, - {"method": "echo"}, - wait_for_reply=True, - timeout=5.0) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_call_reply_timeout(self): - """What happens if the replier times out?""" - class _TimeoutListener(_ListenerThread): - def __init__(self, listener): - super().__init__(listener, 1) - - def 
run(self): - self.started.set() - while not self._done.is_set(): - for in_msg in self.listener.poll(timeout=0.5): - # reply will never be acked (simulate drop): - in_msg._reply_to = "!no-ack!" - in_msg.reply(reply={'correlation-id': - in_msg.message.get("id")}) - self._done.set() - - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - driver._default_reply_timeout = 1 - target = oslo_messaging.Target(topic="test-topic") - listener = _TimeoutListener( - driver.listen(target, None, None)._poll_style_listener) - - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, target, - {"context": "whatever"}, - {"method": "echo"}, - wait_for_reply=True, - timeout=3) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_listener_requeue(self): - "Emulate Server requeue on listener incoming messages" - self.config(pre_settled=[], group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - driver.require_features(requeue=True) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1, - msg_ack=False) - - rc = driver.send(target, {"context": True}, - {"msg": "value"}, wait_for_reply=False) - self.assertIsNone(rc) - - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - - predicate = lambda: (self._broker.sender_link_requeue_count == 1) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.cleanup() - - def test_sender_minimal_credit(self): - # ensure capacity is replenished when only 1 credit is configured - self.config(reply_link_credit=1, - rpc_server_credit=1, - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic", server="server") - listener = _ListenerThread(driver.listen(target, - None, - None)._poll_style_listener, - 4) - for i in range(4): - 
threading.Thread(target=driver.send, - args=(target, - {"context": "whatever"}, - {"method": "echo"}), - kwargs={'wait_for_reply': True}).start() - predicate = lambda: (self._broker.direct_count == 8) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - listener.join(timeout=30) - driver.cleanup() - - def test_sender_link_maintenance(self): - # ensure links are purged from cache - self.config(default_sender_link_timeout=1, - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic-maint") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 3) - - # the send should create a receiver link on the broker - rc = driver.send(target, {"context": True}, - {"msg": "value"}, wait_for_reply=False) - self.assertIsNone(rc) - - predicate = lambda: (self._broker.receiver_link_count == 1) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - self.assertTrue(listener.is_alive()) - self.assertEqual({"msg": "value"}, listener.messages.get().message) - - predicate = lambda: (self._broker.receiver_link_count == 0) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - # the next send should create a separate receiver link on the broker - rc = driver.send(target, {"context": True}, - {"msg": "value"}, wait_for_reply=False) - self.assertIsNone(rc) - - predicate = lambda: (self._broker.receiver_link_count == 1) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - self.assertTrue(listener.is_alive()) - self.assertEqual({"msg": "value"}, listener.messages.get().message) - - predicate = lambda: (self._broker.receiver_link_count == 0) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.cleanup() - - def test_call_monitor_ok(self): - # verify keepalive by delaying the reply > heartbeat interval - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - 
listener = _CallMonitor( - driver.listen(target, None, None)._poll_style_listener, - delay=11, - hb_count=100) - rc = driver.send(target, - {"context": True}, - {"method": "echo", "id": "1"}, - wait_for_reply=True, - timeout=60, - call_monitor_timeout=5) - self.assertIsNotNone(rc) - self.assertEqual("1", rc.get('correlation-id')) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_call_monitor_bad_no_heartbeat(self): - # verify call fails if keepalives stop coming - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _CallMonitor( - driver.listen(target, None, None)._poll_style_listener, - delay=11, - hb_count=1) - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, - target, - {"context": True}, - {"method": "echo", "id": "1"}, - wait_for_reply=True, - timeout=60, - call_monitor_timeout=5) - listener.kill() - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_call_monitor_bad_call_timeout(self): - # verify call fails if deadline hit regardless of heartbeat activity - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _CallMonitor( - driver.listen(target, None, None)._poll_style_listener, - delay=20, - hb_count=100) - self.assertRaises(oslo_messaging.MessagingTimeout, - driver.send, - target, - {"context": True}, - {"method": "echo", "id": "1"}, - wait_for_reply=True, - timeout=11, - call_monitor_timeout=5) - listener.kill() - self.assertFalse(listener.is_alive()) - driver.cleanup() - - -class TestAmqpNotification(_AmqpBrokerTestCaseAuto): - """Test sending and receiving notifications.""" - - def test_notification(self): - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - notifications = [(oslo_messaging.Target(topic="topic-1"), 'info'), - (oslo_messaging.Target(topic="topic-1"), 'error'), - 
(oslo_messaging.Target(topic="topic-2"), 'debug')] - nl = driver.listen_for_notifications( - notifications, None, None, None)._poll_style_listener - - # send one for each support version: - msg_count = len(notifications) * 2 - listener = _ListenerThread(nl, msg_count) - targets = ['topic-1.info', - 'topic-1.bad', # will raise MessageDeliveryFailure - 'bad-topic.debug', # will raise MessageDeliveryFailure - 'topic-1.error', - 'topic-2.debug'] - - excepted_targets = [] - for version in (1.0, 2.0): - for t in targets: - try: - driver.send_notification(oslo_messaging.Target(topic=t), - "context", {'target': t}, - version, retry=0) - except oslo_messaging.MessageDeliveryFailure: - excepted_targets.append(t) - - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - topics = [x.message.get('target') for x in listener.get_messages()] - self.assertEqual(msg_count, len(topics)) - self.assertEqual(2, topics.count('topic-1.info')) - self.assertEqual(2, topics.count('topic-1.error')) - self.assertEqual(2, topics.count('topic-2.debug')) - self.assertEqual(4, self._broker.dropped_count) - self.assertEqual(2, excepted_targets.count('topic-1.bad')) - self.assertEqual(2, excepted_targets.count('bad-topic.debug')) - driver.cleanup() - - def test_released_notification(self): - """Broker sends a Nack (released)""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send_notification, - oslo_messaging.Target(topic="bad address"), - "context", {'target': "bad address"}, - 2.0, - retry=0) - driver.cleanup() - - def test_notification_not_acked(self): - """Simulate drop of ack from broker""" - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - # set this directly so we can use a value < minimum allowed - driver._default_notify_timeout = 2 - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send_notification, - oslo_messaging.Target(topic="!no-ack!"), - "context", 
{'target': "!no-ack!"}, - 2.0, retry=0) - driver.cleanup() - - def test_no_ack_notification(self): - """Verify no exception is thrown if acks are turned off""" - # add a couple of illegal values for coverage of the warning - self.config(pre_settled=['notify', 'fleabag', 'poochie'], - group="oslo_messaging_amqp") - - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - # set this directly so we can use a value < minimum allowed - driver._default_notify_timeout = 2 - driver.send_notification(oslo_messaging.Target(topic="!no-ack!"), - "context", {'target': "!no-ack!"}, 2.0) - driver.cleanup() - - -@testtools.skipUnless(pyngus and pyngus.VERSION < (2, 0, 0), - "pyngus module not present") -class TestAuthentication(test_utils.BaseTestCase): - """Test user authentication using the old pyngus API""" - def setUp(self): - super().setUp() - # for simplicity, encode the credentials as they would appear 'on the - # wire' in a SASL frame - username and password prefixed by zero. - user_credentials = ["\0joe\0secret"] - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sasl_mechanisms="PLAIN", - user_credentials=user_credentials) - self._broker.start() - - def tearDown(self): - super().tearDown() - self._broker.stop() - - def test_authentication_ok(self): - """Verify that username and password given in TransportHost are - accepted by the broker. 
- """ - - addr = "amqp://joe:secret@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - rc = driver.send(target, {"context": True}, - {"method": "echo"}, wait_for_reply=True) - self.assertIsNotNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_authentication_failure(self): - """Verify that a bad password given in TransportHost is - rejected by the broker. - """ - - addr = "amqp://joe:badpass@%s:%d" % (self._broker.host, - self._broker.port) - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send, - target, {"context": True}, - {"method": "echo"}, - wait_for_reply=True, - retry=2) - driver.cleanup() - - -@testtools.skipUnless(CYRUS_ENABLED, "Cyrus SASL not supported") -class TestCyrusAuthentication(test_utils.BaseTestCase): - """Test the driver's Cyrus SASL integration""" - - _conf_dir = None - - # Note: don't add ANONYMOUS or EXTERNAL mechs without updating the - # test_authentication_bad_mechs test below - _mechs = "DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN" - - @classmethod - def setUpClass(cls): - # The Cyrus library can only be initialized once per _process_ - # Create a SASL configuration and user database, - # add a user 'joe' with password 'secret': - cls._conf_dir = "/tmp/amqp1_tests_%s" % os.getpid() - # no, we cannot use tempfile.mkdtemp() as it will 'helpfully' remove - # the temp dir after the first test is run - os.makedirs(cls._conf_dir) - db = os.path.join(cls._conf_dir, 'openstack.sasldb') 
- _t = "echo secret | saslpasswd2 -c -p -f ${db} -u myrealm joe" - cmd = Template(_t).substitute(db=db) - try: - subprocess.check_call(args=cmd, shell=True) - except Exception: - shutil.rmtree(cls._conf_dir, ignore_errors=True) - cls._conf_dir = None - return - - # configure the SASL server: - conf = os.path.join(cls._conf_dir, 'openstack.conf') - t = Template("""sasldb_path: ${db} -pwcheck_method: auxprop -auxprop_plugin: sasldb -mech_list: ${mechs} -""") - with open(conf, 'w') as f: - f.write(t.substitute(db=db, mechs=cls._mechs)) - - @classmethod - def tearDownClass(cls): - if cls._conf_dir: - shutil.rmtree(cls._conf_dir, ignore_errors=True) - - def setUp(self): - # fire up a test broker with the SASL config: - super().setUp() - if TestCyrusAuthentication._conf_dir is None: - self.skipTest("Cyrus SASL tools not installed") - _mechs = TestCyrusAuthentication._mechs - _dir = TestCyrusAuthentication._conf_dir - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sasl_mechanisms=_mechs, - user_credentials=["\0joe@myrealm\0secret"], - sasl_config_dir=_dir, - sasl_config_name="openstack") - self._broker.start() - self.messaging_conf.transport_url = 'amqp://' - self.conf = self.messaging_conf.conf - - def tearDown(self): - if self._broker: - self._broker.stop() - self._broker = None - super().tearDown() - - def _authentication_test(self, addr, retry=None): - url = oslo_messaging.TransportURL.parse(self.conf, addr) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - try: - rc = driver.send(target, {"context": True}, - {"method": "echo"}, wait_for_reply=True, - retry=retry) - self.assertIsNotNone(rc) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - finally: - driver.cleanup() - - def test_authentication_ok(self): - """Verify that username and password given in TransportHost are - accepted by the 
broker. - """ - addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, - self._broker.port) - self._authentication_test(addr) - - def test_authentication_failure(self): - """Verify that a bad password given in TransportHost is - rejected by the broker. - """ - addr = "amqp://joe@myrealm:badpass@%s:%d" % (self._broker.host, - self._broker.port) - try: - self._authentication_test(addr, retry=2) - except oslo_messaging.MessageDeliveryFailure as e: - # verify the exception indicates the failure was an authentication - # error - self.assertTrue('amqp:unauthorized-access' in str(e)) - else: - self.assertIsNone("Expected authentication failure") - - def test_authentication_bad_mechs(self): - """Verify that the connection fails if the client's SASL mechanisms do - not match the broker's. - """ - self.config(sasl_mechanisms="EXTERNAL ANONYMOUS", - group="oslo_messaging_amqp") - addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, - self._broker.port) - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self._authentication_test, - addr, - retry=0) - - def test_authentication_default_realm(self): - """Verify that default realm is used if none present in username""" - addr = "amqp://joe:secret@%s:%d" % (self._broker.host, - self._broker.port) - self.config(sasl_default_realm="myrealm", - group="oslo_messaging_amqp") - self._authentication_test(addr) - - def test_authentication_ignore_default_realm(self): - """Verify that default realm is not used if realm present in - username - """ - addr = "amqp://joe@myrealm:secret@%s:%d" % (self._broker.host, - self._broker.port) - self.config(sasl_default_realm="bad-realm", - group="oslo_messaging_amqp") - self._authentication_test(addr) - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestFailover(test_utils.BaseTestCase): - - def setUp(self): - super().setUp() - # configure different addressing modes on the brokers to test failing - # over from one type of backend to another - 
self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") - self._brokers = self._gen_brokers() - self._primary = 0 - self._backup = 1 - hosts = [] - for broker in self._brokers: - hosts.append(oslo_messaging.TransportHost(hostname=broker.host, - port=broker.port)) - self._broker_url = self._gen_transport_url(hosts) - - def tearDown(self): - super().tearDown() - for broker in self._brokers: - if broker.is_alive(): - broker.stop() - - def _gen_brokers(self): - return [FakeBroker(self.conf.oslo_messaging_amqp, - product="qpid-cpp"), - FakeBroker(self.conf.oslo_messaging_amqp, - product="routable")] - - def _gen_transport_url(self, hosts): - return oslo_messaging.TransportURL(self.conf, - transport="amqp", - hosts=hosts) - - def _failover(self, fail_broker): - self._brokers[0].start() - self._brokers[1].start() - - # self.config(trace=True, group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - - target = oslo_messaging.Target(topic="my-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - - # wait for listener links to come up on either broker - # 4 == 3 links per listener + 1 for the global reply queue - predicate = lambda: ((self._brokers[0].sender_link_count == 4) or - (self._brokers[1].sender_link_count == 4)) - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - if self._brokers[1].sender_link_count == 4: - self._primary = 1 - self._backup = 0 - - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "echo-1"}, - wait_for_reply=True, - timeout=30) - self.assertIsNotNone(rc) - self.assertEqual('echo-1', rc.get('correlation-id')) - - # 1 request msg, 1 response: - self.assertEqual(1, self._brokers[self._primary].topic_count) - self.assertEqual(1, self._brokers[self._primary].direct_count) - - # invoke failover method - fail_broker(self._brokers[self._primary]) - - # wait for listener links to re-establish on broker 1 - # 4 = 3 links 
per listener + 1 for the global reply queue - predicate = lambda: self._brokers[self._backup].sender_link_count == 4 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - rc = driver.send(target, - {"context": "whatever"}, - {"method": "echo", "id": "echo-2"}, - wait_for_reply=True, - timeout=2) - self.assertIsNotNone(rc) - self.assertEqual('echo-2', rc.get('correlation-id')) - - # 1 request msg, 1 response: - self.assertEqual(1, self._brokers[self._backup].topic_count) - self.assertEqual(1, self._brokers[self._backup].direct_count) - - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - - # note: stopping the broker first tests cleaning up driver without a - # connection active - self._brokers[self._backup].stop() - driver.cleanup() - - def test_broker_crash(self): - """Simulate a failure of one broker.""" - def _meth(broker): - # fail broker: - broker.stop() - time.sleep(0.5) - self._failover(_meth) - - def test_broker_shutdown(self): - """Simulate a normal shutdown of a broker.""" - def _meth(broker): - broker.stop(clean=True) - time.sleep(0.5) - self._failover(_meth) - - def test_heartbeat_failover(self): - """Simulate broker heartbeat timeout.""" - def _meth(broker): - # keep alive heartbeat from primary broker will stop, which should - # force failover to backup broker in about two seconds - broker.pause() - self.config(idle_timeout=2, group="oslo_messaging_amqp") - self._failover(_meth) - self._brokers[self._primary].stop() - - def test_listener_failover(self): - """Verify that Listeners sharing the same topic are re-established - after failover. 
- """ - self._brokers[0].start() - # self.config(trace=True, group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - - target = oslo_messaging.Target(topic="my-topic") - bcast = oslo_messaging.Target(topic="my-topic", fanout=True) - listener1 = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - listener2 = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 2) - - # wait for 7 sending links to become active on the broker. - # 7 = 3 links per Listener + 1 global reply link - predicate = lambda: self._brokers[0].sender_link_count == 7 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.send(bcast, {"context": "whatever"}, - {"method": "ignore", "id": "echo-1"}) - - # 1 message per listener - predicate = lambda: self._brokers[0].fanout_sent_count == 2 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - # start broker 1 then shutdown broker 0: - self._brokers[1].start() - self._brokers[0].stop(clean=True) - - # wait again for 7 sending links to re-establish on broker 1 - predicate = lambda: self._brokers[1].sender_link_count == 7 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - driver.send(bcast, {"context": "whatever"}, - {"method": "ignore", "id": "echo-2"}) - - # 1 message per listener - predicate = lambda: self._brokers[1].fanout_sent_count == 2 - _wait_until(predicate, 30) - self.assertTrue(predicate()) - - listener1.join(timeout=30) - listener2.join(timeout=30) - self.assertFalse(listener1.is_alive() or listener2.is_alive()) - - driver.cleanup() - self._brokers[1].stop() - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestLinkRecovery(_AmqpBrokerTestCase): - - def _send_retry(self, reject, retries): - self._reject = reject - - def on_active(link): - if self._reject > 0: - link.close() - self._reject -= 1 - else: - link.add_capacity(10) - - self._broker.on_receiver_active = on_active - 
self._broker.start() - self.config(link_retry_delay=1, - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread(driver.listen(target, - None, - None)._poll_style_listener, - 1) - try: - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "e1"}, - wait_for_reply=True, retry=retries) - self.assertIsNotNone(rc) - self.assertEqual(rc.get('correlation-id'), 'e1') - except Exception: - listener.kill() - driver.cleanup() - raise - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - self.assertEqual(listener.messages.get().message.get('method'), "echo") - driver.cleanup() - - def test_send_retry_ok(self): - # verify sender with retry=3 survives 2 link failures: - self._send_retry(reject=2, retries=3) - - def test_send_retry_fail(self): - # verify sender fails if retries exhausted - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self._send_retry, - reject=3, - retries=2) - - def test_listener_recovery(self): - # verify a listener recovers if all links fail: - self._addrs = {'unicast.test-topic': 2, - 'broadcast.test-topic.all': 2, - 'exclusive.test-topic.server': 2} - self._recovered = eventletutils.Event() - self._count = 0 - - def _on_active(link): - t = link.target_address - if t in self._addrs: - if self._addrs[t] > 0: - link.close() - self._addrs[t] -= 1 - else: - self._count += 1 - if self._count == len(self._addrs): - self._recovered.set() - - self._broker.on_sender_active = _on_active - self._broker.start() - self.config(link_retry_delay=1, group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic", - server="server") - listener = _ListenerThread(driver.listen(target, - None, - None)._poll_style_listener, - 3) - # wait for recovery - self.assertTrue(self._recovered.wait(timeout=30)) - # verify server RPC: - rc 
= driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "e1"}, - wait_for_reply=True) - self.assertIsNotNone(rc) - self.assertEqual(rc.get('correlation-id'), 'e1') - # verify balanced RPC: - target.server = None - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "e2"}, - wait_for_reply=True) - self.assertIsNotNone(rc) - self.assertEqual(rc.get('correlation-id'), 'e2') - # verify fanout: - target.fanout = True - driver.send(target, {"context": "whatever"}, - {"msg": "value"}, - wait_for_reply=False) - listener.join(timeout=30) - self.assertTrue(self._broker.fanout_count == 1) - self.assertFalse(listener.is_alive()) - self.assertEqual(listener.messages.get().message.get('method'), "echo") - driver.cleanup() - - def test_sender_credit_blocked(self): - # ensure send requests resume once credit is provided - self._blocked_links = set() - - def _on_active(link): - # refuse granting credit for the broadcast link - if self._broker._addresser._is_multicast(link.source_address): - self._blocked_links.add(link) - else: - # unblock all link when RPC call is made - link.add_capacity(10) - for li in self._blocked_links: - li.add_capacity(10) - - self._broker.on_receiver_active = _on_active - self._broker.on_credit_exhausted = lambda link: None - self._broker.start() - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic", server="server") - listener = _ListenerThread(driver.listen(target, - None, - None)._poll_style_listener, - 4) - target.fanout = True - target.server = None - # these threads will share the same link - for i in range(3): - t = threading.Thread(target=driver.send, - args=(target, {"context": "whatever"}, - {"msg": "n=%d" % i}), - kwargs={'wait_for_reply': False}) - t.start() - # casts return once message is put on active link - t.join(timeout=30) - - time.sleep(1) # ensure messages are going nowhere - self.assertEqual(self._broker.fanout_sent_count, 0) - # 
this will trigger the release of credit for the previous links - target.fanout = False - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "e1"}, - wait_for_reply=True) - self.assertIsNotNone(rc) - self.assertEqual(rc.get('correlation-id'), 'e1') - listener.join(timeout=30) - self.assertTrue(self._broker.fanout_count == 3) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestAddressing(test_utils.BaseTestCase): - # Verify the addressing modes supported by the driver - def _address_test(self, rpc_target, targets_priorities): - # verify proper messaging semantics for a given addressing mode - broker = FakeBroker(self.conf.oslo_messaging_amqp) - broker.start() - url = oslo_messaging.TransportURL.parse(self.conf, - "amqp://%s:%d" % - (broker.host, broker.port)) - driver = amqp_driver.ProtonDriver(self.conf, url) - - rl = [] - for server in ["Server1", "Server2"]: - _ = driver.listen(rpc_target(server=server), None, - None)._poll_style_listener - # 3 == 1 msg to server + 1 fanout msg + 1 anycast msg - rl.append(_ListenerThread(_, 3)) - - nl = [] - for n in range(2): - _ = driver.listen_for_notifications(targets_priorities, None, None, - None)._poll_style_listener - nl.append(_ListenerThread(_, len(targets_priorities))) - - driver.send(rpc_target(server="Server1"), {"context": "whatever"}, - {"msg": "Server1"}) - driver.send(rpc_target(server="Server2"), {"context": "whatever"}, - {"msg": "Server2"}) - driver.send(rpc_target(fanout=True), {"context": "whatever"}, - {"msg": "Fanout"}) - # FakeBroker should evenly distribute these across the servers - driver.send(rpc_target(server=None), {"context": "whatever"}, - {"msg": "Anycast1"}) - driver.send(rpc_target(server=None), {"context": "whatever"}, - {"msg": "Anycast2"}) - - expected = [] - for n in targets_priorities: - # this is how the notifier creates an address: - topic = "{}.{}".format(n[0].topic, n[1]) - 
target = oslo_messaging.Target(topic=topic) - driver.send_notification(target, {"context": "whatever"}, - {"msg": topic}, 2.0) - expected.append(topic) - - for li in rl: - li.join(timeout=30) - - # anycast will not evenly distribute an odd number of msgs - predicate = lambda: len(expected) == (nl[0].messages.qsize() + - nl[1].messages.qsize()) - _wait_until(predicate, 30) - for li in nl: - li.kill(timeout=30) - - s1_payload = [m.message.get('msg') for m in rl[0].get_messages()] - s2_payload = [m.message.get('msg') for m in rl[1].get_messages()] - - self.assertTrue("Server1" in s1_payload and - "Server2" not in s1_payload) - self.assertTrue("Server2" in s2_payload and - "Server1" not in s2_payload) - self.assertEqual(s1_payload.count("Fanout"), 1) - self.assertEqual(s2_payload.count("Fanout"), 1) - self.assertEqual((s1_payload + s2_payload).count("Anycast1"), 1) - self.assertEqual((s1_payload + s2_payload).count("Anycast2"), 1) - - n1_payload = [m.message.get('msg') for m in nl[0].get_messages()] - n2_payload = [m.message.get('msg') for m in nl[1].get_messages()] - - self.assertEqual((n1_payload + n2_payload).sort(), expected.sort()) - - driver.cleanup() - broker.stop() - return broker.message_log - - def test_routable_address(self): - # verify routable address mode - self.config(addressing_mode='routable', group="oslo_messaging_amqp") - _opts = self.conf.oslo_messaging_amqp - notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), - (oslo_messaging.Target(topic="test-topic"), 'error'), - (oslo_messaging.Target(topic="test-topic"), 'debug')] - - msgs = self._address_test(oslo_messaging.Target(exchange="ex", - topic="test-topic"), - notifications) - addrs = [m.address for m in msgs] - - notify_addrs = [a for a in addrs - if a.startswith(_opts.notify_address_prefix)] - self.assertEqual(len(notify_addrs), len(notifications)) - # expect all notifications to be 'anycast' - self.assertEqual(len(notifications), - len([a for a in notify_addrs - if 
_opts.anycast_address in a])) - - rpc_addrs = [a for a in addrs - if a.startswith(_opts.rpc_address_prefix)] - # 2 anycast messages - self.assertEqual(2, - len([a for a in rpc_addrs - if _opts.anycast_address in a])) - # 1 fanout sent - self.assertEqual(1, - len([a for a in rpc_addrs - if _opts.multicast_address in a])) - # 2 unicast messages (1 for each server) - self.assertEqual(2, - len([a for a in rpc_addrs - if _opts.unicast_address in a])) - - def test_legacy_address(self): - # verify legacy address mode - self.config(addressing_mode='legacy', group="oslo_messaging_amqp") - _opts = self.conf.oslo_messaging_amqp - notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), - (oslo_messaging.Target(topic="test-topic"), 'error'), - (oslo_messaging.Target(topic="test-topic"), 'debug')] - - msgs = self._address_test(oslo_messaging.Target(exchange="ex", - topic="test-topic"), - notifications) - addrs = [m.address for m in msgs] - - server_addrs = [a for a in addrs - if a.startswith(_opts.server_request_prefix)] - broadcast_addrs = [a for a in addrs - if a.startswith(_opts.broadcast_prefix)] - group_addrs = [a for a in addrs - if a.startswith(_opts.group_request_prefix)] - # 2 server address messages sent - self.assertEqual(len(server_addrs), 2) - # 1 fanout address message sent - self.assertEqual(len(broadcast_addrs), 1) - # group messages: 2 rpc + all notifications - self.assertEqual(len(group_addrs), - 2 + len(notifications)) - - def test_address_options(self): - # verify addressing configuration options - self.config(addressing_mode='routable', group="oslo_messaging_amqp") - self.config(rpc_address_prefix="RPC-PREFIX", - group="oslo_messaging_amqp") - self.config(notify_address_prefix="NOTIFY-PREFIX", - group="oslo_messaging_amqp") - - self.config(multicast_address="MULTI-CAST", - group="oslo_messaging_amqp") - self.config(unicast_address="UNI-CAST", - group="oslo_messaging_amqp") - self.config(anycast_address="ANY-CAST", - group="oslo_messaging_amqp") 
- - self.config(default_notification_exchange="NOTIFY-EXCHANGE", - group="oslo_messaging_amqp") - self.config(default_rpc_exchange="RPC-EXCHANGE", - group="oslo_messaging_amqp") - - notifications = [(oslo_messaging.Target(topic="test-topic"), 'info'), - (oslo_messaging.Target(topic="test-topic"), 'error'), - (oslo_messaging.Target(topic="test-topic"), 'debug')] - - msgs = self._address_test(oslo_messaging.Target(exchange=None, - topic="test-topic"), - notifications) - addrs = [m.address for m in msgs] - - notify_addrs = [a for a in addrs - if a.startswith("NOTIFY-PREFIX")] - self.assertEqual(len(notify_addrs), len(notifications)) - # expect all notifications to be 'anycast' - self.assertEqual(len(notifications), - len([a for a in notify_addrs - if "ANY-CAST" in a])) - # and all should contain the default exchange: - self.assertEqual(len(notifications), - len([a for a in notify_addrs - if "NOTIFY-EXCHANGE" in a])) - - rpc_addrs = [a for a in addrs - if a.startswith("RPC-PREFIX")] - # 2 RPC anycast messages - self.assertEqual(2, - len([a for a in rpc_addrs - if "ANY-CAST" in a])) - # 1 RPC fanout sent - self.assertEqual(1, - len([a for a in rpc_addrs - if "MULTI-CAST" in a])) - # 2 RPC unicast messages (1 for each server) - self.assertEqual(2, - len([a for a in rpc_addrs - if "UNI-CAST" in a])) - - self.assertEqual(len(rpc_addrs), - len([a for a in rpc_addrs - if "RPC-EXCHANGE" in a])) - - def _dynamic_test(self, product): - # return the addresser used when connected to 'product' - broker = FakeBroker(self.conf.oslo_messaging_amqp, - product=product) - broker.start() - url = oslo_messaging.TransportURL.parse(self.conf, - "amqp://%s:%d" % - (broker.host, broker.port)) - driver = amqp_driver.ProtonDriver(self.conf, url) - - # need to send a message to initate the connection to the broker - target = oslo_messaging.Target(topic="test-topic", - server="Server") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - driver.send(target, 
{"context": True}, {"msg": "value"}, - wait_for_reply=False) - listener.join(timeout=30) - - addresser = driver._ctrl.addresser - driver.cleanup() - broker.stop() # clears the driver's addresser - return addresser - - def test_dynamic_addressing(self): - # simply check that the correct addresser is provided based on the - # identity of the messaging back-end - self.config(addressing_mode='dynamic', group="oslo_messaging_amqp") - self.assertIsInstance(self._dynamic_test("router"), - RoutableAddresser) - self.assertIsInstance(self._dynamic_test("qpid-cpp"), - LegacyAddresser) - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestMessageRetransmit(_AmqpBrokerTestCase): - # test message is retransmitted if safe to do so - def _test_retransmit(self, nack_method): - self._nack_count = 2 - - def _on_message(message, handle, link): - if self._nack_count: - self._nack_count -= 1 - nack_method(link, handle) - else: - self._broker.forward_message(message, handle, link) - - self._broker.on_message = _on_message - self._broker.start() - self.config(link_retry_delay=1, pre_settled=[], - group="oslo_messaging_amqp") - driver = amqp_driver.ProtonDriver(self.conf, self._broker_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread(driver.listen(target, - None, - None)._poll_style_listener, - 1) - try: - rc = driver.send(target, {"context": "whatever"}, - {"method": "echo", "id": "blah"}, - wait_for_reply=True, - retry=2) # initial send + up to 2 resends - except Exception: - # Some test runs are expected to raise an exception, - # clean up the listener since no message was received - listener.kill(timeout=30) - raise - else: - self.assertIsNotNone(rc) - self.assertEqual(0, self._nack_count) - self.assertEqual(rc.get('correlation-id'), 'blah') - listener.join(timeout=30) - finally: - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_released(self): - # should retry and succeed - self._test_retransmit(lambda 
link, handle: - link.message_released(handle)) - - def test_modified(self): - # should retry and succeed - self._test_retransmit(lambda link, handle: - link.message_modified(handle, False, False, {})) - - def test_modified_failed(self): - # since delivery_failed is set to True, should fail - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self._test_retransmit, - lambda link, handle: - link.message_modified(handle, True, False, {})) - - def test_rejected(self): - # rejected - should fail - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - self._test_retransmit, - lambda link, handle: - link.message_rejected(handle, {})) - - -@testtools.skipUnless(SSL_ENABLED, "OpenSSL not supported") -class TestSSL(TestFailover): - """Test the driver's OpenSSL integration""" - - def setUp(self): - self._broker = None - # Create the CA, server, and client SSL certificates: - self._tmpdir = tempfile.mkdtemp(prefix='amqp1') - files = ['ca_key', 'ca_cert', 's_key', 's_req', 's_cert', 's2_key', - 's2_req', 's2_cert', 'c_key', 'c_req', 'c_cert', 'bad_cert', - 'bad_req', 'bad_key'] - conf = dict(zip(files, [os.path.join(self._tmpdir, "%s.pem" % f) - for f in files])) - conf['pw'] = 'password' - conf['s_name'] = '127.0.0.1' - conf['s2_name'] = '127.0.0.2' - conf['c_name'] = 'client.com' - - self._ssl_config = conf - ssl_setup = [ - # create self-signed CA certificate: - Template('openssl req -x509 -nodes -newkey rsa:2048' - ' -subj "/CN=Trusted.CA.com" -keyout ${ca_key}' - ' -out ${ca_cert}').substitute(conf), - # create Server keys and certificates: - Template('openssl genrsa -out ${s_key} 2048').substitute(conf), - Template('openssl req -new -key ${s_key} -subj /CN=${s_name}' - ' -passin pass:${pw} -out ${s_req}').substitute(conf), - Template('openssl x509 -req -in ${s_req} -CA ${ca_cert}' - ' -CAkey ${ca_key} -CAcreateserial -out' - ' ${s_cert}').substitute(conf), - Template('openssl genrsa -out ${s2_key} 2048').substitute(conf), - Template('openssl req -new -key 
${s2_key} -subj /CN=${s2_name}' - ' -passin pass:${pw} -out ${s2_req}').substitute(conf), - Template('openssl x509 -req -in ${s2_req} -CA ${ca_cert}' - ' -CAkey ${ca_key} -CAcreateserial -out' - ' ${s2_cert}').substitute(conf), - # create a "bad" Server cert for testing CN validation: - Template('openssl genrsa -out ${bad_key} 2048').substitute(conf), - Template('openssl req -new -key ${bad_key} -subj /CN=Invalid' - ' -passin pass:${pw} -out ${bad_req}').substitute(conf), - Template('openssl x509 -req -in ${bad_req} -CA ${ca_cert}' - ' -CAkey ${ca_key} -CAcreateserial -out' - ' ${bad_cert}').substitute(conf), - # create Client key and certificate for client authentication: - Template('openssl genrsa -out ${c_key} 2048').substitute(conf), - Template('openssl req -new -key ${c_key} -subj /CN=${c_name}' - ' -passin pass:${pw} -out' - ' ${c_req}').substitute(conf), - Template('openssl x509 -req -in ${c_req} -CA ${ca_cert}' - ' -CAkey ${ca_key} -CAcreateserial -out' - ' ${c_cert}').substitute(conf) - ] - for cmd in ssl_setup: - try: - subprocess.check_call(args=shlex.split(cmd)) - except Exception: - shutil.rmtree(self._tmpdir, ignore_errors=True) - self._tmpdir = None - self.skipTest("OpenSSL tools not installed - skipping") - - super().setUp() - - self.config(ssl_ca_file=self._ssl_config['ca_cert'], - group='oslo_messaging_amqp') - - def _gen_brokers(self): - s2_conf = self._ssl_config.copy() - for item in ['name', 'key', 'req', 'cert']: - s2_conf["s_%s" % item] = s2_conf["s2_%s" % item] - - return [FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config), - FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=s2_conf['s_name'], - ssl_config=s2_conf)] - - def _gen_transport_url(self, hosts): - url = "amqp://%s" % (",".join(map(lambda x: "%s:%d" % - (x.hostname, x.port), hosts))) - return oslo_messaging.TransportURL.parse(self.conf, url) - - def _ssl_server_ok(self, url): - self._broker.start() - tport_url = 
oslo_messaging.TransportURL.parse(self.conf, url) - driver = amqp_driver.ProtonDriver(self.conf, tport_url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - - driver.send(target, - {"context": "whatever"}, - {"method": "echo", "a": "b"}, - wait_for_reply=True, - timeout=30) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_server_ok(self): - # test client authenticates server - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = "amqp://%s:%d" % (self._broker.host, self._broker.port) - self._ssl_server_ok(url) - - def test_server_ignore_vhost_ok(self): - # test client authenticates server and ignores vhost - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = "amqp://%s:%d/my-vhost" % (self._broker.host, self._broker.port) - self._ssl_server_ok(url) - - def test_server_check_vhost_ok(self): - # test client authenticates server using vhost as CN - # Use 'Invalid' from bad_cert CN - self.config(ssl_verify_vhost=True, group='oslo_messaging_amqp') - self._ssl_config['s_cert'] = self._ssl_config['bad_cert'] - self._ssl_config['s_key'] = self._ssl_config['bad_key'] - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = "amqp://%s:%d/Invalid" % (self._broker.host, self._broker.port) - self._ssl_server_ok(url) - - @mock.patch('ssl.get_default_verify_paths') - def test_server_ok_with_ssl_set_in_transport_url(self, mock_verify_paths): - # test client authenticates server - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = oslo_messaging.TransportURL.parse( - self.conf, "amqp://%s:%d?ssl=1" % 
(self._broker.host, - self._broker.port)) - self._broker.start() - mock_verify_paths.return_value = mock.Mock( - cafile=self._ssl_config['ca_cert']) - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - - driver.send(target, - {"context": "whatever"}, - {"method": "echo", "a": "b"}, - wait_for_reply=True, - timeout=30) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def test_bad_server_fail(self): - # test client does not connect to invalid server - self._ssl_config['s_cert'] = self._ssl_config['bad_cert'] - self._ssl_config['s_key'] = self._ssl_config['bad_key'] - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % - (self._broker.host, - self._broker.port)) - self._broker.start() - - self.config(ssl_ca_file=self._ssl_config['ca_cert'], - group='oslo_messaging_amqp') - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - self.assertRaises(oslo_messaging.MessageDeliveryFailure, - driver.send, target, - {"context": "whatever"}, - {"method": "echo", "a": "b"}, - wait_for_reply=False, - retry=1) - driver.cleanup() - - def test_client_auth_ok(self): - # test server authenticates client - self._ssl_config['authenticate_client'] = True - self._broker = FakeBroker(self.conf.oslo_messaging_amqp, - sock_addr=self._ssl_config['s_name'], - ssl_config=self._ssl_config) - url = oslo_messaging.TransportURL.parse(self.conf, "amqp://%s:%d" % - (self._broker.host, - self._broker.port)) - self._broker.start() - - self.config(ssl_ca_file=self._ssl_config['ca_cert'], - ssl_cert_file=self._ssl_config['c_cert'], - ssl_key_file=self._ssl_config['c_key'], - ssl_key_password=self._ssl_config['pw'], - 
group='oslo_messaging_amqp') - driver = amqp_driver.ProtonDriver(self.conf, url) - target = oslo_messaging.Target(topic="test-topic") - listener = _ListenerThread( - driver.listen(target, None, None)._poll_style_listener, 1) - - driver.send(target, - {"context": "whatever"}, - {"method": "echo", "a": "b"}, - wait_for_reply=True, - timeout=30) - listener.join(timeout=30) - self.assertFalse(listener.is_alive()) - driver.cleanup() - - def tearDown(self): - if self._broker: - self._broker.stop() - self._broker = None - if self._tmpdir: - shutil.rmtree(self._tmpdir, ignore_errors=True) - super().tearDown() - - -@testtools.skipUnless(pyngus, "proton modules not present") -class TestVHost(_AmqpBrokerTestCaseAuto): - """Verify the pseudo virtual host behavior""" - - def _vhost_test(self): - """Verify that all messaging for a particular vhost stays on that vhost - """ - self.config(pseudo_vhost=True, - group="oslo_messaging_amqp") - - vhosts = ["None", "HOSTA", "HOSTB", "HOSTC"] - target = oslo_messaging.Target(topic="test-topic") - fanout = oslo_messaging.Target(topic="test-topic", fanout=True) - - listeners = {} - ldrivers = {} - sdrivers = {} - - replies = {} - msgs = {} - - for vhost in vhosts: - url = copy.copy(self._broker_url) - url.virtual_host = vhost if vhost != "None" else None - ldriver = amqp_driver.ProtonDriver(self.conf, url) - listeners[vhost] = _ListenerThread( - ldriver.listen(target, None, None)._poll_style_listener, - 10) - ldrivers[vhost] = ldriver - sdrivers[vhost] = amqp_driver.ProtonDriver(self.conf, url) - replies[vhost] = [] - msgs[vhost] = [] - - # send a fanout and a single rpc call to each listener - for vhost in vhosts: - if vhost == "HOSTC": # expect no messages to HOSTC - continue - sdrivers[vhost].send(fanout, - {"context": vhost}, - {"vhost": vhost, - "fanout": True, - "id": vhost}) - replies[vhost].append(sdrivers[vhost].send(target, - {"context": vhost}, - {"method": "echo", - "id": vhost}, - wait_for_reply=True)) - time.sleep(1) - - for 
vhost in vhosts: - msgs[vhost] += listeners[vhost].get_messages() - if vhost == "HOSTC": - # HOSTC should get nothing - self.assertEqual(0, len(msgs[vhost])) - self.assertEqual(0, len(replies[vhost])) - continue - - self.assertEqual(2, len(msgs[vhost])) - for m in msgs[vhost]: - # the id must match the vhost - self.assertEqual(vhost, m.message.get("id")) - self.assertEqual(1, len(replies[vhost])) - for m in replies[vhost]: - # same for correlation id - self.assertEqual(vhost, m.get("correlation-id")) - - for vhost in vhosts: - listeners[vhost].kill() - ldrivers[vhost].cleanup - sdrivers[vhost].cleanup() - - def test_vhost_routing(self): - """Test vhost using routable addresses - """ - self.config(addressing_mode='routable', group="oslo_messaging_amqp") - self._vhost_test() - - def test_vhost_legacy(self): - """Test vhost using legacy addresses - """ - self.config(addressing_mode='legacy', group="oslo_messaging_amqp") - self._vhost_test() - - -class FakeBroker(threading.Thread): - """A test AMQP message 'broker'.""" - - if pyngus: - class Connection(pyngus.ConnectionEventHandler): - """A single AMQP connection.""" - - def __init__(self, server, socket_, name, product, - sasl_mechanisms, user_credentials, - sasl_config_dir, sasl_config_name): - """Create a Connection using socket_.""" - self.socket = socket_ - self.name = name - self.server = server - self.sasl_mechanisms = sasl_mechanisms - self.user_credentials = user_credentials - properties = {'x-server': True} - # setup SASL: - if self.sasl_mechanisms: - properties['x-sasl-mechs'] = self.sasl_mechanisms - if "ANONYMOUS" not in self.sasl_mechanisms: - properties['x-require-auth'] = True - if sasl_config_dir: - properties['x-sasl-config-dir'] = sasl_config_dir - if sasl_config_name: - properties['x-sasl-config-name'] = sasl_config_name - # setup SSL - if self.server._ssl_config: - ssl = self.server._ssl_config - properties['x-ssl-server'] = True - properties['x-ssl-identity'] = (ssl['s_cert'], - ssl['s_key'], - 
ssl['pw']) - # check for client authentication - if ssl.get('authenticate_client'): - properties['x-ssl-ca-file'] = ssl['ca_cert'] - properties['x-ssl-verify-mode'] = 'verify-peer' - properties['x-ssl-peer-name'] = ssl['c_name'] - # misc connection properties - if product: - properties['properties'] = {'product': product} - - self.connection = server.container.create_connection( - name, self, properties) - self.connection.user_context = self - if pyngus.VERSION < (2, 0, 0): - # older versions of pyngus don't recognize the sasl - # connection properties, so configure them manually: - if sasl_mechanisms: - self.connection.pn_sasl.mechanisms(sasl_mechanisms) - self.connection.pn_sasl.server() - self.connection.open() - self.sender_links = set() - self.receiver_links = set() - self.dead_links = set() - - def destroy(self): - """Destroy the test connection.""" - for link in self.sender_links | self.receiver_links: - link.destroy() - self.sender_links.clear() - self.receiver_links.clear() - self.dead_links.clear() - self.connection.destroy() - self.connection = None - self.socket.close() - self.socket = None - - def fileno(self): - """Allows use of this in a select() call.""" - return self.socket.fileno() - - def process_input(self): - """Called when socket is read-ready.""" - try: - pyngus.read_socket_input(self.connection, self.socket) - self.connection.process(time.time()) - except OSError: - self._socket_error() - - def send_output(self): - """Called when socket is write-ready.""" - try: - pyngus.write_socket_output(self.connection, - self.socket) - self.connection.process(time.time()) - except OSError: - self._socket_error() - - def _socket_error(self): - self.connection.close_input() - self.connection.close_output() - # the broker will clean up in its main loop - - # Pyngus ConnectionEventHandler callbacks: - - def connection_active(self, connection): - self.server.connection_count += 1 - - def connection_remote_closed(self, connection, reason): - """Peer has 
closed the connection.""" - self.connection.close() - - def connection_closed(self, connection): - """Connection close completed.""" - self.server.connection_count -= 1 - - def connection_failed(self, connection, error): - """Connection failure detected.""" - self.connection_closed(connection) - - def sender_requested(self, connection, link_handle, - name, requested_source, properties): - """Create a new message source.""" - addr = requested_source or "source-" + uuid.uuid4().hex - link = FakeBroker.SenderLink(self.server, self, - link_handle, addr) - self.sender_links.add(link) - - def receiver_requested(self, connection, link_handle, - name, requested_target, properties): - """Create a new message consumer.""" - addr = requested_target or "target-" + uuid.uuid4().hex - FakeBroker.ReceiverLink(self.server, self, - link_handle, addr) - - def sasl_step(self, connection, pn_sasl): - # only called if not using Cyrus SASL - if 'PLAIN' in self.sasl_mechanisms: - credentials = pn_sasl.recv() - if not credentials: - return # wait until some arrives - if credentials not in self.user_credentials: - # failed - return pn_sasl.done(pn_sasl.AUTH) - pn_sasl.done(pn_sasl.OK) - - class SenderLink(pyngus.SenderEventHandler): - """An AMQP sending link.""" - def __init__(self, server, conn, handle, src_addr=None): - self.server = server - self.conn = conn - cnn = conn.connection - self.link = cnn.accept_sender(handle, - source_override=src_addr, - event_handler=self) - conn.sender_links.add(self) - self.link.open() - self.routed = False - - def destroy(self): - """Destroy the link.""" - conn = self.conn - self.conn = None - conn.sender_links.remove(self) - conn.dead_links.discard(self) - if self.link: - self.link.destroy() - self.link = None - - def send_message(self, message): - """Send a message over this link.""" - def pyngus_callback(link, handle, state, info): - if state == pyngus.SenderLink.ACCEPTED: - self.server.sender_link_ack_count += 1 - elif state == 
pyngus.SenderLink.RELEASED: - self.server.sender_link_requeue_count += 1 - - self.link.send(message, delivery_callback=pyngus_callback) - - def _cleanup(self): - if self.routed: - self.server.remove_route(self.link.source_address, - self) - self.routed = False - self.conn.dead_links.add(self) - - # Pyngus SenderEventHandler callbacks: - - def sender_active(self, sender_link): - self.server.sender_link_count += 1 - self.server.add_route(self.link.source_address, self) - self.routed = True - self.server.on_sender_active(sender_link) - - def sender_remote_closed(self, sender_link, error): - self.link.close() - - def sender_closed(self, sender_link): - self.server.sender_link_count -= 1 - self._cleanup() - - def sender_failed(self, sender_link, error): - self.sender_closed(sender_link) - - class ReceiverLink(pyngus.ReceiverEventHandler): - """An AMQP Receiving link.""" - def __init__(self, server, conn, handle, addr=None): - self.server = server - self.conn = conn - cnn = conn.connection - self.link = cnn.accept_receiver(handle, - target_override=addr, - event_handler=self) - conn.receiver_links.add(self) - self.link.open() - - def destroy(self): - """Destroy the link.""" - conn = self.conn - self.conn = None - conn.receiver_links.remove(self) - conn.dead_links.discard(self) - if self.link: - self.link.destroy() - self.link = None - - # ReceiverEventHandler callbacks: - - def receiver_active(self, receiver_link): - self.server.receiver_link_count += 1 - self.server.on_receiver_active(receiver_link) - - def receiver_remote_closed(self, receiver_link, error): - self.link.close() - - def receiver_closed(self, receiver_link): - self.server.receiver_link_count -= 1 - self.conn.dead_links.add(self) - - def receiver_failed(self, receiver_link, error): - self.receiver_closed(receiver_link) - - def message_received(self, receiver_link, message, handle): - """Forward this message out the proper sending link.""" - self.server.on_message(message, handle, receiver_link) - if 
self.link.capacity < 1: - self.server.on_credit_exhausted(self.link) - - def __init__(self, cfg, - sock_addr="", sock_port=0, - product=None, - default_exchange="Test-Exchange", - sasl_mechanisms="ANONYMOUS", - user_credentials=None, - sasl_config_dir=None, - sasl_config_name=None, - ssl_config=None): - """Create a fake broker listening on sock_addr:sock_port.""" - if not pyngus: - raise AssertionError("pyngus module not present") - threading.Thread.__init__(self) - self._product = product - self._sasl_mechanisms = sasl_mechanisms - self._sasl_config_dir = sasl_config_dir - self._sasl_config_name = sasl_config_name - self._user_credentials = user_credentials - self._ssl_config = ssl_config - self._wakeup_pipe = os.pipe() - self._my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._my_socket.bind((sock_addr, sock_port)) - self.host, self.port = self._my_socket.getsockname() - self.container = pyngus.Container("test_server_%s:%d" - % (self.host, self.port)) - - # create an addresser using the test client's config and expected - # message bus so the broker can parse the message addresses - af = AddresserFactory(default_exchange, - cfg.addressing_mode, - legacy_server_prefix=cfg.server_request_prefix, - legacy_broadcast_prefix=cfg.broadcast_prefix, - legacy_group_prefix=cfg.group_request_prefix, - rpc_prefix=cfg.rpc_address_prefix, - notify_prefix=cfg.notify_address_prefix, - multicast=cfg.multicast_address, - unicast=cfg.unicast_address, - anycast=cfg.anycast_address) - props = {'product': product} if product else {} - self._addresser = af(props) - - self._connections = {} - self._sources = {} - self._pause = eventletutils.Event() - # count of messages forwarded, by messaging pattern - self.direct_count = 0 - self.topic_count = 0 - self.fanout_count = 0 - self.fanout_sent_count = 0 - self.dropped_count = 0 - # counts for active links and connections: - self.connection_count = 0 - self.sender_link_count = 0 - self.receiver_link_count = 0 - 
self.sender_link_ack_count = 0 - self.sender_link_requeue_count = 0 - # log of all messages received by the broker - self.message_log = [] - # callback hooks - self.on_sender_active = lambda link: None - self.on_receiver_active = lambda link: link.add_capacity(10) - self.on_credit_exhausted = lambda link: link.add_capacity(10) - self.on_message = lambda message, handle, link: self.forward_message( - message, handle, link) - - def start(self): - """Start the server.""" - LOG.debug("Starting Test Broker on %s:%d", self.host, self.port) - self._shutdown = False - self._closing = False - self.daemon = True - self._pause.set() - self._my_socket.listen(10) - super().start() - - def pause(self): - self._pause.clear() - os.write(self._wakeup_pipe[1], b'!') - - def unpause(self): - self._pause.set() - - def stop(self, clean=False): - """Stop the server.""" - # If clean is True, attempt a clean shutdown by closing all open - # links/connections first. Otherwise force an immediate disconnect - LOG.debug("Stopping test Broker %s:%d", self.host, self.port) - if clean: - self._closing = 1 - else: - self._shutdown = True - self._pause.set() - os.write(self._wakeup_pipe[1], b'!') - self.join() - LOG.debug("Test Broker %s:%d stopped", self.host, self.port) - - def run(self): - """Process I/O and timer events until the broker is stopped.""" - LOG.debug("Test Broker on %s:%d started", self.host, self.port) - while not self._shutdown: - self._pause.wait() - readers, writers, timers = self.container.need_processing() - - # map pyngus Connections back to _TestConnections: - readfd = [c.user_context for c in readers] - readfd.extend([self._my_socket, self._wakeup_pipe[0]]) - writefd = [c.user_context for c in writers] - - timeout = None - if timers: - # [0] == next expiring timer - deadline = timers[0].next_tick - now = time.time() - timeout = 0 if deadline <= now else deadline - now - - readable, writable, ignore = select.select(readfd, - writefd, - [], - timeout) - worked = set() - for 
r in readable: - if r is self._my_socket: - # new inbound connection request received - sock, addr = self._my_socket.accept() - if not self._closing: - # create a new Connection for it: - name = str(addr) - conn = FakeBroker.Connection(self, sock, name, - self._product, - self._sasl_mechanisms, - self._user_credentials, - self._sasl_config_dir, - self._sasl_config_name) - self._connections[conn.name] = conn - else: - sock.close() # drop it - elif r is self._wakeup_pipe[0]: - os.read(self._wakeup_pipe[0], 512) - else: - r.process_input() - worked.add(r) - - for t in timers: - now = time.time() - if t.next_tick > now: - break - t.process(now) - conn = t.user_context - worked.add(conn) - - for w in writable: - w.send_output() - worked.add(w) - - # clean up any closed connections or links: - while worked: - conn = worked.pop() - if conn.connection.closed: - del self._connections[conn.name] - conn.destroy() - else: - while conn.dead_links: - conn.dead_links.pop().destroy() - - if self._closing and not self._connections: - self._shutdown = True - elif self._closing == 1: - # start closing connections - self._closing = 2 - for conn in self._connections.values(): - conn.connection.close() - - # Shutting down. Any open links are just disconnected - the peer will - # see a socket close. - self._my_socket.close() - for conn in self._connections.values(): - conn.destroy() - self._connections = None - self.container.destroy() - self.container = None - return 0 - - def add_route(self, address, link): - # route from address -> link[, link ...] 
- if address not in self._sources: - self._sources[address] = [link] - elif link not in self._sources[address]: - self._sources[address].append(link) - - def remove_route(self, address, link): - if address in self._sources: - if link in self._sources[address]: - self._sources[address].remove(link) - if not self._sources[address]: - del self._sources[address] - - def forward_message(self, message, handle, rlink): - # returns True if message was routed - self.message_log.append(message) - dest = message.address - if dest not in self._sources: - # can't forward - self.dropped_count += 1 - # observe magic "don't ack" address - if '!no-ack!' not in dest: - rlink.message_released(handle) - return - - LOG.debug("Forwarding [%s]", dest) - # route "behavior" determined by address prefix: - if self._addresser._is_multicast(dest): - self.fanout_count += 1 - for link in self._sources[dest]: - self.fanout_sent_count += 1 - LOG.debug("Broadcast to %s", dest) - link.send_message(message) - elif self._addresser._is_anycast(dest): - # round-robin: - self.topic_count += 1 - link = self._sources[dest].pop(0) - link.send_message(message) - LOG.debug("Send to %s", dest) - self._sources[dest].append(link) - else: - # unicast: - self.direct_count += 1 - LOG.debug("Unicast to %s", dest) - self._sources[dest][0].send_message(message) - rlink.message_accepted(handle) diff --git a/oslo_messaging/tests/test_opts.py b/oslo_messaging/tests/test_opts.py index 749276932..b28fff3f6 100644 --- a/oslo_messaging/tests/test_opts.py +++ b/oslo_messaging/tests/test_opts.py @@ -29,11 +29,10 @@ from oslo_messaging.tests import utils as test_utils class OptsTestCase(test_utils.BaseTestCase): def _test_list_opts(self, result): - self.assertEqual(5, len(result)) + self.assertEqual(4, len(result)) groups = [g for (g, l) in result] self.assertIn(None, groups) - self.assertIn('oslo_messaging_amqp', groups) self.assertIn('oslo_messaging_notifications', groups) self.assertIn('oslo_messaging_rabbit', groups) 
self.assertIn('oslo_messaging_kafka', groups) diff --git a/releasenotes/notes/remove-amqp1-c924ea548dadffaa.yaml b/releasenotes/notes/remove-amqp1-c924ea548dadffaa.yaml new file mode 100644 index 000000000..8434c4e3d --- /dev/null +++ b/releasenotes/notes/remove-amqp1-c924ea548dadffaa.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The AMQP1 driver, which was deprecated in 14.1.0, was removed due to + limited usage and lack of support on recent distributions. Use any of + the other supported drivers, such as RabbitMQ or Kafka. diff --git a/setup.cfg b/setup.cfg index 57573b488..d7e164b18 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,9 +25,7 @@ classifier = [extras] # package dependencies for optional (non-rabbitmq) messaging drivers. # projects can test-depend on oslo.messaging[] -# e.g.: oslo.messaging[kafka,amqp1] -amqp1 = - pyngus>=2.2.0 # Apache-2.0 +# e.g.: oslo.messaging[kafka] kafka = confluent-kafka>=1.3.0 # Apache-2.0 @@ -41,7 +39,6 @@ console_scripts = oslo.messaging.drivers = rabbit = oslo_messaging._drivers.impl_rabbit:RabbitDriver - amqp = oslo_messaging._drivers.impl_amqp1:ProtonDriver # This driver is supporting for only notification usage kafka = oslo_messaging._drivers.impl_kafka:KafkaDriver diff --git a/tools/test-setup.sh b/tools/test-setup.sh deleted file mode 100755 index 484fbe7e8..000000000 --- a/tools/test-setup.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -xe - -# This script will be run by OpenStack CI before unit tests are run, -# it sets up the test system as needed. -# Developer should setup their test systems in a similar way. - -# This setup for amqp1 needs to be run by a user that can run sudo. - -# qdrouterd needs to be installed from qpid/testing repo in Ubuntu. -# bindep does not allow setting up another repo, so we just install -# this package here. 
- -# inspired from project-config install-distro-packages.sh -#if apt-get -v >/dev/null 2>&1 ; then -# sudo add-apt-repository -y ppa:qpid/testing -# sudo apt-get -qq update -# sudo PATH=/usr/sbin:/sbin:$PATH DEBIAN_FRONTEND=noninteractive \ -# apt-get -q --option "Dpkg::Options::=--force-confold" \ -# --assume-yes install qdrouterd -#fi