Sync common modules from Oslo

Oslo version:
commit  c8b3dc04de9e9946afab1a18617026e35cfa88fb
Merge:  b9d6589 d78b633
Author: Jenkins <jenkins@review.openstack.org>
Date:   Fri May 30 23:37:27 2014 +0000
Name:   Merge "Fixes a simple spelling mistake"

Change-Id: Ib40f7e4784174dfa848b45e577f8ba4a99334bf3
Implements: blueprint update-oslo-code
Authored by vponomaryov on 2014-06-04 19:48:57 +03:00; committed by Valeriy Ponomaryov
parent 45e9fe6cbb
commit 139c3609de
62 changed files with 4574 additions and 1787 deletions

View File

@@ -26,7 +26,6 @@ from manila.api.openstack import wsgi
 from manila.api import xmlutil
 from manila import exception
-from manila.openstack.common import exception as common_exception
 from manila.openstack.common import importutils
 from manila.openstack.common import log as logging
 import manila.policy
@@ -373,7 +372,7 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
                             (package, relpkg, dname))
                 try:
                     ext = importutils.import_class(ext_name)
-                except common_exception.NotFound:
+                except ImportError:
                     # extension() doesn't exist on it, so we'll explore
                     # the directory for ourselves
                     subdirs.append(dname)
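
For context on the except-clause change above: the synced importutils (further down in this commit) raises a plain ImportError when a module or class cannot be imported, and the common exception module that provided NotFound is deleted outright. A minimal sketch of the behavior the extension loader now relies on; the extension name here is hypothetical:

    from manila.openstack.common import importutils

    try:
        # Raises ImportError when the module or the named class is absent.
        ext = importutils.import_class('manila.api.contrib.foo.Foo')
    except ImportError:
        pass  # fall back to scanning the directory, as the loader does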

View File

@@ -6,7 +6,7 @@ A number of modules from openstack-common are imported into this project.
 These modules are "incubating" in openstack-common and are kept in sync
 with the help of openstack-common's update.py script. See:
 
-  http://wiki.openstack.org/CommonLibrary#Incubation
+  https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator
 
 The copy of the code should never be directly modified here. Please
 always update openstack-common first and then run the script to copy

View File

@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Red Hat, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -13,3 +10,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
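
For illustration, what the six.add_move() registration above enables: test code can import mox through six and transparently get mox3 on Python 3. A sketch, not part of the diff:

    import six

    six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

    # Resolves to the mox package on Python 2 and to mox3.mox on Python 3.
    from six.moves import mox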

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -27,39 +25,60 @@ import uuid
 
 def generate_request_id():
-    return 'req-' + str(uuid.uuid4())
+    return b'req-' + str(uuid.uuid4()).encode('ascii')
 
 
 class RequestContext(object):
 
-    """
-    Stores information about the security context under which the user
+    """Helper class to represent useful information about a request context.
+
+    Stores information about the security context under which the user
     accesses the system, as well as additional request information.
     """
 
-    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
-                 read_only=False, show_deleted=False, request_id=None):
+    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+                 user_domain=None, project_domain=None, is_admin=False,
+                 read_only=False, show_deleted=False, request_id=None,
+                 instance_uuid=None):
         self.auth_token = auth_token
         self.user = user
         self.tenant = tenant
+        self.domain = domain
+        self.user_domain = user_domain
+        self.project_domain = project_domain
        self.is_admin = is_admin
         self.read_only = read_only
         self.show_deleted = show_deleted
+        self.instance_uuid = instance_uuid
         if not request_id:
             request_id = generate_request_id()
         self.request_id = request_id
 
     def to_dict(self):
+        user_idt = (
+            self.user_idt_format.format(user=self.user or '-',
+                                        tenant=self.tenant or '-',
+                                        domain=self.domain or '-',
+                                        user_domain=self.user_domain or '-',
+                                        p_domain=self.project_domain or '-'))
+
         return {'user': self.user,
                 'tenant': self.tenant,
+                'domain': self.domain,
+                'user_domain': self.user_domain,
+                'project_domain': self.project_domain,
                 'is_admin': self.is_admin,
                 'read_only': self.read_only,
                 'show_deleted': self.show_deleted,
                 'auth_token': self.auth_token,
-                'request_id': self.request_id}
+                'request_id': self.request_id,
+                'instance_uuid': self.instance_uuid,
+                'user_identity': user_idt}
 
 
-def get_admin_context(show_deleted="no"):
+def get_admin_context(show_deleted=False):
     context = RequestContext(None,
                              tenant=None,
                              is_admin=True,
@@ -79,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
             return arg
 
     return None
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
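
A usage sketch of the extended RequestContext (illustrative only; the field values are invented). The new domain attributes flow into to_dict(), and the added user_identity key folds them into one space-separated string with '-' for unset fields:

    from manila.openstack.common import context

    ctxt = context.RequestContext(user='alice', tenant='demo',
                                  domain='default')
    info = ctxt.to_dict()
    print(info['user_identity'])  # 'alice demo default - -'
    # generate_request_id() now returns bytes, e.g. b'req-<uuid>'
    print(info['request_id'])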

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright (c) 2012 OpenStack Foundation.
 # Administrator of the National Aeronautics and Space Administration.
 # All Rights Reserved.
@@ -16,8 +14,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from __future__ import print_function
+
+import errno
 import gc
+import os
 import pprint
+import socket
 import sys
 import traceback
@@ -26,36 +29,82 @@ import eventlet.backdoor
 import greenlet
 from oslo.config import cfg
 
+from manila.openstack.common.gettextutils import _LI
+from manila.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers. The "
+    "chosen port is displayed in the service's log file.")
 eventlet_backdoor_opts = [
-    cfg.IntOpt('backdoor_port',
-               default=None,
-               help='port for eventlet backdoor to listen')
+    cfg.StrOpt('backdoor_port',
+               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
 ]
 
 CONF = cfg.CONF
 CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
 
 
 def _dont_use_this():
-    print "Don't use this, just disconnect instead"
+    print("Don't use this, just disconnect instead")
 
 
 def _find_objects(t):
-    return filter(lambda o: isinstance(o, t), gc.get_objects())
+    return [o for o in gc.get_objects() if isinstance(o, t)]
 
 
 def _print_greenthreads():
     for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print i, gt
+        print(i, gt)
         traceback.print_stack(gt.gr_frame)
-        print
+        print()
 
 
 def _print_nativethreads():
     for threadId, stack in sys._current_frames().items():
-        print threadId
+        print(threadId)
         traceback.print_stack(stack)
-        print
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range, ex,
+                                               help_for_backdoor_port)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
               try_port >= end_port):
                raise
            try_port += 1
 
 
 def initialize_if_enabled():
@@ -70,6 +119,8 @@ def initialize_if_enabled():
     if CONF.backdoor_port is None:
         return None
 
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
     # NOTE(johannes): The standard sys.displayhook will print the value of
     # the last expression and set it to __builtin__._, which overwrites
     # the __builtin__._ that gettext sets. Let's switch to using pprint
@@ -80,8 +131,15 @@ def initialize_if_enabled():
             pprint.pprint(val)
     sys.displayhook = displayhook
 
-    sock = eventlet.listen(('localhost', CONF.backdoor_port))
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
     port = sock.getsockname()[1]
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
     eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                      locals=backdoor_locals)
     return port
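
To make the new backdoor_port semantics concrete, a sketch of what the module-private _parse_port_range() helper accepts now that the option is a string (illustrative only):

    from manila.openstack.common import eventlet_backdoor

    eventlet_backdoor._parse_port_range('0')          # (0, 0): random port
    eventlet_backdoor._parse_port_range('8001')       # (8001, 8001): that port only
    eventlet_backdoor._parse_port_range('8000:9000')  # (8000, 9000): smallest free port
    # '9000:8000' raises EventletBackdoorConfigValueError (end < start)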

View File

@@ -1,142 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Exceptions common to OpenStack projects
-"""
-
-import logging
-
-from manila.openstack.common.gettextutils import _
-
-_FATAL_EXCEPTION_FORMAT_ERRORS = False
-
-
-class Error(Exception):
-    def __init__(self, message=None):
-        super(Error, self).__init__(message)
-
-
-class ApiError(Error):
-    def __init__(self, message='Unknown', code='Unknown'):
-        self.message = message
-        self.code = code
-        super(ApiError, self).__init__('%s: %s' % (code, message))
-
-
-class NotFound(Error):
-    pass
-
-
-class UnknownScheme(Error):
-
-    msg = "Unknown scheme '%s' found in URI"
-
-    def __init__(self, scheme):
-        msg = self.__class__.msg % scheme
-        super(UnknownScheme, self).__init__(msg)
-
-
-class BadStoreUri(Error):
-
-    msg = "The Store URI %s was malformed. Reason: %s"
-
-    def __init__(self, uri, reason):
-        msg = self.__class__.msg % (uri, reason)
-        super(BadStoreUri, self).__init__(msg)
-
-
-class Duplicate(Error):
-    pass
-
-
-class NotAuthorized(Error):
-    pass
-
-
-class NotEmpty(Error):
-    pass
-
-
-class Invalid(Error):
-    pass
-
-
-class BadInputError(Exception):
-    """Error resulting from a client sending bad input to a server"""
-    pass
-
-
-class MissingArgumentError(Error):
-    pass
-
-
-class DatabaseMigrationError(Error):
-    pass
-
-
-class ClientConnectionError(Exception):
-    """Error resulting from a client connecting to a server"""
-    pass
-
-
-def wrap_exception(f):
-    def _wrap(*args, **kw):
-        try:
-            return f(*args, **kw)
-        except Exception, e:
-            if not isinstance(e, Error):
-                #exc_type, exc_value, exc_traceback = sys.exc_info()
-                logging.exception(_('Uncaught exception'))
-                #logging.error(traceback.extract_stack(exc_traceback))
-                raise Error(str(e))
-            raise
-    _wrap.func_name = f.func_name
-    return _wrap
-
-
-class OpenstackException(Exception):
-    """
-    Base Exception
-
-    To correctly use this class, inherit from it and define
-    a 'message' property. That message will get printf'd
-    with the keyword arguments provided to the constructor.
-    """
-    message = "An unknown exception occurred"
-
-    def __init__(self, **kwargs):
-        try:
-            self._error_string = self.message % kwargs
-        except Exception as e:
-            if _FATAL_EXCEPTION_FORMAT_ERRORS:
-                raise e
-            else:
-                # at least get the core message out if something happened
-                self._error_string = self.message
-
-    def __str__(self):
-        return self._error_string
-
-
-class MalformedRequestBody(OpenstackException):
-    message = "Malformed message body: %(reason)s"
-
-
-class InvalidContentType(OpenstackException):
-    message = "Invalid content type %(content_type)s"

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # Copyright 2012, Red Hat, Inc.
 #
@@ -19,16 +17,17 @@
 Exception related utilities.
 """
 
-import contextlib
 import logging
 import sys
+import time
 import traceback
 
-from manila.openstack.common.gettextutils import _
+import six
+
+from manila.openstack.common.gettextutils import _LE
 
 
-@contextlib.contextmanager
-def save_and_reraise_exception():
+class save_and_reraise_exception(object):
     """Save current exception, run some code and then re-raise.
 
@@ -40,12 +39,75 @@ def save_and_reraise_exception():
     To work around this, we save the exception state, run handler code, and
     then re-raise the original exception. If another exception occurs, the
     saved exception is logged and the new exception is re-raised.
+
+    In some cases the caller may not want to re-raise the exception, and
+    for those circumstances this context provides a reraise flag that
+    can be used to suppress the exception. For example::
+
+      except Exception:
+          with save_and_reraise_exception() as ctxt:
+              decide_if_need_reraise()
+              if not should_be_reraised:
+                  ctxt.reraise = False
+
+    If another exception occurs and reraise flag is False,
+    the saved exception will not be logged.
+
+    If the caller wants to raise new exception during exception handling
+    he/she sets reraise to False initially with an ability to set it back to
+    True if needed::
+
+      except Exception:
+          with save_and_reraise_exception(reraise=False) as ctxt:
+              [if statements to determine whether to raise a new exception]
+              # Not raising a new exception, so reraise
+              ctxt.reraise = True
     """
-    type_, value, tb = sys.exc_info()
-    try:
-        yield
-    except Exception:
-        logging.error(_('Original exception being dropped: %s'),
-                      traceback.format_exception(type_, value, tb))
-        raise
-    raise type_, value, tb
+    def __init__(self, reraise=True):
+        self.reraise = reraise
+
+    def __enter__(self):
+        self.type_, self.value, self.tb, = sys.exc_info()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            if self.reraise:
+                logging.error(_LE('Original exception being dropped: %s'),
+                              traceback.format_exception(self.type_,
+                                                         self.value,
+                                                         self.tb))
+            return False
+        if self.reraise:
+            six.reraise(self.type_, self.value, self.tb)
+
+
+def forever_retry_uncaught_exceptions(infunc):
+    def inner_func(*args, **kwargs):
+        last_log_time = 0
+        last_exc_message = None
+        exc_count = 0
+        while True:
+            try:
+                return infunc(*args, **kwargs)
+            except Exception as exc:
+                this_exc_message = six.u(str(exc))
+                if this_exc_message == last_exc_message:
+                    exc_count += 1
+                else:
+                    exc_count = 1
+                # Do not log any more frequently than once a minute unless
+                # the exception message changes
+                cur_time = int(time.time())
+                if (cur_time - last_log_time > 60 or
+                        this_exc_message != last_exc_message):
+                    logging.exception(
+                        _LE('Unexpected exception occurred %d time(s)... '
+                            'retrying.') % exc_count)
+                    last_log_time = cur_time
+                    last_exc_message = this_exc_message
+                    exc_count = 0
+                # This should be a very rare event. In case it isn't, do
+                # a sleep.
+                time.sleep(1)
    return inner_func
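
A usage sketch of the class-based save_and_reraise_exception (illustrative; the resource object and its attribute are hypothetical). The context-manager protocol is unchanged, but the reraise flag now lets a handler suppress the saved exception:

    from manila.openstack.common import excutils

    def delete_share(resource):
        try:
            resource.delete()
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                if getattr(resource, 'already_deleted', False):
                    ctxt.reraise = False  # swallow the original exception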

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,9 +13,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import contextlib
 import errno
 import os
+import tempfile
+
+from manila.openstack.common import excutils
+from manila.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
 
 
 def ensure_tree(path):
@@ -33,3 +39,97 @@ def ensure_tree(path):
                 raise
         else:
             raise
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param force_reload: Whether to reload the file.
+    :returns: A tuple with a boolean specifying if the data is fresh
+              or not.
+    """
+    global _FILE_CACHE
+
+    if force_reload and filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug("Reloading cached file %s" % filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path, remove=os.unlink):
+    """Delete a file, but ignore file not found error.
+
+    :param path: File to delete
+    :param remove: Optional function to remove passed path
+    """
+    try:
+        remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path, remove=delete_if_exists):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+
+    :param path: File to work with
+    :param remove: Optional function to remove passed path
+    """
+    try:
+        yield
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            remove(path)
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in file() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return open(*args, **kwargs)
+
+
+def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
+    """Create temporary file or use existing file.
+
+    This util is needed for creating temporary file with
+    specified content, suffix and prefix. If path is not None,
+    it will be used for writing content. If the path doesn't
+    exist it'll be created.
+
+    :param content: content for temporary file.
+    :param path: same as parameter 'dir' for mkstemp
+    :param suffix: same as parameter 'suffix' for mkstemp
+    :param prefix: same as parameter 'prefix' for mkstemp
+
+    For example: it can be used in database tests for creating
+    configuration files.
+    """
+    if path:
+        ensure_tree(path)
+
+    (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
+    try:
+        os.write(fd, content)
+    finally:
+        os.close(fd)
+    return path
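
The new fileutils helpers compose naturally; a minimal sketch (illustrative only):

    from manila.openstack.common import fileutils

    path = fileutils.write_to_tempfile(b'[DEFAULT]\n', suffix='.conf')
    reloaded, data = fileutils.read_cached_file(path)  # True: first read
    reloaded, data = fileutils.read_cached_file(path)  # False: mtime unchanged
    with fileutils.remove_path_on_error(path):
        pass  # an exception inside this block would delete path
    fileutils.delete_if_exists(path)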

View File

@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2012 Red Hat, Inc.
+# Copyright 2013 IBM Corp.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,18 +22,144 @@ Usual usage in an openstack.common module:
     from manila.openstack.common.gettextutils import _
 """
 
+import copy
+import functools
 import gettext
+import locale
+from logging import handlers
 import os
 
-_localedir = os.environ.get('manila'.upper() + '_LOCALEDIR')
-_t = gettext.translation('manila', localedir=_localedir, fallback=True)
+from babel import localedata
+import six
 
+_AVAILABLE_LANGUAGES = {}
 
-def _(msg):
-    return _t.ugettext(msg)
+# FIXME(dhellmann): Remove this when moving to oslo.i18n.
+USE_LAZY = False
 
 
+class TranslatorFactory(object):
+    """Create translator functions
+    """
+
+    def __init__(self, domain, lazy=False, localedir=None):
+        """Establish a set of translation functions for the domain.
+
+        :param domain: Name of translation domain,
+                       specifying a message catalog.
+        :type domain: str
+        :param lazy: Delays translation until a message is emitted.
+                     Defaults to False.
+        :type lazy: Boolean
+        :param localedir: Directory with translation catalogs.
+        :type localedir: str
+        """
+        self.domain = domain
+        self.lazy = lazy
+        if localedir is None:
+            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        self.localedir = localedir
+
+    def _make_translation_func(self, domain=None):
+        """Return a new translation function ready for use.
+
+        Takes into account whether or not lazy translation is being
+        done.
+
+        The domain can be specified to override the default from the
+        factory, but the localedir from the factory is always used
+        because we assume the log-level translation catalogs are
+        installed in the same directory as the main application
+        catalog.
+        """
+        if domain is None:
+            domain = self.domain
+        if self.lazy:
+            return functools.partial(Message, domain=domain)
+        t = gettext.translation(
+            domain,
+            localedir=self.localedir,
+            fallback=True,
+        )
+        if six.PY3:
+            return t.gettext
+        return t.ugettext
+
+    @property
+    def primary(self):
+        "The default translation function."
+        return self._make_translation_func()
+
+    def _make_log_translation_func(self, level):
+        return self._make_translation_func(self.domain + '-log-' + level)
+
+    @property
+    def log_info(self):
+        "Translate info-level log messages."
+        return self._make_log_translation_func('info')
+
+    @property
+    def log_warning(self):
+        "Translate warning-level log messages."
+        return self._make_log_translation_func('warning')
+
+    @property
+    def log_error(self):
+        "Translate error-level log messages."
+        return self._make_log_translation_func('error')
+
+    @property
+    def log_critical(self):
+        "Translate critical-level log messages."
+        return self._make_log_translation_func('critical')
+
+
+# NOTE(dhellmann): When this module moves out of the incubator into
+# oslo.i18n, these global variables can be moved to an integration
+# module within each application.
+
+# Create the global translation functions.
+_translators = TranslatorFactory('manila')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+# NOTE(dhellmann): End of globals that will move to the application's
+# integration module.
+
+
+def enable_lazy():
+    """Convenience function for configuring _() to use lazy gettext
+
+    Call this at the start of execution to enable the gettextutils._
+    function to use lazy gettext functionality. This is useful if
+    your project is importing _ directly instead of using the
+    gettextutils.install() way of importing the _ function.
+    """
+    # FIXME(dhellmann): This function will be removed in oslo.i18n,
+    # because the TranslatorFactory makes it superfluous.
+    global _, _LI, _LW, _LE, _LC, USE_LAZY
+    tf = TranslatorFactory('manila', lazy=True)
+    _ = tf.primary
+    _LI = tf.log_info
+    _LW = tf.log_warning
+    _LE = tf.log_error
+    _LC = tf.log_critical
+    USE_LAZY = True
+
+
-def install(domain):
+def install(domain, lazy=False):
     """Install a _() function using the given translation domain.
 
     Given a translation domain, install a _() function using gettext's
@@ -44,7 +169,330 @@ def install(domain):
     overriding the default localedir (e.g. /usr/share/locale) using
     a translation-domain-specific environment variable (e.g.
     NOVA_LOCALEDIR).
+
+    :param domain: the translation domain
+    :param lazy: indicates whether or not to install the lazy _() function.
+                 The lazy _() introduces a way to do deferred translation
+                 of messages by installing a _ that builds Message objects,
+                 instead of strings, which can then be lazily translated into
+                 any available locale.
     """
-    gettext.install(domain,
-                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
-                    unicode=True)
+    if lazy:
+        from six import moves
+        tf = TranslatorFactory(domain, lazy=True)
+        moves.builtins.__dict__['_'] = tf.primary
+    else:
+        localedir = '%s_LOCALEDIR' % domain.upper()
+        if six.PY3:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir))
+        else:
+            gettext.install(domain,
+                            localedir=os.environ.get(localedir),
+                            unicode=True)
+
+
+class Message(six.text_type):
+    """A Message object is a unicode object that can be translated.
+
+    Translation of Message is done explicitly using the translate() method.
+    For all non-translation intents and purposes, a Message is simply unicode,
+    and can be treated as such.
+    """
+
+    def __new__(cls, msgid, msgtext=None, params=None,
+                domain='manila', *args):
+        """Create a new Message object.
+
+        In order for translation to work gettext requires a message ID, this
+        msgid will be used as the base unicode text. It is also possible
+        for the msgid and the base unicode text to be different by passing
+        the msgtext parameter.
+        """
+        # If the base msgtext is not given, we use the default translation
+        # of the msgid (which is in English) just in case the system locale is
+        # not English, so that the base text will be in that locale by default.
+        if not msgtext:
+            msgtext = Message._translate_msgid(msgid, domain)
+        # We want to initialize the parent unicode with the actual object that
+        # would have been plain unicode if 'Message' was not enabled.
+        msg = super(Message, cls).__new__(cls, msgtext)
+        msg.msgid = msgid
+        msg.domain = domain
+        msg.params = params
+        return msg
+
+    def translate(self, desired_locale=None):
+        """Translate this message to the desired locale.
+
+        :param desired_locale: The desired locale to translate the message to,
+                               if no locale is provided the message will be
+                               translated to the system's default locale.
+
+        :returns: the translated message in unicode
+        """
+        translated_message = Message._translate_msgid(self.msgid,
+                                                      self.domain,
+                                                      desired_locale)
+        if self.params is None:
+            # No need for more translation
+            return translated_message
+
+        # This Message object may have been formatted with one or more
+        # Message objects as substitution arguments, given either as a single
+        # argument, part of a tuple, or as one or more values in a dictionary.
+        # When translating this Message we need to translate those Messages too
+        translated_params = _translate_args(self.params, desired_locale)
+
+        translated_message = translated_message % translated_params
+
+        return translated_message
+
+    @staticmethod
+    def _translate_msgid(msgid, domain, desired_locale=None):
+        if not desired_locale:
+            system_locale = locale.getdefaultlocale()
+            # If the system locale is not available to the runtime use English
+            if not system_locale[0]:
+                desired_locale = 'en_US'
+            else:
+                desired_locale = system_locale[0]
+
+        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+        lang = gettext.translation(domain,
+                                   localedir=locale_dir,
+                                   languages=[desired_locale],
+                                   fallback=True)
+        if six.PY3:
+            translator = lang.gettext
+        else:
+            translator = lang.ugettext
+
+        translated_message = translator(msgid)
+        return translated_message
+
+    def __mod__(self, other):
+        # When we mod a Message we want the actual operation to be performed
+        # by the parent class (i.e. unicode()), the only thing we do here is
+        # save the original msgid and the parameters in case of a translation
+        params = self._sanitize_mod_params(other)
+        unicode_mod = super(Message, self).__mod__(params)
+        modded = Message(self.msgid,
+                         msgtext=unicode_mod,
+                         params=params,
+                         domain=self.domain)
+        return modded
+
+    def _sanitize_mod_params(self, other):
+        """Sanitize the object being modded with this Message.
+
+        - Add support for modding 'None' so translation supports it
+        - Trim the modded object, which can be a large dictionary, to only
+        those keys that would actually be used in a translation
+        - Snapshot the object being modded, in case the message is
+        translated, it will be used as it was when the Message was created
+        """
+        if other is None:
+            params = (other,)
+        elif isinstance(other, dict):
+            # Merge the dictionaries
+            # Copy each item in case one does not support deep copy.
+            params = {}
+            if isinstance(self.params, dict):
+                for key, val in self.params.items():
+                    params[key] = self._copy_param(val)
+            for key, val in other.items():
+                params[key] = self._copy_param(val)
+        else:
+            params = self._copy_param(other)
+        return params
+
+    def _copy_param(self, param):
+        try:
+            return copy.deepcopy(param)
+        except Exception:
+            # Fallback to casting to unicode this will handle the
+            # python code-like objects that can't be deep-copied
+            return six.text_type(param)
+
+    def __add__(self, other):
+        msg = _('Message objects do not support addition.')
+        raise TypeError(msg)
+
+    def __radd__(self, other):
+        return self.__add__(other)
+
+    if six.PY2:
+        def __str__(self):
+            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+            # and it expects specifically a UnicodeError in order to proceed.
+            msg = _('Message objects do not support str() because they may '
+                    'contain non-ascii characters. '
+                    'Please use unicode() or translate() instead.')
+            raise UnicodeError(msg)
+
+
+def get_available_languages(domain):
+    """Lists the available languages for the given translation domain.
+
+    :param domain: the domain to get languages for
+    """
+    if domain in _AVAILABLE_LANGUAGES:
+        return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+    localedir = '%s_LOCALEDIR' % domain.upper()
+    find = lambda x: gettext.find(domain,
+                                  localedir=os.environ.get(localedir),
+                                  languages=[x])
+
+    # NOTE(mrodden): en_US should always be available (and first in case
+    # order matters) since our in-line message strings are en_US
+    language_list = ['en_US']
+    # NOTE(luisg): Babel <1.0 used a function called list(), which was
+    # renamed to locale_identifiers() in >=1.0, the requirements master list
+    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+    # this check when the master list updates to >=1.0, and update all projects
+    list_identifiers = (getattr(localedata, 'list', None) or
+                        getattr(localedata, 'locale_identifiers'))
+    locale_identifiers = list_identifiers()
+
+    for i in locale_identifiers:
+        if find(i) is not None:
+            language_list.append(i)
+
+    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
+    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
+    # are perfectly legitimate locales:
+    #     https://github.com/mitsuhiko/babel/issues/37
+    # In Babel 1.3 they fixed the bug and they support these locales, but
+    # they are still not explicitly "listed" by locale_identifiers().
+    # That is why we add the locales here explicitly if necessary so that
+    # they are listed as supported.
+    aliases = {'zh': 'zh_CN',
+               'zh_Hant_HK': 'zh_HK',
+               'zh_Hant': 'zh_TW',
+               'fil': 'tl_PH'}
+    for (locale, alias) in six.iteritems(aliases):
+        if locale in language_list and alias not in language_list:
+            language_list.append(alias)
+
+    _AVAILABLE_LANGUAGES[domain] = language_list
+    return copy.copy(language_list)
+
+
+def translate(obj, desired_locale=None):
+    """Gets the translated unicode representation of the given object.
+
+    If the object is not translatable it is returned as-is.
+    If the locale is None the object is translated to the system locale.
+
+    :param obj: the object to translate
+    :param desired_locale: the locale to translate the message to, if None the
+                           default system locale will be used
+    :returns: the translated object in unicode, or the original object if
+              it could not be translated
+    """
+    message = obj
+    if not isinstance(message, Message):
+        # If the object to translate is not already translatable,
+        # let's first get its unicode representation
+        message = six.text_type(obj)
+    if isinstance(message, Message):
+        # Even after unicoding() we still need to check if we are
+        # running with translatable unicode before translating
+        return message.translate(desired_locale)
+    return obj
+
+
+def _translate_args(args, desired_locale=None):
+    """Translates all the translatable elements of the given arguments object.
+
+    This method is used for translating the translatable values in method
+    arguments which include values of tuples or dictionaries.
+    If the object is not a tuple or a dictionary the object itself is
+    translated if it is translatable.
+
+    If the locale is None the object is translated to the system locale.
+
+    :param args: the args to translate
+    :param desired_locale: the locale to translate the args to, if None the
+                           default system locale will be used
+    :returns: a new args object with the translated contents of the original
+    """
+    if isinstance(args, tuple):
+        return tuple(translate(v, desired_locale) for v in args)
+    if isinstance(args, dict):
+        translated_dict = {}
+        for (k, v) in six.iteritems(args):
+            translated_v = translate(v, desired_locale)
+            translated_dict[k] = translated_v
+        return translated_dict
+    return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+    """Handler that translates records before logging them.
+
+    The TranslationHandler takes a locale and a target logging.Handler object
+    to forward LogRecord objects to after translating them. This handler
+    depends on Message objects being logged, instead of regular strings.
+
+    The handler can be configured declaratively in the logging.conf as follows:
+
+        [handlers]
+        keys = translatedlog, translator
+
+        [handler_translatedlog]
+        class = handlers.WatchedFileHandler
+        args = ('/var/log/api-localized.log',)
+        formatter = context
+
+        [handler_translator]
+        class = openstack.common.log.TranslationHandler
+        target = translatedlog
+        args = ('zh_CN',)
+
+    If the specified locale is not available in the system, the handler will
+    log in the default locale.
+    """
+
+    def __init__(self, locale=None, target=None):
+        """Initialize a TranslationHandler
+
+        :param locale: locale to use for translating messages
+        :param target: logging.Handler object to forward
+                       LogRecord objects to after translation
+        """
+        # NOTE(luisg): In order to allow this handler to be a wrapper for
+        # other handlers, such as a FileHandler, and still be able to
+        # configure it using logging.conf, this handler has to extend
+        # MemoryHandler because only the MemoryHandlers' logging.conf
+        # parsing is implemented such that it accepts a target handler.
+        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
+        self.locale = locale
+
+    def setFormatter(self, fmt):
+        self.target.setFormatter(fmt)
+
+    def emit(self, record):
+        # We save the message from the original record to restore it
+        # after translation, so other handlers are not affected by this
+        original_msg = record.msg
+        original_args = record.args
+
+        try:
+            self._translate_and_log_record(record)
+        finally:
+            record.msg = original_msg
+            record.args = original_args
+
+    def _translate_and_log_record(self, record):
+        record.msg = translate(record.msg, self.locale)
+
+        # In addition to translating the message, we also need to translate
+        # arguments that were passed to the log method that were not part
+        # of the main message e.g., log.info(_('Some message %s'), this_one))
+        record.args = _translate_args(record.args, self.locale)
+
+        self.target.emit(record)
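
How the new translation globals are meant to be consumed elsewhere in the tree, per the module's own naming scheme (illustrative; the message text is invented). _LI and _LE pull from the 'manila-log-info' and 'manila-log-error' catalogs, while _ uses the primary catalog:

    import logging

    from manila.openstack.common.gettextutils import _, _LE, _LI

    LOG = logging.getLogger(__name__)

    LOG.info(_LI('Eventlet backdoor listening on %(port)s'), {'port': 8001})
    LOG.error(_LE('Could not release the acquired lock `%s`'), 'mylock')
    raise ValueError(_('Malformed message body'))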

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -24,12 +22,12 @@ import traceback
 
 def import_class(import_str):
-    """Returns a class from a string including module and class"""
+    """Returns a class from a string including module and class."""
     mod_str, _sep, class_str = import_str.rpartition('.')
-    __import__(mod_str)
     try:
+        __import__(mod_str)
         return getattr(sys.modules[mod_str], class_str)
-    except (ValueError, AttributeError):
+    except AttributeError:
         raise ImportError('Class %s cannot be found (%s)' %
                           (class_str,
                            traceback.format_exception(*sys.exc_info())))
@@ -41,8 +39,9 @@ def import_object(import_str, *args, **kwargs):
 
 def import_object_ns(name_space, import_str, *args, **kwargs):
-    """
-    Import a class and return an instance of it, first by trying
+    """Tries to import object from default namespace.
+
+    Imports a class and return an instance of it, first by trying
     to find the class in a default namespace, then failing back to
     a full path if not found in the default namespace.
     """
@@ -59,6 +58,13 @@ def import_module(import_str):
     return sys.modules[import_str]
 
 
+def import_versioned_module(version, submodule=None):
+    module = 'manila.v%s' % version
+    if submodule:
+        module = '.'.join((module, submodule))
+    return import_module(module)
+
+
 def try_import(import_str, default=None):
     """Try to import a module and if it fails return default."""
     try:
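
Behavior of the synced import helpers in one sketch (illustrative; stdlib names are used so it is self-contained):

    from manila.openstack.common import importutils

    cls = importutils.import_class('datetime.datetime')  # the class object
    dt = importutils.import_object('datetime.datetime', 2014, 6, 4)
    netaddr = importutils.try_import('netaddr')          # None if not installed
    # A present module with a missing attribute now surfaces as ImportError:
    # importutils.import_class('datetime.no_such_class')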

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
 # Copyright 2011 Justin Santa Barbara
@@ -33,16 +31,32 @@ This module provides a few things:
 '''
 
+import codecs
 import datetime
 import functools
 import inspect
 import itertools
-import json
-import types
-import xmlrpclib
+import sys
+
+if sys.version_info < (2, 7):
+    # On Python <= 2.6, json module is not C boosted, so try to use
+    # simplejson module if available
+    try:
+        import simplejson as json
+    except ImportError:
+        import json
+else:
+    import json
+
+import six
+import six.moves.xmlrpc_client as xmlrpclib
 
+from manila.openstack.common import gettextutils
+from manila.openstack.common import importutils
+from manila.openstack.common import strutils
 from manila.openstack.common import timeutils
 
+netaddr = importutils.try_import("netaddr")
+
 _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.isfunction, inspect.isgeneratorfunction,
@@ -50,7 +64,8 @@ _nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                      inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                      inspect.isabstract]
 
-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+                 + (type(None), bool, float))
 
 
 def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -93,7 +108,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
     #              value of itertools.count doesn't get caught by nasty_type_tests
     #              and results in infinite loop when list(value) is called.
     if type(value) == itertools.count:
-        return unicode(value)
+        return six.text_type(value)
 
     # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
     #              tests that raise an exception in a mocked method that
@@ -115,7 +130,7 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
                              level=level,
                              max_depth=max_depth)
         if isinstance(value, dict):
-            return dict((k, recursive(v)) for k, v in value.iteritems())
+            return dict((k, recursive(v)) for k, v in six.iteritems(value))
         elif isinstance(value, (list, tuple)):
             return [recursive(lv) for lv in value]
 
@@ -127,6 +142,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
 
         if convert_datetime and isinstance(value, datetime.datetime):
             return timeutils.strtime(value)
+        elif isinstance(value, gettextutils.Message):
+            return value.data
         elif hasattr(value, 'iteritems'):
             return recursive(dict(value.iteritems()), level=level + 1)
         elif hasattr(value, '__iter__'):
@@ -135,26 +152,28 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
             # Likely an instance of something. Watch for cycles.
             # Ignore class member vars.
             return recursive(value.__dict__, level=level + 1)
+        elif netaddr and isinstance(value, netaddr.IPAddress):
+            return six.text_type(value)
         else:
             if any(test(value) for test in _nasty_type_tests):
-                return unicode(value)
+                return six.text_type(value)
             return value
     except TypeError:
         # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
-        return unicode(value)
+        return six.text_type(value)
 
 
 def dumps(value, default=to_primitive, **kwargs):
     return json.dumps(value, default=default, **kwargs)
 
 
-def loads(s):
-    return json.loads(s)
+def loads(s, encoding='utf-8'):
+    return json.loads(strutils.safe_decode(s, encoding))
 
 
-def load(s):
-    return json.load(s)
+def load(fp, encoding='utf-8'):
+    return json.load(codecs.getreader(encoding)(fp))
 
 
 try:
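
A sketch of why dumps() routes through to_primitive() (illustrative): values the stdlib encoder rejects, such as datetimes, are converted first, and loads() now tolerates bytes input via strutils.safe_decode():

    import datetime

    from manila.openstack.common import jsonutils

    record = {'created_at': datetime.datetime(2014, 6, 4, 19, 48, 57)}
    text = jsonutils.dumps(record)  # datetime rendered via timeutils.strtime()
    data = jsonutils.loads(text)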

View File

@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
 # Copyright 2011 OpenStack Foundation.
 # All Rights Reserved.
 #
@@ -15,16 +13,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""
 
+import threading
 import weakref
 
-from eventlet import corolocal
 
-
-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
     def __getattribute__(self, attr):
-        rval = corolocal.local.__getattribute__(self, attr)
+        rval = super(WeakLocal, self).__getattribute__(attr)
         if rval:
             # NOTE(mikal): this bit is confusing. What is stored is a weak
             # reference, not the value itself. We therefore need to lookup
@@ -34,7 +31,7 @@ class WeakLocal(corolocal.local):
 
     def __setattr__(self, attr, value):
         value = weakref.ref(value)
-        return corolocal.local.__setattr__(self, attr, value)
+        return super(WeakLocal, self).__setattr__(attr, value)
 
 
 # NOTE(mikal): the name "store" should be deprecated in the future
@@ -45,4 +42,4 @@ store = WeakLocal()
 # "strong" store will hold a reference to the object so that it never falls out
 # of scope.
 weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
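
The weak/strong store distinction in a sketch (illustrative; relies on CPython reference counting for the immediate cleanup):

    from manila.openstack.common import local

    class Context(object):
        pass

    ctx = Context()
    local.weak_store.request = ctx
    assert local.weak_store.request is ctx  # weakref is dereferenced on read
    del ctx                                 # last strong reference is gone
    # local.weak_store.request now dereferences to None.

    local.strong_store.locks_held = []      # plain thread-local, keeps values alive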

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation. # Copyright 2011 OpenStack Foundation.
# All Rights Reserved. # All Rights Reserved.
# #
@ -15,9 +13,9 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import contextlib import contextlib
import errno import errno
import fcntl
import functools import functools
import os import os
import shutil import shutil
@ -31,8 +29,7 @@ import weakref
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common import fileutils from manila.openstack.common import fileutils
from manila.openstack.common.gettextutils import _ # noqa from manila.openstack.common.gettextutils import _, _LE, _LI
from manila.openstack.common import local
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
@ -41,10 +38,10 @@ LOG = logging.getLogger(__name__)
util_opts = [ util_opts = [
cfg.BoolOpt('disable_process_locking', default=False, cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'), help='Enables or disables inter-process locks.'),
cfg.StrOpt('lock_path', cfg.StrOpt('lock_path',
default=os.environ.get("MANILA_LOCK_PATH"), default=os.environ.get("MANILA_LOCK_PATH"),
help=('Directory to use for lock files.')) help='Directory to use for lock files.')
] ]
@ -56,7 +53,7 @@ def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path) cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object): class _FileLock(object):
"""Lock implementation which allows multiple locks, working around """Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file not require any cleanup. Since the lock is always held on a file
@ -78,7 +75,13 @@ class _InterProcessLock(object):
self.lockfile = None self.lockfile = None
self.fname = name self.fname = name
def __enter__(self): def acquire(self):
basedir = os.path.dirname(self.fname)
if not os.path.exists(basedir):
fileutils.ensure_tree(basedir)
LOG.info(_LI('Created lock path: %s'), basedir)
self.lockfile = open(self.fname, 'w') self.lockfile = open(self.fname, 'w')
while True: while True:
@ -88,23 +91,41 @@ class _InterProcessLock(object):
# Also upon reading the MSDN docs for locking(), it seems # Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism. # to have a laughable 10 attempts "blocking" mechanism.
self.trylock() self.trylock()
return self LOG.debug('Got file lock "%s"', self.fname)
return True
except IOError as e: except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN): if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables # external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning # updates - give it some time to prevent busy spinning
time.sleep(0.01) time.sleep(0.01)
else: else:
raise raise threading.ThreadError(_("Unable to acquire lock on"
" `%(filename)s` due to"
" %(exception)s") %
{
'filename': self.fname,
'exception': e,
})
def __exit__(self, exc_type, exc_val, exc_tb): def __enter__(self):
self.acquire()
return self
def release(self):
try: try:
self.unlock() self.unlock()
self.lockfile.close() self.lockfile.close()
LOG.debug('Released file lock "%s"', self.fname)
except IOError: except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"), LOG.exception(_LE("Could not release the acquired lock `%s`"),
self.fname) self.fname)
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
return os.path.exists(self.fname)
def trylock(self): def trylock(self):
raise NotImplementedError() raise NotImplementedError()
@ -112,7 +133,7 @@ class _InterProcessLock(object):
raise NotImplementedError() raise NotImplementedError()
class _WindowsLock(_InterProcessLock): class _WindowsLock(_FileLock):
def trylock(self): def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
@ -120,7 +141,7 @@ class _WindowsLock(_InterProcessLock):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock): class _FcntlLock(_FileLock):
def trylock(self): def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
@ -128,17 +149,120 @@ class _PosixLock(_InterProcessLock):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN) fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
class _PosixLock(object):
def __init__(self, name):
# Hash the name because it's not valid to have POSIX semaphore
# names with things like / in them. Then use base64 to encode
# the digest() instead taking the hexdigest() because the
# result is shorter and most systems can't have shm sempahore
# names longer than 31 characters.
h = hashlib.sha1()
h.update(name.encode('ascii'))
self.name = str((b'/' + base64.urlsafe_b64encode(
h.digest())).decode('ascii'))
def acquire(self, timeout=None):
self.semaphore = posix_ipc.Semaphore(self.name,
flags=posix_ipc.O_CREAT,
initial_value=1)
self.semaphore.acquire(timeout)
return self
def __enter__(self):
self.acquire()
return self
def release(self):
self.semaphore.release()
self.semaphore.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def exists(self):
try:
semaphore = posix_ipc.Semaphore(self.name)
except posix_ipc.ExistentialError:
return False
else:
semaphore.close()
return True
if os.name == 'nt': if os.name == 'nt':
import msvcrt import msvcrt
InterProcessLock = _WindowsLock InterProcessLock = _WindowsLock
FileLock = _WindowsLock
else: else:
import fcntl import base64
import hashlib
import posix_ipc
InterProcessLock = _PosixLock InterProcessLock = _PosixLock
FileLock = _FcntlLock
_semaphores = weakref.WeakValueDictionary() _semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock() _semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
# NOTE(bnemec): Create a fake lock path for posix locks so we don't
# unnecessarily raise the RequiredOptError below.
if InterProcessLock is not _PosixLock:
raise cfg.RequiredOptError('lock_path')
local_lock_path = 'posixlock:/'
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
LOG.debug('Attempting to grab external lock "%(lock)s"',
{'lock': name})
lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
# NOTE(bnemec): If an explicit lock_path was passed to us then it
# means the caller is relying on file-based locking behavior, so
# we can't use posix locks for those calls.
if lock_path:
return FileLock(lock_file_path)
return InterProcessLock(lock_file_path)
def remove_external_lock_file(name, lock_file_prefix=None):
"""Remove a external lock file when it's not used anymore
This will be helpful when we have a lot of lock files
"""
with internal_lock(name):
lock_file_path = _get_lock_path(name, lock_file_prefix)
try:
os.remove(lock_file_path)
except OSError:
LOG.info(_LI('Failed to remove file %(file)s'),
{'file': lock_file_path})
def internal_lock(name):
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
return sem
@contextlib.contextmanager @contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None): def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock """Context based lock
@ -148,73 +272,22 @@ def lock(name, lock_file_prefix=None, external=False, lock_path=None):
True, in which case, it'll yield an InterProcessLock instance. True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide :param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix. lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock :param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different should work across multiple processes. This means that if two different
workers both run a a method decorated with @synchronized('mylock', workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time. external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
""" """
with _semaphores_lock: int_lock = internal_lock(name)
try: with int_lock:
sem = _semaphores[name] if external and not CONF.disable_process_locking:
except KeyError: ext_lock = external_lock(name, lock_file_prefix, lock_path)
sem = threading.Semaphore() with ext_lock:
_semaphores[name] = sem yield ext_lock
else:
with sem: yield int_lock
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) LOG.debug('Released semaphore "%(lock)s"', {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
@ -246,11 +319,11 @@ def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
def inner(*args, **kwargs): def inner(*args, **kwargs):
try: try:
with lock(name, lock_file_prefix, external, lock_path): with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'), LOG.debug('Got semaphore / lock "%(function)s"',
{'function': f.__name__}) {'function': f.__name__})
return f(*args, **kwargs) return f(*args, **kwargs)
finally: finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'), LOG.debug('Semaphore / lock released "%(function)s"',
{'function': f.__name__}) {'function': f.__name__})
return inner return inner
return wrap return wrap
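
A short usage sketch of the decorator after this change; the function, lock name, and lock_path are illustrative (lock_path would normally come from CONF.lock_path):

from manila.openstack.common import lockutils

@lockutils.synchronized('share-resize', lock_file_prefix='manila-',
                        external=True, lock_path='/var/lock/manila')
def resize_share(share_id, new_size_gb):
    # external=True adds the file lock, so at most one process on this
    # host runs the body at a time; the semaphore alone covers threads.
    pass
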

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation. # Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
@ -17,7 +15,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Openstack logging handler. """OpenStack logging handler.
This module adds to logging functionality by adding the option to specify This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object a context object when calling the various log methods. If the context object
@ -29,29 +27,49 @@ It also allows setting of formatting information through conf.
""" """
import ConfigParser
import cStringIO
import inspect import inspect
import itertools import itertools
import logging import logging
import logging.config import logging.config
import logging.handlers import logging.handlers
import os import os
import stat import re
import sys import sys
import traceback import traceback
from oslo.config import cfg from oslo.config import cfg
import six
from six import moves
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _
from manila.openstack.common import importutils
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common import local from manila.openstack.common import local
from manila.openstack.common import notifier
_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [ common_cli_opts = [
cfg.BoolOpt('debug', cfg.BoolOpt('debug',
short='d', short='d',
@ -66,24 +84,26 @@ common_cli_opts = [
] ]
logging_cli_opts = [ logging_cli_opts = [
cfg.StrOpt('log-config', cfg.StrOpt('log-config-append',
metavar='PATH', metavar='PATH',
help='If this option is specified, the logging configuration ' deprecated_name='log-config',
'file specified is used and overrides any other logging ' help='The name of a logging configuration file. This file '
'options specified. Please see the Python logging module ' 'is appended to any existing logging configuration '
'documentation for details on logging configuration ' 'files. For details about logging configuration files, '
'files.'), 'see the Python logging module documentation.'),
cfg.StrOpt('log-format', cfg.StrOpt('log-format',
default=_DEFAULT_LOG_FORMAT,
metavar='FORMAT', metavar='FORMAT',
help='A logging.Formatter log message format string which may ' help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. ' 'use any of the available logging.LogRecord attributes. '
'Default: %(default)s'), 'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format', cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT, default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT', metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. ' help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'), 'Default: %(default)s .'),
cfg.StrOpt('log-file', cfg.StrOpt('log-file',
metavar='PATH', metavar='PATH',
deprecated_name='logfile', deprecated_name='logfile',
@ -92,69 +112,80 @@ logging_cli_opts = [
cfg.StrOpt('log-dir', cfg.StrOpt('log-dir',
deprecated_name='logdir', deprecated_name='logdir',
help='(Optional) The base directory used for relative ' help='(Optional) The base directory used for relative '
'--log-file paths'), '--log-file paths.'),
cfg.BoolOpt('use-syslog', cfg.BoolOpt('use-syslog',
default=False, default=False,
help='Use syslog for logging.'), help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility', cfg.StrOpt('syslog-log-facility',
default='LOG_USER', default='LOG_USER',
help='syslog facility to receive log lines') help='Syslog facility to receive log lines.')
] ]
generic_log_opts = [ generic_log_opts = [
cfg.BoolOpt('use_stderr', cfg.BoolOpt('use_stderr',
default=True, default=True,
help='Log output to standard error'), help='Log output to standard error.')
cfg.StrOpt('logfile_mode',
default='0644',
help='Default file mode used when creating log files'),
] ]
log_opts = [ log_opts = [
cfg.StrOpt('logging_context_format_string', cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] ' '%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s', '%(instance)s%(message)s',
help='format string to use for log messages with context'), help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string', cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s', '%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'), help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix', cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d', default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'), help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix', cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s', '%(instance)s',
help='prefix each line of exception output with this format'), help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels', cfg.ListOpt('default_log_levels',
default=[ default=[
'amqp=WARN',
'amqplib=WARN', 'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN', 'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO', 'suds=INFO',
'keystone=INFO', 'oslo.messaging=INFO',
'eventlet.wsgi.server=WARN' 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
], ],
help='list of logger=LEVEL pairs'), help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors', cfg.BoolOpt('publish_errors',
default=False, default=False,
help='publish error events'), help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations', cfg.BoolOpt('fatal_deprecations',
default=False, default=False,
help='make deprecations fatal'), help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed # NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we # a full instance (and could include more information), and other times we
# are just handed a UUID for the instance. # are just handed a UUID for the instance.
cfg.StrOpt('instance_format', cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ', default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format ' help='The format for an instance that is passed with the log '
'it like this'), 'message. '),
cfg.StrOpt('instance_uuid_format', cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ', default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, ' help='The format for an instance UUID that is passed with the '
'format it like this'), 'log message. '),
] ]
CONF = cfg.CONF CONF = cfg.CONF
@ -210,27 +241,112 @@ def _get_log_file_path(binary=None):
binary = binary or _get_binary_name() binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),) return '%s.log' % (os.path.join(logdir, binary),)
return None
class ContextAdapter(logging.LoggerAdapter):
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
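
For illustration, the patterns above also cover JSON- and XML-style payloads (standalone sketch; the import path matches the one used elsewhere in this sync):

from manila.openstack.common import log as logging

print(logging.mask_password('{"auth": {"password": "s3cret"}}'))
# {"auth": {"password": "***"}}
print(logging.mask_password('<adminPass>s3cret</adminPass>'))
# <adminPass>***</adminPass>
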
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string): def __init__(self, logger, project_name, version_string):
self.logger = logger self.logger = logger
self.project = project_name self.project = project_name
self.version = version_string self.version = version_string
self._deprecated_messages_sent = dict()
def audit(self, msg, *args, **kwargs): @property
self.log(logging.AUDIT, msg, *args, **kwargs) def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs): def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations: if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs) self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg) raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs) # Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
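
A sketch of the two behaviours deprecated() now has: repeated messages are logged once, and fatal_deprecations upgrades the warning to a critical log plus an exception (the override here is purely for demonstration):

from oslo.config import cfg
from manila.openstack.common import log as logging

LOG = logging.getLogger(__name__)

LOG.deprecated('option %s is going away', 'foo')  # logged once at warn
LOG.deprecated('option %s is going away', 'foo')  # deduplicated, skipped

cfg.CONF.set_override('fatal_deprecations', True)
try:
    LOG.deprecated('option %s is going away', 'bar')
except Exception:
    pass  # DeprecatedConfig: the warning became fatal
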
def process(self, msg, kwargs): def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs: if 'extra' not in kwargs:
kwargs['extra'] = {} kwargs['extra'] = {}
extra = kwargs['extra'] extra = kwargs['extra']
@ -242,18 +358,20 @@ class ContextAdapter(logging.LoggerAdapter):
extra.update(_dictify_context(context)) extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None) instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = '' instance_extra = ''
if instance: if instance:
instance_extra = CONF.instance_format % instance instance_extra = CONF.instance_format % instance
else: elif instance_uuid:
instance_uuid = kwargs.pop('instance_uuid', None) instance_extra = (CONF.instance_uuid_format
if instance_uuid: % {'uuid': instance_uuid})
instance_extra = (CONF.instance_uuid_format extra['instance'] = instance_extra
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project}) extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra.update({"version": self.version})
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy() extra['extra'] = extra.copy()
return msg, kwargs return msg, kwargs
@ -267,7 +385,7 @@ class JSONFormatter(logging.Formatter):
def formatException(self, ei, strip_newlines=True): def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei) lines = traceback.format_exception(*ei)
if strip_newlines: if strip_newlines:
lines = [itertools.ifilter( lines = [moves.filter(
lambda x: x, lambda x: x,
line.rstrip().splitlines()) for line in lines] line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines)) lines = list(itertools.chain(*lines))
@ -304,23 +422,12 @@ class JSONFormatter(logging.Formatter):
return jsonutils.dumps(message) return jsonutils.dumps(message)
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('manila.openstack.common.notifier.log_notifier' in
CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))
def _create_logging_excepthook(product_name): def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb): def logging_excepthook(exc_type, value, tb):
extra = {} extra = {'exc_info': (exc_type, value, tb)}
if CONF.verbose: getLogger(product_name).critical(
extra['exc_info'] = (type, value, tb) "".join(traceback.format_exception_only(exc_type, value)),
getLogger(product_name).critical(str(value), **extra) **extra)
return logging_excepthook return logging_excepthook
@ -337,19 +444,20 @@ class LogConfigError(Exception):
err_msg=self.err_msg) err_msg=self.err_msg)
def _load_log_config(log_config): def _load_log_config(log_config_append):
try: try:
logging.config.fileConfig(log_config) logging.config.fileConfig(log_config_append,
except ConfigParser.Error, exc: disable_existing_loggers=False)
raise LogConfigError(log_config, str(exc)) except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name): def setup(product_name, version='unknown'):
"""Setup logging.""" """Setup logging."""
if CONF.log_config: if CONF.log_config_append:
_load_log_config(CONF.log_config) _load_log_config(CONF.log_config_append)
else: else:
_setup_logging_from_conf() _setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name) sys.excepthook = _create_logging_excepthook(product_name)
@ -383,15 +491,38 @@ def _find_facility_from_conf():
return facility return facility
def _setup_logging_from_conf(): class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger log_root = getLogger(None).logger
for handler in log_root.handlers: for handler in log_root.handlers:
log_root.removeHandler(handler) log_root.removeHandler(handler)
if CONF.use_syslog: if CONF.use_syslog:
facility = _find_facility_from_conf() facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log', # TODO(bogdando) use the format provided by RFCSysLogHandler
facility=facility) # after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog) log_root.addHandler(syslog)
logpath = _get_log_file_path() logpath = _get_log_file_path()
@ -399,31 +530,35 @@ def _setup_logging_from_conf():
filelog = logging.handlers.WatchedFileHandler(logpath) filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog) log_root.addHandler(filelog)
mode = int(CONF.logfile_mode, 8)
st = os.stat(logpath)
if st.st_mode != (stat.S_IFREG | mode):
os.chmod(logpath, mode)
if CONF.use_stderr: if CONF.use_stderr:
streamlog = ColorHandler() streamlog = ColorHandler()
log_root.addHandler(streamlog) log_root.addHandler(streamlog)
elif not CONF.log_file: elif not logpath:
# pass sys.stdout as a positional argument # pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream # python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout) streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog) log_root.addHandler(streamlog)
if CONF.publish_errors: if CONF.publish_errors:
log_root.addHandler(PublishErrorsHandler(logging.ERROR)) handler = importutils.import_object(
"manila.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers: for handler in log_root.handlers:
datefmt = CONF.log_date_format # NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format: if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format, handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt)) datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else: else:
handler.setFormatter(LegacyFormatter(datefmt=datefmt)) handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug: if CONF.debug:
log_root.setLevel(logging.DEBUG) log_root.setLevel(logging.DEBUG)
@ -434,9 +569,15 @@ def _setup_logging_from_conf():
for pair in CONF.default_log_levels: for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=') mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod) logger = logging.getLogger(mod)
logger.setLevel(level) # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {} _loggers = {}
@ -449,6 +590,16 @@ def getLogger(name='unknown', version='unknown'):
return _loggers[name] return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
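
A sketch of why the lazy variant is useful: a module can create its logger at import time, before logging setup has run, and the real logger is only built on first use:

from manila.openstack.common import log as logging

LOG = logging.getLazyLogger(__name__)   # safe before logging is set up

def do_work():
    # The real ContextAdapter is created here, on first delegation.
    LOG.info('working')
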
class WritableLogger(object): class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs.""" """A thin wrapper that responds to `write` and logs."""
@ -457,10 +608,10 @@ class WritableLogger(object):
self.level = level self.level = level
def write(self, msg): def write(self, msg):
self.logger.log(self.level, msg) self.logger.log(self.level, msg.rstrip())
class LegacyFormatter(logging.Formatter): class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags. """A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string The flags used to set format strings are: logging_context_format_string
@ -471,18 +622,50 @@ class LegacyFormatter(logging.Formatter):
For information about what variables are available for the formatter see: For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
""" """
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record): def format(self, record):
"""Uses contextstring if request_id is set, otherwise default.""" """Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formating params
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if # to an empty string so we don't throw an exception if
# they get used # they get used
for key in ('instance', 'color'): for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__: if key not in record.__dict__:
record.__dict__[key] = '' record.__dict__[key] = ''
if record.__dict__.get('request_id', None): if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string self._fmt = CONF.logging_context_format_string
else: else:
self._fmt = CONF.logging_default_format_string self._fmt = CONF.logging_default_format_string
@ -491,7 +674,7 @@ class LegacyFormatter(logging.Formatter):
CONF.logging_debug_format_suffix): CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formated copy # Cache this on the record, Logger will respect our formatted copy
if record.exc_info: if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record) record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record) return logging.Formatter.format(self, record)
@ -501,7 +684,7 @@ class LegacyFormatter(logging.Formatter):
if not record: if not record:
return logging.Formatter.formatException(self, exc_info) return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO() stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer) None, stringbuffer)
lines = stringbuffer.getvalue().split('\n') lines = stringbuffer.getvalue().split('\n')

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara # Copyright 2011 Justin Santa Barbara
@ -22,7 +20,7 @@ import sys
from eventlet import event from eventlet import event
from eventlet import greenthread from eventlet import greenthread
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _LE, _LW
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import timeutils from manila.openstack.common import timeutils
@ -30,19 +28,19 @@ LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception): class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall. """Exception to break out and stop a LoopingCallBase.
The poll-function passed to LoopingCall can raise this exception to The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to break out of the loop normally. This is somewhat analogous to
StopIteration. StopIteration.
An optional return-value can be included as the argument to the exception; An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait() this return-value will be returned by LoopingCallBase.wait()
""" """
def __init__(self, retvalue=True): def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return.""" """:param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue self.retvalue = retvalue
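
A usage sketch of the mechanism this docstring describes: the polled function raises LoopingCallDone to stop the loop, and wait() returns the value attached to the exception (interval and return value are illustrative):

from manila.openstack.common import loopingcall

state = {'polls': 0}

def poll():
    state['polls'] += 1
    if state['polls'] >= 3:
        raise loopingcall.LoopingCallDone(retvalue='done')

timer = loopingcall.FixedIntervalLoopingCall(poll)
timer.start(interval=0.1)
print(timer.wait())   # prints 'done' after three polls
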
@ -81,14 +79,14 @@ class FixedIntervalLoopingCall(LoopingCallBase):
break break
delay = interval - timeutils.delta_seconds(start, end) delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0: if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') % LOG.warn(_LW('task run outlasted interval by %s sec') %
-delay) -delay)
greenthread.sleep(delay if delay > 0 else 0) greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone, e: except LoopingCallDone as e:
self.stop() self.stop()
done.send(e.retvalue) done.send(e.retvalue)
except Exception: except Exception:
LOG.exception(_('in fixed duration looping call')) LOG.exception(_LE('in fixed duration looping call'))
done.send_exception(*sys.exc_info()) done.send_exception(*sys.exc_info())
return return
else: else:
@ -100,11 +98,6 @@ class FixedIntervalLoopingCall(LoopingCallBase):
return self.done return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase): class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event. """A looping call which sleeps until the next known event.
@ -128,14 +121,14 @@ class DynamicLoopingCall(LoopingCallBase):
if periodic_interval_max is not None: if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max) idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f ' LOG.debug('Dynamic looping call sleeping for %.02f '
'seconds'), idle) 'seconds', idle)
greenthread.sleep(idle) greenthread.sleep(idle)
except LoopingCallDone, e: except LoopingCallDone as e:
self.stop() self.stop()
done.send(e.retvalue) done.send(e.retvalue)
except Exception: except Exception:
LOG.exception(_('in dynamic looping call')) LOG.exception(_LE('in dynamic looping call'))
done.send_exception(*sys.exc_info()) done.send_exception(*sys.exc_info())
return return
else: else:

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation. # Copyright 2012 OpenStack Foundation.
# All Rights Reserved. # All Rights Reserved.
# #
@ -19,14 +17,19 @@
Network-related utilities and helper functions. Network-related utilities and helper functions.
""" """
import logging import socket
from six.moves.urllib import parse
from manila.openstack.common.gettextutils import _LW
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def parse_host_port(address, default_port=None): def parse_host_port(address, default_port=None):
""" """Interpret a string as a host:port pair.
Interpret a string as a host:port pair.
An IPv6 address MUST be escaped if accompanied by a port, An IPv6 address MUST be escaped if accompanied by a port,
because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334
means both [2001:db8:85a3::8a2e:370:7334] and means both [2001:db8:85a3::8a2e:370:7334] and
@ -66,3 +69,92 @@ def parse_host_port(address, default_port=None):
port = default_port port = default_port
return (host, None if port is None else int(port)) return (host, None if port is None else int(port))
class ModifiedSplitResult(parse.SplitResult):
"""Split results class for urlsplit."""
# NOTE(dims): The functions below are needed for Python 2.6.x.
# We can remove these when we drop support for 2.6.x.
@property
def hostname(self):
netloc = self.netloc.split('@', 1)[-1]
host, port = parse_host_port(netloc)
return host
@property
def port(self):
netloc = self.netloc.split('@', 1)[-1]
host, port = parse_host_port(netloc)
return port
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL using urlparse.urlsplit(), splitting query and fragments.
This function papers over Python issue9374 when needed.
The parameters are the same as urlparse.urlsplit.
"""
scheme, netloc, path, query, fragment = parse.urlsplit(
url, scheme, allow_fragments)
if allow_fragments and '#' in path:
path, fragment = path.split('#', 1)
if '?' in path:
path, query = path.split('?', 1)
return ModifiedSplitResult(scheme, netloc,
path, query, fragment)
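
A usage sketch of the helper above, showing the hostname and port properties the Python 2.6 workaround provides (expected values per parse_host_port):

from manila.openstack.common import network_utils

result = network_utils.urlsplit('http://user@[2001:db8::1]:8080/path?q=1#frag')
print(result.hostname)   # 2001:db8::1
print(result.port)       # 8080
print(result.path, result.query, result.fragment)   # /path q=1 frag
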
def set_tcp_keepalive(sock, tcp_keepalive=True,
tcp_keepidle=None,
tcp_keepalive_interval=None,
tcp_keepalive_count=None):
"""Set values for tcp keepalive parameters
This function configures tcp keepalive parameters if users wish to do
so.
:param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
not sure, this should be True, and default values will be used.
:param tcp_keepidle: time to wait before starting to send keepalive probes
:param tcp_keepalive_interval: time between successive probes, once the
initial wait time is over
:param tcp_keepalive_count: number of probes to send before the connection
is killed
"""
# NOTE(praneshp): Despite keepalive being a tcp concept, the level is
# still SOL_SOCKET. This is a quirk.
if isinstance(tcp_keepalive, bool):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive)
else:
raise TypeError("tcp_keepalive must be a boolean")
if not tcp_keepalive:
return
# These options aren't available in the OS X version of eventlet.
# Idle + Count * Interval effectively gives you the total timeout.
if tcp_keepidle is not None:
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
tcp_keepidle)
else:
LOG.warning(_LW('tcp_keepidle not available on your system'))
if tcp_keepalive_interval is not None:
if hasattr(socket, 'TCP_KEEPINTVL'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL,
tcp_keepalive_interval)
else:
LOG.warning(_LW('tcp_keepintvl not available on your system'))
if tcp_keepalive_count is not None:
if hasattr(socket, 'TCP_KEEPCNT'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPCNT,
tcp_keepalive_count)
else:
LOG.warning(_LW('tcp_keepcnt not available on your system'))
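
A usage sketch of set_tcp_keepalive() with illustrative timing values; on Linux this yields roughly idle + count * interval seconds before a dead peer is detected:

import socket

from manila.openstack.common import network_utils

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Start probing after 600s idle, probe every 15s, declare the peer dead
# after 4 unanswered probes.
network_utils.set_tcp_keepalive(sock,
                                tcp_keepalive=True,
                                tcp_keepidle=600,
                                tcp_keepalive_interval=15,
                                tcp_keepalive_count=4)
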

View File

@ -1,14 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -13,12 +13,13 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import socket
import uuid import uuid
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common import context from manila.openstack.common import context
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _, _LE
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
@ -35,7 +36,6 @@ notifier_opts = [
default='INFO', default='INFO',
help='Default notification level for outgoing notifications'), help='Default notification level for outgoing notifications'),
cfg.StrOpt('default_publisher_id', cfg.StrOpt('default_publisher_id',
default='$host',
help='Default publisher_id for outgoing notifications'), help='Default publisher_id for outgoing notifications'),
] ]
@ -56,7 +56,7 @@ class BadPriorityException(Exception):
def notify_decorator(name, fn): def notify_decorator(name, fn):
""" decorator for notify which is used from utils.monkey_patch() """Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function :param name: name of the function
:param function: - object of the function :param function: - object of the function
@ -74,7 +74,7 @@ def notify_decorator(name, fn):
ctxt = context.get_context_from_function_and_args(fn, args, kwarg) ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
notify(ctxt, notify(ctxt,
CONF.default_publisher_id, CONF.default_publisher_id or socket.gethostname(),
name, name,
CONF.default_notification_level, CONF.default_notification_level,
body) body)
@ -84,7 +84,10 @@ def notify_decorator(name, fn):
def publisher_id(service, host=None): def publisher_id(service, host=None):
if not host: if not host:
host = CONF.host try:
host = CONF.host
except AttributeError:
host = CONF.default_publisher_id or socket.gethostname()
return "%s.%s" % (service, host) return "%s.%s" % (service, host)
@ -138,9 +141,9 @@ def notify(context, publisher_id, event_type, priority, payload):
try: try:
driver.notify(context, msg) driver.notify(context, msg)
except Exception as e: except Exception as e:
LOG.exception(_("Problem '%(e)s' attempting to " LOG.exception(_LE("Problem '%(e)s' attempting to "
"send to notification system. " "send to notification system. "
"Payload=%(payload)s") "Payload=%(payload)s")
% dict(e=e, payload=payload)) % dict(e=e, payload=payload))
@ -153,29 +156,16 @@ def _get_drivers():
if _drivers is None: if _drivers is None:
_drivers = {} _drivers = {}
for notification_driver in CONF.notification_driver: for notification_driver in CONF.notification_driver:
add_driver(notification_driver) try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_LE("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
return _drivers.values() return _drivers.values()
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except ImportError:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver
def _reset_drivers(): def _reset_drivers():
"""Used by unit tests to reset the drivers.""" """Used by unit tests to reset the drivers."""
global _drivers global _drivers

View File

@ -24,7 +24,9 @@ CONF = cfg.CONF
def notify(_context, message): def notify(_context, message):
"""Notifies the recipient of the desired event given the model. """Notifies the recipient of the desired event given the model.
Log notifications using openstack's default logging system"""
Log notifications using OpenStack's default logging system.
"""
priority = message.get('priority', priority = message.get('priority',
CONF.default_notification_level) CONF.default_notification_level)

View File

@ -15,5 +15,5 @@
def notify(_context, message): def notify(_context, message):
"""Notifies the recipient of the desired event given the model""" """Notifies the recipient of the desired event given the model."""
pass pass

View File

@ -0,0 +1,77 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A temporary helper which emulates oslo.messaging.Notifier.
This helper method allows us to do the tedious porting to the new Notifier API
as a standalone commit so that the commit which switches us to oslo.messaging
is smaller and easier to review. This file will be removed as part of that
commit.
"""
from oslo.config import cfg
from manila.openstack.common.notifier import api as notifier_api
CONF = cfg.CONF
class Notifier(object):
def __init__(self, publisher_id):
super(Notifier, self).__init__()
self.publisher_id = publisher_id
_marker = object()
def prepare(self, publisher_id=_marker):
ret = self.__class__(self.publisher_id)
if publisher_id is not self._marker:
ret.publisher_id = publisher_id
return ret
def _notify(self, ctxt, event_type, payload, priority):
notifier_api.notify(ctxt,
self.publisher_id,
event_type,
priority,
payload)
def audit(self, ctxt, event_type, payload):
# No audit in old notifier.
self._notify(ctxt, event_type, payload, 'INFO')
def debug(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'DEBUG')
def info(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'INFO')
def warn(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'WARN')
warning = warn
def error(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'ERROR')
def critical(self, ctxt, event_type, payload):
self._notify(ctxt, event_type, payload, 'CRITICAL')
def get_notifier(service=None, host=None, publisher_id=None):
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return Notifier(publisher_id)
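
A usage sketch of the shim; the module path is an assumption, since the file name is not visible in this view, and the context and payload values are illustrative:

from manila.openstack.common.notifier import proxy  # assumed module name

notifier = proxy.get_notifier(service='share', host='node-1')
notifier.info(None, 'share.create.end', {'share_id': 'fake-id'})

# prepare() clones the notifier with a different publisher_id.
scheduler_notifier = notifier.prepare(publisher_id='scheduler.node-1')
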

View File

@ -16,7 +16,7 @@
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common import context as req_context from manila.openstack.common import context as req_context
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _LE
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import rpc from manila.openstack.common import rpc
@ -24,14 +24,14 @@ LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt( notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ], 'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications') help='AMQP topic used for OpenStack notifications')
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opt(notification_topic_opt) CONF.register_opt(notification_topic_opt)
def notify(context, message): def notify(context, message):
"""Sends a notification via RPC""" """Sends a notification via RPC."""
if not context: if not context:
context = req_context.get_admin_context() context = req_context.get_admin_context()
priority = message.get('priority', priority = message.get('priority',
@ -42,5 +42,6 @@ def notify(context, message):
try: try:
rpc.notify(context, topic, message) rpc.notify(context, topic, message)
except Exception: except Exception:
LOG.exception(_("Could not send notification to %(topic)s. " LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals()) "Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -18,7 +18,7 @@
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common import context as req_context from manila.openstack.common import context as req_context
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _LE
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import rpc from manila.openstack.common import rpc
@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt( notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ], 'topics', default=['notifications', ],
help='AMQP topic(s) used for openstack notifications') help='AMQP topic(s) used for OpenStack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2', opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2') title='Options for rpc_notifier2')
@ -37,7 +37,7 @@ CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message): def notify(context, message):
"""Sends a notification via RPC""" """Sends a notification via RPC."""
if not context: if not context:
context = req_context.get_admin_context() context = req_context.get_admin_context()
priority = message.get('priority', priority = message.get('priority',
@ -48,5 +48,6 @@ def notify(context, message):
try: try:
rpc.notify(context, topic, message, envelope=True) rpc.notify(context, topic, message, envelope=True)
except Exception: except Exception:
LOG.exception(_("Could not send notification to %(topic)s. " LOG.exception(_LE("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals()) "Payload=%(message)s"),
{"topic": topic, "message": message})

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
NOTIFICATIONS = [] NOTIFICATIONS = []

File diff suppressed because it is too large

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation. # Copyright 2011 OpenStack Foundation.
# All Rights Reserved. # All Rights Reserved.
# #
@ -19,6 +17,8 @@
System-level utilities and helper functions. System-level utilities and helper functions.
""" """
import errno
import logging as stdlib_logging
import os import os
import random import random
import shlex import shlex
@ -26,6 +26,7 @@ import signal
from eventlet.green import subprocess from eventlet.green import subprocess
from eventlet import greenthread from eventlet import greenthread
import six
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
@ -34,6 +35,11 @@ from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception): class UnknownArgumentError(Exception):
def __init__(self, message=None): def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message) super(UnknownArgumentError, self).__init__(message)
@ -49,11 +55,18 @@ class ProcessExecutionError(Exception):
self.description = description self.description = description
if description is None: if description is None:
description = "Unexpected error while running command." description = _("Unexpected error while running command.")
if exit_code is None: if exit_code is None:
exit_code = '-' exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" message = _('%(description)s\n'
% (description, cmd, exit_code, stdout, stderr)) 'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message) super(ProcessExecutionError, self).__init__(message)
@ -69,14 +82,17 @@ def _subprocess_setup():
def execute(*cmd, **kwargs): def execute(*cmd, **kwargs):
""" """Helper method to shell out and execute a command through subprocess.
Helper method to shell out and execute a command through subprocess with
optional retry. Allows optional retry.
:param cmd: Passed to subprocess.Popen. :param cmd: Passed to subprocess.Popen.
:type cmd: string :type cmd: string
:param process_input: Send to opened process. :param process_input: Send to opened process.
:type proces_input: string :type process_input: string
:param env_variables: Environment variables and their values that
will be set for the process.
:type env_variables: dict
:param check_exit_code: Single bool, int, or list of allowed exit :param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless :class:`ProcessExecutionError` unless
@ -97,6 +113,9 @@ def execute(*cmd, **kwargs):
:param shell: whether or not there should be a shell used to :param shell: whether or not there should be a shell used to
execute this command. Defaults to false. execute this command. Defaults to false.
:type shell: boolean :type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution :returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on :raises: :class:`UnknownArgumentError` on
receiving unknown arguments receiving unknown arguments
@ -104,6 +123,7 @@ def execute(*cmd, **kwargs):
""" """
process_input = kwargs.pop('process_input', None) process_input = kwargs.pop('process_input', None)
env_variables = kwargs.pop('env_variables', None)
check_exit_code = kwargs.pop('check_exit_code', [0]) check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True) delay_on_retry = kwargs.pop('delay_on_retry', True)
@ -111,6 +131,7 @@ def execute(*cmd, **kwargs):
run_as_root = kwargs.pop('run_as_root', False) run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '') root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False) shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool): if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code ignore_exit_code = not check_exit_code
@ -118,15 +139,15 @@ def execute(*cmd, **kwargs):
elif isinstance(check_exit_code, int): elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code] check_exit_code = [check_exit_code]
if len(kwargs): if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args ' raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs) 'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0: if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper: if not root_helper:
raise NoRootWrapSpecified( raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root ' message=_('Command requested root, but did not '
'helper.')) 'specify a root helper.'))
cmd = shlex.split(root_helper) + list(cmd) cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd) cmd = map(str, cmd)
@ -134,7 +155,8 @@ def execute(*cmd, **kwargs):
while attempts > 0: while attempts > 0:
attempts -= 1 attempts -= 1
try: try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) LOG.log(loglevel, 'Running cmd (subprocess): %s',
logging.mask_password(' '.join(cmd)))
_PIPE = subprocess.PIPE # pylint: disable=E1101 _PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt': if os.name == 'nt':
@ -150,28 +172,37 @@ def execute(*cmd, **kwargs):
stderr=_PIPE, stderr=_PIPE,
close_fds=close_fds, close_fds=close_fds,
preexec_fn=preexec_fn, preexec_fn=preexec_fn,
shell=shell) shell=shell,
env=env_variables)
result = None result = None
if process_input is not None: for _i in six.moves.range(20):
result = obj.communicate(process_input) # NOTE(russellb) 20 is an arbitrary number of retries to
else: # prevent any chance of looping forever here.
result = obj.communicate() try:
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EINTR):
continue
raise
break
obj.stdin.close() # pylint: disable=E1101 obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101
if _returncode: LOG.log(loglevel, 'Result was %s' % _returncode)
LOG.debug(_('Result was %s') % _returncode) if not ignore_exit_code and _returncode not in check_exit_code:
if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result
(stdout, stderr) = result raise ProcessExecutionError(exit_code=_returncode,
raise ProcessExecutionError(exit_code=_returncode, stdout=stdout,
stdout=stdout, stderr=stderr,
stderr=stderr, cmd=' '.join(cmd))
cmd=' '.join(cmd))
return result return result
except ProcessExecutionError: except ProcessExecutionError:
if not attempts: if not attempts:
raise raise
else: else:
LOG.debug(_('%r failed. Retrying.'), cmd) LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry: if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0) greenthread.sleep(random.randint(20, 200) / 100.0)
finally: finally:
@ -179,3 +210,63 @@ def execute(*cmd, **kwargs):
# call clean something up in between calls, without # call clean something up in between calls, without
# it two execute calls in a row hangs the second one # it two execute calls in a row hangs the second one
greenthread.sleep(0) greenthread.sleep(0)
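
A usage sketch of execute() with the newly added keyword arguments; the module path manila.openstack.common.processutils is assumed from the package layout used elsewhere in this sync:

from manila.openstack.common import processutils

out, err = processutils.execute('env',
                                env_variables={'LC_ALL': 'C'},
                                check_exit_code=[0],
                                attempts=2,
                                delay_on_retry=True)
print(out)
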
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', six.text_type(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
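
trycmd() folds the failure into the err string instead of raising, which suits callers that only check whether stderr is empty; a brief sketch (same assumed module path as above):

out, err = processutils.trycmd('ls', '/nonexistent', discard_warnings=True)
if err:
    # The ProcessExecutionError text lands here instead of propagating.
    print('command failed: %s' % err)
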
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
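
A sketch with paramiko, whose SSHClient.exec_command() returns the three streams ssh_execute() expects; the host and username are placeholders:

import paramiko

from manila.openstack.common import processutils

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('203.0.113.10', username='manila')

stdout, stderr = processutils.ssh_execute(client, 'uname -a')
print(stdout)
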

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -25,14 +23,10 @@ For some wrappers that add message versioning to rpc, see:
rpc.proxy rpc.proxy
""" """
import inspect
import logging
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common.gettextutils import _
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import local from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -56,13 +50,12 @@ rpc_opts = [
help='Seconds to wait before a cast expires (TTL). ' help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'), 'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules', cfg.ListOpt('allowed_rpc_exception_modules',
default=['manila.openstack.common.exception', default=['nova.exception',
'nova.exception', 'cinder.exception',
'manila.exception',
'exceptions', 'exceptions',
], ],
help='Modules of exceptions that are permitted to be recreated' help='Modules of exceptions that are permitted to be recreated'
'upon receiving exception data from an rpc call.'), ' upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit', cfg.BoolOpt('fake_rabbit',
default=False, default=False,
help='If passed, use a fake RabbitMQ provider'), help='If passed, use a fake RabbitMQ provider'),
@ -96,24 +89,7 @@ def create_connection(new=True):
return _get_impl().create_connection(CONF, new=new) return _get_impl().create_connection(CONF, new=new)
def _check_for_lock(): def call(context, topic, msg, timeout=None):
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
LOG.warn(_('A RPC is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something. """Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this :param context: Information that identifies the user that has made this
@ -127,16 +103,12 @@ def call(context, topic, msg, timeout=None, check_for_lock=False):
"args" : dict_of_kwargs } "args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout. :param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option. If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: A dict from the remote method. :returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response :raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached. is not received before the timeout is reached.
""" """
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout) return _get_impl().call(CONF, context, topic, msg, timeout)
@ -179,7 +151,7 @@ def fanout_cast(context, topic, msg):
return _get_impl().fanout_cast(CONF, context, topic, msg) return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False): def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator. """Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in In this case, the remote method will be returning multiple values in
@ -197,8 +169,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"args" : dict_of_kwargs } "args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout. :param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option. If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is :returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value an index that starts at 0 and increases by one for each value
@ -208,8 +178,6 @@ def multicall(context, topic, msg, timeout=None, check_for_lock=False):
:raises: openstack.common.rpc.common.Timeout if a complete response :raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached. is not received before the timeout is reached.
""" """
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout) return _get_impl().multicall(CONF, context, topic, msg, timeout)
@ -228,7 +196,7 @@ def notify(context, topic, msg, envelope=False):
def cleanup(): def cleanup():
"""Clean up resoruces in use by implementation. """Clean up resources in use by implementation.
Clean up any resources that have been allocated by the RPC implementation. Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function This is typically open connections to a messaging service. This function

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -20,9 +18,9 @@
""" """
Shared code between AMQP based openstack.common.rpc implementations. Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP. The code in this module is shared between the rpc implementations based on
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
AMQP, but is deprecated and predates this code. uses AMQP, but is deprecated and predates this code.
""" """
import collections import collections
@ -34,24 +32,26 @@ from eventlet import greenpool
from eventlet import pools from eventlet import pools
from eventlet import queue from eventlet import queue
from eventlet import semaphore from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg from oslo.config import cfg
import six
from manila.openstack.common import excutils from manila.openstack.common import excutils
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _, _LE
from manila.openstack.common import local from manila.openstack.common import local
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [ amqp_opts = [
cfg.BoolOpt('amqp_rpc_single_reply_queue', cfg.BoolOpt('amqp_durable_queues',
default=False, default=False,
help='Enable a fast single reply queue if using AMQP based ' deprecated_name='rabbit_durable_queues',
'RPC like RabbitMQ or Qpid.'), deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
] ]
cfg.CONF.register_opts(amqp_opts) cfg.CONF.register_opts(amqp_opts)
@ -72,7 +72,7 @@ class Pool(pools.Pool):
# TODO(comstud): Timeout connections not used in a while # TODO(comstud): Timeout connections not used in a while
def create(self): def create(self):
LOG.debug(_('Pool creating new connection')) LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf) return self.connection_cls(self.conf)
def empty(self): def empty(self):
@ -83,7 +83,7 @@ class Pool(pools.Pool):
# is the above "while loop" gets all the cached connections from the # is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool # pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the # leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the teatDown() method. In the run # pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py # time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and # just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem. # the leakage is not a problem.
@ -102,19 +102,19 @@ def get_connection_pool(conf, connection_cls):
class ConnectionContext(rpc_common.Connection): class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of """The class that is actually returned to the create_connection() caller.
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new This is essentially a wrapper around Connection that supports 'with'.
Connection, or one from a pool. The function will also catch It can also return a new Connection, or one from a pool.
when an instance of this class is to be deleted. With that
we can return Connections to the pool on exceptions and so The function will also catch when an instance of this class is to be
forth without making the caller be responsible for catching deleted. With that we can return Connections to the pool on exceptions
them. If possible the function makes sure to return a and so forth without making the caller be responsible for catching them.
connection to the pool. If possible the function makes sure to return a connection to the pool.
""" """
def __init__(self, conf, connection_pool, pooled=True, server_params=None): def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool""" """Create a new connection, or get one from the pool."""
self.connection = None self.connection = None
self.conf = conf self.conf = conf
self.connection_pool = connection_pool self.connection_pool = connection_pool
@ -127,7 +127,7 @@ class ConnectionContext(rpc_common.Connection):
self.pooled = pooled self.pooled = pooled
def __enter__(self): def __enter__(self):
"""When with ConnectionContext() is used, return self""" """When with ConnectionContext() is used, return self."""
return self return self
def _done(self): def _done(self):
@ -165,17 +165,19 @@ class ConnectionContext(rpc_common.Connection):
def create_worker(self, topic, proxy, pool_name): def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name) self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name): def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback, self.connection.join_consumer_pool(callback,
pool_name, pool_name,
topic, topic,
exchange_name) exchange_name,
ack_on_error)
def consume_in_thread(self): def consume_in_thread(self):
self.connection.consume_in_thread() return self.connection.consume_in_thread()
def __getattr__(self, key): def __getattr__(self, key):
"""Proxy all other calls to the Connection instance""" """Proxy all other calls to the Connection instance."""
if self.connection: if self.connection:
return getattr(self.connection, key) return getattr(self.connection, key)
else: else:
@ -183,11 +185,11 @@ class ConnectionContext(rpc_common.Connection):
class ReplyProxy(ConnectionContext): class ReplyProxy(ConnectionContext):
""" Connection class for RPC replies / callbacks """ """Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool): def __init__(self, conf, connection_pool):
self._call_waiters = {} self._call_waiters = {}
self._num_call_waiters = 0 self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10 self._num_call_waiters_wrn_threshold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False) super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data) self.declare_direct_consumer(self._reply_q, self._process_data)
@ -197,18 +199,20 @@ class ReplyProxy(ConnectionContext):
msg_id = message_data.pop('_msg_id', None) msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id) waiter = self._call_waiters.get(msg_id)
if not waiter: if not waiter:
LOG.warn(_('no calling threads waiting for msg_id : %s' LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %s') % (msg_id, message_data)) ', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % self._call_waiters)
else: else:
waiter.put(message_data) waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id): def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1 self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold: if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
LOG.warn(_('Number of call waiters is greater than warning ' LOG.warn(_('Number of call waiters is greater than warning '
'threshhold: %d. There could be a MulticallProxyWaiter ' 'threshold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold) 'leak.') % self._num_call_waiters_wrn_threshold)
self._num_call_waiters_wrn_threshhold *= 2 self._num_call_waiters_wrn_threshold *= 2
self._call_waiters[msg_id] = waiter self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id): def del_call_waiter(self, msg_id):
@ -231,18 +235,13 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure = rpc_common.serialize_remote_exception(failure, failure = rpc_common.serialize_remote_exception(failure,
log_failure) log_failure)
try: msg = {'result': reply, 'failure': failure}
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending: if ending:
msg['ending'] = True msg['ending'] = True
_add_unique_id(msg) _add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the # If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue. # reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibilty. # Otherwise use the msg_id for backward compatibility.
if reply_q: if reply_q:
msg['_msg_id'] = msg_id msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg)) conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
@ -251,7 +250,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
class RpcContext(rpc_common.CommonRpcContext): class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call""" """Context that supports replying to a rpc.call."""
def __init__(self, **kwargs): def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None) self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None) self.reply_q = kwargs.pop('reply_q', None)
@ -288,7 +287,7 @@ def unpack_context(conf, msg):
context_dict['reply_q'] = msg.pop('_reply_q', None) context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict) ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) rpc_common._safe_log(LOG.debug, 'unpacked context: %s', ctx.to_dict())
return ctx return ctx
@ -301,8 +300,14 @@ def pack_context(msg, context):
for args at some point. for args at some point.
""" """
context_d = dict([('_context_%s' % key, value) if isinstance(context, dict):
for (key, value) in context.to_dict().iteritems()]) context_d = dict([('_context_%s' % key, value)
for (key, value) in six.iteritems(context)])
else:
context_d = dict([('_context_%s' % key, value)
for (key, value) in
six.iteritems(context.to_dict())])
msg.update(context_d) msg.update(context_d)
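
To make the new dict branch concrete, a sketch of what pack_context() produces for a plain dict context (values are illustrative):

    msg = {'method': 'ping', 'args': {}}
    context = {'user_id': 'u1', 'project_id': 'p1'}
    pack_context(msg, context)
    # msg now carries the context inline under prefixed keys:
    # {'method': 'ping', 'args': {},
    #  '_context_user_id': 'u1', '_context_project_id': 'p1'}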
@ -334,12 +339,13 @@ def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages.""" """Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id}) msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) LOG.debug('UNIQUE_ID is %s.' % (unique_id))
class _ThreadPoolWithWait(object): class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager used by """Base class for a delayed invocation manager.
the Connection class to start up green threads
Used by the Connection class to start up green threads
to handle incoming messages. to handle incoming messages.
""" """
@ -354,25 +360,48 @@ class _ThreadPoolWithWait(object):
class CallbackWrapper(_ThreadPoolWithWait): class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback to allow it to be invoked in a green """Wraps a straight callback.
thread.
Allows it to be invoked in a green thread.
""" """
def __init__(self, conf, callback, connection_pool): def __init__(self, conf, callback, connection_pool,
""" wait_for_consumers=False):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance :param conf: cfg.CONF instance
:param callback: a callable (probably a function) :param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by :param connection_pool: connection pool as returned by
get_connection_pool() get_connection_pool()
:param wait_for_consumers: wait for all green threads to
complete and raise the last
caught exception, if any.
""" """
super(CallbackWrapper, self).__init__( super(CallbackWrapper, self).__init__(
conf=conf, conf=conf,
connection_pool=connection_pool, connection_pool=connection_pool,
) )
self.callback = callback self.callback = callback
self.wait_for_consumers = wait_for_consumers
self.exc_info = None
def _wrap(self, message_data, **kwargs):
"""Wrap the callback invocation to catch exceptions.
"""
try:
self.callback(message_data, **kwargs)
except Exception:
self.exc_info = sys.exc_info()
def __call__(self, message_data): def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data) self.exc_info = None
self.pool.spawn_n(self._wrap, message_data)
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
six.reraise(self.exc_info[1], None, self.exc_info[2])
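
The wait_for_consumers path reduces to the following pattern; this is a standalone sketch using eventlet and six directly, not code from the sync itself:

    import sys

    import eventlet
    import six

    pool = eventlet.GreenPool()
    exc_info = [None]

    def _wrap(data):
        try:
            raise ValueError(data)        # stand-in for the real callback
        except Exception:
            exc_info[0] = sys.exc_info()  # remember the failure

    pool.spawn_n(_wrap, 'boom')
    pool.waitall()                        # wait_for_consumers=True behaviour
    if exc_info[0]:
        six.reraise(*exc_info[0])         # surface it to the caller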
class ProxyCallback(_ThreadPoolWithWait): class ProxyCallback(_ThreadPoolWithWait):
@ -403,7 +432,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# the previous context is stored in local.store.context # the previous context is stored in local.store.context
if hasattr(local.store, 'context'): if hasattr(local.store, 'context'):
del local.store.context del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data) rpc_common._safe_log(LOG.debug, 'received %s', message_data)
self.msg_id_cache.check_duplicate_message(message_data) self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data) ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method') method = message_data.get('method')
@ -440,7 +469,7 @@ class ProxyCallback(_ThreadPoolWithWait):
# This final None tells multicall that it is done. # This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool) ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e: except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') % LOG.debug('Expected exception during message handling (%s)' %
e._exc_info[1]) e._exc_info[1])
ctxt.reply(None, e._exc_info, ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool, connection_pool=self.connection_pool,
@ -448,7 +477,7 @@ class ProxyCallback(_ThreadPoolWithWait):
except Exception: except Exception:
# sys.exc_info() is deleted by LOG.exception(). # sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info() exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'), LOG.error(_LE('Exception during message handling'),
exc_info=exc_info) exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool) ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
@ -490,7 +519,7 @@ class MulticallProxyWaiter(object):
return result return result
def __iter__(self): def __iter__(self):
"""Return a result until we get a reply with an 'ending" flag""" """Return a result until we get a reply with an 'ending' flag."""
if self._done: if self._done:
raise StopIteration raise StopIteration
while True: while True:
@ -512,61 +541,8 @@ class MulticallProxyWaiter(object):
yield result yield result
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(timeout=timeout or
conf.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
self._conf = conf
self.msg_id_cache = _MsgIdCache()
def done(self):
if self._done:
return
self._done = True
self._iterator.close()
self._iterator = None
self._connection.close()
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
self._result = data['result']
def __iter__(self):
"""Return a result until we get a 'None' response from consumer"""
if self._done:
raise StopIteration
while True:
try:
self._iterator.next()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
result = self._result
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool): def create_connection(conf, new, connection_pool):
"""Create a connection""" """Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new) return ConnectionContext(conf, connection_pool, pooled=not new)
@ -575,36 +551,20 @@ _reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool): def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times.""" """Make a call that returns multiple times."""
# TODO(pekowski): Remove all these comments in Havana. LOG.debug('Making synchronous call on %s ...', topic)
# For amqp_rpc_single_reply_queue = False,
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
# For amqp_rpc_single_reply_queue = True,
# The 'with' statement is mandatory for closing the connection
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id}) msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id)) LOG.debug('MSG_ID is %s' % (msg_id))
_add_unique_id(msg) _add_unique_id(msg)
pack_context(msg, context) pack_context(msg, context)
# TODO(pekowski): Remove this flag and the code under the if clause with _reply_proxy_create_sem:
# in Havana. if not connection_pool.reply_proxy:
if not conf.amqp_rpc_single_reply_queue: connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
conn = ConnectionContext(conf, connection_pool) msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallWaiter(conf, conn, timeout) wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
conn.declare_direct_consumer(msg_id, wait_msg) with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout) conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
else:
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg return wait_msg
@ -620,7 +580,7 @@ def call(conf, context, topic, msg, timeout, connection_pool):
def cast(conf, context, topic, msg, connection_pool): def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response.""" """Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic) LOG.debug('Making asynchronous cast on %s...', topic)
_add_unique_id(msg) _add_unique_id(msg)
pack_context(msg, context) pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn: with ConnectionContext(conf, connection_pool) as conn:
@ -629,7 +589,7 @@ def cast(conf, context, topic, msg, connection_pool):
def fanout_cast(conf, context, topic, msg, connection_pool): def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response.""" """Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...')) LOG.debug('Making asynchronous fanout cast...')
_add_unique_id(msg) _add_unique_id(msg)
pack_context(msg, context) pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn: with ConnectionContext(conf, connection_pool) as conn:
@ -657,7 +617,7 @@ def fanout_cast_to_server(conf, context, server_params, topic, msg,
def notify(conf, context, topic, msg, connection_pool, envelope): def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic.""" """Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'), LOG.debug('Sending %(event_type)s on %(topic)s',
dict(event_type=msg.get('event_type'), dict(event_type=msg.get('event_type'),
topic=topic)) topic=topic))
_add_unique_id(msg) _add_unique_id(msg)

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -22,18 +20,21 @@ import sys
import traceback import traceback
from oslo.config import cfg from oslo.config import cfg
import six
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _, _LE
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common import local from manila.openstack.common import local
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import versionutils
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
_RPC_ENVELOPE_VERSION = '2.0'
'''RPC Envelope Version. '''RPC Envelope Version.
This version number applies to the top level structure of messages sent out. This version number applies to the top level structure of messages sent out.
@ -46,7 +47,7 @@ This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg(). deserialize_msg().
The current message format (version 2.0) is very simple. It is: The current message format (version 2.0) is very simple. It is::
{ {
'oslo.version': <RPC Envelope Version as a String>, 'oslo.version': <RPC Envelope Version as a String>,
@ -64,30 +65,31 @@ We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict. to the messaging libraries as a dict.
''' '''
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version' _VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message' _MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
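
Spelling the envelope description out in code, this is roughly what a version-2.0 envelope holds for a trivial payload (the stdlib json module stands in for the project's JSON helper):

    import json

    payload = {'method': 'ping', 'args': {}}
    envelope = {
        'oslo.version': '2.0',              # _RPC_ENVELOPE_VERSION
        'oslo.message': json.dumps(payload),
    }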
class RPCException(Exception): class RPCException(Exception):
message = _("An unknown RPC related exception occurred.") msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs): def __init__(self, message=None, **kwargs):
self.kwargs = kwargs self.kwargs = kwargs
if not message: if not message:
try: try:
message = self.message % kwargs message = self.msg_fmt % kwargs
except Exception: except Exception:
# kwargs doesn't match a variable in the message # kwargs doesn't match a variable in the message
# log the issue and the kwargs # log the issue and the kwargs
LOG.exception(_('Exception in string format operation')) LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems(): for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value)) LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened # at least get the core message out if something happened
message = self.message message = self.msg_fmt
super(RPCException, self).__init__(message) super(RPCException, self).__init__(message)
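
The message-to-msg_fmt rename matters because subclasses define msg_fmt and let the base class interpolate kwargs; a hypothetical subclass as illustration:

    class ShareNotFound(RPCException):      # hypothetical subclass
        msg_fmt = _("Share %(share_id)s could not be found.")

    # RPCException.__init__ fills the template from kwargs:
    #     raise ShareNotFound(share_id='abc123')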
@ -101,7 +103,7 @@ class RemoteError(RPCException):
contains all of the relevant info. contains all of the relevant info.
""" """
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.") msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None): def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type self.exc_type = exc_type
@ -118,12 +120,13 @@ class Timeout(RPCException):
This exception is raised if the rpc_response_timeout is reached while This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side. waiting for a response from the remote side.
""" """
message = _('Timeout while waiting on RPC response - ' msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" ' 'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"') 'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None): def __init__(self, info=None, topic=None, method=None):
""" """Initiates Timeout object.
:param info: Extra info to convey to the user :param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to :param topic: The topic that the rpc call was sent to
:param rpc_method_name: The name of the rpc method being :param rpc_method_name: The name of the rpc method being
@ -140,23 +143,27 @@ class Timeout(RPCException):
class DuplicateMessageError(RPCException): class DuplicateMessageError(RPCException):
message = _("Found duplicate message(%(msg_id)s). Skipping it.") msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException): class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.") msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException): class UnsupportedRpcVersion(RPCException):
message = _("Specified RPC version, %(version)s, not supported by " msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.") "this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException): class UnsupportedRpcEnvelopeVersion(RPCException):
message = _("Specified RPC envelope version, %(version)s, " msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.") "not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object): class Connection(object):
"""A connection, returned by rpc.create_connection(). """A connection, returned by rpc.create_connection().
@ -216,9 +223,9 @@ class Connection(object):
raise NotImplementedError() raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name): def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers for a given topic from """Register as a member of a group of consumers.
the specified exchange.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message. Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than A message will be delivered to multiple pools, if more than
@ -253,41 +260,24 @@ class Connection(object):
def _safe_log(log_func, msg, msg_data): def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging.""" """Sanitizes the msg_data field before logging."""
SANITIZE = {'set_admin_password': [('args', 'new_pass')], SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
'run_instance': [('args', 'admin_password')],
'route_message': [('args', 'message', 'args', 'method_info',
'method_kwargs', 'password'),
('args', 'message', 'args', 'method_info',
'method_kwargs', 'admin_password')]}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE def _fix_passwords(d):
has_context_token = '_context_auth_token' in msg_data """Sanitizes the password fields in the dictionary."""
has_token = 'auth_token' in msg_data for k in six.iterkeys(d):
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], list):
for e in d[k]:
if isinstance(e, dict):
_fix_passwords(e)
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
if not any([has_method, has_context_token, has_token]): return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
return log_func(msg, msg_data)
msg_data = copy.deepcopy(msg_data)
if has_method:
for arg in SANITIZE.get(msg_data['method'], []):
try:
d = msg_data
for elem in arg[:-1]:
d = d[elem]
d[arg[-1]] = '<SANITIZED>'
except KeyError, e:
LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
{'item': arg,
'err': e})
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
if has_token:
msg_data['auth_token'] = '<SANITIZED>'
return log_func(msg, msg_data)
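
A standalone, runnable sketch of the new sanitizing behaviour (the helper is re-stated here so it can run outside the module):

    import copy

    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']

    def _fix_passwords(d):
        for k in list(d):
            if 'password' in k.lower() or k.lower() in SANITIZE:
                d[k] = '<SANITIZED>'
            elif isinstance(d[k], dict):
                _fix_passwords(d[k])
            elif isinstance(d[k], list):
                for e in d[k]:
                    if isinstance(e, dict):
                        _fix_passwords(e)
        return d

    msg = {'method': 'run', 'args': {'admin_password': 's3cret'}}
    print(_fix_passwords(copy.deepcopy(msg)))
    # {'method': 'run', 'args': {'admin_password': '<SANITIZED>'}}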
def serialize_remote_exception(failure_info, log_failure=True): def serialize_remote_exception(failure_info, log_failure=True):
@ -299,17 +289,27 @@ def serialize_remote_exception(failure_info, log_failure=True):
tb = traceback.format_exception(*failure_info) tb = traceback.format_exception(*failure_info)
failure = failure_info[1] failure = failure_info[1]
if log_failure: if log_failure:
LOG.error(_("Returning exception %s to caller"), unicode(failure)) LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb) LOG.error(tb)
kwargs = {} kwargs = {}
if hasattr(failure, 'kwargs'): if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = { data = {
'class': str(failure.__class__.__name__), 'class': cls_name,
'module': str(failure.__class__.__module__), 'module': mod_name,
'message': unicode(failure), 'message': six.text_type(failure),
'tb': tb, 'tb': tb,
'args': failure.args, 'args': failure.args,
'kwargs': kwargs 'kwargs': kwargs
@ -345,8 +345,9 @@ def deserialize_remote_exception(conf, data):
ex_type = type(failure) ex_type = type(failure)
str_override = lambda self: message str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override}) {'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try: try:
# NOTE(ameade): Dynamically create a new exception type and swap it in # NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined # as the new type for the exception. This only works on user defined
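
The dynamic `_Remote` type swap boils down to this pattern (standalone sketch):

    class MyError(Exception):       # stand-in for a user-defined exception
        pass

    failure = MyError('boom')
    message = 'Remote error: boom'

    str_override = lambda self: message
    new_type = type(MyError.__name__ + '_Remote', (MyError,),
                    {'__str__': str_override, '__unicode__': str_override})
    new_type.__module__ = '%s%s' % (MyError.__module__, '_Remote')
    failure.__class__ = new_type    # only works for user-defined classes
    print(str(failure))             # -> Remote error: boom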
@ -408,10 +409,11 @@ class CommonRpcContext(object):
class ClientException(Exception): class ClientException(Exception):
"""This encapsulates some actual exception that is expected to be """Encapsulates actual exception expected to be hit by a RPC proxy object.
hit by an RPC proxy object. Merely instantiating it records the
current exception information, which will be passed back to the Merely instantiating it records the current exception information, which
RPC client without exceptional logging.""" will be passed back to the RPC client without exceptional logging.
"""
def __init__(self): def __init__(self):
self._exc_info = sys.exc_info() self._exc_info = sys.exc_info()
@ -419,7 +421,7 @@ class ClientException(Exception):
def catch_client_exception(exceptions, func, *args, **kwargs): def catch_client_exception(exceptions, func, *args, **kwargs):
try: try:
return func(*args, **kwargs) return func(*args, **kwargs)
except Exception, e: except Exception as e:
if type(e) in exceptions: if type(e) in exceptions:
raise ClientException() raise ClientException()
else: else:
@ -428,11 +430,13 @@ def catch_client_exception(exceptions, func, *args, **kwargs):
def client_exceptions(*exceptions): def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions. """Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal, of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.""" ClientException, which is used internally by the RPC layer.
"""
def outer(func): def outer(func):
def inner(*args, **kwargs): def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs) return catch_client_exception(exceptions, func, *args, **kwargs)
@ -440,19 +444,15 @@ def client_exceptions(*exceptions):
return outer return outer
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version): def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible. """Determine whether versions are compatible.
:param imp_version: The version implemented :param imp_version: The version implemented
:param version: The version requested by an incoming message. :param version: The version requested by an incoming message.
""" """
version_parts = version.split('.') return versionutils.is_compatible(version, imp_version)
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
return True
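
The delegated check is the usual major.minor rule; a standalone sketch of equivalent logic:

    def is_compatible(requested, current):
        """True if `current` can serve a message at `requested` version."""
        req_major, req_minor = (int(p) for p in requested.split('.')[:2])
        cur_major, cur_minor = (int(p) for p in current.split('.')[:2])
        return req_major == cur_major and req_minor <= cur_minor

    assert is_compatible('1.0', '1.2')       # older request, newer impl: OK
    assert not is_compatible('2.0', '1.2')   # major mismatch: rejected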
def serialize_msg(raw_msg): def serialize_msg(raw_msg):

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc. # Copyright 2012 Red Hat, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -83,7 +81,10 @@ On the client side, the same changes should be made as in example 1. The
minimum version that supports the new parameter should be specified. minimum version that supports the new parameter should be specified.
""" """
import six
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
from manila.openstack.common.rpc import serializer as rpc_serializer
class RpcDispatcher(object): class RpcDispatcher(object):
@ -93,16 +94,38 @@ class RpcDispatcher(object):
contains a list of underlying managers that have an API_VERSION attribute. contains a list of underlying managers that have an API_VERSION attribute.
""" """
def __init__(self, callbacks): def __init__(self, callbacks, serializer=None):
"""Initialize the rpc dispatcher. """Initialize the rpc dispatcher.
:param callbacks: List of proxy objects that are an instance :param callbacks: List of proxy objects that are an instance
of a class with rpc methods exposed. Each proxy of a class with rpc methods exposed. Each proxy
object should have an RPC_API_VERSION attribute. object should have an RPC_API_VERSION attribute.
:param serializer: The Serializer object that will be used to
deserialize arguments before the method call and
to serialize the result after it returns.
""" """
self.callbacks = callbacks self.callbacks = callbacks
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcDispatcher, self).__init__() super(RpcDispatcher, self).__init__()
def _deserialize_args(self, context, kwargs):
"""Helper method called to deserialize args before dispatch.
This calls our serializer on each argument, returning a new set of
args that have been deserialized.
:param context: The request context
:param kwargs: The arguments to be deserialized
:returns: A new set of deserialized args
"""
new_kwargs = dict()
for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.deserialize_entity(context,
arg)
return new_kwargs
def dispatch(self, ctxt, version, method, namespace, **kwargs): def dispatch(self, ctxt, version, method, namespace, **kwargs):
"""Dispatch a message based on a requested version. """Dispatch a message based on a requested version.
@ -145,7 +168,9 @@ class RpcDispatcher(object):
if not hasattr(proxyobj, method): if not hasattr(proxyobj, method):
continue continue
if is_compatible: if is_compatible:
return getattr(proxyobj, method)(ctxt, **kwargs) kwargs = self._deserialize_args(ctxt, kwargs)
result = getattr(proxyobj, method)(ctxt, **kwargs)
return self.serializer.serialize_entity(ctxt, result)
if had_compatible: if had_compatible:
raise AttributeError("No such RPC function '%s'" % method) raise AttributeError("No such RPC function '%s'" % method)
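
The serializer contract the dispatcher now consumes is just two methods; a pass-through implementation mirroring what a no-op serializer would look like:

    class PassThroughSerializer(object):
        """Minimal serializer: entities cross the wire unchanged."""

        def serialize_entity(self, context, entity):
            return entity

        def deserialize_entity(self, context, entity):
            return entity

    # dispatcher = RpcDispatcher([manager], serializer=PassThroughSerializer())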

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation # Copyright 2011 OpenStack Foundation
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Fake RPC implementation which calls proxy methods directly with no """Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests. queues. Casts will block, but this is very useful for tests.
""" """
@ -26,6 +25,7 @@ import json
import time import time
import eventlet import eventlet
import six
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
@ -69,7 +69,7 @@ class Consumer(object):
# Caller might have called ctxt.reply() manually # Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response: for (reply, failure) in ctxt._response:
if failure: if failure:
raise failure[0], failure[1], failure[2] six.reraise(failure[0], failure[1], failure[2])
res.append(reply) res.append(reply)
# if ending not 'sent'...we might have more data to # if ending not 'sent'...we might have more data to
# return from the function itself # return from the function itself
@ -122,7 +122,7 @@ class Connection(object):
def create_connection(conf, new=True): def create_connection(conf, new=True):
"""Create a connection""" """Create a connection."""
return Connection() return Connection()
@ -140,13 +140,13 @@ def multicall(conf, context, topic, msg, timeout=None):
if not method: if not method:
return return
args = msg.get('args', {}) args = msg.get('args', {})
version = msg.get('version', None) version = msg.get('version')
namespace = msg.get('namespace', None) namespace = msg.get('namespace')
try: try:
consumer = CONSUMERS[topic][0] consumer = CONSUMERS[topic][0]
except (KeyError, IndexError): except (KeyError, IndexError):
return iter([None]) raise rpc_common.Timeout("No consumers available")
else: else:
return consumer.call(context, version, method, namespace, args, return consumer.call(context, version, method, namespace, args,
timeout) timeout)
@ -179,14 +179,14 @@ def cleanup():
def fanout_cast(conf, context, topic, msg): def fanout_cast(conf, context, topic, msg):
"""Cast to all consumers of a topic""" """Cast to all consumers of a topic."""
check_serialize(msg) check_serialize(msg)
method = msg.get('method') method = msg.get('method')
if not method: if not method:
return return
args = msg.get('args', {}) args = msg.get('args', {})
version = msg.get('version', None) version = msg.get('version')
namespace = msg.get('namespace', None) namespace = msg.get('namespace')
for consumer in CONSUMERS.get(topic, []): for consumer in CONSUMERS.get(topic, []):
try: try:

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation # Copyright 2011 OpenStack Foundation
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -18,7 +16,6 @@ import functools
import itertools import itertools
import socket import socket
import ssl import ssl
import sys
import time import time
import uuid import uuid
@ -29,16 +26,22 @@ import kombu.connection
import kombu.entity import kombu.entity
import kombu.messaging import kombu.messaging
from oslo.config import cfg from oslo.config import cfg
import six
from manila.openstack.common.gettextutils import _ from manila.openstack.common import excutils
from manila.openstack.common.gettextutils import _, _LE, _LI
from manila.openstack.common import network_utils from manila.openstack.common import network_utils
from manila.openstack.common.rpc import amqp as rpc_amqp from manila.openstack.common.rpc import amqp as rpc_amqp
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
from manila.openstack.common import sslutils
kombu_opts = [ kombu_opts = [
cfg.StrOpt('kombu_ssl_version', cfg.StrOpt('kombu_ssl_version',
default='', default='',
help='SSL version to use (valid only if SSL enabled)'), help='If SSL is enabled, the SSL version to use. Valid '
'values are TLSv1, SSLv23 and SSLv3. SSLv2 might '
'be available on some distributions.'
),
cfg.StrOpt('kombu_ssl_keyfile', cfg.StrOpt('kombu_ssl_keyfile',
default='', default='',
help='SSL key file (valid only if SSL enabled)'), help='SSL key file (valid only if SSL enabled)'),
@ -47,8 +50,8 @@ kombu_opts = [
help='SSL cert file (valid only if SSL enabled)'), help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs', cfg.StrOpt('kombu_ssl_ca_certs',
default='', default='',
help=('SSL certification authority file ' help='SSL certification authority file '
'(valid only if SSL enabled)')), '(valid only if SSL enabled)'),
cfg.StrOpt('rabbit_host', cfg.StrOpt('rabbit_host',
default='localhost', default='localhost',
help='The RabbitMQ broker address where a single node is used'), help='The RabbitMQ broker address where a single node is used'),
@ -60,36 +63,33 @@ kombu_opts = [
help='RabbitMQ HA cluster host:port pairs'), help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl', cfg.BoolOpt('rabbit_use_ssl',
default=False, default=False,
help='connect over SSL for RabbitMQ'), help='Connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid', cfg.StrOpt('rabbit_userid',
default='guest', default='guest',
help='the RabbitMQ userid'), help='The RabbitMQ userid'),
cfg.StrOpt('rabbit_password', cfg.StrOpt('rabbit_password',
default='guest', default='guest',
help='the RabbitMQ password', help='The RabbitMQ password',
secret=True), secret=True),
cfg.StrOpt('rabbit_virtual_host', cfg.StrOpt('rabbit_virtual_host',
default='/', default='/',
help='the RabbitMQ virtual host'), help='The RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval', cfg.IntOpt('rabbit_retry_interval',
default=1, default=1,
help='how frequently to retry connecting with RabbitMQ'), help='How frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff', cfg.IntOpt('rabbit_retry_backoff',
default=2, default=2,
help='how long to backoff for between retries when connecting ' help='How long to backoff for between retries when connecting '
'to RabbitMQ'), 'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries', cfg.IntOpt('rabbit_max_retries',
default=0, default=0,
help='maximum retries with trying to connect to RabbitMQ ' help='Maximum number of RabbitMQ connection retries. '
'(the default of 0 implies an infinite retry count)'), 'Default is 0 (infinite retry count)'),
cfg.BoolOpt('rabbit_durable_queues',
default=False,
help='use durable queues in RabbitMQ'),
cfg.BoolOpt('rabbit_ha_queues', cfg.BoolOpt('rabbit_ha_queues',
default=False, default=False,
help='use H/A queues in RabbitMQ (x-ha-policy: all).' help='Use HA queues in RabbitMQ (x-ha-policy: all). '
'You need to wipe RabbitMQ database when ' 'If you change this option, you must wipe the '
'changing this option.'), 'RabbitMQ database.'),
] ]
@ -129,15 +129,40 @@ class ConsumerBase(object):
self.tag = str(tag) self.tag = str(tag)
self.kwargs = kwargs self.kwargs = kwargs
self.queue = None self.queue = None
self.ack_on_error = kwargs.get('ack_on_error', True)
self.reconnect(channel) self.reconnect(channel)
def reconnect(self, channel): def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect""" """Re-declare the queue after a rabbit reconnect."""
self.channel = channel self.channel = channel
self.kwargs['channel'] = channel self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs) self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare() self.queue.declare()
def _callback_handler(self, message, callback):
"""Call callback with deserialized message.
Messages that are processed without exception are ack'ed.
If the message processing generates an exception, it will be
ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
"""
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
if self.ack_on_error:
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
message.ack()
else:
LOG.exception(_LE("Failed to process message"
" ... will requeue."))
message.requeue()
else:
message.ack()
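
The ack/requeue decision reduces to this (standalone sketch with a stub message object):

    class FakeMessage(object):          # stub standing in for a kombu message
        payload = '{"method": "ping"}'

        def ack(self):
            print('acked')

        def requeue(self):
            print('requeued')

    def handle(message, callback, ack_on_error=True):
        try:
            callback(message.payload)
        except Exception:
            if ack_on_error:
                message.ack()      # drop the poisoned message
            else:
                message.requeue()  # let another consumer retry it
        else:
            message.ack()

    handle(FakeMessage(), lambda m: None)                       # acked
    handle(FakeMessage(), lambda m: 1 / 0, ack_on_error=False)  # requeued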
def consume(self, *args, **kwargs): def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will """Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the start the flow of messages from the queue. Using the
@ -150,8 +175,6 @@ class ConsumerBase(object):
If kwargs['nowait'] is True, then this call will block until If kwargs['nowait'] is True, then this call will block until
a message is read. a message is read.
Messages will automatically be acked if the callback doesn't
raise an exception
""" """
options = {'consumer_tag': self.tag} options = {'consumer_tag': self.tag}
@ -162,21 +185,15 @@ class ConsumerBase(object):
def _callback(raw_message): def _callback(raw_message):
message = self.channel.message_to_python(raw_message) message = self.channel.message_to_python(raw_message)
try: self._callback_handler(message, callback)
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()
self.queue.consume(*args, callback=_callback, **options) self.queue.consume(*args, callback=_callback, **options)
def cancel(self): def cancel(self):
"""Cancel the consuming from the queue, if it has started""" """Cancel the consuming from the queue, if it has started."""
try: try:
self.queue.cancel(self.tag) self.queue.cancel(self.tag)
except KeyError, e: except KeyError as e:
# NOTE(comstud): Kludge to get around a amqplib bug # NOTE(comstud): Kludge to get around a amqplib bug
if str(e) != "u'%s'" % self.tag: if str(e) != "u'%s'" % self.tag:
raise raise
@ -184,7 +201,7 @@ class ConsumerBase(object):
class DirectConsumer(ConsumerBase): class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'""" """Queue/consumer class for 'direct'."""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs): def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue. """Init a 'direct' queue.
@ -216,7 +233,7 @@ class DirectConsumer(ConsumerBase):
class TopicConsumer(ConsumerBase): class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'""" """Consumer class for 'topic'."""
def __init__(self, conf, channel, topic, callback, tag, name=None, def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs): exchange_name=None, **kwargs):
@ -233,9 +250,9 @@ class TopicConsumer(ConsumerBase):
Other kombu options may be passed as keyword arguments Other kombu options may be passed as keyword arguments
""" """
# Default options # Default options
options = {'durable': conf.rabbit_durable_queues, options = {'durable': conf.amqp_durable_queues,
'queue_arguments': _get_queue_arguments(conf), 'queue_arguments': _get_queue_arguments(conf),
'auto_delete': False, 'auto_delete': conf.amqp_auto_delete,
'exclusive': False} 'exclusive': False}
options.update(kwargs) options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
@ -253,7 +270,7 @@ class TopicConsumer(ConsumerBase):
class FanoutConsumer(ConsumerBase): class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'""" """Consumer class for 'fanout'."""
def __init__(self, conf, channel, topic, callback, tag, **kwargs): def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue. """Init a 'fanout' queue.
@ -286,7 +303,7 @@ class FanoutConsumer(ConsumerBase):
class Publisher(object): class Publisher(object):
"""Base Publisher class""" """Base Publisher class."""
def __init__(self, channel, exchange_name, routing_key, **kwargs): def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key, """Init the Publisher class with the exchange_name, routing_key,
@ -298,7 +315,7 @@ class Publisher(object):
self.reconnect(channel) self.reconnect(channel)
def reconnect(self, channel): def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection""" """Re-establish the Producer after a rabbit reconnection."""
self.exchange = kombu.entity.Exchange(name=self.exchange_name, self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs) **self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange, self.producer = kombu.messaging.Producer(exchange=self.exchange,
@ -306,7 +323,7 @@ class Publisher(object):
routing_key=self.routing_key) routing_key=self.routing_key)
def send(self, msg, timeout=None): def send(self, msg, timeout=None):
"""Send a message""" """Send a message."""
if timeout: if timeout:
# #
# AMQP TTL is in milliseconds when set in the header. # AMQP TTL is in milliseconds when set in the header.
@ -317,7 +334,7 @@ class Publisher(object):
class DirectPublisher(Publisher): class DirectPublisher(Publisher):
"""Publisher class for 'direct'""" """Publisher class for 'direct'."""
def __init__(self, conf, channel, msg_id, **kwargs): def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher. """init a 'direct' publisher.
@ -333,14 +350,14 @@ class DirectPublisher(Publisher):
class TopicPublisher(Publisher): class TopicPublisher(Publisher):
"""Publisher class for 'topic'""" """Publisher class for 'topic'."""
def __init__(self, conf, channel, topic, **kwargs): def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher. """init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults Kombu options may be passed as keyword args to override defaults
""" """
options = {'durable': conf.rabbit_durable_queues, options = {'durable': conf.amqp_durable_queues,
'auto_delete': False, 'auto_delete': conf.amqp_auto_delete,
'exclusive': False} 'exclusive': False}
options.update(kwargs) options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf) exchange_name = rpc_amqp.get_control_exchange(conf)
@ -352,7 +369,7 @@ class TopicPublisher(Publisher):
class FanoutPublisher(Publisher): class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'""" """Publisher class for 'fanout'."""
def __init__(self, conf, channel, topic, **kwargs): def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher. """init a 'fanout' publisher.
@ -367,10 +384,10 @@ class FanoutPublisher(Publisher):
class NotifyPublisher(TopicPublisher): class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'""" """Publisher class for 'notify'."""
def __init__(self, conf, channel, topic, **kwargs): def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
self.queue_arguments = _get_queue_arguments(conf) self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
@ -428,7 +445,7 @@ class Connection(object):
'virtual_host': self.conf.rabbit_virtual_host, 'virtual_host': self.conf.rabbit_virtual_host,
} }
for sp_key, value in server_params.iteritems(): for sp_key, value in six.iteritems(server_params):
p_key = server_params_to_kombu_params.get(sp_key, sp_key) p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value params[p_key] = value
@ -441,19 +458,24 @@ class Connection(object):
self.params_list = params_list self.params_list = params_list
brokers_count = len(self.params_list)
self.next_broker_indices = itertools.cycle(range(brokers_count))
self.memory_transport = self.conf.fake_rabbit self.memory_transport = self.conf.fake_rabbit
self.connection = None self.connection = None
self.reconnect() self.reconnect()
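The new next_broker_indices iterator replaces ad-hoc modular arithmetic in reconnect(): itertools.cycle keeps the rotation state on the connection itself, so each reconnect resumes at the next broker instead of re-deriving a position from a raw attempt counter. The qpid driver below gains the same iterator. A minimal standalone sketch of the idiom, with hypothetical broker URLs:

    import itertools

    brokers = ['amqp://rabbit1', 'amqp://rabbit2', 'amqp://rabbit3']
    next_broker_indices = itertools.cycle(range(len(brokers)))

    # Successive reconnect attempts walk the list round-robin, forever.
    for attempt in range(5):
        broker = brokers[next(next_broker_indices)]
        print(attempt, broker)  # rabbit1, rabbit2, rabbit3, rabbit1, rabbit2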
def _fetch_ssl_params(self): def _fetch_ssl_params(self):
"""Handles fetching what ssl params """Handles fetching what ssl params should be used for the connection
should be used for the connection (if any)""" (if any).
"""
ssl_params = dict() ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket # http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version: if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = self.conf.kombu_ssl_version ssl_params['ssl_version'] = sslutils.validate_ssl_version(
self.conf.kombu_ssl_version)
if self.conf.kombu_ssl_keyfile: if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile: if self.conf.kombu_ssl_certfile:
@ -464,12 +486,8 @@ class Connection(object):
# future with this? # future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
if not ssl_params: # Return the extended behavior or just have the default behavior
# Just have the default behavior return ssl_params or True
return True
else:
# Return the extended behavior
return ssl_params
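The collapsed return leans on Python's short-circuiting `or`: an empty dict is falsy, so the method yields True (kombu's "use default SSL behavior" marker) when no options were configured, and the populated dict otherwise. In isolation, with a hypothetical key path:

    def fetch_ssl_params(ssl_params):
        # Empty dict is falsy: fall back to True, the default-SSL marker.
        return ssl_params or True

    assert fetch_ssl_params({}) is True
    assert fetch_ssl_params({'keyfile': '/etc/pki/key.pem'}) == {
        'keyfile': '/etc/pki/key.pem'}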
def _connect(self, params): def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have """Connect to rabbit. Re-establish any queues that may have
@ -477,7 +495,7 @@ class Connection(object):
be handled by the caller. be handled by the caller.
""" """
if self.connection: if self.connection:
LOG.info(_("Reconnecting to AMQP server on " LOG.info(_LI("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params) "%(hostname)s:%(port)d") % params)
try: try:
self.connection.release() self.connection.release()
@ -499,7 +517,7 @@ class Connection(object):
self.channel._new_queue('ae.undeliver') self.channel._new_queue('ae.undeliver')
for consumer in self.consumers: for consumer in self.consumers:
consumer.reconnect(self.channel) consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') % LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)d') %
params) params)
def reconnect(self): def reconnect(self):
@ -513,14 +531,14 @@ class Connection(object):
attempt = 0 attempt = 0
while True: while True:
params = self.params_list[attempt % len(self.params_list)] params = self.params_list[next(self.next_broker_indices)]
attempt += 1 attempt += 1
try: try:
self._connect(params) self._connect(params)
return return
except (IOError, self.connection_errors) as e: except (IOError, self.connection_errors) as e:
pass pass
except Exception, e: except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib # NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport # to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for # connection_errors in the case of a timeout waiting for
@ -531,18 +549,16 @@ class Connection(object):
raise raise
log_info = {} log_info = {}
log_info['err_str'] = str(e) log_info['err_str'] = e
log_info['max_retries'] = self.max_retries log_info['max_retries'] = self.max_retries
log_info.update(params) log_info.update(params)
if self.max_retries and attempt == self.max_retries: if self.max_retries and attempt == self.max_retries:
LOG.error(_('Unable to connect to AMQP server on ' msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d ' '%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info) 'tries: %(err_str)s') % log_info
# NOTE(comstud): Copied from original code. There's LOG.error(msg)
# really no better recourse because if this was a queue we raise rpc_common.RPCException(msg)
# need to consume on, we have no way to consume anymore.
sys.exit(1)
if attempt == 1: if attempt == 1:
sleep_time = self.interval_start or 1 sleep_time = self.interval_start or 1
@ -552,19 +568,19 @@ class Connection(object):
sleep_time = min(sleep_time, self.interval_max) sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is ' LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in ' 'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info) '%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time) time.sleep(sleep_time)
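Between attempts the loop sleeps with a stepped backoff bounded by interval_max; the stepping line itself falls outside this hunk, so the sketch below assumes the usual kombu-style interval_stepping increment rather than quoting the real code:

    def backoff_sleeps(interval_start=1, interval_stepping=2,
                       interval_max=10, tries=6):
        # Start at interval_start (or 1), grow linearly, cap at interval_max.
        sleep_time = interval_start or 1
        for _ in range(tries):
            yield min(sleep_time, interval_max)
            sleep_time += interval_stepping

    print(list(backoff_sleeps()))  # [1, 3, 5, 7, 9, 10]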
def ensure(self, error_callback, method, *args, **kwargs): def ensure(self, error_callback, method, *args, **kwargs):
while True: while True:
try: try:
return method(*args, **kwargs) return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError), e: except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback: if error_callback:
error_callback(e) error_callback(e)
except Exception, e: except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib # NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport # to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for # connection_errors in the case of a timeout waiting for
@ -578,18 +594,18 @@ class Connection(object):
self.reconnect() self.reconnect()
def get_channel(self): def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues""" """Convenience call for bin/clear_rabbit_queues."""
return self.channel return self.channel
def close(self): def close(self):
"""Close/release this connection""" """Close/release this connection."""
self.cancel_consumer_thread() self.cancel_consumer_thread()
self.wait_on_proxy_callbacks() self.wait_on_proxy_callbacks()
self.connection.release() self.connection.release()
self.connection = None self.connection = None
def reset(self): def reset(self):
"""Reset a connection so it can be used again""" """Reset a connection so it can be used again."""
self.cancel_consumer_thread() self.cancel_consumer_thread()
self.wait_on_proxy_callbacks() self.wait_on_proxy_callbacks()
self.channel.close() self.channel.close()
@ -605,37 +621,37 @@ class Connection(object):
""" """
def _connect_error(exc): def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)} log_info = {'topic': topic, 'err_str': exc}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': " LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info) "%(err_str)s") % log_info)
def _declare_consumer(): def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback, consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next()) six.next(self.consumer_num))
self.consumers.append(consumer) self.consumers.append(consumer)
return consumer return consumer
return self.ensure(_connect_error, _declare_consumer) return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None): def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers""" """Return an iterator that will consume from all queues/consumers."""
info = {'do_consume': True} info = {'do_consume': True}
def _error_callback(exc): def _error_callback(exc):
if isinstance(exc, socket.timeout): if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') % LOG.debug('Timed out waiting for RPC response: %s' %
str(exc)) exc)
raise rpc_common.Timeout() raise rpc_common.Timeout()
else: else:
LOG.exception(_('Failed to consume message from queue: %s') % LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc)) exc)
info['do_consume'] = True info['do_consume'] = True
def _consume(): def _consume():
if info['do_consume']: if info['do_consume']:
queues_head = self.consumers[:-1] queues_head = self.consumers[:-1] # not fanout.
queues_tail = self.consumers[-1] queues_tail = self.consumers[-1] # fanout
for queue in queues_head: for queue in queues_head:
queue.consume(nowait=True) queue.consume(nowait=True)
queues_tail.consume(nowait=False) queues_tail.consume(nowait=False)
@ -648,7 +664,7 @@ class Connection(object):
yield self.ensure(_error_callback, _consume) yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self): def cancel_consumer_thread(self):
"""Cancel a consumer thread""" """Cancel a consumer thread."""
if self.consumer_thread is not None: if self.consumer_thread is not None:
self.consumer_thread.kill() self.consumer_thread.kill()
try: try:
@ -663,11 +679,11 @@ class Connection(object):
proxy_cb.wait() proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs): def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class""" """Send to a publisher based on the publisher class."""
def _error_callback(exc): def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)} log_info = {'topic': topic, 'err_str': exc}
LOG.exception(_("Failed to publish message to topic " LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info) "'%(topic)s': %(err_str)s") % log_info)
def _publish(): def _publish():
@ -684,45 +700,47 @@ class Connection(object):
self.declare_consumer(DirectConsumer, topic, callback) self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None, def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None): exchange_name=None, ack_on_error=True):
"""Create a 'topic' consumer.""" """Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer, self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name, name=queue_name,
exchange_name=exchange_name, exchange_name=exchange_name,
ack_on_error=ack_on_error,
), ),
topic, callback) topic, callback)
def declare_fanout_consumer(self, topic, callback): def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer""" """Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback) self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg): def direct_send(self, msg_id, msg):
"""Send a 'direct' message""" """Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg) self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None): def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message""" """Send a 'topic' message."""
self.publisher_send(TopicPublisher, topic, msg, timeout) self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg): def fanout_send(self, topic, msg):
"""Send a 'fanout' message""" """Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg) self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs): def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic""" """Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs) self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None): def consume(self, limit=None):
"""Consume from all queues/consumers""" """Consume from all queues/consumers."""
it = self.iterconsume(limit=limit) it = self.iterconsume(limit=limit)
while True: while True:
try: try:
it.next() six.next(it)
except StopIteration: except StopIteration:
return return
def consume_in_thread(self): def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread""" """Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread(): def _consumer_thread():
try: try:
self.consume() self.consume()
@ -733,7 +751,7 @@ class Connection(object):
return self.consumer_thread return self.consumer_thread
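The @excutils.forever_retry_uncaught_exceptions decorator (excutils is part of this same sync) keeps the consumer greenthread alive across unexpected exceptions instead of letting a single crash end consumption. A simplified stand-in for what such a decorator does, not the actual oslo implementation:

    import functools
    import logging
    import time

    def forever_retry_uncaught_exceptions(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return fn(*args, **kwargs)
                except Exception:
                    # The real helper also rate-limits repeated log messages.
                    logging.exception('Unexpected exception occurred, retrying')
                    time.sleep(1)
        return wrapper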
def create_consumer(self, topic, proxy, fanout=False): def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object""" """Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback( proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy, self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection)) rpc_amqp.get_connection_pool(self.conf, Connection))
@ -745,7 +763,7 @@ class Connection(object):
self.declare_topic_consumer(topic, proxy_cb) self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name): def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object""" """Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback( proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy, self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection)) rpc_amqp.get_connection_pool(self.conf, Connection))
@ -753,7 +771,7 @@ class Connection(object):
self.declare_topic_consumer(topic, proxy_cb, pool_name) self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None): exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from """Register as a member of a group of consumers for a given topic from
the specified exchange. the specified exchange.
@ -767,6 +785,7 @@ class Connection(object):
callback=callback, callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf, connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection), Connection),
wait_for_consumers=not ack_on_error
) )
self.proxy_callbacks.append(callback_wrapper) self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer( self.declare_topic_consumer(
@ -774,11 +793,12 @@ class Connection(object):
topic=topic, topic=topic,
exchange_name=exchange_name, exchange_name=exchange_name,
callback=callback_wrapper, callback=callback_wrapper,
ack_on_error=ack_on_error,
) )
def create_connection(conf, new=True): def create_connection(conf, new=True):
"""Create a connection""" """Create a connection."""
return rpc_amqp.create_connection( return rpc_amqp.create_connection(
conf, new, conf, new,
rpc_amqp.get_connection_pool(conf, Connection)) rpc_amqp.get_connection_pool(conf, Connection))
@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation # Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc. # Copyright 2011 - 2012, Red Hat, Inc.
# #
@ -18,19 +16,21 @@
import functools import functools
import itertools import itertools
import time import time
import uuid
import eventlet import eventlet
import greenlet import greenlet
from oslo.config import cfg from oslo.config import cfg
import six
from manila.openstack.common.gettextutils import _ from manila.openstack.common import excutils
from manila.openstack.common.gettextutils import _, _LE, _LI
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common.rpc import amqp as rpc_amqp from manila.openstack.common.rpc import amqp as rpc_amqp
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging") qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
@ -65,15 +65,35 @@ qpid_opts = [
cfg.BoolOpt('qpid_tcp_nodelay', cfg.BoolOpt('qpid_tcp_nodelay',
default=True, default=True,
help='Disable Nagle algorithm'), help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
] ]
cfg.CONF.register_opts(qpid_opts) cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf):
msg = (_("Invalid value for qpid_topology_version: %d") %
conf.qpid_topology_version)
LOG.error(msg)
raise Exception(msg)
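The two topologies differ mostly in how node names are spelled: version 1 declares purpose-built exchanges, while version 2 targets the standard amq.* exchanges so that federated brokers agree on names. A sketch of the naming scheme the consumer and publisher classes below implement (exchange and topic values are made up; this helper does not exist in the module):

    def node_name(kind, version, exchange='openstack', topic='share',
                  msg_id='msg-1'):
        if kind == 'direct':
            return ('%s/%s' % (msg_id, msg_id) if version == 1
                    else 'amq.direct/%s' % msg_id)
        if kind == 'topic':
            return ('%s/%s' % (exchange, topic) if version == 1
                    else 'amq.topic/topic/%s/%s' % (exchange, topic))
        if kind == 'fanout':
            return ('%s_fanout' % topic if version == 1
                    else 'amq.topic/fanout/%s' % topic)
        raise ValueError(kind)

    print(node_name('topic', 2))  # amq.topic/topic/openstack/share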
class ConsumerBase(object): class ConsumerBase(object):
"""Consumer base class.""" """Consumer base class."""
def __init__(self, session, callback, node_name, node_opts, def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts): link_name, link_opts):
"""Declare a queue on an amqp session. """Declare a queue on an amqp session.
@ -91,55 +111,97 @@ class ConsumerBase(object):
self.receiver = None self.receiver = None
self.session = None self.session = None
addr_opts = { if conf.qpid_topology_version == 1:
"create": "always", addr_opts = {
"node": { "create": "always",
"type": "topic", "node": {
"x-declare": { "type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True, "durable": True,
"auto-delete": True, "x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
}, },
}, }
"link": { addr_opts["node"]["x-declare"].update(node_opts)
"name": link_name, elif conf.qpid_topology_version == 2:
"durable": True, addr_opts = {
"x-declare": { "link": {
"durable": False, "x-declare": {
"auto-delete": True, "auto-delete": True,
"exclusive": False, "exclusive": False,
},
}, },
}, }
} else:
addr_opts["node"]["x-declare"].update(node_opts) raise_invalid_topology_version()
addr_opts["link"]["x-declare"].update(link_opts) addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session) self.connect(session)
def connect(self, session):
"""Declare the receiver on connect."""
self._declare_receiver(session)
def reconnect(self, session): def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect""" """Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session self.session = session
self.receiver = session.receiver(self.address) self.receiver = session.receiver(self.address)
self.receiver.capacity = 1 self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self): def consume(self):
"""Fetch the message and pass it to the callback object""" """Fetch the message and pass it to the callback object."""
message = self.receiver.fetch() message = self.receiver.fetch()
try: try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content) msg = rpc_common.deserialize_msg(message.content)
self.callback(msg) self.callback(msg)
except Exception: except Exception:
LOG.exception(_("Failed to process message... skipping it.")) LOG.exception(_LE("Failed to process message... skipping it."))
finally: finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message) self.session.acknowledge(message)
def get_receiver(self): def get_receiver(self):
return self.receiver return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
class DirectConsumer(ConsumerBase): class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'""" """Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback): def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue. """Init a 'direct' queue.
@ -149,15 +211,30 @@ class DirectConsumer(ConsumerBase):
'callback' is the callback to call when messages are received 'callback' is the callback to call when messages are received
""" """
super(DirectConsumer, self).__init__(session, callback, link_opts = {
"%s/%s" % (msg_id, msg_id), "auto-delete": conf.amqp_auto_delete,
{"type": "direct"}, "exclusive": True,
msg_id, "durable": conf.amqp_durable_queues,
{"exclusive": True}) }
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = None
else:
raise_invalid_topology_version(conf)
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase): class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'""" """Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None, def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None): exchange_name=None):
@ -171,13 +248,24 @@ class TopicConsumer(ConsumerBase):
""" """
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(session, callback, link_opts = {
"%s/%s" % (exchange_name, topic), "auto-delete": conf.amqp_auto_delete,
{}, name or topic, {}) "durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase): class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'""" """Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback): def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue. """Init a 'fanout' queue.
@ -186,90 +274,165 @@ class FanoutConsumer(ConsumerBase):
'topic' is the topic to listen on 'topic' is the topic to listen on
'callback' is the callback to call when messages are received 'callback' is the callback to call when messages are received
""" """
self.conf = conf
super(FanoutConsumer, self).__init__( link_opts = {"exclusive": True}
session, callback,
"%s_fanout" % topic, if conf.qpid_topology_version == 1:
{"durable": False, "type": "fanout"}, node_name = "%s_fanout" % topic
"%s_fanout_%s" % (topic, uuid.uuid4().hex), node_opts = {"durable": False, "type": "fanout"}
{"exclusive": True}) elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object): class Publisher(object):
"""Base Publisher class""" """Base Publisher class."""
def __init__(self, session, node_name, node_opts=None): def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key, """Init the Publisher class with the exchange_name, routing_key,
and other options and other options
""" """
self.sender = None self.sender = None
self.session = session self.session = session
addr_opts = { if conf.qpid_topology_version == 1:
"create": "always", addr_opts = {
"node": { "create": "always",
"type": "topic", "node": {
"x-declare": { "type": "topic",
"durable": False, "x-declare": {
# auto-delete isn't implemented for exchanges in qpid, "durable": False,
# but put in here anyway # auto-delete isn't implemented for exchanges in qpid,
"auto-delete": True, # but put in here anyway
"auto-delete": True,
},
}, },
}, }
} if node_opts:
if node_opts: addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
raise_invalid_topology_version(conf)
self.reconnect(session) self.reconnect(session)
def reconnect(self, session): def reconnect(self, session):
"""Re-establish the Sender after a reconnection""" """Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address) self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
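Together with _unpack_json_msg above, this forms a JSON fallback round trip: payloads Qpid cannot encode as an amqp/map travel as a JSON string tagged JSON_CONTENT_TYPE and are rehydrated on the consumer side. A standalone sketch of the same round trip without the qpid.messaging dependency (FakeMessage stands in for qpid_messaging.Message):

    import json

    JSON_CONTENT_TYPE = 'application/json; charset=utf8'

    class FakeMessage(object):
        def __init__(self, content, content_type='amqp/map'):
            self.content = content
            self.content_type = content_type

    def pack_json_msg(msg):
        return FakeMessage(json.dumps(msg.content), JSON_CONTENT_TYPE)

    def unpack_json_msg(msg):
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = json.loads(msg.content)
            msg.content_type = 'amqp/map'

    msg = pack_json_msg(FakeMessage({'method': 'ping', 'blob': 'x' * 70000}))
    unpack_json_msg(msg)
    assert msg.content['method'] == 'ping'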
def send(self, msg): def send(self, msg):
"""Send a message""" """Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg) self.sender.send(msg)
class DirectPublisher(Publisher): class DirectPublisher(Publisher):
"""Publisher class for 'direct'""" """Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id): def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher.""" """Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "Direct"}) if conf.qpid_topology_version == 1:
node_name = msg_id
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher): class TopicPublisher(Publisher):
"""Publisher class for 'topic'""" """Publisher class for 'topic'."""
def __init__(self, conf, session, topic): def __init__(self, conf, session, topic):
"""init a 'topic' publisher. """Init a 'topic' publisher.
""" """
exchange_name = rpc_amqp.get_control_exchange(conf) exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic)) if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher): class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'""" """Publisher class for 'fanout'."""
def __init__(self, conf, session, topic): def __init__(self, conf, session, topic):
"""init a 'fanout' publisher. """Init a 'fanout' publisher.
""" """
super(FanoutPublisher, self).__init__(
session, if conf.qpid_topology_version == 1:
"%s_fanout" % topic, {"type": "fanout"}) node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher): class NotifyPublisher(Publisher):
"""Publisher class for notifications""" """Publisher class for notifications."""
def __init__(self, conf, session, topic): def __init__(self, conf, session, topic):
"""init a 'topic' publisher. """Init a 'topic' publisher.
""" """
exchange_name = rpc_amqp.get_control_exchange(conf) exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session, node_opts = {"durable": True}
"%s/%s" % (exchange_name, topic),
{"durable": True}) if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object): class Connection(object):
@ -304,6 +467,10 @@ class Connection(object):
self.brokers = params['qpid_hosts'] self.brokers = params['qpid_hosts']
self.username = params['username'] self.username = params['username']
self.password = params['password'] self.password = params['password']
brokers_count = len(self.brokers)
self.next_broker_indices = itertools.cycle(range(brokers_count))
self.connection_create(self.brokers[0]) self.connection_create(self.brokers[0])
self.reconnect() self.reconnect()
@ -330,31 +497,30 @@ class Connection(object):
return self.consumers[str(receiver)] return self.consumers[str(receiver)]
def reconnect(self): def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues""" """Handles reconnecting and re-establishing sessions and queues."""
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
attempt = 0
delay = 1 delay = 1
while True: while True:
broker = self.brokers[attempt % len(self.brokers)] # Close the session if necessary
attempt += 1 if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.MessagingError:
pass
broker = self.brokers[next(self.next_broker_indices)]
try: try:
self.connection_create(broker) self.connection_create(broker)
self.connection.open() self.connection.open()
except qpid_exceptions.ConnectionError, e: except qpid_exceptions.MessagingError as e:
msg_dict = dict(e=e, delay=delay) msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. " msg = _LE("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict "Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg) LOG.error(msg)
time.sleep(delay) time.sleep(delay)
delay = min(2 * delay, 60) delay = min(delay + 1, 5)
else: else:
LOG.info(_('Connected to AMQP server on %s'), broker) LOG.info(_LI('Connected to AMQP server on %s'), broker)
break break
self.session = self.connection.session() self.session = self.connection.session()
@ -363,31 +529,37 @@ class Connection(object):
consumers = self.consumers consumers = self.consumers
self.consumers = {} self.consumers = {}
for consumer in consumers.itervalues(): for consumer in six.itervalues(consumers):
consumer.reconnect(self.session) consumer.reconnect(self.session)
self._register_consumer(consumer) self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues")) LOG.debug("Re-established AMQP queues")
def ensure(self, error_callback, method, *args, **kwargs): def ensure(self, error_callback, method, *args, **kwargs):
while True: while True:
try: try:
return method(*args, **kwargs) return method(*args, **kwargs)
except (qpid_exceptions.Empty, except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError), e: qpid_exceptions.MessagingError) as e:
if error_callback: if error_callback:
error_callback(e) error_callback(e)
self.reconnect() self.reconnect()
def close(self): def close(self):
"""Close/release this connection""" """Close/release this connection."""
self.cancel_consumer_thread() self.cancel_consumer_thread()
self.wait_on_proxy_callbacks() self.wait_on_proxy_callbacks()
self.connection.close() try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None self.connection = None
def reset(self): def reset(self):
"""Reset a connection so it can be used again""" """Reset a connection so it can be used again."""
self.cancel_consumer_thread() self.cancel_consumer_thread()
self.wait_on_proxy_callbacks() self.wait_on_proxy_callbacks()
self.session.close() self.session.close()
@ -399,8 +571,8 @@ class Connection(object):
add it to our list of consumers add it to our list of consumers
""" """
def _connect_error(exc): def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)} log_info = {'topic': topic, 'err_str': exc}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': " LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info) "%(err_str)s") % log_info)
def _declare_consumer(): def _declare_consumer():
@ -411,23 +583,23 @@ class Connection(object):
return self.ensure(_connect_error, _declare_consumer) return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None): def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers""" """Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc): def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty): if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') % LOG.debug('Timed out waiting for RPC response: %s' %
str(exc)) exc)
raise rpc_common.Timeout() raise rpc_common.Timeout()
else: else:
LOG.exception(_('Failed to consume message from queue: %s') % LOG.exception(_LE('Failed to consume message from queue: %s') %
str(exc)) exc)
def _consume(): def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout) nxt_receiver = self.session.next_receiver(timeout=timeout)
try: try:
self._lookup_consumer(nxt_receiver).consume() self._lookup_consumer(nxt_receiver).consume()
except Exception: except Exception:
LOG.exception(_("Error processing message. Skipping it.")) LOG.exception(_LE("Error processing message. Skipping it."))
for iteration in itertools.count(0): for iteration in itertools.count(0):
if limit and iteration >= limit: if limit and iteration >= limit:
@ -435,7 +607,7 @@ class Connection(object):
yield self.ensure(_error_callback, _consume) yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self): def cancel_consumer_thread(self):
"""Cancel a consumer thread""" """Cancel a consumer thread."""
if self.consumer_thread is not None: if self.consumer_thread is not None:
self.consumer_thread.kill() self.consumer_thread.kill()
try: try:
@ -450,11 +622,11 @@ class Connection(object):
proxy_cb.wait() proxy_cb.wait()
def publisher_send(self, cls, topic, msg): def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class""" """Send to a publisher based on the publisher class."""
def _connect_error(exc): def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)} log_info = {'topic': topic, 'err_str': exc}
LOG.exception(_("Failed to publish message to topic " LOG.exception(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info) "'%(topic)s': %(err_str)s") % log_info)
def _publisher_send(): def _publisher_send():
@ -480,15 +652,15 @@ class Connection(object):
topic, callback) topic, callback)
def declare_fanout_consumer(self, topic, callback): def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer""" """Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback) self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg): def direct_send(self, msg_id, msg):
"""Send a 'direct' message""" """Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg) self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None): def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message""" """Send a 'topic' message."""
# #
# We want to create a message with attributes, e.g. a TTL. We # We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer # don't really need to keep 'msg' in its JSON format any longer
@ -503,24 +675,25 @@ class Connection(object):
self.publisher_send(TopicPublisher, topic, qpid_message) self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg): def fanout_send(self, topic, msg):
"""Send a 'fanout' message""" """Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg) self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs): def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic""" """Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg) self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None): def consume(self, limit=None):
"""Consume from all queues/consumers""" """Consume from all queues/consumers."""
it = self.iterconsume(limit=limit) it = self.iterconsume(limit=limit)
while True: while True:
try: try:
it.next() six.next(it)
except StopIteration: except StopIteration:
return return
def consume_in_thread(self): def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread""" """Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread(): def _consumer_thread():
try: try:
self.consume() self.consume()
@ -531,7 +704,7 @@ class Connection(object):
return self.consumer_thread return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False): def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object""" """Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback( proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy, self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection)) rpc_amqp.get_connection_pool(self.conf, Connection))
@ -547,7 +720,7 @@ class Connection(object):
return consumer return consumer
def create_worker(self, topic, proxy, pool_name): def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object""" """Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback( proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy, self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection)) rpc_amqp.get_connection_pool(self.conf, Connection))
@ -561,7 +734,7 @@ class Connection(object):
return consumer return consumer
def join_consumer_pool(self, callback, pool_name, topic, def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None): exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from """Register as a member of a group of consumers for a given topic from
the specified exchange. the specified exchange.
@ -575,6 +748,7 @@ class Connection(object):
callback=callback, callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf, connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection), Connection),
wait_for_consumers=not ack_on_error
) )
self.proxy_callbacks.append(callback_wrapper) self.proxy_callbacks.append(callback_wrapper)
@ -590,7 +764,7 @@ class Connection(object):
def create_connection(conf, new=True): def create_connection(conf, new=True):
"""Create a connection""" """Create a connection."""
return rpc_amqp.create_connection( return rpc_amqp.create_connection(
conf, new, conf, new,
rpc_amqp.get_connection_pool(conf, Connection)) rpc_amqp.get_connection_pool(conf, Connection))
@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc # Copyright 2011 Cloudscaling Group, Inc
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -25,12 +23,13 @@ import uuid
import eventlet import eventlet
import greenlet import greenlet
from oslo.config import cfg from oslo.config import cfg
import six
from six import moves
from manila.openstack.common import excutils from manila.openstack.common import excutils
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _, _LE, _LI
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common import processutils as utils
from manila.openstack.common.rpc import common as rpc_common from manila.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq') zmq = importutils.try_import('eventlet.green.zmq')
@ -64,7 +63,7 @@ zmq_opts = [
cfg.IntOpt('rpc_zmq_contexts', default=1, cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'), help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None, cfg.IntOpt('rpc_zmq_topic_backlog',
help='Maximum number of ingress messages to locally buffer ' help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'), 'per topic. Default is unlimited.'),
@ -81,12 +80,12 @@ CONF = cfg.CONF
CONF.register_opts(zmq_opts) CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global. ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object matchmaker = None # memoized matchmaker object
def _serialize(data): def _serialize(data):
""" """Serialization wrapper.
Serialization wrapper
We prefer using JSON, but it cannot encode all types. We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data. Error if a developer passes us bad data.
""" """
@ -94,22 +93,19 @@ def _serialize(data):
return jsonutils.dumps(data, ensure_ascii=True) return jsonutils.dumps(data, ensure_ascii=True)
except TypeError: except TypeError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed.")) LOG.error(_LE("JSON serialization failed."))
def _deserialize(data): def _deserialize(data):
""" """Deserialization wrapper."""
Deserialization wrapper LOG.debug("Deserializing: %s", data)
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data) return jsonutils.loads(data)
class ZmqSocket(object): class ZmqSocket(object):
""" """A tiny wrapper around ZeroMQ.
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Simplifies the send/recv protocol and connection management.
Can be used as a Context (supports the 'with' statement). Can be used as a Context (supports the 'with' statement).
""" """
@ -137,9 +133,9 @@ class ZmqSocket(object):
str_data = {'addr': addr, 'type': self.socket_s(), str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind} 'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data) LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data) LOG.debug("-> Subscribed to %(subscribe)s", str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data) LOG.debug("-> bind: %(bind)s", str_data)
try: try:
if bind: if bind:
@ -159,7 +155,7 @@ class ZmqSocket(object):
"""Subscribe.""" """Subscribe."""
if not self.can_sub: if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.") raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter) LOG.debug("Subscribing to %s", msg_filter)
try: try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter) self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
@ -180,7 +176,7 @@ class ZmqSocket(object):
return return
# We must unsubscribe, or we'll leak descriptors. # We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0: if self.subscriptions:
for f in self.subscriptions: for f in self.subscriptions:
try: try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f) self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
@ -196,29 +192,27 @@ class ZmqSocket(object):
# it would be much worse if some of the code calling this # it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate # were to fail. For now, lets log, and later evaluate
# if we can safely raise here. # if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.") LOG.error(_LE("ZeroMQ socket could not be closed."))
self.sock = None self.sock = None
def recv(self): def recv(self, **kwargs):
if not self.can_recv: if not self.can_recv:
raise RPCException(_("You cannot recv on this socket.")) raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart() return self.sock.recv_multipart(**kwargs)
def send(self, data): def send(self, data, **kwargs):
if not self.can_send: if not self.can_send:
raise RPCException(_("You cannot send on this socket.")) raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data) self.sock.send_multipart(data, **kwargs)
class ZmqClient(object): class ZmqClient(object):
"""Client for ZMQ sockets.""" """Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False): def __init__(self, addr):
if socket_type is None: self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, envelope=False): def cast(self, msg_id, topic, data, envelope):
msg_id = msg_id or 0 msg_id = msg_id or 0
if not envelope: if not envelope:
@ -227,7 +221,7 @@ class ZmqClient(object):
return return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope) rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items()) zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes, self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg)) (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
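moves.reduce flattens the envelope dict into an alternating key/value sequence so each entry rides as its own zmq frame; unflatten_envelope further down reverses it. In isolation:

    from functools import reduce  # what six.moves.reduce resolves to on Python 3

    envelope = {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}
    flat = reduce(lambda x, y: x + y, envelope.items())
    print(flat)  # ('oslo.version', '2.0', 'oslo.message', '{"method": "ping"}')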
@ -270,7 +264,7 @@ class InternalContext(object):
def _get_response(self, ctx, proxy, topic, data): def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic.""" """Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict()) LOG.debug("Running func with context: %s", ctx.to_dict())
data.setdefault('version', None) data.setdefault('version', None)
data.setdefault('args', {}) data.setdefault('args', {})
@ -282,14 +276,14 @@ class InternalContext(object):
except greenlet.GreenletExit: except greenlet.GreenletExit:
# ignore these since they are just from shutdowns # ignore these since they are just from shutdowns
pass pass
except rpc_common.ClientException, e: except rpc_common.ClientException as e:
LOG.debug(_("Expected exception during message handling (%s)") % LOG.debug("Expected exception during message handling (%s)" %
e._exc_info[1]) e._exc_info[1])
return {'exc': return {'exc':
rpc_common.serialize_remote_exception(e._exc_info, rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)} log_failure=False)}
except Exception: except Exception:
LOG.error(_("Exception during message handling")) LOG.error(_LE("Exception during message handling"))
return {'exc': return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())} rpc_common.serialize_remote_exception(sys.exc_info())}
@ -308,7 +302,7 @@ class InternalContext(object):
self._get_response(ctx, proxy, topic, payload), self._get_response(ctx, proxy, topic, payload),
ctx.replies) ctx.replies)
LOG.debug(_("Sending reply")) LOG.debug("Sending reply")
_multi_send(_cast, ctx, topic, { _multi_send(_cast, ctx, topic, {
'method': '-process_reply', 'method': '-process_reply',
'args': { 'args': {
@ -342,7 +336,7 @@ class ConsumerBase(object):
# processed internally. (non-valid method name) # processed internally. (non-valid method name)
method = data.get('method') method = data.get('method')
if not method: if not method:
LOG.error(_("RPC message did not include method.")) LOG.error(_LE("RPC message did not include method."))
return return
# Internal method # Internal method
@ -356,16 +350,14 @@ class ConsumerBase(object):
class ZmqBaseReactor(ConsumerBase): class ZmqBaseReactor(ConsumerBase):
""" """A consumer class implementing a centralized casting broker (PULL-PUSH).
A consumer class implementing a
centralized casting broker (PULL-PUSH) Used for RoundRobin requests.
for RoundRobin requests.
""" """
def __init__(self, conf): def __init__(self, conf):
super(ZmqBaseReactor, self).__init__() super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {} self.proxies = {}
self.threads = [] self.threads = []
self.sockets = [] self.sockets = []
@ -373,11 +365,10 @@ class ZmqBaseReactor(ConsumerBase):
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size) self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None, def register(self, proxy, in_addr, zmq_type_in,
zmq_type_out=None, in_bind=True, out_bind=True, in_bind=True, subscribe=None):
subscribe=None):
LOG.info(_("Registering reactor")) LOG.info(_LI("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB): if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype") raise RPCException("Bad input socktype")
@ -389,26 +380,12 @@ class ZmqBaseReactor(ConsumerBase):
self.proxies[inq] = proxy self.proxies[inq] = proxy
self.sockets.append(inq) self.sockets.append(inq)
LOG.info(_("In reactor registered")) LOG.info(_LI("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self): def consume_in_thread(self):
@excutils.forever_retry_uncaught_exceptions
def _consume(sock): def _consume(sock):
LOG.info(_("Consuming socket")) LOG.info(_LI("Consuming socket"))
while True: while True:
self.consume(sock) self.consume(sock)
@ -430,10 +407,9 @@ class ZmqBaseReactor(ConsumerBase):
class ZmqProxy(ZmqBaseReactor): class ZmqProxy(ZmqBaseReactor):
""" """A consumer class implementing a topic-based proxy.
A consumer class implementing a
topic-based proxy, forwarding to Forwards to IPC sockets.
IPC sockets.
""" """
def __init__(self, conf): def __init__(self, conf):
@ -446,11 +422,8 @@ class ZmqProxy(ZmqBaseReactor):
def consume(self, sock): def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying) data = sock.recv(copy=False)
data = sock.recv() topic = data[1].bytes
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~'): if topic.startswith('fanout~'):
sock_type = zmq.PUB sock_type = zmq.PUB
@ -462,7 +435,7 @@ class ZmqProxy(ZmqBaseReactor):
if topic not in self.topic_proxy: if topic not in self.topic_proxy:
def publisher(waiter): def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic) LOG.info(_LI("Creating proxy for topic: %s"), topic)
try: try:
# The topic is received over the network, # The topic is received over the network,
@ -492,9 +465,7 @@ class ZmqProxy(ZmqBaseReactor):
while(True): while(True):
data = self.topic_proxy[topic].get() data = self.topic_proxy[topic].get()
out_sock.send(data) out_sock.send(data, copy=False)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event() wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation) eventlet.spawn(publisher, wait_sock_creation)
@ -502,69 +473,66 @@ class ZmqProxy(ZmqBaseReactor):
try: try:
wait_sock_creation.wait() wait_sock_creation.wait()
except RPCException: except RPCException:
LOG.error(_("Topic socket file creation failed.")) LOG.error(_LE("Topic socket file creation failed."))
return return
try: try:
self.topic_proxy[topic].put_nowait(data) self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full: except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic " LOG.error(_LE("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic}) "%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self): def consume_in_thread(self):
"""Runs the ZmqProxy service""" """Runs the ZmqProxy service."""
ipc_dir = CONF.rpc_zmq_ipc_dir ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \ consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address, (CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port) CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None) consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir): try:
try: os.makedirs(ipc_dir)
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True) except os.error:
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()), if not os.path.isdir(ipc_dir):
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") % LOG.error(_LE("Required IPC directory does not exist at"
(ipc_dir, )) " %s") % (ipc_dir, ))
try: try:
self.register(consumption_proxy, self.register(consumption_proxy,
consume_in, consume_in,
zmq.PULL, zmq.PULL)
out_bind=True)
except zmq.ZMQError: except zmq.ZMQError:
if os.access(ipc_dir, os.X_OK):
with excutils.save_and_reraise_exception():
LOG.error(_LE("Permission denied to IPC directory at"
" %s") % (ipc_dir, ))
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. " LOG.error(_LE("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use.")) "Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread() super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv): def unflatten_envelope(packenv):
"""Unflattens the RPC envelope. """Unflattens the RPC envelope.
Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4} Takes a list and returns a dictionary.
i.e. [1,2,3,4] => {1: 2, 3: 4}
""" """
i = iter(packenv) i = iter(packenv)
h = {} h = {}
try: try:
while True: while True:
k = i.next() k = six.next(i)
h[k] = i.next() h[k] = six.next(i)
except StopIteration: except StopIteration:
return h return h
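As a quick illustration of the helper (values are made up, not from the sync):

packed = ['oslo.version', '2.0', 'oslo.message', '{"method": "ping"}']
unflatten_envelope(packed)
# => {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}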
class ZmqReactor(ZmqBaseReactor): class ZmqReactor(ZmqBaseReactor):
""" """A consumer class implementing a consumer for messages.
A consumer class implementing a
consumer for messages. Can also be Can also be used as a 1:1 proxy
used as a 1:1 proxy
""" """
def __init__(self, conf): def __init__(self, conf):
@ -573,12 +541,7 @@ class ZmqReactor(ZmqBaseReactor):
def consume(self, sock): def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying) #TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv() data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data) LOG.debug("CONSUMER RECEIVED DATA: %s", data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
proxy = self.proxies[sock] proxy = self.proxies[sock]
@ -597,7 +560,7 @@ class ZmqReactor(ZmqBaseReactor):
# Unmarshal only after verifying the message. # Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3]) ctx = RpcContext.unmarshal(data[3])
else: else:
LOG.error(_("ZMQ Envelope version unsupported or unknown.")) LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
return return
self.pool.spawn_n(self.process, proxy, ctx, request) self.pool.spawn_n(self.process, proxy, ctx, request)
@ -625,14 +588,14 @@ class Connection(rpc_common.Connection):
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host)) topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics: if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered.")) LOG.info(_LI("Skipping topic registration. Already registered."))
return return
# Receive messages from (local) proxy # Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \ inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic) (CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"), LOG.debug("Consumer is a zmq.%s",
['PULL', 'SUB'][sock_type == zmq.SUB]) ['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type, self.reactor.register(proxy, inaddr, sock_type,
@ -684,7 +647,7 @@ def _call(addr, context, topic, msg, timeout=None,
# Replies always come into the reply service. # Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload")) LOG.debug("Creating payload")
# Curry the original request into a reply method. # Curry the original request into a reply method.
mcontext = RpcContext.marshal(context) mcontext = RpcContext.marshal(context)
payload = { payload = {
@ -697,7 +660,7 @@ def _call(addr, context, topic, msg, timeout=None,
} }
} }
LOG.debug(_("Creating queue socket for reply waiter")) LOG.debug("Creating queue socket for reply waiter")
# Messages arriving async. # Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
@ -710,14 +673,14 @@ def _call(addr, context, topic, msg, timeout=None,
zmq.SUB, subscribe=msg_id, bind=False zmq.SUB, subscribe=msg_id, bind=False
) )
LOG.debug(_("Sending cast")) LOG.debug("Sending cast")
_cast(addr, context, topic, payload, envelope) _cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply")) LOG.debug("Cast sent; Waiting reply")
# Blocks until receives reply # Blocks until receives reply
msg = msg_waiter.recv() msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg) LOG.debug("Received message: %s", msg)
LOG.debug(_("Unpacking response")) LOG.debug("Unpacking response")
if msg[2] == 'cast': # Legacy version if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1] raw_msg = _deserialize(msg[-1])[-1]
@ -751,19 +714,18 @@ def _call(addr, context, topic, msg, timeout=None,
def _multi_send(method, context, topic, msg, timeout=None, def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None): envelope=False, _msg_id=None):
""" """Wraps the sending of messages.
Wraps the sending of messages,
dispatches to the matchmaker and sends Dispatches to the matchmaker and sends message to all relevant hosts.
message to all relevant hosts.
""" """
conf = CONF conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))}) LOG.debug("%(msg)s" % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic) queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues) LOG.debug("Sending message(s) to: %s", queues)
# Don't stack if we have no matchmaker results # Don't stack if we have no matchmaker results
if len(queues) == 0: if not queues:
LOG.warn(_("No matchmaker results. Not casting.")) LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle # While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie. # this exception and a timeout isn't too big a lie.
@ -811,8 +773,8 @@ def fanout_cast(conf, context, topic, msg, **kwargs):
def notify(conf, context, topic, msg, envelope): def notify(conf, context, topic, msg, envelope):
""" """Send notification event.
Send notification event.
Notifications are sent to topic-priority. Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority. This differs from the AMQP drivers which send to topic.priority.
""" """
@ -846,6 +808,11 @@ def _get_ctxt():
def _get_matchmaker(*args, **kwargs): def _get_matchmaker(*args, **kwargs):
global matchmaker global matchmaker
if not matchmaker: if not matchmaker:
matchmaker = importutils.import_object( mm = CONF.rpc_zmq_matchmaker
CONF.rpc_zmq_matchmaker, *args, **kwargs) if mm.endswith('matchmaker.MatchMakerRing'):
mm = mm.replace('matchmaker', 'matchmaker_ring')  # assign: replace() returns a new string
LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
' %(new)s instead') % dict(
orig=CONF.rpc_zmq_matchmaker, new=mm))
matchmaker = importutils.import_object(mm, *args, **kwargs)
return matchmaker return matchmaker
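For clarity, the deprecation shim above is a plain string substitution on the configured import path; a sketch with an assumed legacy value:

# Assumed legacy setting; MatchMakerRing moved to the matchmaker_ring module.
mm = 'manila.openstack.common.rpc.matchmaker.MatchMakerRing'
mm = mm.replace('matchmaker', 'matchmaker_ring')
# => 'manila.openstack.common.rpc.matchmaker_ring.MatchMakerRing'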

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc # Copyright 2011 Cloudscaling Group, Inc
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,27 +11,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """
The MatchMaker classes should accept a Topic or Fanout exchange key and The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance. return keys for direct exchanges, per (approximate) AMQP parlance.
""" """
import contextlib import contextlib
import itertools
import json
import eventlet import eventlet
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _, _LI
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
matchmaker_opts = [ matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('matchmaker_ringfile',
default='/etc/nova/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
cfg.IntOpt('matchmaker_heartbeat_freq', cfg.IntOpt('matchmaker_heartbeat_freq',
default=300, default=300,
help='Heartbeat frequency'), help='Heartbeat frequency'),
@ -54,8 +47,8 @@ class MatchMakerException(Exception):
class Exchange(object): class Exchange(object):
""" """Implements lookups.
Implements lookups.
Subclass this to support hashtables, dns, etc. Subclass this to support hashtables, dns, etc.
""" """
def __init__(self): def __init__(self):
@ -66,9 +59,7 @@ class Exchange(object):
class Binding(object): class Binding(object):
""" """A binding on which to perform a lookup."""
A binding on which to perform a lookup.
"""
def __init__(self): def __init__(self):
pass pass
@ -77,10 +68,10 @@ class Binding(object):
class MatchMakerBase(object): class MatchMakerBase(object):
""" """Match Maker Base Class.
Match Maker Base Class.
Build off HeartbeatMatchMakerBase if building a Build off HeartbeatMatchMakerBase if building a heartbeat-capable
heartbeat-capable MatchMaker. MatchMaker.
""" """
def __init__(self): def __init__(self):
# Array of tuples. Index [2] toggles negation, [3] is last-if-true # Array of tuples. Index [2] toggles negation, [3] is last-if-true
@ -90,58 +81,47 @@ class MatchMakerBase(object):
'registration or heartbeat.') 'registration or heartbeat.')
def register(self, key, host): def register(self, key, host):
""" """Register a host on a backend.
Register a host on a backend.
Heartbeats, if applicable, may keepalive registration. Heartbeats, if applicable, may keepalive registration.
""" """
pass pass
def ack_alive(self, key, host): def ack_alive(self, key, host):
""" """Acknowledge that a key.host is alive.
Acknowledge that a key.host is alive.
Used internally for updating heartbeats, Used internally for updating heartbeats, but may also be used
but may also be used publically to acknowledge publicly to acknowledge a system is alive (i.e. rpc message
a system is alive (i.e. rpc message successfully successfully sent to host)
sent to host)
""" """
pass pass
def is_alive(self, topic, host): def is_alive(self, topic, host):
""" """Checks if a host is alive."""
Checks if a host is alive.
"""
pass pass
def expire(self, topic, host): def expire(self, topic, host):
""" """Explicitly expire a host's registration."""
Explicitly expire a host's registration.
"""
pass pass
def send_heartbeats(self): def send_heartbeats(self):
""" """Send all heartbeats.
Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread, Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method. which loops this method.
""" """
pass pass
def unregister(self, key, host): def unregister(self, key, host):
""" """Unregister a topic."""
Unregister a topic.
"""
pass pass
def start_heartbeat(self): def start_heartbeat(self):
""" """Spawn heartbeat greenthread."""
Spawn heartbeat greenthread.
"""
pass pass
def stop_heartbeat(self): def stop_heartbeat(self):
""" """Destroys the heartbeat greenthread."""
Destroys the heartbeat greenthread.
"""
pass pass
def add_binding(self, binding, rule, last=True): def add_binding(self, binding, rule, last=True):
@ -168,10 +148,10 @@ class MatchMakerBase(object):
class HeartbeatMatchMakerBase(MatchMakerBase): class HeartbeatMatchMakerBase(MatchMakerBase):
""" """Base for a heart-beat capable MatchMaker.
Base for a heart-beat capable MatchMaker.
Provides common methods for registering, Provides common methods for registering, unregistering, and maintaining
unregistering, and maintaining heartbeats. heartbeats.
""" """
def __init__(self): def __init__(self):
self.hosts = set() self.hosts = set()
@ -181,8 +161,8 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
super(HeartbeatMatchMakerBase, self).__init__() super(HeartbeatMatchMakerBase, self).__init__()
def send_heartbeats(self): def send_heartbeats(self):
""" """Send all heartbeats.
Send all heartbeats.
Use start_heartbeat to spawn a heartbeat greenthread, Use start_heartbeat to spawn a heartbeat greenthread,
which loops this method. which loops this method.
""" """
@ -190,32 +170,31 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
self.ack_alive(key, host) self.ack_alive(key, host)
def ack_alive(self, key, host): def ack_alive(self, key, host):
""" """Acknowledge that a host.topic is alive.
Acknowledge that a host.topic is alive.
Used internally for updating heartbeats, Used internally for updating heartbeats, but may also be used
but may also be used publically to acknowledge publicly to acknowledge a system is alive (i.e. rpc message
a system is alive (i.e. rpc message successfully successfully sent to host)
sent to host)
""" """
raise NotImplementedError("Must implement ack_alive") raise NotImplementedError("Must implement ack_alive")
def backend_register(self, key, host): def backend_register(self, key, host):
""" """Implements registration logic.
Implements registration logic.
Called by register(self,key,host) Called by register(self,key,host)
""" """
raise NotImplementedError("Must implement backend_register") raise NotImplementedError("Must implement backend_register")
def backend_unregister(self, key, key_host): def backend_unregister(self, key, key_host):
""" """Implements de-registration logic.
Implements de-registration logic.
Called by unregister(self,key,host) Called by unregister(self,key,host)
""" """
raise NotImplementedError("Must implement backend_unregister") raise NotImplementedError("Must implement backend_unregister")
def register(self, key, host): def register(self, key, host):
""" """Register a host on a backend.
Register a host on a backend.
Heartbeats, if applicable, may keepalive registration. Heartbeats, if applicable, may keepalive registration.
""" """
self.hosts.add(host) self.hosts.add(host)
@ -227,25 +206,24 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
self.ack_alive(key, host) self.ack_alive(key, host)
def unregister(self, key, host): def unregister(self, key, host):
""" """Unregister a topic."""
Unregister a topic.
"""
if (key, host) in self.host_topic: if (key, host) in self.host_topic:
del self.host_topic[(key, host)] del self.host_topic[(key, host)]
self.hosts.discard(host) self.hosts.discard(host)
self.backend_unregister(key, '.'.join((key, host))) self.backend_unregister(key, '.'.join((key, host)))
LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) LOG.info(_LI("Matchmaker unregistered: %(key)s, %(host)s"),
{'key': key, 'host': host})
def start_heartbeat(self): def start_heartbeat(self):
""" """Implementation of MatchMakerBase.start_heartbeat.
Implementation of MatchMakerBase.start_heartbeat
Launches greenthread looping send_heartbeats(), Launches greenthread looping send_heartbeats(),
yielding for CONF.matchmaker_heartbeat_freq seconds yielding for CONF.matchmaker_heartbeat_freq seconds
between iterations. between iterations.
""" """
if len(self.hosts) == 0: if not self.hosts:
raise MatchMakerException( raise MatchMakerException(
_("Register before starting heartbeat.")) _("Register before starting heartbeat."))
@ -257,45 +235,37 @@ class HeartbeatMatchMakerBase(MatchMakerBase):
self._heart = eventlet.spawn(do_heartbeat) self._heart = eventlet.spawn(do_heartbeat)
def stop_heartbeat(self): def stop_heartbeat(self):
""" """Destroys the heartbeat greenthread."""
Destroys the heartbeat greenthread.
"""
if self._heart: if self._heart:
self._heart.kill() self._heart.kill()
class DirectBinding(Binding): class DirectBinding(Binding):
""" """Specifies a host in the key via a '.' character.
Specifies a host in the key via a '.' character
Although dots are used in the key, the behavior here is Although dots are used in the key, the behavior here is
that it maps directly to a host, thus direct. that it maps directly to a host, thus direct.
""" """
def test(self, key): def test(self, key):
if '.' in key: return '.' in key
return True
return False
class TopicBinding(Binding): class TopicBinding(Binding):
""" """Where a 'bare' key without dots.
Where a 'bare' key without dots.
AMQP generally considers topic exchanges to be those *with* dots, AMQP generally considers topic exchanges to be those *with* dots,
but we deviate here in terminology as the behavior here matches but we deviate here in terminology as the behavior here matches
that of a topic exchange (whereas where there are dots, behavior that of a topic exchange (whereas where there are dots, behavior
matches that of a direct exchange). matches that of a direct exchange).
""" """
def test(self, key): def test(self, key):
if '.' not in key: return '.' not in key
return True
return False
class FanoutBinding(Binding): class FanoutBinding(Binding):
"""Match on fanout keys, where key starts with 'fanout.' string.""" """Match on fanout keys, where key starts with 'fanout.' string."""
def test(self, key): def test(self, key):
if key.startswith('fanout~'): return key.startswith('fanout~')
return True
return False
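Taken together, the three bindings classify keys like so (illustrative keys):

DirectBinding().test('compute.host1')    # True: dotted key -> direct
TopicBinding().test('compute')           # True: bare key -> topic
FanoutBinding().test('fanout~compute')   # True: 'fanout~' prefix -> fanout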
class StubExchange(Exchange): class StubExchange(Exchange):
@ -304,67 +274,6 @@ class StubExchange(Exchange):
return [(key, None)] return [(key, None)]
class RingExchange(Exchange):
"""
Match Maker where hosts are loaded from a static file containing
a hashmap (JSON formatted).
__init__ takes an optional ring dictionary argument, otherwise
loads the ringfile from CONF.matchmaker_ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
fh = open(CONF.matchmaker_ringfile, 'r')
self.ring = json.load(fh)
fh.close()
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
if key in self.ring0:
return True
return False
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
class LocalhostExchange(Exchange): class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local.""" """Exchange where all direct topics are local."""
def __init__(self, host='localhost'): def __init__(self, host='localhost'):
@ -376,8 +285,8 @@ class LocalhostExchange(Exchange):
class DirectExchange(Exchange): class DirectExchange(Exchange):
""" """Exchange where all topic keys are split, sending to second half.
Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute.host" running on "host" i.e. "compute.host" sends a message to "compute.host" running on "host"
""" """
def __init__(self): def __init__(self):
@ -388,20 +297,9 @@ class DirectExchange(Exchange):
return [(key, e)] return [(key, e)]
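Assuming the elided run() splits the key on its first dot, as the docstring describes, a lookup behaves like this (hypothetical key):

DirectExchange().run('compute.host1')  # => [('compute.host1', 'host1')]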
class MatchMakerRing(MatchMakerBase):
"""
Match Maker where hosts are loaded from a static hashmap.
"""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(DirectBinding(), DirectExchange())
self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))
class MatchMakerLocalhost(MatchMakerBase): class MatchMakerLocalhost(MatchMakerBase):
""" """Match Maker where all bare topics resolve to localhost.
Match Maker where all bare topics resolve to localhost.
Useful for testing. Useful for testing.
""" """
def __init__(self, host='localhost'): def __init__(self, host='localhost'):
@ -412,13 +310,13 @@ class MatchMakerLocalhost(MatchMakerBase):
class MatchMakerStub(MatchMakerBase): class MatchMakerStub(MatchMakerBase):
""" """Match Maker where topics are untouched.
Match Maker where topics are untouched.
Useful for testing, or for AMQP/brokered queues. Useful for testing, or for AMQP/brokered queues.
Will not work where knowledge of hosts is known (i.e. zeromq) Will not work where knowledge of hosts is known (i.e. zeromq)
""" """
def __init__(self): def __init__(self):
super(MatchMakerLocalhost, self).__init__() super(MatchMakerStub, self).__init__()
self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(FanoutBinding(), StubExchange())
self.add_binding(DirectBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange())

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudscaling Group, Inc # Copyright 2013 Cloudscaling Group, Inc
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """
The MatchMaker classes should accept a Topic or Fanout exchange key and The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance. return keys for direct exchanges, per (approximate) AMQP parlance.
@ -35,7 +34,6 @@ matchmaker_redis_opts = [
default=6379, default=6379,
help='Use this port to connect to redis host.'), help='Use this port to connect to redis host.'),
cfg.StrOpt('password', cfg.StrOpt('password',
default=None,
help='Password for Redis server. (optional)'), help='Password for Redis server. (optional)'),
] ]
@ -55,8 +53,8 @@ class RedisExchange(mm_common.Exchange):
class RedisTopicExchange(RedisExchange): class RedisTopicExchange(RedisExchange):
""" """Exchange where all topic keys are split, sending to second half.
Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute" running on "host" i.e. "compute.host" sends a message to "compute" running on "host"
""" """
def run(self, topic): def run(self, topic):
@ -77,9 +75,7 @@ class RedisTopicExchange(RedisExchange):
class RedisFanoutExchange(RedisExchange): class RedisFanoutExchange(RedisExchange):
""" """Return a list of all hosts."""
Return a list of all hosts.
"""
def run(self, topic): def run(self, topic):
topic = topic.split('~', 1)[1] topic = topic.split('~', 1)[1]
hosts = self.redis.smembers(topic) hosts = self.redis.smembers(topic)
@ -90,16 +86,14 @@ class RedisFanoutExchange(RedisExchange):
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase): class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
""" """MatchMaker registering and looking-up hosts with a Redis server."""
MatchMaker registering and looking-up hosts with a Redis server.
"""
def __init__(self): def __init__(self):
super(MatchMakerRedis, self).__init__() super(MatchMakerRedis, self).__init__()
if not redis: if not redis:
raise ImportError("Failed to import module redis.") raise ImportError("Failed to import module redis.")
self.redis = redis.StrictRedis( self.redis = redis.Redis(
host=CONF.matchmaker_redis.host, host=CONF.matchmaker_redis.host,
port=CONF.matchmaker_redis.port, port=CONF.matchmaker_redis.port,
password=CONF.matchmaker_redis.password) password=CONF.matchmaker_redis.password)

View File

@ -0,0 +1,106 @@
# Copyright 2011-2013 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import itertools
import json
from oslo.config import cfg
from manila.openstack.common.gettextutils import _LW
from manila.openstack.common import log as logging
from manila.openstack.common.rpc import matchmaker as mm
matchmaker_opts = [
# Matchmaker ring file
cfg.StrOpt('ringfile',
deprecated_name='matchmaker_ringfile',
deprecated_group='DEFAULT',
default='/etc/oslo/matchmaker_ring.json',
help='Matchmaker ring file (JSON)'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
LOG = logging.getLogger(__name__)
class RingExchange(mm.Exchange):
"""Match Maker where hosts are loaded from a static JSON formatted file.
__init__ takes an optional ring dictionary argument, otherwise
loads the ringfile from CONF.matchmaker_ring.ringfile.
"""
def __init__(self, ring=None):
super(RingExchange, self).__init__()
if ring:
self.ring = ring
else:
with open(CONF.matchmaker_ring.ringfile, 'r') as fh:
self.ring = json.load(fh)
self.ring0 = {}
for k in self.ring.keys():
self.ring0[k] = itertools.cycle(self.ring[k])
def _ring_has(self, key):
return key in self.ring0
class RoundRobinRingExchange(RingExchange):
"""A Topic Exchange based on a hashmap."""
def __init__(self, ring=None):
super(RoundRobinRingExchange, self).__init__(ring)
def run(self, key):
if not self._ring_has(key):
LOG.warn(
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (key, )
)
return []
host = next(self.ring0[key])
return [(key + '.' + host, host)]
class FanoutRingExchange(RingExchange):
"""Fanout Exchange based on a hashmap."""
def __init__(self, ring=None):
super(FanoutRingExchange, self).__init__(ring)
def run(self, key):
# Assume starts with "fanout~", strip it for lookup.
nkey = key.split('fanout~')[1:][0]
if not self._ring_has(nkey):
LOG.warn(
_LW("No key defining hosts for topic '%s', "
"see ringfile") % (nkey, )
)
return []
return map(lambda x: (key + '.' + x, x), self.ring[nkey])
class MatchMakerRing(mm.MatchMakerBase):
"""Match Maker where hosts are loaded from a static hashmap."""
def __init__(self, ring=None):
super(MatchMakerRing, self).__init__()
self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
self.add_binding(mm.DirectBinding(), mm.DirectExchange())
self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
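A minimal sketch of how the relocated ring matchmaker resolves a topic, using an assumed two-host ring in place of the JSON ringfile:

ring = {'compute': ['host1', 'host2']}  # stand-in for matchmaker_ring.json
ex = RoundRobinRingExchange(ring)
ex.run('compute')  # => [('compute.host1', 'host1')]
ex.run('compute')  # => [('compute.host2', 'host2')], then cycles back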

View File

@ -1,6 +1,4 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012-2013 Red Hat, Inc.
# Copyright 2012 Red Hat, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain # not use this file except in compliance with the License. You may obtain
@ -21,8 +19,11 @@ For more information about rpc API version numbers, see:
rpc/dispatcher.py rpc/dispatcher.py
""" """
import six
from manila.openstack.common import rpc from manila.openstack.common import rpc
from manila.openstack.common.rpc import common as rpc_common
from manila.openstack.common.rpc import serializer as rpc_serializer
class RpcProxy(object): class RpcProxy(object):
@ -34,16 +35,28 @@ class RpcProxy(object):
rpc API. rpc API.
""" """
def __init__(self, topic, default_version): # The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy. """Initialize an RpcProxy.
:param topic: The topic to use for all messages. :param topic: The topic to use for all messages.
:param default_version: The default API version to request in all :param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message outgoing messages. This can be overridden on a per-message
basis. basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
:param serializer: Optionally (de-)serialize entities with a
provided helper.
""" """
self.topic = topic self.topic = topic
self.default_version = default_version self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__() super(RpcProxy, self).__init__()
def _set_version(self, msg, vers): def _set_version(self, msg, vers):
@ -52,19 +65,44 @@ class RpcProxy(object):
:param msg: The message having a version added to it. :param msg: The message having a version added to it.
:param vers: The version number to add to the message. :param vers: The version number to add to the message.
""" """
msg['version'] = vers if vers else self.default_version v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
msg['version'] = v
def _get_topic(self, topic): def _get_topic(self, topic):
"""Return the topic to use for a message.""" """Return the topic to use for a message."""
return topic if topic else self.topic return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
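The new version cap plays out roughly like this (hypothetical topic and versions; version_is_compatible() compares each requested version against the cap):

proxy = RpcProxy('example_topic', default_version='1.5', version_cap='1.8')
proxy.can_send_version('1.7')  # True: within the 1.8 cap
proxy.can_send_version('2.0')  # False: exceeds the cap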
@staticmethod @staticmethod
def make_namespaced_msg(method, namespace, **kwargs): def make_namespaced_msg(method, namespace, **kwargs):
return {'method': method, 'namespace': namespace, 'args': kwargs} return {'method': method, 'namespace': namespace, 'args': kwargs}
@staticmethod def make_msg(self, method, **kwargs):
def make_msg(method, **kwargs): return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
return RpcProxy.make_namespaced_msg(method, None, **kwargs) **kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in six.iteritems(kwargs):
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None): def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method. """rpc.call() a remote method.
@ -81,9 +119,11 @@ class RpcProxy(object):
:returns: The return value from the remote method. :returns: The return value from the remote method.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic) real_topic = self._get_topic(topic)
try: try:
return rpc.call(context, real_topic, msg, timeout) result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc: except rpc.common.Timeout as exc:
raise rpc.common.Timeout( raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method')) exc.info, real_topic, msg.get('method'))
@ -104,9 +144,11 @@ class RpcProxy(object):
from the remote method as they arrive. from the remote method as they arrive.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic) real_topic = self._get_topic(topic)
try: try:
return rpc.multicall(context, real_topic, msg, timeout) result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc: except rpc.common.Timeout as exc:
raise rpc.common.Timeout( raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method')) exc.info, real_topic, msg.get('method'))
@ -124,6 +166,7 @@ class RpcProxy(object):
remote method. remote method.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg) rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None): def fanout_cast(self, context, msg, topic=None, version=None):
@ -139,6 +182,7 @@ class RpcProxy(object):
from the remote method. from the remote method.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg) rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None, def cast_to_server(self, context, server_params, msg, topic=None,
@ -157,6 +201,7 @@ class RpcProxy(object):
return values. return values.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast_to_server(context, server_params, self._get_topic(topic), msg) rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
def fanout_cast_to_server(self, context, server_params, msg, topic=None, def fanout_cast_to_server(self, context, server_params, msg, topic=None,
@ -175,5 +220,6 @@ class RpcProxy(object):
return values. return values.
""" """
self._set_version(msg, version) self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast_to_server(context, server_params, rpc.fanout_cast_to_server(context, server_params,
self._get_topic(topic), msg) self._get_topic(topic), msg)

View File

@ -0,0 +1,54 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides the definition of an RPC serialization handler"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Serializer(object):
"""Generic (de-)serialization definition base class."""
@abc.abstractmethod
def serialize_entity(self, context, entity):
"""Serialize something to primitive form.
:param context: Security context
:param entity: Entity to be serialized
:returns: Serialized form of entity
"""
pass
@abc.abstractmethod
def deserialize_entity(self, context, entity):
"""Deserialize something from primitive form.
:param context: Security context
:param entity: Primitive to be deserialized
:returns: Deserialized form of entity
"""
pass
class NoOpSerializer(Serializer):
"""A serializer that does nothing."""
def serialize_entity(self, context, entity):
return entity
def deserialize_entity(self, context, entity):
return entity
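A minimal custom serializer built on this base; the to_dict()/from_dict() entity helpers are assumptions for illustration, not part of the sync:

class DictSerializer(Serializer):
    """Sketch: (de-)serialize entities exposing to_dict()/from_dict()."""
    def __init__(self, entity_class):
        self.entity_class = entity_class

    def serialize_entity(self, context, entity):
        return entity.to_dict()  # assumed helper on the entity

    def deserialize_entity(self, context, entity):
        return self.entity_class.from_dict(entity)  # assumed helper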

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -17,7 +15,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import rpc from manila.openstack.common import rpc
from manila.openstack.common.rpc import dispatcher as rpc_dispatcher from manila.openstack.common.rpc import dispatcher as rpc_dispatcher
@ -30,11 +27,13 @@ LOG = logging.getLogger(__name__)
class Service(service.Service): class Service(service.Service):
"""Service object for binaries running on hosts. """Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.""" A service enables rpc by listening to queues based on topic and host.
def __init__(self, host, topic, manager=None): """
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__() super(Service, self).__init__()
self.host = host self.host = host
self.topic = topic self.topic = topic
self.serializer = serializer
if manager is None: if manager is None:
self.manager = self self.manager = self
else: else:
@ -44,10 +43,11 @@ class Service(service.Service):
super(Service, self).start() super(Service, self).start()
self.conn = rpc.create_connection(new=True) self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") % LOG.debug("Creating Consumer connection for Service %s" %
self.topic) self.topic)
dispatcher = rpc_dispatcher.RpcDispatcher([self.manager]) dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
self.serializer)
# Share this same connection for these Consumers # Share this same connection for these Consumers
self.conn.create_consumer(self.topic, dispatcher, fanout=False) self.conn.create_consumer(self.topic, dispatcher, fanout=False)

3
manila/openstack/common/rpc/zmq_receiver.py Executable file → Normal file
View File

@ -1,6 +1,3 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation # Copyright 2011 OpenStack Foundation
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may

View File

@ -0,0 +1,95 @@
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter support
"""
from manila.openstack.common.gettextutils import _LI
from manila.openstack.common import log as logging
from manila.openstack.common.scheduler import base_handler
LOG = logging.getLogger(__name__)
class BaseFilter(object):
"""Base class for all filter classes."""
def _filter_one(self, obj, filter_properties):
"""Return True if it passes the filter, False otherwise.
Override this in a subclass.
"""
return True
def filter_all(self, filter_obj_list, filter_properties):
"""Yield objects that pass the filter.
Can be overridden in a subclass, if you need to base filtering
decisions on all objects. Otherwise, one can just override
_filter_one() to filter a single object.
"""
for obj in filter_obj_list:
if self._filter_one(obj, filter_properties):
yield obj
# Set to true in a subclass if a filter only needs to be run once
# for each request rather than for each instance
run_filter_once_per_request = False
def run_filter_for_index(self, index):
"""Return True if the filter needs to be run for the "index-th"
instance in a request. Only need to override this if a filter
needs anything other than "first only" or "all" behaviour.
"""
return not (self.run_filter_once_per_request and index > 0)
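A minimal filter built on this base class; the 'size' key and free_capacity_gb attribute are assumptions for illustration:

class EnoughFreeSpaceFilter(BaseFilter):
    """Sketch: pass only hosts that can hold the requested size."""
    def _filter_one(self, obj, filter_properties):
        requested = filter_properties.get('size', 0)  # assumed key
        return getattr(obj, 'free_capacity_gb', 0) >= requested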
class BaseFilterHandler(base_handler.BaseHandler):
"""Base class to handle loading filter classes.
This class should be subclassed where one needs to use filters.
"""
def get_filtered_objects(self, filter_classes, objs,
filter_properties, index=0):
"""Get objects after filter
:param filter_classes: filters that will be used to filter the
objects
:param objs: objects that will be filtered
:param filter_properties: client filter properties
:param index: This value needs to be increased in the caller
function of get_filtered_objects when handling
each resource.
"""
list_objs = list(objs)
LOG.debug("Starting with %d host(s)", len(list_objs))
for filter_cls in filter_classes:
cls_name = filter_cls.__name__
filter_class = filter_cls()
if filter_class.run_filter_for_index(index):
objs = filter_class.filter_all(list_objs, filter_properties)
if objs is None:
LOG.debug("Filter %(cls_name)s says to stop filtering",
{'cls_name': cls_name})
return
list_objs = list(objs)
msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)")
% {'cls_name': cls_name, 'obj_len': len(list_objs)})
if not list_objs:
LOG.info(msg)
break
LOG.debug(msg)
return list_objs
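Wired together, a handler is typically driven like this (the namespace and all_hosts are placeholders):

handler = BaseFilterHandler(BaseFilter, 'example.filters')  # assumed namespace
classes = handler.get_all_classes()
survivors = handler.get_filtered_objects(classes, all_hosts,
                                         {'size': 10}, index=0)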

View File

@ -0,0 +1,46 @@
# Copyright (c) 2011-2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A common base for handling extension classes.
Used by BaseFilterHandler and BaseWeightHandler
"""
import inspect
from stevedore import extension
class BaseHandler(object):
"""Base class to handle loading filter and weight classes."""
def __init__(self, modifier_class_type, modifier_namespace):
self.namespace = modifier_namespace
self.modifier_class_type = modifier_class_type
self.extension_manager = extension.ExtensionManager(modifier_namespace)
def _is_correct_class(self, cls):
"""Return whether an object is a class of the correct type and
is not prefixed with an underscore.
"""
return (inspect.isclass(cls) and
not cls.__name__.startswith('_') and
issubclass(cls, self.modifier_class_type))
def get_all_classes(self):
# Some classes may have an entry point of their own and may also be
# returned by a function such as 'all_filters', for example.
return [ext.plugin for ext in self.extension_manager if
self._is_correct_class(ext.plugin)]

View File

@ -0,0 +1,146 @@
# Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Pluggable Weighing support
"""
import abc
import six
from manila.openstack.common.scheduler import base_handler
def normalize(weight_list, minval=None, maxval=None):
"""Normalize the values in a list between 0 and 1.0.
The normalization is made regarding the lower and upper values present in
weight_list. If the minval and/or maxval parameters are set, these values
will be used instead of the minimum and maximum from the list.
If all the values are equal, they are normalized to 0.
"""
if not weight_list:
return ()
if maxval is None:
maxval = max(weight_list)
if minval is None:
minval = min(weight_list)
maxval = float(maxval)
minval = float(minval)
if minval == maxval:
return [0] * len(weight_list)
range_ = maxval - minval
return ((i - minval) / range_ for i in weight_list)
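Concretely, with some made-up weights:

list(normalize([1.0, 2.0, 4.0]))         # [0.0, 0.333..., 1.0]
normalize([2.0, 2.0])                    # [0, 0]: all values equal
list(normalize([1.0, 3.0], maxval=5.0))  # [0.0, 0.5]: explicit upper bound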
class WeighedObject(object):
"""Object with weight information."""
def __init__(self, obj, weight):
self.obj = obj
self.weight = weight
def __repr__(self):
return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
class BaseWeigher(six.with_metaclass(abc.ABCMeta, object)):
"""Base class for pluggable weighers.
The attributes maxval and minval can be specified to set up the maximum
and minimum values for the weighed objects. These values will then be
taken into account in the normalization step, instead of taking the values
from the calculated weights.
"""
minval = None
maxval = None
def weight_multiplier(self):
"""How weighted this weigher should be.
Override this method in a subclass, so that the returned value is
read from a configuration option to permit operators specify a
multiplier for the weigher.
"""
return 1.0
@abc.abstractmethod
def _weigh_object(self, obj, weight_properties):
"""Override in a subclass to specify a weight for a specific
object.
"""
def weigh_objects(self, weighed_obj_list, weight_properties):
"""Weigh multiple objects.
Override in a subclass if you need access to all objects in order
to calculate weights. Do not modify the weight of an object here,
just return a list of weights.
"""
# Calculate the weights
weights = []
for obj in weighed_obj_list:
weight = self._weigh_object(obj.obj, weight_properties)
# Record the min and max values if they are None. If they are
# anything but None, we assume that the weigher has set them.
if self.minval is None:
self.minval = weight
if self.maxval is None:
self.maxval = weight
if weight < self.minval:
self.minval = weight
elif weight > self.maxval:
self.maxval = weight
weights.append(weight)
return weights
class BaseWeightHandler(base_handler.BaseHandler):
object_class = WeighedObject
def get_weighed_objects(self, weigher_classes, obj_list,
weighing_properties):
"""Return a sorted (descending), normalized list of WeighedObjects."""
if not obj_list:
return []
weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
for weigher_cls in weigher_classes:
weigher = weigher_cls()
weights = weigher.weigh_objects(weighed_objs, weighing_properties)
# Normalize the weights
weights = normalize(weights,
minval=weigher.minval,
maxval=weigher.maxval)
for i, weight in enumerate(weights):
obj = weighed_objs[i]
obj.weight += weigher.weight_multiplier() * weight
return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
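End to end, a trivial weigher plugged into this handler might look like the following (the free_capacity_gb attribute is assumed):

class FreeCapacityWeigher(BaseWeigher):
    """Sketch: weigh hosts by an assumed free_capacity_gb attribute."""
    def _weigh_object(self, obj, weight_properties):
        return obj.free_capacity_gb

# get_weighed_objects([FreeCapacityWeigher], hosts, {}) would then return
# WeighedObjects sorted best-first on the normalized, multiplied weights.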

View File

@ -17,13 +17,10 @@
Scheduler host filters Scheduler host filters
""" """
from manila.openstack.common import log as logging from manila.openstack.common.scheduler import base_filter
from manila.openstack.common.scheduler import filter
LOG = logging.getLogger(__name__)
class BaseHostFilter(filter.BaseFilter): class BaseHostFilter(base_filter.BaseFilter):
"""Base class for host filters.""" """Base class for host filters."""
def _filter_one(self, obj, filter_properties): def _filter_one(self, obj, filter_properties):
"""Return True if the object passes the filter, otherwise False.""" """Return True if the object passes the filter, otherwise False."""
@ -36,6 +33,6 @@ class BaseHostFilter(filter.BaseFilter):
raise NotImplementedError() raise NotImplementedError()
class HostFilterHandler(filter.BaseFilterHandler): class HostFilterHandler(base_filter.BaseFilterHandler):
def __init__(self, namespace): def __init__(self, namespace):
super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)

View File

@ -13,16 +13,18 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler import filters
class AvailabilityZoneFilter(filters.BaseHostFilter): class AvailabilityZoneFilter(filters.BaseHostFilter):
"""Filters Hosts by availability zone.""" """Filters Hosts by availability zone."""
# Availability zones do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties): def host_passes(self, host_state, filter_properties):
spec = filter_properties.get('request_spec', {}) spec = filter_properties.get('request_spec', {})
props = spec.get('resource_properties', []) props = spec.get('resource_properties', {})
availability_zone = props.get('availability_zone') availability_zone = props.get('availability_zone')
if availability_zone: if availability_zone:

View File

@ -13,25 +13,27 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import six
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler import filters
from manila.openstack.common.scheduler.filters import extra_specs_ops from manila.openstack.common.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CapabilitiesFilter(filters.BaseHostFilter): class CapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter to work with resource (instance & share) type records.""" """HostFilter to work with resource (instance & volume) type records."""
def _satisfies_extra_specs(self, capabilities, resource_type): def _satisfies_extra_specs(self, capabilities, resource_type):
"""Check that the capabilities provided by the services """Check that the capabilities provided by the services satisfy
satisfy the extra specs associated with the instance type.""" the extra specs associated with the resource type.
"""
extra_specs = resource_type.get('extra_specs', []) extra_specs = resource_type.get('extra_specs', [])
if not extra_specs: if not extra_specs:
return True return True
for key, req in extra_specs.iteritems(): for key, req in six.iteritems(extra_specs):
# Either not scope format, or in capabilities scope # Either not scope format, or in capabilities scope
scope = key.split(':') scope = key.split(':')
if len(scope) > 1 and scope[0] != "capabilities": if len(scope) > 1 and scope[0] != "capabilities":
@ -40,24 +42,29 @@ class CapabilitiesFilter(filters.BaseHostFilter):
del scope[0] del scope[0]
cap = capabilities cap = capabilities
for index in range(0, len(scope)): for index in range(len(scope)):
try: try:
cap = cap.get(scope[index], None) cap = cap.get(scope[index])
except AttributeError: except AttributeError:
return False return False
if cap is None: if cap is None:
return False return False
if not extra_specs_ops.match(cap, req): if not extra_specs_ops.match(cap, req):
LOG.debug("extra_spec requirement '%(req)s' "
"does not match '%(cap)s'",
{'req': req, 'cap': cap})
return False return False
return True return True
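For example, with an assumed scoped spec (not from this change):

# extra_specs  = {'capabilities:thin_provisioning': '<is> True'}
# capabilities = {'thin_provisioning': True}
# The 'capabilities' scope prefix is stripped, the nested lookup yields
# True, and extra_specs_ops.match() decides whether the host passes.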
def host_passes(self, host_state, filter_properties): def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create instance_type.""" """Return a list of hosts that can create resource_type."""
# Note(zhiteng) Currently only Manila and Nova are using # Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or # this filter, so the resource type is either instance or
# volume. # volume.
resource_type = filter_properties.get('resource_type') resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities, if not self._satisfies_extra_specs(host_state.capabilities,
resource_type): resource_type):
LOG.debug("%(host_state)s fails resource_type extra_specs "
"requirements", {'host_state': host_state})
return False return False
return True return True

View File

@ -0,0 +1,55 @@
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila.openstack.common import log as logging
from manila.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
"""Filter out previously attempted hosts
A host passes this filter if it has not already been attempted for
scheduling. The scheduler needs to add previously attempted hosts
to the 'retry' key of filter_properties in order for this to work
correctly. For example:
{
'retry': {
'hosts': ['host1', 'host2'],
'num_attempts': 3,
}
}
"""
def host_passes(self, host_state, filter_properties):
"""Skip nodes that have already been attempted."""
attempted = filter_properties.get('retry')
if not attempted:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled.")
return True
hosts = attempted.get('hosts', [])
host = host_state.host
passes = host not in hosts
pass_msg = "passes" if passes else "fails"
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s" % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
return passes
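For instance, on a second scheduling pass the properties might carry:

# filter_properties = {'retry': {'hosts': ['host1'], 'num_attempts': 2}}
# A host_state with host == 'host1' now fails the filter; any other passes.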

View File

@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import operator import operator
import six
from manila.openstack.common import jsonutils from manila.openstack.common import jsonutils
from manila.openstack.common.scheduler import filters from manila.openstack.common.scheduler import filters
@ -51,7 +52,7 @@ class JsonFilter(filters.BaseHostFilter):
return self._op_compare(args, operator.gt) return self._op_compare(args, operator.gt)
def _in(self, args): def _in(self, args):
"""First term is in set of remaining terms""" """First term is in set of remaining terms."""
return self._op_compare(args, operator.contains) return self._op_compare(args, operator.contains)
def _less_than_equal(self, args): def _less_than_equal(self, args):
@ -102,7 +103,7 @@ class JsonFilter(filters.BaseHostFilter):
if obj is None: if obj is None:
return None return None
for item in path[1:]: for item in path[1:]:
obj = obj.get(item, None) obj = obj.get(item)
if obj is None: if obj is None:
return None return None
return obj return obj
@ -117,7 +118,7 @@ class JsonFilter(filters.BaseHostFilter):
for arg in query[1:]: for arg in query[1:]:
if isinstance(arg, list): if isinstance(arg, list):
arg = self._process_filter(arg, host_state) arg = self._process_filter(arg, host_state)
elif isinstance(arg, basestring): elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state) arg = self._parse_string(arg, host_state)
if arg is not None: if arg is not None:
cooked_args.append(arg) cooked_args.append(arg)
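
For context, the queries this filter evaluates are JSON-encoded prefix expressions passed in via scheduler hints; the basestring -> six.string_types change above keeps string arguments such as '$host' working under Python 3. A hedged sketch (the 'free_capacity_gb' attribute and the hint key are assumptions about the surrounding scheduler code):

    from manila.openstack.common import jsonutils

    query = jsonutils.dumps(['and',
                             ['>=', '$free_capacity_gb', 100],
                             ['in', '$host', 'host1', 'host2']])
    filter_properties = {'scheduler_hints': {'query': query}}
    # JsonFilter().host_passes(host_state, filter_properties) -> bool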

View File

@ -18,10 +18,10 @@ Scheduler host weights
""" """
from manila.openstack.common.scheduler import weight from manila.openstack.common.scheduler import base_weight
class WeighedHost(weight.WeighedObject): class WeighedHost(base_weight.WeighedObject):
def to_dict(self): def to_dict(self):
return { return {
'weight': self.weight, 'weight': self.weight,
@ -33,12 +33,12 @@ class WeighedHost(weight.WeighedObject):
(self.obj.host, self.weight)) (self.obj.host, self.weight))
class BaseHostWeigher(weight.BaseWeigher): class BaseHostWeigher(base_weight.BaseWeigher):
"""Base class for host weights.""" """Base class for host weights."""
pass pass
class HostWeightHandler(weight.BaseWeightHandler): class HostWeightHandler(base_weight.BaseWeightHandler):
object_class = WeighedHost object_class = WeighedHost
def __init__(self, namespace): def __init__(self, namespace):

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara # Copyright 2011 Justin Santa Barbara
@ -20,20 +18,30 @@
"""Generic Node base class for all workers that run on hosts.""" """Generic Node base class for all workers that run on hosts."""
import errno import errno
import logging as std_logging
import os import os
import random import random
import signal import signal
import sys import sys
import time import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet import eventlet
import logging as std_logging from eventlet import event
from oslo.config import cfg from oslo.config import cfg
from manila.openstack.common import eventlet_backdoor from manila.openstack.common import eventlet_backdoor
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _LE, _LI, _LW
from manila.openstack.common import importutils from manila.openstack.common import importutils
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import systemd
from manila.openstack.common import threadgroup from manila.openstack.common import threadgroup
@ -42,6 +50,53 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
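
A rough illustration of the process-group heuristic used by _is_daemon() above (a sketch; the outcome depends on how the process was started):

    import errno
    import os
    import sys

    try:
        foreground = os.getpgrp() == os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        foreground = err.errno != errno.ENOTTY  # ENOTTY: no terminal, daemon
    print('foreground' if foreground else 'daemon')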
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object): class Launcher(object):
"""Launch one or more services and wait for them to complete.""" """Launch one or more services and wait for them to complete."""
@ -51,19 +106,8 @@ class Launcher(object):
:returns: None :returns: None
""" """
self._services = threadgroup.ThreadGroup() self.services = Services()
eventlet_backdoor.initialize_if_enabled() self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_service(service):
"""Start and wait for a service to finish.
:param service: service to run and wait for.
:returns: None
"""
service.start()
service.wait()
def launch_service(self, service): def launch_service(self, service):
"""Load and start the given service. """Load and start the given service.
@ -72,7 +116,8 @@ class Launcher(object):
:returns: None :returns: None
""" """
self._services.add_thread(self.run_service, service) service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self): def stop(self):
"""Stop all services which are currently running. """Stop all services which are currently running.
@ -80,7 +125,7 @@ class Launcher(object):
:returns: None :returns: None
""" """
self._services.stop() self.services.stop()
def wait(self): def wait(self):
"""Waits until all services have been stopped, and then returns. """Waits until all services have been stopped, and then returns.
@ -88,7 +133,16 @@ class Launcher(object):
:returns: None :returns: None
""" """
self._services.wait() self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit): class SignalExit(SystemExit):
@ -100,33 +154,49 @@ class SignalExit(SystemExit):
class ServiceLauncher(Launcher): class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame): def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes # Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL) _set_signals_handler(signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
raise SignalExit(signo) raise SignalExit(signo)
def wait(self): def handle_signal(self):
signal.signal(signal.SIGTERM, self._handle_signal) _set_signals_handler(self._handle_signal)
signal.signal(signal.SIGINT, self._handle_signal)
LOG.debug(_('Full set of CONF:')) def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG) CONF.log_opt_values(LOG, std_logging.DEBUG)
status = None
try: try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait() super(ServiceLauncher, self).wait()
except SignalExit as exc: except SignalExit as exc:
signame = {signal.SIGTERM: 'SIGTERM', signame = _signo_to_signame(exc.signo)
signal.SIGINT: 'SIGINT'}[exc.signo] LOG.info(_LI('Caught %s, exiting'), signame)
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code status = exc.code
signo = exc.signo
except SystemExit as exc: except SystemExit as exc:
status = exc.code status = exc.code
finally: finally:
if rpc:
rpc.cleanup()
self.stop() self.stop()
return status if rpc:
try:
rpc.cleanup()
except Exception:
# We're shutting down, so it doesn't matter at this point.
LOG.exception(_LE('Exception during rpc cleanup.'))
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object): class ServiceWrapper(object):
@ -138,43 +208,82 @@ class ServiceWrapper(object):
class ProcessLauncher(object): class ProcessLauncher(object):
def __init__(self): def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {} self.children = {}
self.sigcaught = None self.sigcaught = None
self.running = True self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe() rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
signal.signal(signal.SIGTERM, self._handle_signal) def handle_signal(self):
signal.signal(signal.SIGINT, self._handle_signal) _set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame): def _handle_signal(self, signo, frame):
self.sigcaught = signo self.sigcaught = signo
self.running = False self.running = False
# Allow the process to be killed again and die from natural causes # Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL) _set_signals_handler(signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
def _pipe_watcher(self): def _pipe_watcher(self):
# This will block until the write end is closed when the parent # This will block until the write end is closed when the parent
# dies unexpectedly # dies unexpectedly
self.readpipe.read() self.readpipe.read()
LOG.info(_('Parent process has died unexpectedly, exiting')) LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1) sys.exit(1)
def _child_process(self, service): def _child_process_handle_signal(self):
# Setup child signal handlers differently # Setup child signal handlers differently
def _sigterm(*args): def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM) raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm) signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM # Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fall back into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll # Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad # fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub() eventlet.hubs.use_hub()
@ -188,7 +297,8 @@ class ProcessLauncher(object):
random.seed() random.seed()
launcher = Launcher() launcher = Launcher()
launcher.run_service(service) launcher.launch_service(service)
return launcher
def _start_child(self, wrap): def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers: if len(wrap.forktimes) > wrap.workers:
@ -197,7 +307,7 @@ class ProcessLauncher(object):
# start up quickly but ensure we don't fork off children that # start up quickly but ensure we don't fork off children that
# die instantly too quickly. # die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers: if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_('Forking too fast, sleeping')) LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1) time.sleep(1)
wrap.forktimes.pop(0) wrap.forktimes.pop(0)
@ -206,28 +316,17 @@ class ProcessLauncher(object):
pid = os.fork() pid = os.fork()
if pid == 0: if pid == 0:
# NOTE(johannes): All exceptions are caught to ensure this launcher = self._child_process(wrap.service)
# doesn't fallback into the loop spawning children. It would while True:
# be bad for a child to spawn more children. self._child_process_handle_signal()
status = 0 status, signo = self._child_wait_for_exit_or_signal(launcher)
try: if not _is_sighup_and_daemon(signo):
self._child_process(wrap.service) break
except SignalExit as exc: launcher.restart()
signame = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}[exc.signo]
LOG.info(_('Caught %s, exiting'), signame)
status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
finally:
wrap.service.stop()
os._exit(status) os._exit(status)
LOG.info(_('Started child %d'), pid) LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid) wrap.children.add(pid)
self.children[pid] = wrap self.children[pid] = wrap
@ -237,7 +336,7 @@ class ProcessLauncher(object):
def launch_service(self, service, workers=1): def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers) wrap = ServiceWrapper(service, workers)
LOG.info(_('Starting %d workers'), wrap.workers) LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers: while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap) self._start_child(wrap)
@ -254,43 +353,56 @@ class ProcessLauncher(object):
if os.WIFSIGNALED(status): if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status) sig = os.WTERMSIG(status)
LOG.info(_('Child %(pid)d killed by signal %(sig)d'), LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig)) dict(pid=pid, sig=sig))
else: else:
code = os.WEXITSTATUS(status) code = os.WEXITSTATUS(status)
LOG.info(_('Child %(pid)s exited with status %(code)d'), LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code)) dict(pid=pid, code=code))
if pid not in self.children: if pid not in self.children:
LOG.warning(_('pid %d not in child list'), pid) LOG.warning(_LW('pid %d not in child list'), pid)
return None return None
wrap = self.children.pop(pid) wrap = self.children.pop(pid)
wrap.children.remove(pid) wrap.children.remove(pid)
return wrap return wrap
def wait(self): def _respawn_children(self):
"""Loop waiting on children to die and respawning as necessary"""
LOG.debug(_('Full set of CONF:'))
CONF.log_opt_values(LOG, std_logging.DEBUG)
while self.running: while self.running:
wrap = self._wait_child() wrap = self._wait_child()
if not wrap: if not wrap:
# Yield to other threads if no children have exited # Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage # Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346) # (see bug #1095346)
eventlet.greenthread.sleep(.01) eventlet.greenthread.sleep(self.wait_interval)
continue continue
while self.running and len(wrap.children) < wrap.workers: while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap) self._start_child(wrap)
if self.sigcaught: def wait(self):
signame = {signal.SIGTERM: 'SIGTERM', """Loop waiting on children to die and respawning as necessary."""
signal.SIGINT: 'SIGINT'}[self.sigcaught]
LOG.info(_('Caught %s, stopping children'), signame) systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
if self.sigcaught:
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
for pid in self.children: for pid in self.children:
try: try:
@ -301,7 +413,7 @@ class ProcessLauncher(object):
# Wait for children to die # Wait for children to die
if self.children: if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children)) LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children: while self.children:
self._wait_child() self._wait_child()
@ -312,21 +424,81 @@ class Service(object):
def __init__(self, threads=1000): def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads) self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self): def start(self):
pass pass
def stop(self): def stop(self):
self.tg.stop() self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self): def wait(self):
self.tg.wait() self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
def launch(service, workers=None): @staticmethod
if workers: def run_service(service, done):
launcher = ProcessLauncher() """Service start wrapper.
launcher.launch_service(service, workers=workers)
else: :param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher() launcher = ServiceLauncher()
launcher.launch_service(service) launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher return launcher
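
A minimal sketch of the reworked entry point, assuming a hypothetical Service subclass; workers=1 (or None) now selects the in-process ServiceLauncher, while anything greater forks via ProcessLauncher:

    from manila.openstack.common import service

    class MyService(service.Service):
        def start(self):
            # illustrative periodic no-op on the service's thread group
            self.tg.add_timer(60, lambda: None)

    launcher = service.launch(MyService())               # ServiceLauncher
    # launcher = service.launch(MyService(), workers=4)  # ProcessLauncher
    launcher.wait()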

View File

@ -0,0 +1,95 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from manila.openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
help="Private key file to use when starting "
"the server securely."),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation. # Copyright 2011 OpenStack Foundation.
# All Rights Reserved. # All Rights Reserved.
# #
@ -19,18 +17,41 @@
System-level utilities and helper functions. System-level utilities and helper functions.
""" """
import math
import re
import sys import sys
import unicodedata
import six
from manila.openstack.common.gettextutils import _ from manila.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject): def int_from_bool_as_string(subject):
""" """Interpret a string as a boolean and return either 1 or 0.
Interpret a string as a boolean and return either 1 or 0.
Any string value in: Any string value in:
@ -43,13 +64,12 @@ def int_from_bool_as_string(subject):
return bool_from_string(subject) and 1 or 0 return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False): def bool_from_string(subject, strict=False, default=False):
""" """Interpret a string as a boolean.
Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't', A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when 'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False. `strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing. Useful for JSON-decoded stuff and config file parsing.
@ -57,8 +77,8 @@ def bool_from_string(subject, strict=False):
ValueError which is useful when parsing values passed in from an API call. ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
""" """
if not isinstance(subject, basestring): if not isinstance(subject, six.string_types):
subject = str(subject) subject = six.text_type(subject)
lowered = subject.strip().lower() lowered = subject.strip().lower()
@ -74,25 +94,24 @@ def bool_from_string(subject, strict=False):
'acceptable': acceptable} 'acceptable': acceptable}
raise ValueError(msg) raise ValueError(msg)
else: else:
return False return default
def safe_decode(text, incoming=None, errors='strict'): def safe_decode(text, incoming=None, errors='strict'):
""" """Decodes incoming text/bytes string using `incoming` if they're not
Decodes incoming str using `incoming` if they're already unicode.
not already unicode.
:param incoming: Text's current encoding :param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid :param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded :returns: text or a unicode `incoming` encoded
representation of it. representation of it.
:raises TypeError: If text is not an isntance of basestring :raises TypeError: If text is not an instance of str
""" """
if not isinstance(text, basestring): if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text)) raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, unicode): if isinstance(text, six.text_type):
return text return text
if not incoming: if not incoming:
@ -119,11 +138,10 @@ def safe_decode(text, incoming=None, errors='strict'):
def safe_encode(text, incoming=None, def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'): encoding='utf-8', errors='strict'):
""" """Encodes incoming text/bytes string using `encoding`.
Encodes incoming str/unicode using `encoding`. If
incoming is not specified, text is expected to If incoming is not specified, text is expected to be encoded with
be encoded with current python's default encoding. current python's default encoding. (`sys.getdefaultencoding`)
(`sys.getdefaultencoding`)
:param incoming: Text's current encoding :param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8) :param encoding: Expected encoding for text (Default UTF-8)
@ -131,20 +149,91 @@ def safe_encode(text, incoming=None,
values http://docs.python.org/2/library/codecs.html values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded :returns: text or a bytestring `encoding` encoded
representation of it. representation of it.
:raises TypeError: If text is not an isntance of basestring :raises TypeError: If text is not an instance of str
""" """
if not isinstance(text, basestring): if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text)) raise TypeError("%s can't be encoded" % type(text))
if not incoming: if not incoming:
incoming = (sys.stdin.encoding or incoming = (sys.stdin.encoding or
sys.getdefaultencoding()) sys.getdefaultencoding())
if isinstance(text, unicode): if isinstance(text, six.text_type):
return text.encode(encoding, errors) return text.encode(encoding, errors)
elif text and encoding != incoming: elif text and encoding != incoming:
# Decode text before encoding it with `encoding` # Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors) text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors) return text.encode(encoding, errors)
else:
return text
return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
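
A few expected values for the new and changed helpers (interactive sketch):

    string_to_bytes('1024KB')                 # 1048576.0 (IEC, base 1024)
    string_to_bytes('1MB', unit_system='SI')  # 1000000.0 (SI, base 1000)
    string_to_bytes('8Kb')                    # 1024.0 (bits divided by 8)
    bool_from_string('maybe', default=True)   # True: new 'default' fallback
    to_slug(u'Caf\xe9 Share #1')              # u'cafe-share-1'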

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc. # Copyright 2012 Red Hat, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,10 +11,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import threading
from eventlet import greenlet import eventlet
from eventlet import greenpool from eventlet import greenpool
from eventlet import greenthread
from manila.openstack.common import log as logging from manila.openstack.common import log as logging
from manila.openstack.common import loopingcall from manila.openstack.common import loopingcall
@ -26,7 +24,7 @@ LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs): def _thread_done(gt, *args, **kwargs):
""" Callback function to be passed to GreenThread.link() when we spawn() """Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify it. Calls the :class:`ThreadGroup` to notify it.
""" """
@ -34,7 +32,7 @@ def _thread_done(gt, *args, **kwargs):
class Thread(object): class Thread(object):
""" Wrapper around a greenthread, that holds a reference to the """Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has finished so it can be removed from the threads list. it has finished so it can be removed from the threads list.
""" """
@ -48,9 +46,12 @@ class Thread(object):
def wait(self): def wait(self):
return self.thread.wait() return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object): class ThreadGroup(object):
""" The point of the ThreadGroup classis to: """The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them * keep track of timers and greenthreads (making it easier to stop them
when need be). when need be).
@ -61,6 +62,13 @@ class ThreadGroup(object):
self.threads = [] self.threads = []
self.timers = [] self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None, def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs): *args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
@ -72,13 +80,17 @@ class ThreadGroup(object):
gt = self.pool.spawn(callback, *args, **kwargs) gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self) th = Thread(gt, self)
self.threads.append(th) self.threads.append(th)
return th
def thread_done(self, thread): def thread_done(self, thread):
self.threads.remove(thread) self.threads.remove(thread)
def stop(self): def _stop_threads(self):
current = greenthread.getcurrent() current = threading.current_thread()
for x in self.threads:
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current: if x is current:
# don't kill the current thread. # don't kill the current thread.
continue continue
@ -87,6 +99,7 @@ class ThreadGroup(object):
except Exception as ex: except Exception as ex:
LOG.exception(ex) LOG.exception(ex)
def stop_timers(self):
for x in self.timers: for x in self.timers:
try: try:
x.stop() x.stop()
@ -94,21 +107,41 @@ class ThreadGroup(object):
LOG.exception(ex) LOG.exception(ex)
self.timers = [] self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self): def wait(self):
for x in self.timers: for x in self.timers:
try: try:
x.wait() x.wait()
except greenlet.GreenletExit: except eventlet.greenlet.GreenletExit:
pass pass
except Exception as ex: except Exception as ex:
LOG.exception(ex) LOG.exception(ex)
current = greenthread.getcurrent() current = threading.current_thread()
for x in self.threads:
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current: if x is current:
continue continue
try: try:
x.wait() x.wait()
except greenlet.GreenletExit: except eventlet.greenlet.GreenletExit:
pass pass
except Exception as ex: except Exception as ex:
LOG.exception(ex) LOG.exception(ex)
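
A short sketch of the new graceful-stop semantics (the worker callable is hypothetical):

    import eventlet

    from manila.openstack.common import threadgroup

    def worker():
        eventlet.sleep(1)

    tg = threadgroup.ThreadGroup()
    th = tg.add_thread(worker)  # add_thread now returns the Thread wrapper
    tg.stop(graceful=True)      # wait for threads instead of killing them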

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation. # Copyright 2011 OpenStack Foundation.
# All Rights Reserved. # All Rights Reserved.
# #
@ -21,8 +19,10 @@ Time related utilities and helper functions.
import calendar import calendar
import datetime import datetime
import time
import iso8601 import iso8601
import six
# ISO 8601 extended time format with microseconds # ISO 8601 extended time format with microseconds
@ -32,7 +32,7 @@ PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False): def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format""" """Stringify time in ISO 8601 format."""
if not at: if not at:
at = utcnow() at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT st = at.strftime(_ISO8601_TIME_FORMAT
@ -44,13 +44,13 @@ def isotime(at=None, subsecond=False):
def parse_isotime(timestr): def parse_isotime(timestr):
"""Parse time from ISO 8601 format""" """Parse time from ISO 8601 format."""
try: try:
return iso8601.parse_date(timestr) return iso8601.parse_date(timestr)
except iso8601.ParseError as e: except iso8601.ParseError as e:
raise ValueError(e.message) raise ValueError(six.text_type(e))
except TypeError as e: except TypeError as e:
raise ValueError(e.message) raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT): def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@ -66,7 +66,7 @@ def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
def normalize_time(timestamp): def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object""" """Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset() offset = timestamp.utcoffset()
if offset is None: if offset is None:
return timestamp return timestamp
@ -75,20 +75,31 @@ def normalize_time(timestamp):
def is_older_than(before, seconds): def is_older_than(before, seconds):
"""Return True if before is older than seconds.""" """Return True if before is older than seconds."""
if isinstance(before, basestring): if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None) before = parse_strtime(before).replace(tzinfo=None)
else:
before = before.replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds) return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds): def is_newer_than(after, seconds):
"""Return True if after is newer than seconds.""" """Return True if after is newer than seconds."""
if isinstance(after, basestring): if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None) after = parse_strtime(after).replace(tzinfo=None)
else:
after = after.replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds) return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts(): def utcnow_ts():
"""Timestamp version of our utcnow function.""" """Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple()) return calendar.timegm(utcnow().timetuple())
@ -103,19 +114,22 @@ def utcnow():
def iso8601_from_timestamp(timestamp): def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp""" """Returns a iso8601 formatted date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp)) return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()): def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
""" """
Override utils.utcnow to return a constant time or a list thereof, utcnow.override_time = override_time or datetime.datetime.utcnow()
one at a time.
"""
utcnow.override_time = override_time
def advance_time_delta(timedelta): def advance_time_delta(timedelta):
@ -141,7 +155,8 @@ def clear_time_override():
def marshall_now(now=None): def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds. """Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.""" Note: tzinfo is stripped, but not required for relative times.
"""
if not now: if not now:
now = utcnow() now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
@ -161,11 +176,21 @@ def unmarshall_time(tyme):
def delta_seconds(before, after): def delta_seconds(before, after):
""" """Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution). datetime objects (as a float, to microsecond resolution).
""" """
delta = after - before delta = after - before
return total_seconds(delta)
def total_seconds(delta):
"""Return the total seconds of datetime.timedelta object.
Compute total seconds of datetime.timedelta, datetime.timedelta
doesn't have method total_seconds in Python2.6, calculate it manually.
"""
try: try:
return delta.total_seconds() return delta.total_seconds()
except AttributeError: except AttributeError:
@ -174,11 +199,10 @@ def delta_seconds(before, after):
def is_soon(dt, window): def is_soon(dt, window):
""" """Determines if time is going to happen in the next window seconds.
Determines if time is going to happen in the next window seconds.
:params dt: the time :param dt: the time
:params window: minimum seconds to remain to consider the time not soon :param window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration :return: True if expiration is within the given duration
""" """

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Intel Corporation. # Copyright (c) 2012 Intel Corporation.
# All Rights Reserved. # All Rights Reserved.
# #

View File

@ -0,0 +1,148 @@
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import pkg_resources
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
"""
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
_RELEASES = {
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func):
if not self.what:
self.what = func.__name__ + '()'
@functools.wraps(func)
def wrapped(*args, **kwargs):
msg, details = self._build_message()
LOG.deprecated(msg, details)
return func(*args, **kwargs)
return wrapped
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
# we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
msg = self._deprecated_msg_with_alternative
else:
msg = self._deprecated_msg_no_alternative
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
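
A sketch of the two entry points added here (new_helper is hypothetical):

    from manila.openstack.common import versionutils

    @versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
                             in_favor_of='new_helper()')
    def old_helper():
        pass

    old_helper()                              # logs the deprecation message
    versionutils.is_compatible('2.0', '2.3')  # True
    versionutils.is_compatible('1.9', '2.3')  # False: major versions differ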

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC. # Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved. # All Rights Reserved.
# #
@ -16,26 +14,19 @@
# under the License. # under the License.
"""Policy Engine For Manila""" """Policy Engine For Manila"""
import functools import functools
import os.path
from oslo.config import cfg from oslo.config import cfg
from manila import exception from manila import exception
from manila.openstack.common import policy from manila.openstack.common import policy
from manila import utils from manila import utils
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('JSON file representing policy')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Rule checked when requested rule is not found')), ]
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(policy_opts)
_ENFORCER = None
_POLICY_PATH = None _POLICY_PATH = None
_POLICY_CACHE = {} _POLICY_CACHE = {}
@ -43,26 +34,33 @@ _POLICY_CACHE = {}
def reset(): def reset():
global _POLICY_PATH global _POLICY_PATH
global _POLICY_CACHE global _POLICY_CACHE
global _ENFORCER
_POLICY_PATH = None _POLICY_PATH = None
_POLICY_CACHE = {} _POLICY_CACHE = {}
policy.reset() _ENFORCER = None
def init(): def init():
global _POLICY_PATH global _POLICY_PATH
global _POLICY_CACHE global _POLICY_CACHE
global _ENFORCER
if not _POLICY_PATH: if not _POLICY_PATH:
_POLICY_PATH = utils.find_config(CONF.policy_file) _POLICY_PATH = CONF.policy_file
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, if not os.path.exists(_POLICY_PATH):
reload_func=_set_brain) _POLICY_PATH = utils.find_config(_POLICY_PATH)
if not _ENFORCER:
_ENFORCER = policy.Enforcer(policy_file=_POLICY_PATH)
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, reload_func=_set_rules)
def _set_brain(data): def _set_rules(data):
global _ENFORCER
default_rule = CONF.policy_default_rule default_rule = CONF.policy_default_rule
policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) _ENFORCER.set_rules(policy.Rules.load_json(
data, default_rule))
def enforce(context, action, target): def enforce(context, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context. """Verifies that the action is valid on the target in this context.
:param context: manila context :param context: manila context
@ -80,12 +78,15 @@ def enforce(context, action, target):
""" """
init() init()
if not isinstance(context, dict):
context = context.to_dict()
match_list = ('rule:%s' % action,) # Add the exception arguments if asked to do a raise
credentials = context.to_dict() extra = {}
if do_raise:
policy.enforce(match_list, target, credentials, extra.update(exc=exception.PolicyNotAuthorized, action=action,
exception.PolicyNotAuthorized, action=action) do_raise=do_raise)
return _ENFORCER.enforce(action, target, context, **extra)
def check_is_admin(roles): def check_is_admin(roles):
@ -94,16 +95,13 @@ def check_is_admin(roles):
""" """
init() init()
action = 'context_is_admin'
match_list = ('rule:%s' % action,)
# include project_id on target to avoid KeyError if context_is_admin # include project_id on target to avoid KeyError if context_is_admin
# policy definition is missing, and default admin_or_owner rule # policy definition is missing, and default admin_or_owner rule
# attempts to apply. Since our credentials dict does not include a # attempts to apply. Since our credentials dict does not include a
# project_id, this target can never match as a generic rule. # project_id, this target can never match as a generic rule.
target = {'project_id': ''} target = {'project_id': ''}
credentials = {'roles': roles} credentials = {'roles': roles}
return _ENFORCER.enforce("context_is_admin", target, credentials)
return policy.enforce(match_list, target, credentials)
def wrap_check_policy(resource): def wrap_check_policy(resource):
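
With the Enforcer in place, callers can now opt out of the exception, e.g. (a sketch; the action name is illustrative):

    from manila import context
    from manila import policy

    ctxt = context.RequestContext('user', 'project', roles=['member'])
    allowed = policy.enforce(ctxt, 'share:create',
                             {'project_id': 'project'}, do_raise=False)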

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. # Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved. # All Rights Reserved.
@ -20,10 +18,6 @@ from oslo.config import cfg
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_opt('policy_file', 'manila.policy')
def_vol_type = 'fake_vol_type'
def set_defaults(conf): def set_defaults(conf):
conf.set_default('connection_type', 'fake') conf.set_default('connection_type', 'fake')

View File

@ -1,5 +1,3 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved. # All Rights Reserved.
@ -17,19 +15,19 @@
"""Test of Policy Engine For Manila.""" """Test of Policy Engine For Manila."""
import mock
import os.path import os.path
import StringIO import six
import urllib2 from six.moves.urllib import request as urlrequest
from oslo.config import cfg
from manila import context from manila import context
from manila import exception from manila import exception
import manila.openstack.common.policy
from manila.openstack.common import policy as common_policy from manila.openstack.common import policy as common_policy
from manila import policy from manila import policy
from manila import test from manila import test
from manila import utils from manila import utils
from oslo.config import cfg
CONF = cfg.CONF CONF = cfg.CONF
@ -68,9 +66,8 @@ class PolicyTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(PolicyTestCase, self).setUp() super(PolicyTestCase, self).setUp()
policy.reset() policy.reset()
# NOTE(vish): preload rules to circumvent reloading from file
policy.init() policy.init()
rules = { self.rules = {
"true": [], "true": [],
"example:allowed": [], "example:allowed": [],
"example:denied": [["false:false"]], "example:denied": [["false:false"]],
@ -82,8 +79,7 @@ class PolicyTestCase(test.TestCase):
"example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
"example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
} }
# NOTE(vish): then overload underlying brain self._set_rules()
common_policy.set_brain(common_policy.HttpBrain(rules))
self.context = context.RequestContext('fake', 'fake', roles=['member']) self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {} self.target = {}
@ -91,6 +87,12 @@ class PolicyTestCase(test.TestCase):
policy.reset() policy.reset()
super(PolicyTestCase, self).tearDown() super(PolicyTestCase, self).tearDown()
def _set_rules(self):
these_rules = common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in self.rules.items()))
policy._ENFORCER.set_rules(these_rules)
def test_enforce_nonexistent_action_throws(self): def test_enforce_nonexistent_action_throws(self):
action = "example:noexist" action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
@ -108,22 +110,24 @@ class PolicyTestCase(test.TestCase):
def test_enforce_http_true(self): def test_enforce_http_true(self):
def fakeurlopen(url, post_data): def fakeurlopen(url, post_data):
return StringIO.StringIO("True") return six.StringIO("True")
self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
action = "example:get_http" action = "example:get_http"
target = {} target = {}
result = policy.enforce(self.context, action, target) with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
self.assertEqual(result, None) result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_http_false(self): def test_enforce_http_false(self):
def fakeurlopen(url, post_data): def fakeurlopen(url, post_data):
return StringIO.StringIO("False") return six.StringIO("False")
self.stubs.Set(urllib2, 'urlopen', fakeurlopen)
action = "example:get_http" action = "example:get_http"
target = {} target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
self.context, action, target) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self): def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'} target_mine = {'project_id': 'fake'}
@ -165,20 +169,19 @@ class DefaultPolicyTestCase(test.TestCase):
"default": [], "default": [],
"example:exist": [["false:false"]] "example:exist": [["false:false"]]
} }
self._set_rules('default')
self._set_brain('default')
self.context = context.RequestContext('fake', 'fake') self.context = context.RequestContext('fake', 'fake')
def _set_brain(self, default_rule):
brain = manila.openstack.common.policy.HttpBrain(self.rules,
default_rule)
manila.openstack.common.policy.set_brain(brain)
def tearDown(self): def tearDown(self):
super(DefaultPolicyTestCase, self).tearDown() super(DefaultPolicyTestCase, self).tearDown()
policy.reset() policy.reset()
def _set_rules(self, default_rule):
these_rules = common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in self.rules.items()), default_rule)
policy._ENFORCER.set_rules(these_rules)
def test_policy_called(self): def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {}) self.context, "example:exist", {})
@ -187,7 +190,13 @@ class DefaultPolicyTestCase(test.TestCase):
policy.enforce(self.context, "example:noexist", {}) policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self): def test_default_not_found(self):
self._set_brain("default_noexist") new_default_rule = "default_noexist"
# FIXME(gyee): need to overwrite the Enforcer's default_rule first
# as it is recreating the rules with its own default_rule instead
# of the default_rule passed in from set_rules(). I think this is a
# bug in Oslo policy.
policy._ENFORCER.default_rule = new_default_rule
self._set_rules(new_default_rule)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {}) self.context, "example:noexist", {})
@ -199,6 +208,12 @@ class ContextIsAdminPolicyTestCase(test.TestCase):
policy.reset() policy.reset()
policy.init() policy.init()
def _set_rules(self, rules, default_rule):
these_rules = common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in rules.items()), default_rule)
policy._ENFORCER.set_rules(these_rules)
def test_default_admin_role_is_admin(self): def test_default_admin_role_is_admin(self):
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertFalse(ctx.is_admin) self.assertFalse(ctx.is_admin)
@ -210,8 +225,7 @@ class ContextIsAdminPolicyTestCase(test.TestCase):
rules = { rules = {
'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]]
} }
brain = common_policy.Brain(rules, CONF.policy_default_rule) self._set_rules(rules, CONF.policy_default_rule)
common_policy.set_brain(brain)
ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
self.assertTrue(ctx.is_admin) self.assertTrue(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['administrator']) ctx = context.RequestContext('fake', 'fake', roles=['administrator'])
@ -225,8 +239,7 @@ class ContextIsAdminPolicyTestCase(test.TestCase):
"admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]], "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
"default": [["rule:admin_or_owner"]], "default": [["rule:admin_or_owner"]],
} }
brain = common_policy.Brain(rules, CONF.policy_default_rule) self._set_rules(rules, CONF.policy_default_rule)
common_policy.set_brain(brain)
ctx = context.RequestContext('fake', 'fake') ctx = context.RequestContext('fake', 'fake')
self.assertFalse(ctx.is_admin) self.assertFalse(ctx.is_admin)
ctx = context.RequestContext('fake', 'fake', roles=['admin']) ctx = context.RequestContext('fake', 'fake', roles=['admin'])

View File

@ -2,7 +2,6 @@
# The list of modules to copy from openstack-common # The list of modules to copy from openstack-common
module=context module=context
module=exception
module=excutils module=excutils
module=fileutils module=fileutils
module=flakes module=flakes

View File

@ -12,10 +12,12 @@ oslo.config>=1.2.0
paramiko>=1.8.0 paramiko>=1.8.0
Paste Paste
PasteDeploy>=1.5.0 PasteDeploy>=1.5.0
posix_ipc
python-neutronclient>=2.3.0,<3 python-neutronclient>=2.3.0,<3
python-glanceclient>=0.9.0 python-glanceclient>=0.9.0
python-keystoneclient>=0.3.2 python-keystoneclient>=0.3.2
Routes>=1.12.3 Routes>=1.12.3
six>=1.6.0
SQLAlchemy>=0.7.8,<=0.7.99 SQLAlchemy>=0.7.8,<=0.7.99
sqlalchemy-migrate>=0.7.2 sqlalchemy-migrate>=0.7.2
stevedore>=0.10 stevedore>=0.10