Sync with oslo.incubator

Add oslo.policy and oslo.concurrency to the oslo config generator extra
libraries to retrieve their config options
Update oslo_incubator service module
Update oslo_incubator versionutils with new list_opts discovery

Change-Id: I5558f6f9f708b1643ebd6c8b2a213a1fae582a50
Ghe Rivero 2015-03-19 18:39:54 +01:00
parent 3bdf13db77
commit b2f15d07d3
4 changed files with 85 additions and 8 deletions

@@ -1192,6 +1192,23 @@
#cleaning_network_uuid=<None>
[oslo_concurrency]
#
# Options defined in oslo.concurrency
#
# Enables or disables inter-process locks. (boolean value)
#disable_process_locking=false
# Directory to use for lock files. For security, the
# specified directory should only be writable by the user
# running the processes that need locking. Defaults to
# environment variable OSLO_LOCK_PATH. If external locks are
# used, a lock path must be set. (string value)
#lock_path=<None>
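
For context: these [oslo_concurrency] options are consumed by oslo.concurrency's lockutils module. Below is a minimal, illustrative sketch of a service taking an external (file-based) lock governed by disable_process_locking and lock_path; the lock name 'deploy-node-1' and the path '/var/lib/ironic/lock' are assumptions made up for the sketch, not part of this change.

    # Illustrative sketch only, not Ironic code.
    from oslo_concurrency import lockutils

    # lock_path normally comes from the [oslo_concurrency] section of the
    # config file; a default is set here only to keep the sketch self-contained.
    lockutils.set_defaults(lock_path='/var/lib/ironic/lock')  # assumed path


    @lockutils.synchronized('deploy-node-1', external=True)  # assumed lock name
    def do_exclusive_work():
        # With external=True, only one process holding the file lock runs this
        # at a time.  Setting disable_process_locking=true skips the file lock
        # and leaves only the in-process semaphore.
        pass


    do_exclusive_work()
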
[oslo_messaging_amqp]
#
@@ -1376,11 +1393,42 @@
# value)
#rabbit_ha_queues=false
# Number of seconds after which the Rabbit broker is
# considered down if heartbeat's keep-alive fails (0 disables
# the heartbeat). (integer value)
#heartbeat_timeout_threshold=60
# How many times during the heartbeat_timeout_threshold we
# check the heartbeat. (integer value)
#heartbeat_rate=2
# Deprecated; use rpc_backend=kombu+memory or rpc_backend=fake.
# (boolean value)
#fake_rabbit=false
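
These sample entries are rendered by the config generator from oslo.config option declarations inside oslo.messaging (the RabbitMQ driver group). As a rough illustration of what such declarations look like, here is a hand-written reconstruction; the names, defaults and help strings are taken from the sample above, everything else is assumed and this is not the actual oslo.messaging source.

    from oslo_config import cfg

    rabbit_opts = [
        cfg.IntOpt('heartbeat_timeout_threshold',
                   default=60,
                   help='Number of seconds after which the Rabbit broker is '
                        'considered down if heartbeat keep-alive fails '
                        '(0 disables the heartbeat).'),
        cfg.IntOpt('heartbeat_rate',
                   default=2,
                   help='How many times during the heartbeat_timeout_threshold '
                        'we check the heartbeat.'),
        cfg.BoolOpt('fake_rabbit',
                    default=False,
                    help='Deprecated; use rpc_backend=kombu+memory or '
                         'rpc_backend=fake.'),
    ]

    # The generator registers option lists like this one under their group
    # (here, presumably [oslo_messaging_rabbit]) and emits the commented-out
    # entries shown above.
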
[oslo_policy]
#
# Options defined in oslo.policy
#
# The JSON file that defines policies. (string value)
#policy_file=policy.json
# Default rule. Enforced when a requested rule is not found.
# (string value)
#policy_default_rule=default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path
# defined by the config_dir option, or absolute paths. The
# file defined by policy_file must exist for these directories
# to be searched. Missing or empty directories are ignored.
# (multi valued)
#policy_dirs=policy.d
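
The new [oslo_policy] options are consumed through oslo.policy's Enforcer. A short sketch of typical usage under the defaults above; the rule name 'admin_required' and the credential/target dictionaries are hypothetical.

    # Illustrative sketch only.
    from oslo_config import cfg
    from oslo_policy import policy

    CONF = cfg.CONF
    # The Enforcer picks up policy_file, policy_default_rule and policy_dirs
    # from the [oslo_policy] section.
    enforcer = policy.Enforcer(CONF)


    def is_admin(creds, target):
        # If 'admin_required' is missing from policy.json, the rule named by
        # policy_default_rule is applied instead.
        return enforcer.enforce('admin_required', target, creds)
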
[pxe]
#

@@ -199,18 +199,30 @@ class ServiceWrapper(object):
 class ProcessLauncher(object):
-    def __init__(self):
-        """Constructor."""
+    _signal_handlers_set = set()
+
+    @classmethod
+    def _handle_class_signals(cls, *args, **kwargs):
+        for handler in cls._signal_handlers_set:
+            handler(*args, **kwargs)
+
+    def __init__(self, wait_interval=0.01):
+        """Constructor.
+
+        :param wait_interval: The interval to sleep for between checks
+                              of child process exit.
+        """
         self.children = {}
         self.sigcaught = None
         self.running = True
+        self.wait_interval = wait_interval
         rfd, self.writepipe = os.pipe()
         self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
         self.handle_signal()

     def handle_signal(self):
-        _set_signals_handler(self._handle_signal)
+        self._signal_handlers_set.add(self._handle_signal)
+        _set_signals_handler(self._handle_class_signals)

     def _handle_signal(self, signo, frame):
         self.sigcaught = signo
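
To restate the change above outside diff form: signal handlers are now collected in a class-level set, and a single classmethod fans the process-wide signal out to every registered instance, so multiple launchers in one process no longer overwrite each other's handlers. A stripped-down sketch of the pattern, with _set_signals_handler reduced to a plain signal.signal() loop purely for illustration:

    # Minimal sketch of the class-level handler set pattern; not the full module.
    import signal


    def _set_signals_handler(handler):
        # Simplified stand-in for the helper used in service.py.
        for signo in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP):
            signal.signal(signo, handler)


    class Launcher(object):
        _signal_handlers_set = set()

        @classmethod
        def _handle_class_signals(cls, *args, **kwargs):
            # Fan one OS-level signal out to every registered instance.
            for handler in cls._signal_handlers_set:
                handler(*args, **kwargs)

        def __init__(self):
            self.sigcaught = None
            self.handle_signal()

        def handle_signal(self):
            # Each instance registers its bound method; the OS-level handler
            # is always the single classmethod above.
            self._signal_handlers_set.add(self._handle_signal)
            _set_signals_handler(self._handle_class_signals)

        def _handle_signal(self, signo, frame):
            self.sigcaught = signo
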
@@ -329,8 +341,8 @@ class ProcessLauncher(object):
     def _wait_child(self):
         try:
-            # Block while any of child processes have exited
-            pid, status = os.waitpid(0, 0)
+            # Don't block if no child processes have exited
+            pid, status = os.waitpid(0, os.WNOHANG)
             if not pid:
                 return None
         except OSError as exc:
@@ -359,6 +371,10 @@ class ProcessLauncher(object):
         while self.running:
             wrap = self._wait_child()
             if not wrap:
+                # Yield to other threads if no children have exited
+                # Sleep for a short time to avoid excessive CPU usage
+                # (see bug #1095346)
+                eventlet.greenthread.sleep(self.wait_interval)
                 continue

             while self.running and len(wrap.children) < wrap.workers:
                 self._start_child(wrap)
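
The two hunks above replace a blocking os.waitpid(0, 0) with a polling loop: waitpid is called with os.WNOHANG, and the parent sleeps for wait_interval between checks so it neither blocks forever nor spins at 100% CPU. A small self-contained sketch of the same pattern, using time.sleep instead of eventlet and omitting the respawn logic:

    # Minimal sketch of non-blocking child reaping with a polling interval.
    import os
    import time


    def wait_child():
        """Return (pid, status) for one exited child, or None."""
        try:
            pid, status = os.waitpid(0, os.WNOHANG)  # don't block
            if not pid:
                return None
            return pid, status
        except OSError:
            # No children (ECHILD) or interrupted call.
            return None


    def reap_loop(keep_running, wait_interval=0.01):
        while keep_running():
            result = wait_child()
            if result is None:
                # Nothing exited; sleep briefly to avoid busy-waiting.
                time.sleep(wait_interval)
                continue
            pid, status = result
            print('child %d exited with status %d' % (pid, status))
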
@@ -383,8 +399,14 @@ class ProcessLauncher(object):
                 if not _is_sighup_and_daemon(self.sigcaught):
                     break

                 cfg.CONF.reload_config_files()
+                for service in set(
+                        [wrap.service for wrap in self.children.values()]):
+                    service.reset()
+
                 for pid in self.children:
                     os.kill(pid, signal.SIGHUP)

                 self.running = True
                 self.sigcaught = None
         except eventlet.greenlet.GreenletExit:
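
The last hunk adds a reset pass on SIGHUP: after the parent re-reads its configuration files, it calls reset() once per distinct Service object and then forwards SIGHUP to every child. A condensed sketch of that control flow, assuming each child wrapper exposes a .service attribute with a reset() method (as in the diff above):

    # Condensed sketch of the parent's SIGHUP handling; error handling omitted.
    import os
    import signal


    def handle_sighup(conf, children):
        """children maps a child pid to a wrapper object with .service."""
        conf.reload_config_files()  # re-read the config files

        # Reset each distinct service once, even if it has many workers.
        for service in set(wrap.service for wrap in children.values()):
            service.reset()

        # Let every child run its own SIGHUP handling as well.
        for pid in children:
            os.kill(pid, signal.SIGHUP)
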

@@ -17,6 +17,7 @@
 Helpers for comparing version strings.
 """

+import copy
 import functools
 import inspect
 import logging
@@ -32,13 +33,19 @@ LOG = logging.getLogger(__name__)
 CONF = cfg.CONF

-opts = [
+deprecated_opts = [
     cfg.BoolOpt('fatal_deprecations',
                 default=False,
                 help='Enables or disables fatal status of deprecations.'),
 ]


+def list_opts():
+    """Entry point for oslo.config-generator.
+    """
+    return [(None, copy.deepcopy(deprecated_opts))]
+
+
 class deprecated(object):
     """A decorator to mark callables as deprecated.
@@ -232,7 +239,7 @@ def report_deprecated_feature(logger, msg, *args, **kwargs):
             fatal deprecations.
     """
     stdmsg = _("Deprecated: %s") % msg
-    CONF.register_opts(opts)
+    CONF.register_opts(deprecated_opts)
     if CONF.fatal_deprecations:
         logger.critical(stdmsg, *args, **kwargs)
         raise DeprecatedConfig(msg=stdmsg)
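
Apart from the rename to deprecated_opts, the behaviour here is unchanged: the option list is registered lazily, and fatal_deprecations decides whether a deprecation is logged or raised. A hypothetical caller, for illustration only; the option names in the message are made up:

    # With fatal_deprecations=true this call raises DeprecatedConfig instead
    # of just logging a warning.
    import logging

    from ironic.openstack.common import versionutils

    LOG = logging.getLogger(__name__)

    versionutils.report_deprecated_feature(
        LOG, "the 'foo_driver' option is deprecated, use 'bar_driver' instead")
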

@@ -1,2 +1,2 @@
-export IRONIC_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.db oslo.messaging keystonemiddleware.auth_token'
+export IRONIC_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.db oslo.messaging keystonemiddleware.auth_token oslo.concurrency oslo.policy'
 export IRONIC_CONFIG_GENERATOR_EXTRA_MODULES=
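
The rc change adds oslo.concurrency and oslo.policy to the libraries whose options the sample-config generator collects. The exact generator plumbing is outside this diff, but the underlying discovery is the same list_opts idea as above; purely as an assumed illustration, option entry points are conventionally published in the 'oslo.config.opts' namespace and could be enumerated like this:

    # Assumed illustration of entry-point based option discovery; the namespace
    # and the availability of entry points per library are not part of this change.
    import pkg_resources


    def discover_opts(namespace='oslo.config.opts'):
        for ep in pkg_resources.iter_entry_points(namespace):
            list_opts = ep.load()
            for group, opts in list_opts():
                yield ep.name, group, opts


    for name, group, opts in discover_opts():
        print(name, group, [opt.name for opt in opts])
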