Sync charm-helpers

Additionally, this has unit test fixes for a CephContext update and a
Keystone V3 update that came with this sync.

Change-Id: I8ad78dbebf94ac0e6d0bcee6af2e24552c7175a3

parent: fb2f757494
commit: 0a9daf723e
@@ -227,3 +227,22 @@ class MonContext(context.CephContext):
             return ctxt

         return {}
+
+    def context_complete(self, ctxt):
+        """Overridden here to ensure the context is actually complete.
+
+        We set `key` and `auth` to None here, by default, to ensure
+        that the context will always evaluate to incomplete until the
+        Ceph relation has actually sent these details; otherwise,
+        there is a potential race condition between the relation
+        appearing and the first unit actually setting this data on the
+        relation.
+
+        :param ctxt: The current context members
+        :type ctxt: Dict[str, ANY]
+        :returns: True if the context is complete
+        :rtype: bool
+        """
+        if 'fsid' not in ctxt:
+            return False
+        return context.OSContextGenerator.context_complete(self, ctxt)
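The completeness guard above can be illustrated standalone. A minimal sketch, assuming a plain dict stands in for the context and that the base class treats any unset value as incomplete:

    # Standalone sketch of the added context_complete() guard; the real
    # method defers to OSContextGenerator.context_complete() at the end.
    def context_complete(ctxt, required=('fsid',)):
        if any(k not in ctxt for k in required):
            return False
        # Base-class behaviour: any unset value means incomplete.
        return all(v not in (None, '') for v in ctxt.values())

    assert context_complete({'fsid': 'abc', 'auth': 'cephx'})
    assert not context_complete({'auth': 'cephx'})          # no fsid yet
    assert not context_complete({'fsid': 'abc', 'key': None})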
@@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import (
     hook_name,
     local_unit,
     log,
+    relation_get,
     relation_ids,
     relation_set,
     relations_of_type,
@@ -260,11 +261,23 @@ class NRPE(object):
         relation = relation_ids('nrpe-external-master')
         if relation:
             log("Setting charm primary status {}".format(primary))
-            for rid in relation_ids('nrpe-external-master'):
+            for rid in relation:
                 relation_set(relation_id=rid, relation_settings={'primary': self.primary})
+        self.remove_check_queue = set()

     def add_check(self, *args, **kwargs):
+        shortname = None
+        if kwargs.get('shortname') is None:
+            if len(args) > 0:
+                shortname = args[0]
+        else:
+            shortname = kwargs['shortname']
+
         self.checks.append(Check(*args, **kwargs))
+        try:
+            self.remove_check_queue.remove(shortname)
+        except KeyError:
+            pass

     def remove_check(self, *args, **kwargs):
         if kwargs.get('shortname') is None:
@@ -281,6 +294,7 @@ class NRPE(object):

         check = Check(*args, **kwargs)
         check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])

     def write(self):
         try:
@@ -313,8 +327,25 @@ class NRPE(object):
         monitor_ids = relation_ids("local-monitors") + \
             relation_ids("nrpe-external-master")
         for rid in monitor_ids:
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, as no existing ones.
                 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()


 def get_nagios_hostcontext(relation_name='nrpe-external-master'):
     """
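The merge logic added to write() is easiest to see with plain dictionaries. A runnable sketch of the prune-then-update steps, with stand-in monitor data:

    # existing: monitors previously published on the relation;
    # nrpe_monitors: the freshly generated checks; remove_check_queue:
    # shortnames whose checks were removed since the last write.
    existing = {'check_rgw': 'cmd-a', 'check_stale': 'cmd-b'}
    nrpe_monitors = {'check_rgw': 'cmd-a-v2'}
    remove_check_queue = {'check_stale'}

    merged = {k: v for k, v in existing.items()
              if k not in remove_check_queue}
    merged.update(nrpe_monitors)
    assert merged == {'check_rgw': 'cmd-a-v2'}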
@@ -294,8 +294,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', None): self.bionic_queens,
             ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
             ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
+            ('bionic', 'cloud:bionic-train'): self.bionic_train,
             ('cosmic', None): self.cosmic_rocky,
             ('disco', None): self.disco_stein,
+            ('eoan', None): self.eoan_train,
         }
         return releases[(self.series, self.openstack)]

@@ -313,6 +315,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', 'queens'),
             ('cosmic', 'rocky'),
             ('disco', 'stein'),
+            ('eoan', 'train'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
@@ -320,6 +323,23 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         else:
             return releases[self.series]

+    def get_percona_service_entry(self, memory_constraint=None):
+        """Return a amulet service entry for percona cluster.
+
+        :param memory_constraint: Override the default memory constraint
+                                  in the service entry.
+        :type memory_constraint: str
+        :returns: Amulet service entry.
+        :rtype: dict
+        """
+        memory_constraint = memory_constraint or '3072M'
+        svc_entry = {
+            'name': 'percona-cluster',
+            'constraints': {'mem': memory_constraint}}
+        if self._get_openstack_release() <= self.trusty_mitaka:
+            svc_entry['location'] = 'cs:trusty/percona-cluster'
+        return svc_entry
+
     def get_ceph_expected_pools(self, radosgw=False):
         """Return a list of expected ceph pools in a ceph + cinder + glance
         test scenario, based on OpenStack release and whether ceph radosgw
@@ -54,11 +54,15 @@ NOVA_CLIENT_VERSION = "2"

 OPENSTACK_RELEASES_PAIRS = [
     'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
-    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
-    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
-    'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
-    'bionic_stein', 'disco_stein']
+    'trusty_mitaka', 'xenial_mitaka',
+    'xenial_newton', 'yakkety_newton',
+    'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike',
+    'xenial_queens', 'bionic_queens',
+    'bionic_rocky', 'cosmic_rocky',
+    'bionic_stein', 'disco_stein',
+    'bionic_train', 'eoan_train',
+]


 class OpenStackAmuletUtils(AmuletUtils):
@@ -126,7 +126,11 @@ def _config_ini(path):
     :returns: Configuration contained in path
     :rtype: Dict
     """
-    conf = configparser.ConfigParser()
+    # When strict is enabled, duplicate options are not allowed in the
+    # parsed INI; however, Oslo allows duplicate values. This change
+    # causes us to ignore the duplicate values which is acceptable as
+    # long as we don't validate any multi-value options
+    conf = configparser.ConfigParser(strict=False)
     conf.read(path)
     return dict(conf)

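The strict=False behaviour is plain stdlib and can be verified directly: duplicate options raise under the default parser but are tolerated (last value wins) once strict parsing is disabled.

    import configparser

    ini = "[DEFAULT]\nkey = one\nkey = two\n"   # duplicated option

    try:
        configparser.ConfigParser().read_string(ini)
    except configparser.DuplicateOptionError:
        pass                                    # strict default rejects it

    lenient = configparser.ConfigParser(strict=False)
    lenient.read_string(ini)
    assert lenient['DEFAULT']['key'] == 'two'   # last duplicate wins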
@@ -204,7 +208,7 @@ def validate_file_ownership(config):
                 "Invalid ownership configuration: {}".format(key))
         owner = options.get('owner', config.get('owner', 'root'))
         group = options.get('group', config.get('group', 'root'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
@@ -226,7 +230,7 @@ def validate_file_permissions(config):
             raise RuntimeError(
                 "Invalid ownership configuration: {}".format(key))
         mode = options.get('mode', config.get('permissions', '600'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
@@ -106,9 +106,11 @@ class CertRequest(object):
             sans = sorted(list(set(entry['addresses'])))
             request[entry['cn']] = {'sans': sans}
         if self.json_encode:
-            return {'cert_requests': json.dumps(request, sort_keys=True)}
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
         else:
-            return {'cert_requests': request}
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req


 def get_certificate_request(json_encode=True):
@@ -258,7 +258,7 @@ class SharedDBContext(OSContextGenerator):
                 'database_password': rdata.get(password_setting),
                 'database_type': 'mysql+pymysql'
             }
-            if CompareOpenStackReleases(rel) < 'stein':
+            if CompareOpenStackReleases(rel) < 'queens':
                 ctxt['database_type'] = 'mysql'
             if self.context_complete(ctxt):
                 db_ssl(rdata, ctxt, self.ssl_dir)
@@ -362,7 +362,7 @@ class IdentityServiceContext(OSContextGenerator):
         pkg_names = map(lambda x: x + python_name, ('python3-', 'python-'))

         for pkg in pkg_names:
-            if not filter_installed_packages(pkg):
+            if not filter_installed_packages((pkg,)):
                 return pkg

         return None
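The tuple wrapping matters because filter_installed_packages() iterates its argument, so a bare string is iterated per character. A sketch with a stand-in filter (the real helper queries the apt cache):

    # Stand-in for filter_installed_packages(): returns the subset of
    # requested packages that are NOT installed.
    def filter_missing(packages, installed=('python3-keystoneclient',)):
        return [p for p in packages if p not in installed]

    pkg = 'python3-keystoneclient'
    assert filter_missing((pkg,)) == []    # correct: package installed
    assert filter_missing(pkg) != []       # bug: iterates characters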
@@ -443,8 +443,10 @@ class IdentityServiceContext(OSContextGenerator):
                              'api_version': api_version})

                 if float(api_version) > 2:
-                    ctxt.update({'admin_domain_name':
-                                 rdata.get('service_domain')})
+                    ctxt.update({
+                        'admin_domain_name': rdata.get('service_domain'),
+                        'service_project_id': rdata.get('service_tenant_id'),
+                        'service_domain_id': rdata.get('service_domain_id')})

             # we keep all veriables in ctxt for compatibility and
             # add nested dictionary for keystone_authtoken generic
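A runnable sketch of the v3 branch above, reusing the illustrative IDs that the unit test at the end of this diff adds:

    rdata = {
        'service_domain': 'admin_domain',
        'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
        'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
    }
    ctxt, api_version = {}, '3'
    if float(api_version) > 2:
        ctxt.update({
            'admin_domain_name': rdata.get('service_domain'),
            'service_project_id': rdata.get('service_tenant_id'),
            'service_domain_id': rdata.get('service_domain_id')})
    assert ctxt['service_project_id'] == '2852107b8f8f473aaf0d769c7bbcf86b'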
@@ -521,6 +523,86 @@ class IdentityCredentialsContext(IdentityServiceContext):
         return {}


+class NovaVendorMetadataContext(OSContextGenerator):
+    """Context used for configuring nova vendor metadata on nova.conf file."""
+
+    def __init__(self, os_release_pkg, interfaces=None):
+        """Initialize the NovaVendorMetadataContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+                               release codename from.
+        :type os_release_pkg: str
+        :param interfaces: list of string values to be used as the Context's
+                           relation interfaces.
+        :type interfaces: List[str]
+        """
+        self.os_release_pkg = os_release_pkg
+        if interfaces is not None:
+            self.interfaces = interfaces
+
+    def __call__(self):
+        cmp_os_release = CompareOpenStackReleases(
+            os_release(self.os_release_pkg))
+        ctxt = {'vendor_data': False}
+
+        vdata_providers = []
+        vdata = config('vendor-data')
+        vdata_url = config('vendor-data-url')
+
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we do not set anything here
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data'] = True
+                # Mitaka does not support DynamicJSON
+                # so vendordata_providers is not needed
+                if cmp_os_release > 'mitaka':
+                    vdata_providers.append('StaticJSON')
+
+        if vdata_url:
+            if cmp_os_release > 'mitaka':
+                ctxt['vendor_data_url'] = vdata_url
+                vdata_providers.append('DynamicJSON')
+            else:
+                log('Dynamic vendor data unsupported'
+                    ' for {}.'.format(cmp_os_release), level=ERROR)
+        if vdata_providers:
+            ctxt['vendordata_providers'] = ','.join(vdata_providers)
+
+        return ctxt
+
+
+class NovaVendorMetadataJSONContext(OSContextGenerator):
+    """Context used for writing nova vendor metadata json file."""
+
+    def __init__(self, os_release_pkg):
+        """Initialize the NovaVendorMetadataJSONContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+                               release codename from.
+        :type os_release_pkg: str
+        """
+        self.os_release_pkg = os_release_pkg
+
+    def __call__(self):
+        ctxt = {'vendor_data_json': '{}'}
+
+        vdata = config('vendor-data')
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we return empty.
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data_json'] = vdata
+
+        return ctxt


 class AMQPContext(OSContextGenerator):

     def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
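The JSON validation pattern both new contexts share is shown below as a self-contained sketch; config() and the ERROR logging are replaced by a plain argument and a pass:

    import json

    def vendor_data_json(vdata):
        ctxt = {'vendor_data_json': '{}'}     # safe default
        if vdata:
            try:
                json.loads(vdata)             # validate only
            except (TypeError, ValueError):
                pass                          # real code logs at ERROR
            else:
                ctxt['vendor_data_json'] = vdata
        return ctxt

    assert vendor_data_json('{"a": 1}')['vendor_data_json'] == '{"a": 1}'
    assert vendor_data_json('not json')['vendor_data_json'] == '{}'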
@@ -647,6 +729,10 @@ class AMQPContext(OSContextGenerator):
         if notification_format:
             ctxt['notification_format'] = notification_format

+        send_notifications_to_logs = conf.get('send-notifications-to-logs', None)
+        if send_notifications_to_logs:
+            ctxt['send_notifications_to_logs'] = send_notifications_to_logs
+
         if not self.complete:
             return {}

@@ -698,6 +784,25 @@ class CephContext(OSContextGenerator):
         ensure_packages(['ceph-common'])
         return ctxt

+    def context_complete(self, ctxt):
+        """Overridden here to ensure the context is actually complete.
+
+        We set `key` and `auth` to None here, by default, to ensure
+        that the context will always evaluate to incomplete until the
+        Ceph relation has actually sent these details; otherwise,
+        there is a potential race condition between the relation
+        appearing and the first unit actually setting this data on the
+        relation.
+
+        :param ctxt: The current context members
+        :type ctxt: Dict[str, ANY]
+        :returns: True if the context is complete
+        :rtype: bool
+        """
+        if 'auth' not in ctxt or 'key' not in ctxt:
+            return False
+        return super(CephContext, self).context_complete(ctxt)
+

 class HAProxyContext(OSContextGenerator):
     """Provides half a context for the haproxy template, which describes
@@ -1188,7 +1293,9 @@ class NeutronPortContext(OSContextGenerator):

         hwaddr_to_nic = {}
         hwaddr_to_ip = {}
-        for nic in list_nics():
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
             # Ignore virtual interfaces (bond masters will be identified from
             # their slaves)
             if not is_phy_iface(nic):
@@ -1219,10 +1326,11 @@ class NeutronPortContext(OSContextGenerator):
                 # Entry is a MAC address for a valid interface that doesn't
                 # have an IP address assigned yet.
                 resolved.append(hwaddr_to_nic[entry])
-            else:
-                # If the passed entry is not a MAC address, assume it's a valid
-                # interface, and that the user put it there on purpose (we can
-                # trust it to be the real external network).
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
                 resolved.append(entry)

         # Ensure no duplicates
@@ -1604,6 +1712,18 @@ class NeutronAPIContext(OSContextGenerator):
                 'rel_key': 'enable-nsg-logging',
                 'default': False,
             },
+            'enable_nfg_logging': {
+                'rel_key': 'enable-nfg-logging',
+                'default': False,
+            },
+            'global_physnet_mtu': {
+                'rel_key': 'global-physnet-mtu',
+                'default': 1500,
+            },
+            'physical_network_mtus': {
+                'rel_key': 'physical-network-mtus',
+                'default': None,
+            },
         }
         ctxt = self.get_neutron_options({})
         for rid in relation_ids('neutron-plugin-api'):
@@ -1665,13 +1785,13 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
-            # Map of {port/mac:bridge}
+            # Map of {bridge:port/mac}
             portmap = parse_data_port_mappings(ports)
             ports = portmap.keys()
             # Resolve provided ports or mac addresses and filter out those
             # already attached to a bridge.
             resolved = self.resolve_ports(ports)
-            # FIXME: is this necessary?
+            # Rebuild port index using resolved and filtered ports.
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved
@@ -217,6 +217,11 @@ def neutron_plugins():
         plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
         plugins['vsp']['driver'] = (
             'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
+        if CompareOpenStackReleases(release) >= 'newton':
+            plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+            plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+            plugins['vsp']['server_packages'] = ['neutron-server',
+                                                 'neutron-plugin-ml2']
     return plugins

@@ -2,6 +2,9 @@
 [oslo_messaging_notifications]
 driver = {{ oslo_messaging_driver }}
 transport_url = {{ transport_url }}
+{% if send_notifications_to_logs %}
+driver = log
+{% endif %}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
@@ -0,0 +1 @@
+{{ vendor_data_json }}
@@ -120,6 +120,7 @@ OPENSTACK_RELEASES = (
     'queens',
     'rocky',
     'stein',
+    'train',
 )

 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@@ -139,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('bionic', 'queens'),
     ('cosmic', 'rocky'),
     ('disco', 'stein'),
+    ('eoan', 'train'),
 ])

@@ -159,6 +161,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2018.1', 'queens'),
     ('2018.2', 'rocky'),
     ('2019.1', 'stein'),
+    ('2019.2', 'train'),
 ])

 # The ugly duckling - must list releases oldest to newest
@@ -195,6 +198,8 @@ SWIFT_CODENAMES = OrderedDict([
         ['2.18.0', '2.19.0']),
     ('stein',
         ['2.20.0', '2.21.0']),
+    ('train',
+        ['2.22.0']),
 ])

 # >= Liberty version->codename mapping
@@ -208,6 +213,7 @@ PACKAGE_CODENAMES = {
         ('17', 'queens'),
         ('18', 'rocky'),
         ('19', 'stein'),
+        ('20', 'train'),
     ]),
     'neutron-common': OrderedDict([
         ('7', 'liberty'),
@@ -218,6 +224,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'cinder-common': OrderedDict([
         ('7', 'liberty'),
@@ -228,6 +235,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'keystone': OrderedDict([
         ('8', 'liberty'),
@@ -238,6 +246,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'horizon-common': OrderedDict([
         ('8', 'liberty'),
@@ -248,6 +257,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'ceilometer-common': OrderedDict([
         ('5', 'liberty'),
@@ -258,6 +268,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'heat-common': OrderedDict([
         ('5', 'liberty'),
@@ -268,6 +279,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'glance-common': OrderedDict([
         ('11', 'liberty'),
@@ -278,6 +290,7 @@ PACKAGE_CODENAMES = {
         ('16', 'queens'),
         ('17', 'rocky'),
         ('18', 'stein'),
+        ('19', 'train'),
     ]),
     'openstack-dashboard': OrderedDict([
         ('8', 'liberty'),
@@ -288,6 +301,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
 }

@@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'):
         relation_set(relation_id=rid, broker_req=request.request)


+def has_broker_rsp(rid=None, unit=None):
+    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
+
+    :param rid: The relation to check (default of None means current relation)
+    :type rid: Union[str, None]
+    :param unit: The remote unit to check (default of None means current unit)
+    :type unit: Union[str, None]
+    :returns: True if broker key exists and is set to something 'truthy'
+    :rtype: bool
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    return True if broker_rsp else False
+
+
 def is_broker_action_done(action, rid=None, unit=None):
     """Check whether broker action has completed yet.

     @param action: name of action to be performed
     @returns True if action complete otherwise False
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return False
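has_broker_rsp() reduces to a truthiness test on relation data. A sketch where rdata stands in for relation_get(rid=rid, unit=unit) or {} and 'broker_rsp' stands in for whatever get_broker_rsp_key() returns:

    def has_broker_rsp(rdata, key='broker_rsp'):
        return True if rdata.get(key) else False

    assert has_broker_rsp({'broker_rsp': '{"exit-code": 0}'})
    assert not has_broker_rsp({'broker_rsp': ''})   # present but empty
    assert not has_broker_rsp({})                   # key absent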
@@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None):
     @param action: name of action to be performed
     @returns None
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return
@@ -110,17 +110,19 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))


-def mkfs_xfs(device, force=False):
+def mkfs_xfs(device, force=False, inode_size=1024):
     """Format device with XFS filesystem.

     By default this should fail if the device already has a filesystem on it.
     :param device: Full path to device to format
     :ptype device: tr
     :param force: Force operation
-    :ptype: force: boolean"""
+    :ptype: force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")

-    cmd += ['-i', 'size=1024', device]
+    cmd += ['-i', "size={}".format(inode_size), device]
     check_call(cmd)
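A sketch of the command line mkfs_xfs() now assembles; check_call is left out so the construction itself can be tested without a device:

    def build_mkfs_xfs_cmd(device, force=False, inode_size=1024):
        cmd = ['mkfs.xfs']
        if force:
            cmd.append('-f')
        cmd += ['-i', 'size={}'.format(inode_size), device]
        return cmd

    assert build_mkfs_xfs_cmd('/dev/vdb') == \
        ['mkfs.xfs', '-i', 'size=1024', '/dev/vdb']
    assert build_mkfs_xfs_cmd('/dev/vdb', force=True, inode_size=512) == \
        ['mkfs.xfs', '-f', '-i', 'size=512', '/dev/vdb']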
@@ -173,6 +173,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'stein/proposed': 'bionic-proposed/stein',
     'bionic-stein/proposed': 'bionic-proposed/stein',
     'bionic-proposed/stein': 'bionic-proposed/stein',
+    # Train
+    'train': 'bionic-updates/train',
+    'bionic-train': 'bionic-updates/train',
+    'bionic-train/updates': 'bionic-updates/train',
+    'bionic-updates/train': 'bionic-updates/train',
+    'train/proposed': 'bionic-proposed/train',
+    'bionic-train/proposed': 'bionic-proposed/train',
+    'bionic-proposed/train': 'bionic-proposed/train',
 }

@@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False):
     for r, fn in six.iteritems(_mapping):
         m = re.match(r, source)
         if m:
-            # call the assoicated function with the captured groups
-            # raises SourceConfigError on error.
-            fn(*m.groups())
             if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
                 try:
                     import_key(key)
                 except GPGKeyError as e:
                     raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
             break
     else:
         # nothing matched. log an error and maybe sys.exit
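The reordering in add_source() is about sequencing side effects: the key import must precede the handler that may refresh the package index. A minimal sketch with stub functions standing in for import_key() and fn(*m.groups()):

    events = []

    def import_key(key):
        events.append('import-key')

    def handle_source(spec):            # stand-in for fn(*m.groups())
        events.append('add-source')     # may trigger an index refresh

    key = 'stub-key-material'           # hypothetical value
    if key:
        import_key(key)
    handle_source('cloud:bionic-train')
    assert events == ['import-key', 'add-source']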
|
43
tests/bundles/bionic-stein.yaml
Normal file
43
tests/bundles/bionic-stein.yaml
Normal file
@@ -0,0 +1,43 @@
+options:
+  source: &source cloud:bionic-stein
+series: bionic
+applications:
+  ceph-radosgw:
+    charm: ceph-radosgw
+    series: bionic
+    num_units: 1
+    options:
+      source: *source
+  ceph-osd:
+    charm: cs:~openstack-charmers-next/ceph-osd
+    num_units: 3
+    constraints: "mem=2048"
+    storage:
+      osd-devices: 'cinder,10G'
+    options:
+      source: *source
+      osd-devices: '/srv/ceph /dev/test-non-existent'
+  ceph-mon:
+    charm: cs:~openstack-charmers-next/ceph-mon
+    num_units: 3
+    options:
+      source: *source
+      auth-supported: 'none'
+  percona-cluster:
+    charm: cs:~openstack-charmers-next/percona-cluster
+    num_units: 1
+  keystone:
+    expose: True
+    charm: cs:~openstack-charmers-next/keystone
+    num_units: 1
+    options:
+      openstack-origin: *source
+relations:
+- - keystone:shared-db
+  - percona-cluster:shared-db
+- - ceph-osd:mon
+  - ceph-mon:osd
+- - ceph-radosgw:mon
+  - ceph-mon:radosgw
+- - ceph-radosgw:identity-service
+  - keystone:identity-service
@@ -24,7 +24,7 @@ applications:
       source: *source
       auth-supported: 'none'
   percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
+    charm: cs:trusty/percona-cluster
     num_units: 1
   keystone:
     expose: True
@@ -7,8 +7,9 @@ gate_bundles:
   - xenial-queens
   - bionic-queens
   - bionic-rocky
+  - bionic-stein
 smoke_bundles:
-  - bionic-rocky
+  - bionic-stein
 dev_bundles:
   - cosmic-rocky
   - bionic-queens-multisite
@@ -248,8 +248,10 @@ class IdentityServiceContextTest(CharmTestCase):
             'auth_protocol': 'http',
             'auth_type': 'keystone',
             'cache_size': '42',
+            'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
             'service_host': '127.0.0.4',
             'service_port': 9876,
+            'service_project_id': '2852107b8f8f473aaf0d769c7bbcf86b',
             'service_protocol': 'http',
             'user_roles': 'Babel,Dart',
         }