Sync charm-helpers
Notable issues resolved:

- openstack_upgrade_available() broken for swift
  https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847
- haproxy context doesn't consider bindings
  https://bugs.launchpad.net/charm-helpers/+bug/1735421
- regression in haproxy check
  https://bugs.launchpad.net/charm-helpers/+bug/1743287

Change-Id: I0c3b5d90238b3e5665455983616f58446a682429
parent edad8b6054
commit d4db0181cd
@@ -93,12 +93,10 @@ from charmhelpers.contrib.network.ip import (
     format_ipv6_addr,
     is_bridge_member,
     is_ipv6_disabled,
+    get_relation_ip,
 )
 from charmhelpers.contrib.openstack.utils import (
     config_flags_parser,
-    get_host_ip,
-    git_determine_usr_bin,
-    git_determine_python_path,
     enable_memcache,
     snap_install_requested,
     CompareOpenStackReleases,
@@ -334,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
         self.rel_name = rel_name
         self.interfaces = [self.rel_name]
 
-    def __call__(self):
-        log('Generating template context for ' + self.rel_name, level=DEBUG)
-        ctxt = {}
-
+    def _setup_pki_cache(self):
         if self.service and self.service_user:
             # This is required for pki token signing if we don't want /tmp to
             # be used.
@@ -347,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
             mkdir(path=cachedir, owner=self.service_user,
                   group=self.service_user, perms=0o700)
 
+            return cachedir
+        return None
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
             ctxt['signing_dir'] = cachedir
 
         for rid in relation_ids(self.rel_name):
@@ -385,6 +389,62 @@ class IdentityServiceContext(OSContextGenerator):
         return {}
 
 
+class IdentityCredentialsContext(IdentityServiceContext):
+    '''Context for identity-credentials interface type'''
+
+    def __init__(self,
+                 service=None,
+                 service_user=None,
+                 rel_name='identity-credentials'):
+        super(IdentityCredentialsContext, self).__init__(service,
+                                                         service_user,
+                                                         rel_name)
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        cachedir = self._setup_pki_cache()
+        if cachedir:
+            ctxt['signing_dir'] = cachedir
+
+        for rid in relation_ids(self.rel_name):
+            self.related = True
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                credentials_host = rdata.get('credentials_host')
+                credentials_host = (
+                    format_ipv6_addr(credentials_host) or credentials_host
+                )
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+                svc_protocol = rdata.get('credentials_protocol') or 'http'
+                auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
+                ctxt.update({
+                    'service_port': rdata.get('credentials_port'),
+                    'service_host': credentials_host,
+                    'auth_host': auth_host,
+                    'auth_port': rdata.get('auth_port'),
+                    'admin_tenant_name': rdata.get('credentials_project'),
+                    'admin_tenant_id': rdata.get('credentials_project_id'),
+                    'admin_user': rdata.get('credentials_username'),
+                    'admin_password': rdata.get('credentials_password'),
+                    'service_protocol': svc_protocol,
+                    'auth_protocol': auth_protocol,
+                    'api_version': api_version
+                })
+
+                if float(api_version) > 2:
+                    ctxt.update({'admin_domain_name':
+                                 rdata.get('domain')})
+
+                if self.context_complete(ctxt):
+                    return ctxt
+
+        return {}
+
+
 class AMQPContext(OSContextGenerator):
 
     def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
@@ -566,11 +626,6 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster') and not self.singlenode_mode:
             return {}
 
-        if config('prefer-ipv6'):
-            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
-        else:
-            addr = get_host_ip(unit_get('private-address'))
-
         l_unit = local_unit().replace('/', '-')
         cluster_hosts = {}
 
@@ -578,7 +633,15 @@ class HAProxyContext(OSContextGenerator):
         # and associated backends
         for addr_type in ADDRESS_TYPES:
             cfg_opt = 'os-{}-network'.format(addr_type)
-            laddr = get_address_in_network(config(cfg_opt))
+            # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
+            # than 'internal'
+            if addr_type == 'internal':
+                _addr_map_type = INTERNAL
+            else:
+                _addr_map_type = addr_type
+            # Network spaces aware
+            laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
+                                    config(cfg_opt))
             if laddr:
                 netmask = get_netmask_for_address(laddr)
                 cluster_hosts[laddr] = {
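This is the core of the bindings fix (lp:1735421): the frontend address now comes from get_relation_ip(), i.e. from the Juju network space bound to the endpoint, with the os-*-network option kept only as a fallback hint. A minimal standalone sketch of that precedence (illustrative names, not the charm-helpers API):

```python
# Illustrative sketch of the address-selection precedence introduced above:
# prefer the Juju network-space binding, fall back to the configured network.
def pick_listen_address(binding_address, config_network_address):
    """Return the space-aware binding address when Juju provides one,
    otherwise the address selected from the os-*-network config option."""
    return binding_address or config_network_address

# A unit bound to an 'internal' space wins over the config-derived address.
assert pick_listen_address('10.20.0.5', '192.168.1.5') == '10.20.0.5'
# Without a binding, the old config-driven behaviour is preserved.
assert pick_listen_address(None, '192.168.1.5') == '192.168.1.5'
```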
@@ -589,15 +652,19 @@ class HAProxyContext(OSContextGenerator):
                 }
                 for rid in relation_ids('cluster'):
                     for unit in sorted(related_units(rid)):
+                        # API Charms will need to set {addr_type}-address with
+                        # get_relation_ip(addr_type)
                         _laddr = relation_get('{}-address'.format(addr_type),
                                               rid=rid, unit=unit)
                         if _laddr:
                             _unit = unit.replace('/', '-')
                             cluster_hosts[laddr]['backends'][_unit] = _laddr
 
-        # NOTE(jamespage) add backend based on private address - this
-        # with either be the only backend or the fallback if no acls
+        # NOTE(jamespage) add backend based on get_relation_ip - this
+        # will either be the only backend or the fallback if no acls
         # match in the frontend
+        # Network spaces aware
+        addr = get_relation_ip('cluster')
         cluster_hosts[addr] = {}
         netmask = get_netmask_for_address(addr)
         cluster_hosts[addr] = {
@@ -607,6 +674,8 @@ class HAProxyContext(OSContextGenerator):
         }
         for rid in relation_ids('cluster'):
             for unit in sorted(related_units(rid)):
+                # API Charms will need to set their private-address with
+                # get_relation_ip('cluster')
                 _laddr = relation_get('private-address',
                                       rid=rid, unit=unit)
                 if _laddr:
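Taken together, these hunks populate cluster_hosts keyed by the local frontend address for each address type, with peer units from the 'cluster' relation as backends. Roughly, assuming a hypothetical two-unit keystone deployment:

```python
# Shape of the data HAProxyContext assembles (illustrative values only).
cluster_hosts = {
    '10.20.0.5': {                      # local address for one network/binding
        'network': '10.20.0.5/24',      # address plus its netmask
        'backends': {
            'keystone-0': '10.20.0.5',  # this unit
            'keystone-1': '10.20.0.6',  # peers from the 'cluster' relation
        },
    },
}
```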
@@ -1323,8 +1392,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
             "public_processes": int(math.ceil(self.public_process_weight *
                                               total_processes)),
             "threads": 1,
-            "usr_bin": git_determine_usr_bin(),
-            "python_path": git_determine_python_path(),
         }
         return ctxt
 
@@ -9,7 +9,7 @@
 CRITICAL=0
 NOTACTIVE=''
 LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
 
 typeset -i N_INSTANCES=0
 for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
@@ -10,7 +10,7 @@
 CURRQthrsh=0
 MAXQthrsh=100
 
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
 
 HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
 
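Both nagios scripts extract the haproxy stats credentials from a config line of the form "stats auth user:pass"; the credentials are the third whitespace-separated field, so awk's $4 was always empty and $3 is the fix for lp:1743287 (awk fields are 1-indexed). A quick illustration:

```python
# Why $4 -> $3: in a haproxy.cfg "stats auth" line the credentials are the
# third whitespace-separated field.
line = " stats auth admin:s3cr3t"  # illustrative config line
fields = line.split()              # ['stats', 'auth', 'admin:s3cr3t']
assert fields[2] == "admin:s3cr3t"  # awk's $3 is Python index 2
```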
@@ -23,6 +23,8 @@
 Helpers for high availability.
 """
 
+import json
+
 import re
 
 from charmhelpers.core.hookenv import (
@@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
+    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -40,6 +43,23 @@ from charmhelpers.core.host import (
 
 from charmhelpers.contrib.openstack.ip import (
     resolve_address,
+    is_ipv6,
 )
 
+from charmhelpers.contrib.network.ip import (
+    get_iface_for_address,
+    get_netmask_for_address,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    get_hacluster_config
+)
+
+JSON_ENCODE_OPTIONS = dict(
+    sort_keys=True,
+    allow_nan=False,
+    indent=None,
+    separators=(',', ':'),
+)
+
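JSON_ENCODE_OPTIONS pins the encoding so that identical data always serialises to identical bytes; since changes to relation data are what trigger hooks on the remote side, deterministic encoding avoids spurious hook runs. For example:

```python
import json

# The options above make relation data deterministic and compact, so
# re-encoding unchanged data never looks like a relation change.
JSON_ENCODE_OPTIONS = dict(
    sort_keys=True,         # stable key order across hook invocations
    allow_nan=False,        # reject values that are not valid JSON
    indent=None,            # single line
    separators=(',', ':'),  # no whitespace after separators
)

print(json.dumps({'b': 1, 'a': 2}, **JSON_ENCODE_OPTIONS))  # {"a":2,"b":1}
```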
@@ -53,8 +73,8 @@ class DNSHAException(Exception):
 def update_dns_ha_resource_params(resources, resource_params,
                                   relation_id=None,
                                   crm_ocf='ocf:maas:dns'):
-    """ Check for os-*-hostname settings and update resource dictionaries for
-    the HA relation.
+    """ Configure DNS-HA resources based on provided configuration and
+    update resource dictionaries for the HA relation.
 
     @param resources: Pointer to dictionary of resources.
                       Usually instantiated in ha_joined().
@@ -64,7 +84,85 @@ def update_dns_ha_resource_params(resources, resource_params,
     @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                     DNS HA
     """
+    _relation_data = {'resources': {}, 'resource_params': {}}
+    update_hacluster_dns_ha(charm_name(),
+                            _relation_data,
+                            crm_ocf)
+    resources.update(_relation_data['resources'])
+    resource_params.update(_relation_data['resource_params'])
+    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
+
+
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater
+
+    :raises DNSHAException: if release is < 16.04
+    """
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
+
+
+def expect_ha():
+    """ Determine if the unit expects to be in HA
+
+    Check for VIP or dns-ha settings which indicate the unit should expect to
+    be related to hacluster.
+
+    @returns boolean
+    """
+    return config('vip') or config('dns-ha')
+
+
+def generate_ha_relation_data(service):
+    """ Generate relation data for ha relation
+
+    Based on configuration options and unit interfaces, generate a json
+    encoded dict of relation data items for the hacluster relation,
+    providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+    @returns dict: json encoded data for use with relation_set
+    """
+    _haproxy_res = 'res_{}_haproxy'.format(service)
+    _relation_data = {
+        'resources': {
+            _haproxy_res: 'lsb:haproxy',
+        },
+        'resource_params': {
+            _haproxy_res: 'op monitor interval="5s"'
+        },
+        'init_services': {
+            _haproxy_res: 'haproxy'
+        },
+        'clones': {
+            'cl_{}_haproxy'.format(service): _haproxy_res
+        },
+    }
+
+    if config('dns-ha'):
+        update_hacluster_dns_ha(service, _relation_data)
+    else:
+        update_hacluster_vip(service, _relation_data)
+
+    return {
+        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+        for k, v in _relation_data.items() if v
+    }
+
+
+def update_hacluster_dns_ha(service, relation_data,
+                            crm_ocf='ocf:maas:dns'):
+    """ Configure DNS-HA resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
     # Validate the charm environment for DNS HA
     assert_charm_supports_dns_ha()
 
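generate_ha_relation_data() is the new single entry point for charms: it builds the haproxy clone set, delegates to the DNS-HA or VIP helper, and JSON-encodes each non-empty section under a json_-prefixed key. A small sketch of that final encoding step, with a sample service name:

```python
import json

# Sketch of how generate_ha_relation_data() packages its payload: each
# non-empty section is JSON-encoded under a 'json_'-prefixed key.
_relation_data = {
    'resources': {'res_keystone_haproxy': 'lsb:haproxy'},  # sample service
    'resource_params': {'res_keystone_haproxy': 'op monitor interval="5s"'},
    'groups': {},  # empty sections are dropped by the `if v` filter
}
encoded = {
    'json_{}'.format(k): json.dumps(v, sort_keys=True)
    for k, v in _relation_data.items() if v
}
assert sorted(encoded) == ['json_resource_params', 'json_resources']
```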
@@ -93,7 +191,7 @@ def update_dns_ha_resource_params(resources, resource_params,
             status_set('blocked', msg)
             raise DNSHAException(msg)
 
-        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
+        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
         if hostname_key in hostname_group:
             log('DNS HA: Resource {}: {} already exists in '
                 'hostname group - skipping'.format(hostname_key, hostname),
@@ -101,42 +199,67 @@ def update_dns_ha_resource_params(resources, resource_params,
             continue
 
         hostname_group.append(hostname_key)
-        resources[hostname_key] = crm_ocf
-        resource_params[hostname_key] = (
-            'params fqdn="{}" ip_address="{}" '
-            ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
-                                                override=False)))
+        relation_data['resources'][hostname_key] = crm_ocf
+        relation_data['resource_params'][hostname_key] = (
+            'params fqdn="{}" ip_address="{}"'
+            .format(hostname, resolve_address(endpoint_type=endpoint_type,
+                                              override=False)))
 
     if len(hostname_group) >= 1:
         log('DNS HA: Hostname group is set with {} as members. '
             'Informing the ha relation'.format(' '.join(hostname_group)),
             DEBUG)
-        relation_set(relation_id=relation_id, groups={
-            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
+        relation_data['groups'] = {
+            'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+        }
     else:
         msg = 'DNS HA: Hostname group has no members.'
         status_set('blocked', msg)
         raise DNSHAException(msg)
 
 
-def assert_charm_supports_dns_ha():
-    """Validate prerequisites for DNS HA
-    The MAAS client is only available on Xenial or greater
+def update_hacluster_vip(service, relation_data):
+    """ Configure VIP resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
     """
-    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
-        msg = ('DNS HA is only supported on 16.04 and greater '
-               'versions of Ubuntu.')
-        status_set('blocked', msg)
-        raise DNSHAException(msg)
-    return True
+    cluster_config = get_hacluster_config()
+    vip_group = []
+    for vip in cluster_config['vip'].split():
+        if is_ipv6(vip):
+            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            vip_params = 'ipv6addr'
+        else:
+            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            vip_params = 'ip'
 
+        iface = (get_iface_for_address(vip) or
+                 config('vip_iface'))
+        netmask = (get_netmask_for_address(vip) or
+                   config('vip_cidr'))
 
-def expect_ha():
-    """ Determine if the unit expects to be in HA
+        if iface is not None:
+            vip_key = 'res_{}_{}_vip'.format(service, iface)
+            if vip_key in vip_group:
+                if vip not in relation_data['resource_params'][vip_key]:
+                    vip_key = '{}_{}'.format(vip_key, vip_params)
+                else:
+                    log("Resource '%s' (vip='%s') already exists in "
+                        "vip group - skipping" % (vip_key, vip), WARNING)
+                    continue
 
-    Check for VIP or dns-ha settings which indicate the unit should expect to
-    be related to hacluster.
+            relation_data['resources'][vip_key] = res_neutron_vip
+            relation_data['resource_params'][vip_key] = (
+                'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                'nic="{iface}"'.format(ip=vip_params,
+                                       vip=vip,
+                                       iface=iface,
+                                       netmask=netmask)
+            )
+            vip_group.append(vip_key)
 
-    @returns boolean
-    """
-    return config('vip') or config('dns-ha')
+    if len(vip_group) >= 1:
+        relation_data['groups'] = {
+            'grp_{}_vips'.format(service): ' '.join(vip_group)
+        }
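update_hacluster_vip() names one Pacemaker resource per (service, interface) pair and picks the IPv4 or IPv6 resource agent per VIP. A simplified reconstruction of the naming logic (a crude IPv6 test stands in for charm-helpers' is_ipv6):

```python
# Illustrative reconstruction of the VIP resource naming above: one
# Pacemaker resource per (service, interface) pair, IPv4 vs IPv6 aware.
def vip_resource(service, iface, vip):
    params = 'ipv6addr' if ':' in vip else 'ip'   # crude IPv6 test for demo
    agent = ('ocf:heartbeat:IPv6addr' if params == 'ipv6addr'
             else 'ocf:heartbeat:IPaddr2')
    key = 'res_{}_{}_vip'.format(service, iface)
    return key, agent, 'params {}="{}"'.format(params, vip)

assert vip_resource('keystone', 'eth0', '10.5.0.100') == (
    'res_keystone_eth0_vip', 'ocf:heartbeat:IPaddr2',
    'params ip="10.5.0.100"')
```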
@@ -15,9 +15,6 @@ Listen {{ public_port }}
 {% if port -%}
 <VirtualHost *:{{ port }}>
     WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      {% if python_path -%}
-                      python-path={{ python_path }} \
-                      {% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}
     WSGIScriptAlias / {{ script }}
@@ -29,7 +26,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -44,9 +41,6 @@ Listen {{ public_port }}
 {% if admin_port -%}
 <VirtualHost *:{{ admin_port }}>
     WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      {% if python_path -%}
-                      python-path={{ python_path }} \
-                      {% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-admin
     WSGIScriptAlias / {{ admin_script }}
@@ -58,7 +52,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -73,9 +67,6 @@ Listen {{ public_port }}
 {% if public_port -%}
 <VirtualHost *:{{ public_port }}>
     WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-                      {% if python_path -%}
-                      python-path={{ python_path }} \
-                      {% endif -%}
                       display-name=%{GROUP}
     WSGIProcessGroup {{ service_name }}-public
     WSGIScriptAlias / {{ public_script }}
@@ -87,7 +78,7 @@ Listen {{ public_port }}
     ErrorLog /var/log/apache2/{{ service_name }}_error.log
     CustomLog /var/log/apache2/{{ service_name }}_access.log combined
 
-    <Directory {{ usr_bin }}>
+    <Directory /usr/bin>
         <IfVersion >= 2.4>
             Require all granted
         </IfVersion>
@@ -23,7 +23,6 @@ import sys
 import re
 import itertools
 import functools
-import shutil
 
 import six
 import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_ids,
     relation_set,
-    service_name,
     status_set,
     hook_name,
     application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
     port_has_listener,
 )
 
-from charmhelpers.contrib.python.packages import (
-    pip_create_virtualenv,
-    pip_install,
-)
-
 from charmhelpers.core.host import (
     lsb_release,
     mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
 )
 from charmhelpers.fetch import (
     apt_cache,
-    install_remote,
     import_key as fetch_import_key,
     add_source as fetch_add_source,
     SourceConfigError,
@@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
     ]),
 }
 
-GIT_DEFAULT_REPOS = {
-    'requirements': 'git://github.com/openstack/requirements',
-    'cinder': 'git://github.com/openstack/cinder',
-    'glance': 'git://github.com/openstack/glance',
-    'horizon': 'git://github.com/openstack/horizon',
-    'keystone': 'git://github.com/openstack/keystone',
-    'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
-    'neutron': 'git://github.com/openstack/neutron',
-    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
-    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
-    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
-    'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
-    'liberty': 'stable/liberty',
-    'mitaka': 'stable/mitaka',
-    'newton': 'stable/newton',
-    'master': 'master',
-}
-
 DEFAULT_LOOPBACK_SIZE = '5G'
 
 
@@ -530,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
     if _os_rel:
         return _os_rel
     _os_rel = (
-        git_os_codename_install_source(config('openstack-origin-git')) or
        get_os_codename_package(package, fatal=False) or
        get_os_codename_install_source(config('openstack-origin')) or
        base)
@@ -656,11 +626,6 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
+    if "swift" in package:
+        major_cur_vers = cur_vers.split('.', 1)[0]
+        major_avail_vers = avail_vers.split('.', 1)[0]
+        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
+        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
     return apt.version_compare(avail_vers, cur_vers) == 1
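This is the swift fix (lp:1743847): swift carries its own package version scheme (e.g. 2.15.1) rather than OpenStack release versions, so a plain version comparison misfires. A simplified sketch of the intent; the real code delegates the comparison to apt_pkg.version_compare:

```python
# Simplified sketch of the swift special case above. The real code uses
# apt_pkg.version_compare; here a stand-in compares major versions directly
# to show the intent: allow upgrades only within the same or next major.
def swift_upgrade_available(cur_vers, avail_vers):
    major_step = (int(avail_vers.split('.', 1)[0]) -
                  int(cur_vers.split('.', 1)[0]))
    return avail_vers > cur_vers and major_step in (0, 1)

assert swift_upgrade_available('2.15.1', '2.16.0')      # within a major
assert swift_upgrade_available('2.15.1', '3.0.0')       # next major
assert not swift_upgrade_available('2.15.1', '2.10.0')  # not an upgrade
```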
@@ -771,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
     return wrap
 
 
-def git_install_requested():
-    """
-    Returns true if openstack-origin-git is specified.
-    """
-    return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
-    """
-    Returns OpenStack codename of release being installed from source.
-    """
-    if git_install_requested():
-        projects = _git_yaml_load(projects_yaml)
-
-        if projects in GIT_DEFAULT_BRANCHES.keys():
-            if projects == 'master':
-                return 'ocata'
-            return projects
-
-        if 'release' in projects:
-            if projects['release'] == 'master':
-                return 'ocata'
-            return projects['release']
-
-    return None
-
-
-def git_default_repos(projects_yaml):
-    """
-    Returns default repos if a default openstack-origin-git value is specified.
-    """
-    service = service_name()
-    core_project = service
-
-    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
-        if projects_yaml == default:
-
-            # add the requirements repo first
-            repo = {
-                'name': 'requirements',
-                'repository': GIT_DEFAULT_REPOS['requirements'],
-                'branch': branch,
-            }
-            repos = [repo]
-
-            # neutron-* and nova-* charms require some additional repos
-            if service in ['neutron-api', 'neutron-gateway',
-                           'neutron-openvswitch']:
-                core_project = 'neutron'
-                if service == 'neutron-api':
-                    repo = {
-                        'name': 'networking-hyperv',
-                        'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-                for project in ['neutron-fwaas', 'neutron-lbaas',
-                                'neutron-vpnaas', 'nova']:
-                    repo = {
-                        'name': project,
-                        'repository': GIT_DEFAULT_REPOS[project],
-                        'branch': branch,
-                    }
-                    repos.append(repo)
-
-            elif service in ['nova-cloud-controller', 'nova-compute']:
-                core_project = 'nova'
-                repo = {
-                    'name': 'neutron',
-                    'repository': GIT_DEFAULT_REPOS['neutron'],
-                    'branch': branch,
-                }
-                repos.append(repo)
-            elif service == 'openstack-dashboard':
-                core_project = 'horizon'
-
-            # finally add the current service's core project repo
-            repo = {
-                'name': core_project,
-                'repository': GIT_DEFAULT_REPOS[core_project],
-                'branch': branch,
-            }
-            repos.append(repo)
-
-            return yaml.dump(dict(repositories=repos, release=default))
-
-    return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
-    """
-    Load the specified yaml into a dictionary.
-    """
-    if not projects_yaml:
-        return None
-
-    return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
-    """
-    Clone/install all specified OpenStack repositories.
-
-    The expected format of projects_yaml is:
-
-        repositories:
-          - {name: keystone,
-             repository: 'git://git.openstack.org/openstack/keystone.git',
-             branch: 'stable/icehouse'}
-          - {name: requirements,
-             repository: 'git://git.openstack.org/openstack/requirements.git',
-             branch: 'stable/icehouse'}
-
-        directory: /mnt/openstack-git
-        http_proxy: squid-proxy-url
-        https_proxy: squid-proxy-url
-
-    The directory, http_proxy, and https_proxy keys are optional.
-
-    """
-    global requirements_dir
-    parent_dir = '/mnt/openstack-git'
-    http_proxy = None
-
-    projects = _git_yaml_load(projects_yaml)
-    _git_validate_projects_yaml(projects, core_project)
-
-    old_environ = dict(os.environ)
-
-    if 'http_proxy' in projects.keys():
-        http_proxy = projects['http_proxy']
-        os.environ['http_proxy'] = projects['http_proxy']
-    if 'https_proxy' in projects.keys():
-        os.environ['https_proxy'] = projects['https_proxy']
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
-    # Upgrade setuptools and pip from default virtualenv versions. The default
-    # versions in trusty break master OpenStack branch deployments.
-    for p in ['pip', 'setuptools']:
-        pip_install(p, upgrade=True, proxy=http_proxy,
-                    venv=os.path.join(parent_dir, 'venv'))
-
-    constraints = None
-    for p in projects['repositories']:
-        repo = p['repository']
-        branch = p['branch']
-        depth = '1'
-        if 'depth' in p.keys():
-            depth = p['depth']
-        if p['name'] == 'requirements':
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=False)
-            requirements_dir = repo_dir
-            constraints = os.path.join(repo_dir, "upper-constraints.txt")
-            # upper-constraints didn't exist until after icehouse
-            if not os.path.isfile(constraints):
-                constraints = None
-            # use constraints unless project yaml sets use_constraints to false
-            if 'use_constraints' in projects.keys():
-                if not projects['use_constraints']:
-                    constraints = None
-        else:
-            repo_dir = _git_clone_and_install_single(repo, branch, depth,
-                                                     parent_dir, http_proxy,
-                                                     update_requirements=True,
-                                                     constraints=constraints)
-
-    os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
-    """
-    Validate the projects yaml.
-    """
-    _git_ensure_key_exists('repositories', projects)
-
-    for project in projects['repositories']:
-        _git_ensure_key_exists('name', project.keys())
-        _git_ensure_key_exists('repository', project.keys())
-        _git_ensure_key_exists('branch', project.keys())
-
-    if projects['repositories'][0]['name'] != 'requirements':
-        error_out('{} git repo must be specified first'.format('requirements'))
-
-    if projects['repositories'][-1]['name'] != core_project:
-        error_out('{} git repo must be specified last'.format(core_project))
-
-    _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
-    """
-    Ensure that key exists in keys.
-    """
-    if key not in keys:
-        error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
-                                  update_requirements, constraints=None):
-    """
-    Clone and install a single git repository.
-    """
-    if not os.path.exists(parent_dir):
-        juju_log('Directory already exists at {}. '
-                 'No need to create directory.'.format(parent_dir))
-        os.mkdir(parent_dir)
-
-    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-    repo_dir = install_remote(
-        repo, dest=parent_dir, branch=branch, depth=depth)
-
-    venv = os.path.join(parent_dir, 'venv')
-
-    if update_requirements:
-        if not requirements_dir:
-            error_out('requirements repo must be cloned before '
-                      'updating from global requirements.')
-        _git_update_requirements(venv, repo_dir, requirements_dir)
-
-    juju_log('Installing git repo from dir: {}'.format(repo_dir))
-    if http_proxy:
-        pip_install(repo_dir, proxy=http_proxy, venv=venv,
-                    constraints=constraints)
-    else:
-        pip_install(repo_dir, venv=venv, constraints=constraints)
-
-    return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
-    """
-    Update from global requirements.
-
-    Update an OpenStack git directory's requirements.txt and
-    test-requirements.txt from global-requirements.txt.
-    """
-    orig_dir = os.getcwd()
-    os.chdir(reqs_dir)
-    python = os.path.join(venv, 'bin/python')
-    cmd = [python, 'update.py', package_dir]
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        package = os.path.basename(package_dir)
-        error_out("Error updating {} from "
-                  "global-requirements.txt".format(package))
-    os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
-    """
-    Return the pip virtualenv path.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
-    """
-    Return the directory where the specified project's source is located.
-    """
-    parent_dir = '/mnt/openstack-git'
-
-    projects = _git_yaml_load(projects_yaml)
-
-    if 'directory' in projects.keys():
-        parent_dir = projects['directory']
-
-    for p in projects['repositories']:
-        if p['name'] == project:
-            return os.path.join(parent_dir, os.path.basename(p['repository']))
-
-    return None
-
-
-def git_yaml_value(projects_yaml, key):
-    """
-    Return the value in projects_yaml for the specified key.
-    """
-    projects = _git_yaml_load(projects_yaml)
-
-    if key in projects.keys():
-        return projects[key]
-
-    return None
-
-
-def git_generate_systemd_init_files(templates_dir):
-    """
-    Generate systemd init files.
-
-    Generates and installs systemd init units and script files based on the
-    *.init.in files contained in the templates_dir directory.
-
-    This code is based on the openstack-pkg-tools package and its init
-    script generation, which is used by the OpenStack packages.
-    """
-    for f in os.listdir(templates_dir):
-        # Create the init script and systemd unit file from the template
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            init_source = os.path.join(templates_dir, init_file)
-            service_source = os.path.join(templates_dir, service_file)
-
-            init_dest = os.path.join('/etc/init.d', init_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(init_in_source, init_source)
-            with open(init_source, 'a') as outfile:
-                template = ('/usr/share/openstack-pkg-tools/'
-                            'init-script-template')
-                with open(template) as infile:
-                    outfile.write('\n\n{}'.format(infile.read()))
-
-            cmd = ['pkgos-gen-systemd-unit', init_in_source]
-            subprocess.check_call(cmd)
-
-            if os.path.exists(init_dest):
-                os.remove(init_dest)
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(init_source, init_dest)
-            shutil.copyfile(service_source, service_dest)
-            os.chmod(init_dest, 0o755)
-
-    for f in os.listdir(templates_dir):
-        # If there's a service.in file, use it instead of the generated one
-        if f.endswith(".service.in"):
-            service_in_file = f
-            service_file = f[:-3]
-
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            shutil.copyfile(service_in_source, service_source)
-
-            if os.path.exists(service_dest):
-                os.remove(service_dest)
-            shutil.copyfile(service_source, service_dest)
-
-    for f in os.listdir(templates_dir):
-        # Generate the systemd unit if there's no existing .service.in
-        if f.endswith(".init.in"):
-            init_in_file = f
-            init_file = f[:-8]
-            service_in_file = "{}.service.in".format(init_file)
-            service_file = "{}.service".format(init_file)
-
-            init_in_source = os.path.join(templates_dir, init_in_file)
-            service_in_source = os.path.join(templates_dir, service_in_file)
-            service_source = os.path.join(templates_dir, service_file)
-            service_dest = os.path.join('/lib/systemd/system', service_file)
-
-            if not os.path.exists(service_in_source):
-                cmd = ['pkgos-gen-systemd-unit', init_in_source]
-                subprocess.check_call(cmd)
-
-                if os.path.exists(service_dest):
-                    os.remove(service_dest)
-                shutil.copyfile(service_source, service_dest)
-
-
-def git_determine_usr_bin():
-    """Return the /usr/bin path for Apache2 config.
-
-    The /usr/bin path will be located in the virtualenv if the charm
-    is configured to deploy from source.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
-    else:
-        return '/usr/bin'
-
-
-def git_determine_python_path():
-    """Return the python-path for Apache2 config.
-
-    Returns 'None' unless the charm is configured to deploy from source,
-    in which case the path of the virtualenv's site-packages is returned.
-    """
-    if git_install_requested():
-        projects_yaml = config('openstack-origin-git')
-        projects_yaml = git_default_repos(projects_yaml)
-        return os.path.join(git_pip_venv_dir(projects_yaml),
-                            'lib/python2.7/site-packages')
-    else:
-        return None
-
-
 def os_workload_status(configs, required_interfaces, charm_func=None):
     """
     Decorator to set workload status based on complete contexts
@@ -1615,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
     """
     ret = False
 
-    if git_install_requested():
-        action_set({'outcome': 'installed from source, skipped upgrade.'})
-    else:
-        if openstack_upgrade_available(package):
-            if config('action-managed-upgrade'):
-                juju_log('Upgrading OpenStack release')
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
 
-                try:
-                    upgrade_callback(configs=configs)
-                    action_set({'outcome': 'success, upgrade completed.'})
-                    ret = True
-                except Exception:
-                    action_set({'outcome': 'upgrade failed, see traceback.'})
-                    action_set({'traceback': traceback.format_exc()})
-                    action_fail('do_openstack_upgrade resulted in an '
-                                'unexpected error')
-            else:
-                action_set({'outcome': 'action-managed-upgrade config is '
-                                       'False, skipped upgrade.'})
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
         else:
-            action_set({'outcome': 'no upgrade available.'})
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
 
     return ret
@@ -2045,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None):
 
 def update_json_file(filename, items):
     """Updates the json `filename` with a given dict.
-    :param filename: json filename (i.e.: /etc/glance/policy.json)
+    :param filename: path to json file (e.g. /etc/glance/policy.json)
     :param items: dict of items to update
     """
+    if not items:
+        return
+
     with open(filename) as fd:
         policy = json.load(fd)
+
+    # Compare before and after and if nothing has changed don't write the file
+    # since that could cause unnecessary service restarts.
+    before = json.dumps(policy, indent=4, sort_keys=True)
     policy.update(items)
+    after = json.dumps(policy, indent=4, sort_keys=True)
+    if before == after:
+        return
+
     with open(filename, "w") as fd:
-        fd.write(json.dumps(policy, indent=4))
+        fd.write(after)
 
 
 @cached
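The new before/after comparison makes the write idempotent: if the update leaves the policy unchanged, the file is never rewritten and no restart is triggered. A runnable illustration:

```python
import json
import tempfile

# The before/after comparison above avoids rewriting an unchanged file,
# which in a charm would otherwise trigger needless service restarts.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fd:
    json.dump({'context_is_admin': 'role:admin'}, fd)
    path = fd.name

with open(path) as fd:
    policy = json.load(fd)

before = json.dumps(policy, indent=4, sort_keys=True)
policy.update({'context_is_admin': 'role:admin'})  # no-op update
after = json.dumps(policy, indent=4, sort_keys=True)
assert before == after  # nothing changed, so the file is left untouched
```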
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
         assert isinstance(valid_range, list), \
             "valid_range must be a list, was given {}".format(valid_range)
         # If we're dealing with strings
-        if valid_type is six.string_types:
+        if isinstance(value, six.string_types):
             assert value in valid_range, \
                 "{} is not in the list {}".format(value, valid_range)
     # Integer, float should have a min and max
@@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
     :param value:
     :return: None. Can raise CalledProcessError
     """
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+           str(value).lower()]
     try:
         check_call(cmd)
     except CalledProcessError:
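Stringifying the value matters because callers pass Python values such as True or 64, every argv element handed to check_call must be a string, and the ceph CLI expects lowercase booleans:

```python
# Why str(value).lower(): subprocess argv elements must be strings, and the
# ceph CLI spells booleans in lowercase ('true'/'false').
for value in (True, False, 64, 'warn'):
    arg = str(value).lower()
    assert isinstance(arg, str)

print(str(True).lower())  # 'true', as the ceph CLI expects
```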
@@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param durability_estimator: int
     :return: None. Can raise CalledProcessError
     """
+    version = ceph_version()
+
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
 
     cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
-           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
-           'ruleset_failure_domain=' + failure_domain]
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
+           ]
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
+    # failure_domain changed in luminous
+    if version and version >= '12.0.0':
+        cmd.append('crush-failure-domain=' + failure_domain)
+    else:
+        cmd.append('ruleset-failure-domain=' + failure_domain)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
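Ceph Luminous (12.x) renamed the erasure-profile option from ruleset-failure-domain to crush-failure-domain, hence the version switch. A standalone sketch (using the same lexicographic version comparison as the synced code):

```python
# Sketch of the version switch above: Luminous (12.x) renamed the
# erasure-profile key from ruleset-failure-domain to crush-failure-domain.
def failure_domain_arg(ceph_version, failure_domain='host'):
    if ceph_version and ceph_version >= '12.0.0':
        return 'crush-failure-domain=' + failure_domain
    return 'ruleset-failure-domain=' + failure_domain

assert failure_domain_arg('12.2.0') == 'crush-failure-domain=host'
assert failure_domain_arg('10.2.9') == 'ruleset-failure-domain=host'
```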
@@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
         self.ops = []
 
     def add_op_request_access_to_group(self, name, namespace=None,
-                                       permission=None, key_name=None):
+                                       permission=None, key_name=None,
+                                       object_prefix_permissions=None):
         """
         Adds the requested permissions to the current service's Ceph key,
-        allowing the key to access only the specified pools
+        allowing the key to access only the specified pools or
+        object prefixes. object_prefix_permissions should be a dictionary
+        keyed on the permission with the corresponding value being a list
+        of prefixes to apply that permission to.
+            {
+                'rwx': ['prefix1', 'prefix2'],
+                'class-read': ['prefix3']}
         """
-        self.ops.append({'op': 'add-permissions-to-key', 'group': name,
-                         'namespace': namespace, 'name': key_name or service_name(),
-                         'group-permission': permission})
+        self.ops.append({
+            'op': 'add-permissions-to-key', 'group': name,
+            'namespace': namespace,
+            'name': key_name or service_name(),
+            'group-permission': permission,
+            'object-prefix-permissions': object_prefix_permissions})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None):
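The new object_prefix_permissions argument is a dict keyed by permission, valued with the object-name prefixes that permission should apply to. Illustrative shape (prefix and group names are made up):

```python
# Shape of the new object_prefix_permissions argument: permissions keyed
# to the object-name prefixes they apply to (illustrative values).
object_prefix_permissions = {
    'rwx': ['prefix1', 'prefix2'],
    'class-read': ['prefix3'],
}
# The op dict sent over the broker relation then carries it verbatim:
op = {
    'op': 'add-permissions-to-key',
    'group': 'images',  # sample group name
    'object-prefix-permissions': object_prefix_permissions,
}
assert 'rwx' in op['object-prefix-permissions']
```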
@@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
     def _ops_equal(self, other):
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+                for key in [
+                        'replicas', 'name', 'op', 'pg_num', 'weight',
+                        'group', 'group-namespace', 'group-permission',
+                        'object-prefix-permissions']:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
@@ -39,6 +39,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
 
         """
         with open(self.path, 'w') as f:
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -175,6 +175,8 @@ class Storage(object):
         else:
             self.db_path = os.path.join(
                 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        with open(self.db_path, 'a') as f:
+            os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
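Both state files now get their permissions tightened via os.fchmod on the already-open descriptor, so the file is never left world-readable even briefly. The same pattern in isolation:

```python
import os
import tempfile

# Same pattern as the hunks above: open (creating if needed), then chmod
# the open descriptor to 0o600 before any sensitive data is written.
path = os.path.join(tempfile.mkdtemp(), '.unit-state.db')
with open(path, 'a') as f:
    os.fchmod(f.fileno(), 0o600)
assert oct(os.stat(path).st_mode & 0o777) == '0o600'
```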
@@ -39,6 +39,7 @@ if not six.PY3:
 else:
     from collections import UserDict
 
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
 
         """
        with open(self.path, 'w') as f:
+            os.fchmod(f.fileno(), 0o600)
             json.dump(self, f)
 
     def _implicit_save(self):
@@ -175,6 +175,8 @@ class Storage(object):
         else:
             self.db_path = os.path.join(
                 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+        with open(self.db_path, 'a') as f:
+            os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
@@ -39,18 +39,19 @@ class HAProxyContextTests(CharmTestCase):
         self.relation_get.side_effect = self.test_relation.get
         self.config.side_effect = self.test_config.get
 
+    @patch('charmhelpers.contrib.openstack.context.get_relation_ip')
     @patch('charmhelpers.contrib.openstack.context.mkdir')
     @patch('charmhelpers.contrib.openstack.context.unit_get')
     @patch('charmhelpers.contrib.openstack.context.local_unit')
-    @patch('charmhelpers.contrib.openstack.context.get_host_ip')
     @patch('charmhelpers.contrib.openstack.context.config')
     @patch('charmhelpers.contrib.hahelpers.cluster.config_get')
     @patch('charmhelpers.contrib.openstack.context.relation_ids')
     @patch('charmhelpers.contrib.hahelpers.cluster.relation_ids')
     def test_ctxt(self, _harelation_ids, _ctxtrelation_ids, _haconfig,
-                  _ctxtconfig, _get_host_ip, _local_unit, _unit_get, _mkdir):
-        _get_host_ip.return_value = '10.0.0.10'
+                  _ctxtconfig, _local_unit, _unit_get, _mkdir,
+                  _get_relation_ip):
         _unit_get.return_value = '10.0.0.10'
+        _get_relation_ip.return_value = '10.0.0.10'
         _ctxtconfig.side_effect = self.test_config.get
         _haconfig.side_effect = self.test_config.get
         _harelation_ids.return_value = []