Sync charm helpers for Mitaka awareness

commit 502992535f (parent c7b00c7b03)
@@ -53,7 +53,7 @@ def _validate_cidr(network):
 
 
 def no_ip_found_error_out(network):
-    errmsg = ("No IP address found in network: %s" % network)
+    errmsg = ("No IP address found in network(s): %s" % network)
     raise ValueError(errmsg)
 
 
@@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
     """Get an IPv4 or IPv6 address within the network from the host.
 
     :param network (str): CIDR presentation format. For example,
-        '192.168.1.0/24'.
+        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
     :param fallback (str): If no address is found, return fallback.
     :param fatal (boolean): If no address is found, fallback is not
                             set and fatal is True then exit(1).
@@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             return None
 
-    _validate_cidr(network)
-    network = netaddr.IPNetwork(network)
-    for iface in netifaces.interfaces():
-        addresses = netifaces.ifaddresses(iface)
-        if network.version == 4 and netifaces.AF_INET in addresses:
-            addr = addresses[netifaces.AF_INET][0]['addr']
-            netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
-            if cidr in network:
-                return str(cidr.ip)
+    networks = network.split() or [network]
+    for network in networks:
+        _validate_cidr(network)
+        network = netaddr.IPNetwork(network)
+        for iface in netifaces.interfaces():
+            addresses = netifaces.ifaddresses(iface)
+            if network.version == 4 and netifaces.AF_INET in addresses:
+                addr = addresses[netifaces.AF_INET][0]['addr']
+                netmask = addresses[netifaces.AF_INET][0]['netmask']
+                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+                if cidr in network:
+                    return str(cidr.ip)
 
-        if network.version == 6 and netifaces.AF_INET6 in addresses:
-            for addr in addresses[netifaces.AF_INET6]:
-                if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
-                    if cidr in network:
-                        return str(cidr.ip)
+            if network.version == 6 and netifaces.AF_INET6 in addresses:
+                for addr in addresses[netifaces.AF_INET6]:
+                    if not addr['addr'].startswith('fe80'):
+                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                            addr['netmask']))
+                        if cidr in network:
+                            return str(cidr.ip)
 
     if fallback is not None:
         return fallback
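get_address_in_network() now accepts a whitespace-separated list of CIDRs and returns the first local address that falls inside any of them. A minimal sketch of the new calling convention (the networks and fallback here are invented):

    from charmhelpers.contrib.network.ip import get_address_in_network

    # Each CIDR is validated and checked against every local interface in
    # turn; the fallback is only used when no interface matches any network.
    addr = get_address_in_network('10.5.0.0/16 2001:db8::/64',
                                  fallback='127.0.0.1')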
@@ -454,3 +456,18 @@ def get_hostname(address, fqdn=True):
         return result
     else:
         return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note calls 'nc' via a subprocess shell
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
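nc -z exits 0 when the TCP connection succeeds, so negating the return code yields the boolean directly. A quick sketch of how a charm might consume the helper (host and port invented):

    from charmhelpers.contrib.network.ip import port_has_listener

    # True only while something is accepting connections on the port.
    if not port_has_listener('127.0.0.1', 8888):
        print('nothing listening on the haproxy stats port')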
@@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
 
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
+                     'cinder-backup']
 
         if self.openstack:
             for svc in services:
@@ -225,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
          self.precise_havana, self.precise_icehouse,
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
-         self.wily_liberty) = range(12)
+         self.wily_liberty, self.trusty_mitaka,
+         self.xenial_mitaka) = range(14)
 
         releases = {
             ('precise', None): self.precise_essex,
@@ -237,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
             ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
             ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
             ('utopic', None): self.utopic_juno,
             ('vivid', None): self.vivid_kilo,
-            ('wily', None): self.wily_liberty}
+            ('wily', None): self.wily_liberty,
+            ('xenial', None): self.xenial_mitaka}
         return releases[(self.series, self.openstack)]
 
     def _get_openstack_release_string(self):
@@ -256,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('utopic', 'juno'),
             ('vivid', 'kilo'),
             ('wily', 'liberty'),
+            ('xenial', 'mitaka'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
@@ -57,6 +57,7 @@ from charmhelpers.core.host import (
     get_nic_hwaddr,
     mkdir,
     write_file,
+    pwgen,
 )
 from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
@@ -87,6 +88,14 @@ from charmhelpers.contrib.network.ip import (
     is_bridge_member,
 )
 from charmhelpers.contrib.openstack.utils import get_host_ip
+from charmhelpers.core.unitdata import kv
+
+try:
+    import psutil
+except ImportError:
+    apt_install('python-psutil', fatal=True)
+    import psutil
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']
@@ -401,6 +410,7 @@ class IdentityServiceContext(OSContextGenerator):
             auth_host = format_ipv6_addr(auth_host) or auth_host
             svc_protocol = rdata.get('service_protocol') or 'http'
             auth_protocol = rdata.get('auth_protocol') or 'http'
+            api_version = rdata.get('api_version') or '2.0'
             ctxt.update({'service_port': rdata.get('service_port'),
                          'service_host': serv_host,
                          'auth_host': auth_host,
@@ -409,7 +419,8 @@ class IdentityServiceContext(OSContextGenerator):
                          'admin_user': rdata.get('service_username'),
                          'admin_password': rdata.get('service_password'),
                          'service_protocol': svc_protocol,
-                         'auth_protocol': auth_protocol})
+                         'auth_protocol': auth_protocol,
+                         'api_version': api_version})
 
             if self.context_complete(ctxt):
                 # NOTE(jamespage) this is required for >= icehouse
@@ -636,11 +647,18 @@ class HAProxyContext(OSContextGenerator):
             ctxt['ipv6'] = True
             ctxt['local_host'] = 'ip6-localhost'
             ctxt['haproxy_host'] = '::'
-            ctxt['stat_port'] = ':::8888'
         else:
             ctxt['local_host'] = '127.0.0.1'
             ctxt['haproxy_host'] = '0.0.0.0'
-            ctxt['stat_port'] = ':8888'
+
+        ctxt['stat_port'] = '8888'
+
+        db = kv()
+        ctxt['stat_password'] = db.get('stat-password')
+        if not ctxt['stat_password']:
+            ctxt['stat_password'] = db.set('stat-password',
+                                           pwgen(32))
+            db.flush()
 
         for frontend in cluster_hosts:
             if (len(cluster_hosts[frontend]['backends']) > 1 or
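Generating the stats password once and parking it in the unit's key/value store means repeated hook runs (and haproxy reloads) no longer churn the credential. The same pattern in isolation, with a made-up key name:

    from charmhelpers.core.unitdata import kv
    from charmhelpers.core.host import pwgen

    db = kv()
    password = db.get('example-password')
    if not password:
        # set() returns the stored value; flush() commits it to the
        # unit's sqlite-backed store so later hooks see the same secret.
        password = db.set('example-password', pwgen(32))
        db.flush()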
@@ -1094,6 +1112,20 @@ class OSConfigFlagContext(OSContextGenerator):
                 config_flags_parser(config_flags)}
 
 
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
 class SubordinateConfigContext(OSContextGenerator):
 
     """
@@ -1234,13 +1266,11 @@ class WorkerConfigContext(OSContextGenerator):
 
     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
 
     def __call__(self):
         multiplier = config('worker-multiplier') or 0
@@ -1443,6 +1473,8 @@ class NetworkServiceContext(OSContextGenerator):
                     rdata.get('service_protocol') or 'http',
                 'auth_protocol':
                     rdata.get('auth_protocol') or 'http',
+                'api_version':
+                    rdata.get('api_version') or '2.0',
             }
             if self.context_complete(ctxt):
                 return ctxt
@@ -9,15 +9,17 @@
 CRITICAL=0
 NOTACTIVE=''
 LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}')
 
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2}');
+typeset -i N_INSTANCES=0
+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
 do
-    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
+    N_INSTANCES=N_INSTANCES+1
+    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
     if [ $? != 0 ]; then
         date >> $LOGFILE
         echo $output >> $LOGFILE
-        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
+        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
         CRITICAL=1
         NOTACTIVE="${NOTACTIVE} $appserver"
     fi
@@ -28,5 +30,5 @@ if [ $CRITICAL = 1 ]; then
     exit 2
 fi
 
-echo "OK: All haproxy instances looking good"
+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
 exit 0
@@ -50,7 +50,7 @@ def determine_dkms_package():
     if kernel_version() >= (3, 13):
         return []
     else:
-        return ['openvswitch-datapath-dkms']
+        return [headers_package(), 'openvswitch-datapath-dkms']
 
 
 # legacy
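With headers_package() folded into determine_dkms_package(), kernel headers are only pulled in when a DKMS build is actually needed (kernels older than 3.13); the plugin dictionaries in the hunks below can therefore drop their explicit concatenation. A standalone sketch of the gating logic, with an invented kernel version and a stand-in headers package name:

    def determine_dkms_package(kernel_version=(3, 19)):
        if kernel_version >= (3, 13):
            # Modern kernels ship the openvswitch module; no DKMS build,
            # and therefore no kernel headers, are required.
            return []
        return ['linux-headers-generic', 'openvswitch-datapath-dkms']

    packages = [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']]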
@@ -70,7 +70,7 @@ def quantum_plugins():
                                relation_prefix='neutron',
                                ssl_dir=QUANTUM_CONF_DIR)],
         'services': ['quantum-plugin-openvswitch-agent'],
-        'packages': [[headers_package()] + determine_dkms_package(),
+        'packages': [determine_dkms_package(),
                      ['quantum-plugin-openvswitch-agent']],
         'server_packages': ['quantum-server',
                             'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
                                relation_prefix='neutron',
                                ssl_dir=NEUTRON_CONF_DIR)],
         'services': ['neutron-plugin-openvswitch-agent'],
-        'packages': [[headers_package()] + determine_dkms_package(),
+        'packages': [determine_dkms_package(),
                      ['neutron-plugin-openvswitch-agent']],
         'server_packages': ['neutron-server',
                             'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
                                relation_prefix='neutron',
                                ssl_dir=NEUTRON_CONF_DIR)],
         'services': [],
-        'packages': [[headers_package()] + determine_dkms_package(),
+        'packages': [determine_dkms_package(),
                      ['neutron-plugin-cisco']],
         'server_packages': ['neutron-server',
                             'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
                      'neutron-dhcp-agent',
                      'nova-api-metadata',
                      'etcd'],
-        'packages': [[headers_package()] + determine_dkms_package(),
+        'packages': [determine_dkms_package(),
                      ['calico-compute',
                       'bird',
                       'neutron-dhcp-agent',
@@ -219,7 +219,7 @@ def neutron_plugins():
                                relation_prefix='neutron',
                                ssl_dir=NEUTRON_CONF_DIR)],
         'services': [],
-        'packages': [[headers_package()] + determine_dkms_package()],
+        'packages': [determine_dkms_package()],
         'server_packages': ['neutron-server',
                             'python-neutron-plugin-midonet'],
         'server_services': ['neutron-server']
@@ -233,6 +233,18 @@ def neutron_plugins():
                                             'neutron-plugin-ml2']
     # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
     plugins['nvp'] = plugins['nsx']
+    if release >= 'kilo':
+        plugins['midonet']['driver'] = (
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
+    if release >= 'liberty':
+        midonet_origin = config('midonet-origin')
+        if midonet_origin is not None and midonet_origin[4:5] == '1':
+            plugins['midonet']['driver'] = (
+                'midonet.neutron.plugin_v1.MidonetPluginV2')
+            plugins['midonet']['server_packages'].remove(
+                'python-neutron-plugin-midonet')
+            plugins['midonet']['server_packages'].append(
+                'python-networking-midonet')
     return plugins
@@ -33,13 +33,14 @@ defaults
     timeout server 30000
 {%- endif %}
 
-listen stats {{ stat_port }}
+listen stats
+    bind {{ local_host }}:{{ stat_port }}
     mode http
     stats enable
     stats hide-version
    stats realm Haproxy\ Statistics
     stats uri /
-    stats auth admin:password
+    stats auth admin:{{ stat_password }}
 
 {% if frontends -%}
 {% for service, ports in service_ports.items() -%}
@@ -1,4 +1,14 @@
 {% if auth_host -%}
+{% if api_version == '3' -%}
+[keystone_authtoken]
+auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+project_name = {{ admin_tenant_name }}
+username = {{ admin_user }}
+password = {{ admin_password }}
+project_domain_name = default
+user_domain_name = default
+auth_plugin = password
+{% else -%}
 [keystone_authtoken]
 identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
 auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
@@ -7,3 +17,4 @@ admin_user = {{ admin_user }}
 admin_password = {{ admin_password }}
 signing_dir = {{ signing_dir }}
 {% endif -%}
+{% endif -%}
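The api_version context value added above selects between the keystone v3 and v2.0 [keystone_authtoken] layouts. One quick way to sanity-check the branching is to render a reduced version of the template directly; everything in this sketch is illustrative:

    from jinja2 import Template

    snippet = Template(
        "{% if api_version == '3' -%}\n"
        "auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}\n"
        "auth_plugin = password\n"
        "{% else -%}\n"
        "identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}\n"
        "{% endif -%}")
    # Renders the v3 branch with made-up endpoint values.
    print(snippet.render(api_version='3', service_protocol='http',
                         service_host='10.0.0.10', service_port=5000))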
@@ -23,8 +23,10 @@ import json
 import os
 import sys
 import re
+import itertools
 
 import six
+import tempfile
 import traceback
 import uuid
 import yaml
@@ -41,6 +43,7 @@ from charmhelpers.core.hookenv import (
     config,
     log as juju_log,
     charm_dir,
+    DEBUG,
     INFO,
     related_units,
     relation_ids,
@@ -58,6 +61,7 @@ from charmhelpers.contrib.storage.linux.lvm import (
 from charmhelpers.contrib.network.ip import (
     get_ipv6_addr,
     is_ipv6,
+    port_has_listener,
 )
 
 from charmhelpers.contrib.python.packages import (
@@ -65,7 +69,7 @@ from charmhelpers.contrib.python.packages import (
     pip_install,
 )
 
-from charmhelpers.core.host import lsb_release, mounts, umount
+from charmhelpers.core.host import lsb_release, mounts, umount, service_running
 from charmhelpers.fetch import apt_install, apt_cache, install_remote
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@@ -86,6 +90,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('utopic', 'juno'),
     ('vivid', 'kilo'),
     ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
 ])
@@ -99,61 +104,70 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2014.2', 'juno'),
     ('2015.1', 'kilo'),
     ('2015.2', 'liberty'),
+    ('2016.1', 'mitaka'),
 ])
 
-# The ugly duckling
+# The ugly duckling - must list releases oldest to newest
 SWIFT_CODENAMES = OrderedDict([
-    ('1.4.3', 'diablo'),
-    ('1.4.8', 'essex'),
-    ('1.7.4', 'folsom'),
-    ('1.8.0', 'grizzly'),
-    ('1.7.7', 'grizzly'),
-    ('1.7.6', 'grizzly'),
-    ('1.10.0', 'havana'),
-    ('1.9.1', 'havana'),
-    ('1.9.0', 'havana'),
-    ('1.13.1', 'icehouse'),
-    ('1.13.0', 'icehouse'),
-    ('1.12.0', 'icehouse'),
-    ('1.11.0', 'icehouse'),
-    ('2.0.0', 'juno'),
-    ('2.1.0', 'juno'),
-    ('2.2.0', 'juno'),
-    ('2.2.1', 'kilo'),
-    ('2.2.2', 'kilo'),
-    ('2.3.0', 'liberty'),
-    ('2.4.0', 'liberty'),
-    ('2.5.0', 'liberty'),
+    ('diablo',
+        ['1.4.3']),
+    ('essex',
+        ['1.4.8']),
+    ('folsom',
+        ['1.7.4']),
+    ('grizzly',
+        ['1.7.6', '1.7.7', '1.8.0']),
+    ('havana',
+        ['1.9.0', '1.9.1', '1.10.0']),
+    ('icehouse',
+        ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+    ('juno',
+        ['2.0.0', '2.1.0', '2.2.0']),
+    ('kilo',
+        ['2.2.1', '2.2.2']),
+    ('liberty',
+        ['2.3.0', '2.4.0', '2.5.0']),
+    ('mitaka',
+        ['2.5.0']),
 ])
 
 # >= Liberty version->codename mapping
 PACKAGE_CODENAMES = {
     'nova-common': OrderedDict([
-        ('12.0.0', 'liberty'),
+        ('12.0', 'liberty'),
+        ('13.0', 'mitaka'),
     ]),
     'neutron-common': OrderedDict([
-        ('7.0.0', 'liberty'),
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
     ]),
     'cinder-common': OrderedDict([
-        ('7.0.0', 'liberty'),
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
     ]),
     'keystone': OrderedDict([
-        ('8.0.0', 'liberty'),
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
     ]),
     'horizon-common': OrderedDict([
-        ('8.0.0', 'liberty'),
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
    ]),
     'ceilometer-common': OrderedDict([
-        ('5.0.0', 'liberty'),
+        ('5.0', 'liberty'),
+        ('6.0', 'mitaka'),
     ]),
     'heat-common': OrderedDict([
-        ('5.0.0', 'liberty'),
+        ('5.0', 'liberty'),
+        ('6.0', 'mitaka'),
     ]),
     'glance-common': OrderedDict([
-        ('11.0.0', 'liberty'),
+        ('11.0', 'liberty'),
+        ('12.0', 'mitaka'),
     ]),
     'openstack-dashboard': OrderedDict([
-        ('8.0.0', 'liberty'),
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
     ]),
 }
@@ -216,6 +230,33 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     error_out(e)
 
 
+def get_os_version_codename_swift(codename):
+    '''Determine OpenStack version number of swift from codename.'''
+    for k, v in six.iteritems(SWIFT_CODENAMES):
+        if k == codename:
+            return v[-1]
+    e = 'Could not derive swift version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_swift_codename(version):
+    '''Determine OpenStack codename that corresponds to swift version.'''
+    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    if len(codenames) > 1:
+        # If more than one release codename contains this version we determine
+        # the actual codename based on the highest available install source.
+        for codename in reversed(codenames):
+            releases = UBUNTU_OPENSTACK_RELEASE
+            release = [k for k, v in six.iteritems(releases) if codename in v]
+            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if codename in ret or release[0] in ret:
+                return codename
+    elif len(codenames) == 1:
+        return codenames[0]
+    return None
+
+
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
     import apt_pkg as apt
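The restructure is needed because a single swift version can ship in two releases (2.5.0 appears under both liberty and mitaka above), so the map is now codename -> [versions] and get_swift_codename() breaks ties via apt-cache policy. A toy illustration of just the lookup, with no apt involved:

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('liberty', ['2.3.0', '2.4.0', '2.5.0']),
        ('mitaka', ['2.5.0']),
    ])

    # '2.4.0' maps to exactly one codename; '2.5.0' is ambiguous and needs
    # the install-source tiebreak that get_swift_codename() performs.
    matches = [k for k, v in SWIFT_CODENAMES.items() if '2.5.0' in v]
    assert matches == ['liberty', 'mitaka']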
@@ -240,7 +281,14 @@ def get_os_codename_package(package, fatal=True):
         error_out(e)
 
     vers = apt.upstream_version(pkg.current_ver.ver_str)
-    match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match('^(\d+)\.(\d+)', vers)
 
     if match:
         vers = match.group(0)
@@ -252,13 +300,8 @@ def get_os_codename_package(package, fatal=True):
     # < Liberty co-ordinated project versions
     try:
         if 'swift' in pkg.name:
-            swift_vers = vers[:5]
-            if swift_vers not in SWIFT_CODENAMES:
-                # Deal with 1.10.0 upward
-                swift_vers = vers[:6]
-            return SWIFT_CODENAMES[swift_vers]
+            return get_swift_codename(vers)
         else:
-            vers = vers[:6]
             return OPENSTACK_CODENAMES[vers]
     except KeyError:
         if not fatal:
@@ -276,12 +319,14 @@ def get_os_version_package(pkg, fatal=True):
 
     if 'swift' in pkg:
         vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
     else:
         vers_map = OPENSTACK_CODENAMES
-
-    for version, cname in six.iteritems(vers_map):
-        if cname == codename:
-            return version
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
     # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
@@ -306,12 +351,42 @@ def os_release(package, base='essex'):
 
 
 def import_key(keyid):
-    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
-          "--recv-keys %s" % keyid
-    try:
-        subprocess.check_call(cmd.split(' '))
-    except subprocess.CalledProcessError:
-        error_out("Error importing repo key %s" % keyid)
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
+        with tempfile.NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error_out("Error importing PGP key '%s'" % key)
+    else:
+        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        juju_log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error_out("Error importing PGP key '%s'" % key)
+
+
+def get_source_and_pgp_key(input):
+    """Look for a pgp key ID or ascii-armor key in the given input."""
+    index = input.rfind('|')
+    if index < 0:
+        return input, None
+
+    key = input[index + 1:].strip('|')
+    source = input[:index]
+    return source, key
+
+
 def configure_installation_source(rel):
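get_source_and_pgp_key() splits on the last '|' so a source option such as 'deb <url> <pocket>|<key>' carries its key inline; ASCII-armored keys are then written to a temp file for apt-key add, while short IDs go to the keyserver. A sketch of the split behaviour (URL and key ID invented):

    source, key = get_source_and_pgp_key(
        'deb http://archive.example.com/ubuntu trusty main|DEADBEEF')
    # -> ('deb http://archive.example.com/ubuntu trusty main', 'DEADBEEF')

    source, key = get_source_and_pgp_key('ppa:example/ppa')
    # -> ('ppa:example/ppa', None): no '|' means there is no key to import.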
@@ -323,16 +398,16 @@ def configure_installation_source(rel):
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(DISTRO_PROPOSED % ubuntu_rel)
     elif rel[:4] == "ppa:":
-        src = rel
+        src, key = get_source_and_pgp_key(rel)
+        if key:
+            import_key(key)
+
         subprocess.check_call(["add-apt-repository", "-y", src])
     elif rel[:3] == "deb":
-        l = len(rel.split('|'))
-        if l == 2:
-            src, key = rel.split('|')
-            juju_log("Importing PPA key from keyserver for %s" % src)
-            import_key(key)
-        elif l == 1:
-            src = rel
+        src, key = get_source_and_pgp_key(rel)
+        if key:
+            import_key(key)
+
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(src)
     elif rel[:6] == 'cloud:':
@@ -377,6 +452,9 @@ def configure_installation_source(rel):
         'liberty': 'trusty-updates/liberty',
         'liberty/updates': 'trusty-updates/liberty',
         'liberty/proposed': 'trusty-proposed/liberty',
+        'mitaka': 'trusty-updates/mitaka',
+        'mitaka/updates': 'trusty-updates/mitaka',
+        'mitaka/proposed': 'trusty-proposed/mitaka',
     }
 
     try:
@@ -444,11 +522,16 @@ def openstack_upgrade_available(package):
     cur_vers = get_os_version_package(package)
     if "swift" in package:
         codename = get_os_codename_install_source(src)
-        available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
+        avail_vers = get_os_version_codename_swift(codename)
     else:
-        available_vers = get_os_version_install_source(src)
+        avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(available_vers, cur_vers) == 1
+    if "swift" in package:
+        major_cur_vers = cur_vers.split('.', 1)[0]
+        major_avail_vers = avail_vers.split('.', 1)[0]
+        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
+        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
+    return apt.version_compare(avail_vers, cur_vers) == 1
 
 
 def ensure_block_device(block_device):
@@ -577,7 +660,7 @@ def _git_yaml_load(projects_yaml):
     return yaml.load(projects_yaml)
 
 
-def git_clone_and_install(projects_yaml, core_project, depth=1):
+def git_clone_and_install(projects_yaml, core_project):
     """
     Clone/install all specified OpenStack repositories.
 
@@ -627,6 +710,9 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
     for p in projects['repositories']:
         repo = p['repository']
         branch = p['branch']
+        depth = '1'
+        if 'depth' in p.keys():
+            depth = p['depth']
         if p['name'] == 'requirements':
             repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                      parent_dir, http_proxy,
@@ -671,19 +757,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
     """
     Clone and install a single git repository.
     """
-    dest_dir = os.path.join(parent_dir, os.path.basename(repo))
-
     if not os.path.exists(parent_dir):
         juju_log('Directory already exists at {}. '
                  'No need to create directory.'.format(parent_dir))
         os.mkdir(parent_dir)
 
-    if not os.path.exists(dest_dir):
-        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-        repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
-                                  depth=depth)
-    else:
-        repo_dir = dest_dir
+    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
+    repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
 
     venv = os.path.join(parent_dir, 'venv')
 
@@ -782,13 +862,23 @@ def os_workload_status(configs, required_interfaces, charm_func=None):
     return wrap
 
 
-def set_os_workload_status(configs, required_interfaces, charm_func=None):
+def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None):
     """
     Set workload status based on complete contexts.
     status-set missing or incomplete contexts
     and juju-log details of missing required data.
     charm_func is a charm specific function to run checking
     for charm specific requirements such as a VIP setting.
+
+    This function also checks for whether the services defined are ACTUALLY
+    running and that the ports they advertise are open and being listened to.
+
+    @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}]
+                      The ports are optional.
+                      If services is a [<string>] then ports are ignored.
+    @param ports - OPTIONAL: an [<int>] representing ports that should be
+                   open.
+    @returns None
     """
     incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
     state = 'active'
@@ -867,6 +957,65 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
         else:
             message = charm_message
 
+    # If the charm thinks the unit is active, check that the actual services
+    # really are active.
+    if services is not None and state == 'active':
+        # if we're passed the dict() then just grab the values as a list.
+        if isinstance(services, dict):
+            services = services.values()
+        # either extract the list of services from the dictionary, or if
+        # it is a simple string, use that. i.e. works with mixed lists.
+        _s = []
+        for s in services:
+            if isinstance(s, dict) and 'service' in s:
+                _s.append(s['service'])
+            if isinstance(s, str):
+                _s.append(s)
+        services_running = [service_running(s) for s in _s]
+        if not all(services_running):
+            not_running = [s for s, running in zip(_s, services_running)
+                           if not running]
+            message = ("Services not running that should be: {}"
+                       .format(", ".join(not_running)))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        port_map = OrderedDict([(s['service'], s['ports'])
+                                for s in services if 'ports' in s])
+        if state == 'active' and port_map:
+            all_ports = list(itertools.chain(*port_map.values()))
+            ports_open = [port_has_listener('0.0.0.0', p)
+                          for p in all_ports]
+            if not all(ports_open):
+                not_opened = [p for p, opened in zip(all_ports, ports_open)
+                              if not opened]
+                map_not_open = OrderedDict()
+                for service, ports in port_map.items():
+                    closed_ports = set(ports).intersection(not_opened)
+                    if closed_ports:
+                        map_not_open[service] = closed_ports
+                # find which service has missing ports. They are in service
+                # order which makes it a bit easier.
+                message = (
+                    "Services with ports not open that should be: {}"
+                    .format(
+                        ", ".join([
+                            "{}: [{}]".format(
+                                service,
+                                ", ".join([str(v) for v in ports]))
+                            for service, ports in map_not_open.items()])))
+                state = 'blocked'
+
+    if ports is not None and state == 'active':
+        # and we can also check ports which we don't know the service for
+        ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+        if not all(ports_open):
+            message = (
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in zip(ports, ports_open)
+                                   if not v])))
+            state = 'blocked'
+
     # Set to active if all requirements have been met
     if state == 'active':
         message = "Unit is ready"
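Charms opt into the new liveness checks by passing their service map straight through. A hypothetical call from a config-changed hook, where CONFIGS and REQUIRED_INTERFACES stand in for the charm's usual module-level globals:

    from charmhelpers.contrib.openstack.utils import set_os_workload_status

    # If glance-api stops, or nothing listens on 9292 or 9191, the unit
    # goes 'blocked' with a message naming the offending service or port.
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           services=[{'service': 'glance-api',
                                      'ports': [9292]}],
                           ports=[9191])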
@@ -19,20 +19,35 @@
 
 import os
 import subprocess
+import sys
 
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log
 
-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 
 
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to add wheels
+    from /usr/share/python-wheels, which are installed by various tools.
+    This function ensures that sys.path remains the same after the call
+    is executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
     """Given a set of options, check if available"""
     for key, value in sorted(given.items()):
@@ -42,8 +57,12 @@ def parse_options(given, available):
         yield "--{0}={1}".format(key, value)
 
 
-def pip_install_requirements(requirements, **options):
-    """Install a requirements file """
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
     command = ["install"]
 
     available_options = ('proxy', 'src', 'log', )
@@ -51,8 +70,13 @@ def pip_install_requirements(requirements, **options):
         command.append(option)
 
     command.append("-r {0}".format(requirements))
-    log("Installing from file: {} with options: {}".format(requirements,
-                                                           command))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
     pip_execute(command)
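Constraints files pin versions without adding packages, which keeps OpenStack-from-git installs reproducible. A hypothetical invocation (the paths and proxy are invented):

    from charmhelpers.contrib.python.packages import pip_install_requirements

    # Installs everything in requirements.txt, version-pinned by the
    # constraints file; proxy is forwarded to pip as --proxy=...
    pip_install_requirements('/tmp/requirements.txt',
                             constraints='/tmp/upper-constraints.txt',
                             proxy='http://squid.internal:3128')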
@@ -23,10 +23,11 @@
 #  James Page <james.page@ubuntu.com>
 #  Adam Gandelman <adamg@ubuntu.com>
 #
+import bisect
+import six
 
 import os
 import shutil
-import six
 import json
 import time
 import uuid
@ -73,6 +74,394 @@ log to syslog = {use_syslog}
|
|||||||
err to syslog = {use_syslog}
|
err to syslog = {use_syslog}
|
||||||
clog to syslog = {use_syslog}
|
clog to syslog = {use_syslog}
|
||||||
"""
|
"""
|
||||||
|
# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
|
||||||
|
powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
|
||||||
|
|
||||||
|
|
||||||
|
def validator(value, valid_type, valid_range=None):
|
||||||
|
"""
|
||||||
|
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
|
||||||
|
Example input:
|
||||||
|
validator(value=1,
|
||||||
|
valid_type=int,
|
||||||
|
valid_range=[0, 2])
|
||||||
|
This says I'm testing value=1. It must be an int inclusive in [0,2]
|
||||||
|
|
||||||
|
:param value: The value to validate
|
||||||
|
:param valid_type: The type that value should be.
|
||||||
|
:param valid_range: A range of values that value can assume.
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
assert isinstance(value, valid_type), "{} is not a {}".format(
|
||||||
|
value,
|
||||||
|
valid_type)
|
||||||
|
if valid_range is not None:
|
||||||
|
assert isinstance(valid_range, list), \
|
||||||
|
"valid_range must be a list, was given {}".format(valid_range)
|
||||||
|
# If we're dealing with strings
|
||||||
|
if valid_type is six.string_types:
|
||||||
|
assert value in valid_range, \
|
||||||
|
"{} is not in the list {}".format(value, valid_range)
|
||||||
|
# Integer, float should have a min and max
|
||||||
|
else:
|
||||||
|
if len(valid_range) != 2:
|
||||||
|
raise ValueError(
|
||||||
|
"Invalid valid_range list of {} for {}. "
|
||||||
|
"List must be [min,max]".format(valid_range, value))
|
||||||
|
assert value >= valid_range[0], \
|
||||||
|
"{} is less than minimum allowed value of {}".format(
|
||||||
|
value, valid_range[0])
|
||||||
|
assert value <= valid_range[1], \
|
||||||
|
"{} is greater than maximum allowed value of {}".format(
|
||||||
|
value, valid_range[1])
|
||||||
|
|
||||||
|
|
||||||
|
class PoolCreationError(Exception):
|
||||||
|
"""
|
||||||
|
A custom error to inform the caller that a pool creation failed. Provides an error message
|
||||||
|
"""
|
||||||
|
def __init__(self, message):
|
||||||
|
super(PoolCreationError, self).__init__(message)
|
||||||
|
|
||||||
|
|
||||||
|
class Pool(object):
|
||||||
|
"""
|
||||||
|
An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
|
||||||
|
Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
|
||||||
|
"""
|
||||||
|
def __init__(self, service, name):
|
||||||
|
self.service = service
|
||||||
|
self.name = name
|
||||||
|
|
||||||
|
# Create the pool if it doesn't exist already
|
||||||
|
# To be implemented by subclasses
|
||||||
|
def create(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def add_cache_tier(self, cache_pool, mode):
|
||||||
|
"""
|
||||||
|
Adds a new cache tier to an existing pool.
|
||||||
|
:param cache_pool: six.string_types. The cache tier pool name to add.
|
||||||
|
:param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
|
||||||
|
:return: None
|
||||||
|
"""
|
||||||
|
# Check the input types and values
|
||||||
|
validator(value=cache_pool, valid_type=six.string_types)
|
||||||
|
validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
|
||||||
|
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
|
||||||
|
|
||||||
|
def remove_cache_tier(self, cache_pool):
|
||||||
|
"""
|
||||||
|
Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
|
||||||
|
:param cache_pool: six.string_types. The cache tier pool name to remove.
|
||||||
|
:return: None
|
||||||
|
"""
|
||||||
|
# read-only is easy, writeback is much harder
|
||||||
|
mode = get_cache_mode(cache_pool)
|
||||||
|
if mode == 'readonly':
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
||||||
|
|
||||||
|
elif mode == 'writeback':
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
|
||||||
|
# Flush the cache and wait for it to return
|
||||||
|
check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
|
||||||
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
||||||
|
|
||||||
|
def get_pgs(self, pool_size):
|
||||||
|
"""
|
||||||
|
:param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
|
||||||
|
erasure coded pools
|
||||||
|
:return: int. The number of pgs to use.
|
||||||
|
"""
|
||||||
|
validator(value=pool_size, valid_type=int)
|
||||||
|
osds = get_osds(self.service)
|
||||||
|
if not osds:
|
||||||
|
# NOTE(james-page): Default to 200 for older ceph versions
|
||||||
|
# which don't support OSD query from cli
|
||||||
|
return 200
|
||||||
|
|
||||||
|
# Calculate based on Ceph best practices
|
||||||
|
if osds < 5:
|
||||||
|
return 128
|
||||||
|
elif 5 < osds < 10:
|
||||||
|
return 512
|
||||||
|
elif 10 < osds < 50:
|
||||||
|
return 4096
|
||||||
|
else:
|
||||||
|
estimate = (osds * 100) / pool_size
|
||||||
|
# Return the next nearest power of 2
|
||||||
|
index = bisect.bisect_right(powers_of_two, estimate)
|
||||||
|
return powers_of_two[index]
|
||||||
|
|
||||||
|
|
||||||
|
class ReplicatedPool(Pool):
|
||||||
|
def __init__(self, service, name, replicas=2):
|
||||||
|
super(ReplicatedPool, self).__init__(service=service, name=name)
|
||||||
|
self.replicas = replicas
|
||||||
|
|
||||||
|
def create(self):
|
||||||
|
if not pool_exists(self.service, self.name):
|
||||||
|
# Create it
|
||||||
|
pgs = self.get_pgs(self.replicas)
|
||||||
|
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
|
||||||
|
try:
|
||||||
|
check_call(cmd)
|
||||||
|
except CalledProcessError:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
# Default jerasure erasure coded pool
|
||||||
|
class ErasurePool(Pool):
|
||||||
|
def __init__(self, service, name, erasure_code_profile="default"):
|
||||||
|
super(ErasurePool, self).__init__(service=service, name=name)
|
||||||
|
self.erasure_code_profile = erasure_code_profile
|
||||||
|
|
||||||
|
def create(self):
|
||||||
|
if not pool_exists(self.service, self.name):
|
||||||
|
# Try to find the erasure profile information so we can properly size the pgs
|
||||||
|
erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
|
||||||
|
|
||||||
|
# Check for errors
|
||||||
|
if erasure_profile is None:
|
||||||
|
log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
|
||||||
|
level=ERROR)
|
||||||
|
raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
|
||||||
|
if 'k' not in erasure_profile or 'm' not in erasure_profile:
|
||||||
|
# Error
|
||||||
|
log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
|
||||||
|
level=ERROR)
|
||||||
|
raise PoolCreationError(
|
||||||
|
message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
|
||||||
|
|
||||||
|
pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
|
||||||
|
# Create it
|
||||||
|
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
|
||||||
|
'erasure', self.erasure_code_profile]
|
||||||
|
try:
|
||||||
|
check_call(cmd)
|
||||||
|
except CalledProcessError:
|
||||||
|
raise
|
||||||
|
|
||||||
|
"""Get an existing erasure code profile if it already exists.
|
||||||
|
Returns json formatted output"""
|
||||||
|
|
||||||
|
|
||||||
|
def get_erasure_profile(service, name):
|
||||||
|
"""
|
||||||
|
:param service: six.string_types. The Ceph user name to run the command under
|
||||||
|
:param name:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
out = check_output(['ceph', '--id', service,
|
||||||
|
'osd', 'erasure-code-profile', 'get',
|
||||||
|
name, '--format=json'])
|
||||||
|
return json.loads(out)
|
||||||
|
except (CalledProcessError, OSError, ValueError):
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def pool_set(service, pool_name, key, value):
|
||||||
|
"""
|
||||||
|
Sets a value for a RADOS pool in ceph.
|
||||||
|
:param service: six.string_types. The Ceph user name to run the command under
|
||||||
|
:param pool_name: six.string_types
|
||||||
|
:param key: six.string_types
|
||||||
|
:param value:
|
||||||
|
:return: None. Can raise CalledProcessError
|
||||||
|
"""
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
|
||||||
|
try:
|
||||||
|
check_call(cmd)
|
||||||
|
except CalledProcessError:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def snapshot_pool(service, pool_name, snapshot_name):
|
||||||
|
"""
|
||||||
|
Snapshots a RADOS pool in ceph.
|
||||||
|
:param service: six.string_types. The Ceph user name to run the command under
|
||||||
|
:param pool_name: six.string_types
|
||||||
|
:param snapshot_name: six.string_types
|
||||||
|
:return: None. Can raise CalledProcessError
|
||||||
|
"""
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
|
||||||
|
try:
|
||||||
|
check_call(cmd)
|
||||||
|
except CalledProcessError:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def remove_pool_snapshot(service, pool_name, snapshot_name):
|
||||||
|
"""
|
||||||
|
Remove a snapshot from a RADOS pool in ceph.
|
||||||
|
:param service: six.string_types. The Ceph user name to run the command under
|
||||||
|
:param pool_name: six.string_types
|
||||||
|
:param snapshot_name: six.string_types
|
||||||
|
:return: None. Can raise CalledProcessError
|
||||||
|
"""
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
|
||||||
|
try:
|
||||||
|
check_call(cmd)
|
||||||
|
except CalledProcessError:
|
||||||
|
raise


# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
    """
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param max_bytes: int or long
    :return: None.  Can raise CalledProcessError
    """
    # Set a byte quota on a RADOS pool in ceph.
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
           'max_bytes', str(max_bytes)]
    try:
        check_call(cmd)
    except CalledProcessError:
        raise


def remove_pool_quota(service, pool_name):
    """
    Remove a byte quota from a RADOS pool in ceph.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :return: None.  Can raise CalledProcessError
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
    try:
        check_call(cmd)
    except CalledProcessError:
        raise
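
A short usage sketch (illustrative only; pool and user names are assumed). Ceph treats a max_bytes quota of 0 as "no quota", which is why remove_pool_quota() passes '0':

    # Cap the 'cinder' pool at 10 GiB, then lift the cap again.
    set_pool_quota('admin', pool_name='cinder', max_bytes=10 * 1024 ** 3)
    remove_pool_quota('admin', pool_name='cinder')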


def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
                           data_chunks=2, coding_chunks=1,
                           locality=None, durability_estimator=None):
    """
    Create a new erasure code profile if one does not already exist for it.  Updates
    the profile if it exists.  Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
    for more details.
    :param service: six.string_types. The Ceph user name to run the command under
    :param profile_name: six.string_types
    :param erasure_plugin_name: six.string_types
    :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
        'room', 'root', 'row']
    :param data_chunks: int
    :param coding_chunks: int
    :param locality: int
    :param durability_estimator: int
    :return: None.  Can raise CalledProcessError
    """
    # Ensure this failure_domain is allowed by Ceph
    validator(failure_domain, six.string_types,
              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])

    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
           'ruleset_failure_domain=' + failure_domain]
    if locality is not None and durability_estimator is not None:
        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")

    # Add plugin specific information
    if locality is not None:
        # For local erasure codes
        cmd.append('l=' + str(locality))
    if durability_estimator is not None:
        # For Shec erasure codes
        cmd.append('c=' + str(durability_estimator))

    if erasure_profile_exists(service, profile_name):
        cmd.append('--force')

    try:
        check_call(cmd)
    except CalledProcessError:
        raise
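
An illustrative sketch of how the profile helpers fit together (not part of this commit; profile and user names are assumed):

    # Define (or update) a 3+2 jerasure profile with rack-level failure
    # isolation, then read it back; get_erasure_profile() returns None on failure.
    create_erasure_profile('admin', 'ec-3-2', failure_domain='rack',
                           data_chunks=3, coding_chunks=2)
    profile = get_erasure_profile('admin', 'ec-3-2')
    if profile:
        log('profile k={k} m={m}'.format(**profile))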


def rename_pool(service, old_name, new_name):
    """
    Rename a Ceph pool from old_name to new_name
    :param service: six.string_types. The Ceph user name to run the command under
    :param old_name: six.string_types
    :param new_name: six.string_types
    :return: None
    """
    validator(value=old_name, valid_type=six.string_types)
    validator(value=new_name, valid_type=six.string_types)

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
    check_call(cmd)


def erasure_profile_exists(service, name):
    """
    Check to see if an Erasure code profile already exists.
    :param service: six.string_types. The Ceph user name to run the command under
    :param name: six.string_types
    :return: bool. True if the profile exists, else False
    """
    validator(value=name, valid_type=six.string_types)
    try:
        check_call(['ceph', '--id', service,
                    'osd', 'erasure-code-profile', 'get',
                    name])
        return True
    except CalledProcessError:
        return False


def get_cache_mode(service, pool_name):
    """
    Find the current caching mode of the pool_name given.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :return: six.string_types (the cache mode) or None
    """
    validator(value=service, valid_type=six.string_types)
    validator(value=pool_name, valid_type=six.string_types)
    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
    try:
        osd_json = json.loads(out)
        for pool in osd_json['pools']:
            if pool['pool_name'] == pool_name:
                return pool['cache_mode']
        return None
    except ValueError:
        raise


def pool_exists(service, name):
    """Check to see if a RADOS pool already exists."""
    try:
        out = check_output(['rados', '--id', service,
                            'lspools']).decode('UTF-8')
    except CalledProcessError:
        return False

    return name in out


def get_osds(service):
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster.
    """
    version = ceph_version()
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None
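
A sketch of the common pattern these two helpers support in charms (illustrative; the placement-group heuristic shown is an assumption, not from this commit):

    # Size placement groups from the OSD count before creating a pool.
    osds = get_osds('admin')
    pg_num = (len(osds) * 100 // 3) if osds else 200  # crude per-replica heuristic
    if not pool_exists('admin', 'scratch'):
        check_call(['ceph', '--id', 'admin', 'osd', 'pool', 'create',
                    'scratch', str(pg_num)])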


def install():
@ -102,30 +491,6 @@ def create_rbd_image(service, pool, image, sizemb):
    check_call(cmd)


def pool_exists(service, name):
    """Check to see if a RADOS pool already exists."""
    try:
        out = check_output(['rados', '--id', service,
                            'lspools']).decode('UTF-8')
    except CalledProcessError:
        return False

    return name in out


def get_osds(service):
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster.
    """
    version = ceph_version()
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None


def update_pool(client, pool, settings):
    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
    for k, v in six.iteritems(settings):
@ -414,6 +779,7 @@ class CephBrokerRq(object):

    The API is versioned and defaults to version 1.
    """

    def __init__(self, api_version=1, request_id=None):
        self.api_version = api_version
        if request_id:
@ -492,7 +492,7 @@ def relation_types():


@cached
def peer_relation_id():
    '''Get a peer relation id if a peer relation has been joined, else None.'''
    '''Get the peers relation id if a peers relation has been joined, else None.'''
    md = metadata()
    section = md.get('peers')
    if section:
@ -517,12 +517,12 @@ def relation_to_interface(relation_name):
def relation_to_role_and_interface(relation_name):
    """
    Given the name of a relation, return the role and the name of the interface
    that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).

    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
    """
    _metadata = metadata()
    for role in ('provides', 'requires', 'peer'):
    for role in ('provides', 'requires', 'peers'):
        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
        if interface:
            return role, interface
@ -534,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name):
    """
    Given a role and interface name, return a list of relation names for the
    current charm that use that interface under that role (where role is one
    of ``provides``, ``requires``, or ``peer``).
    of ``provides``, ``requires``, or ``peers``).

    :returns: A list of relation names.
    """
@ -555,7 +555,7 @@ def interface_to_relations(interface_name):
    :returns: A list of relation names.
    """
    results = []
    for role in ('provides', 'requires', 'peer'):
    for role in ('provides', 'requires', 'peers'):
        results.extend(role_and_interface_to_relations(role, interface_name))
    return results
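
Illustrative effect of the 'peer' → 'peers' fix above (a sketch; the metadata.yaml snippet is assumed). Juju metadata uses a plural ``peers`` section, so the old lookups could never match a peer relation:

    # metadata.yaml (assumed):
    #   peers:
    #     cluster:
    #       interface: my-cluster
    relation_to_role_and_interface('cluster')   # now -> ('peers', 'my-cluster')
    interface_to_relations('my-cluster')        # now -> ['cluster']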

@ -637,7 +637,7 @@ def unit_private_ip():


@cached
def storage_get(attribute="", storage_id=""):
def storage_get(attribute=None, storage_id=None):
    """Get storage attributes"""
    _args = ['storage-get', '--format=json']
    if storage_id:
@ -651,7 +651,7 @@ def storage_get(attribute="", storage_id=""):


@cached
def storage_list(storage_name=""):
def storage_list(storage_name=None):
    """List the storage IDs for the unit"""
    _args = ['storage-list', '--format=json']
    if storage_name:
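
A usage sketch for the storage hook tools (illustrative; the 'data' storage name comes from a hypothetical charm):

    # Resolve the mount point of the first 'data' storage instance.
    storage_ids = storage_list('data')          # e.g. ['data/0']
    if storage_ids:
        location = storage_get('location', storage_ids[0])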

@ -878,6 +878,40 @@ def leader_set(settings=None, **kwargs):
    subprocess.check_call(cmd)


@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
    """Used while a hook is running to let Juju know that a
    payload has been started."""
    cmd = ['payload-register']
    for x in [ptype, klass, pid]:
        cmd.append(x)
    subprocess.check_call(cmd)


@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
    """Used while a hook is running to let Juju know
    that a payload has been manually stopped. The <class> and <id> provided
    must match a payload that has been previously registered with juju using
    payload-register."""
    cmd = ['payload-unregister']
    for x in [klass, pid]:
        cmd.append(x)
    subprocess.check_call(cmd)


@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
    """Used to update the current status of a registered payload.
    The <class> and <id> provided must match a payload that has been previously
    registered with juju using payload-register. The <status> must be one of the
    following: starting, started, stopping, stopped"""
    cmd = ['payload-status-set']
    for x in [klass, pid, status]:
        cmd.append(x)
    subprocess.check_call(cmd)
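
An illustrative hook-side sketch (names assumed; requires a Juju version that ships the payload-* hook tools, otherwise NotImplementedError is raised):

    # Track a docker container as a payload across its lifecycle.
    payload_register('docker', 'web', '0f3e7c')
    payload_status_set('web', '0f3e7c', 'started')
    payload_unregister('web', '0f3e7c')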


@cached
def juju_version():
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
@ -72,7 +72,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
    stopped = service_stop(service_name)
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if os.path.exists(upstart_file):
    if init_is_systemd():
        service('disable', service_name)
    elif os.path.exists(upstart_file):
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        with open(override_path, 'w') as fh:
@ -80,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        # XXX: Support SystemD too
        raise ValueError(
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, upstart_file, sysv_file))
    return stopped

@ -94,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init",
    Reenable starting again at boot. Start the service"""
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if os.path.exists(upstart_file):
    if init_is_systemd():
        service('enable', service_name)
    elif os.path.exists(upstart_file):
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        if os.path.exists(override_path):
@ -102,9 +106,9 @@ def service_resume(service_name, init_dir="/etc/init",
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        # XXX: Support SystemD too
        raise ValueError(
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, upstart_file, sysv_file))

    started = service_running(service_name)
@ -115,23 +119,30 @@ def service_resume(service_name, init_dir="/etc/init",

def service(action, service_name):
    """Control a system service"""
    cmd = ['service', service_name, action]
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
    return subprocess.call(cmd) == 0


def service_running(service):
def service_running(service_name):
    """Determine whether a system service is running"""
    try:
        output = subprocess.check_output(
            ['service', service, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    else:
        if ("start/running" in output or "is running" in output):
            return True
        else:
            return False
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        try:
            output = subprocess.check_output(
                ['service', service_name, 'status'],
                stderr=subprocess.STDOUT).decode('UTF-8')
        except subprocess.CalledProcessError:
            return False
        else:
            if ("start/running" in output or "is running" in output or
                    "up and running" in output):
                return True
            else:
                return False
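
A sketch of how charms consume this (illustrative): the systemd branch funnels everything through the one service() helper, so callers stay init-system agnostic:

    # Restart ntp regardless of whether the host runs systemd, Upstart or SysV.
    if service_running('ntp'):
        service('restart', 'ntp')
    else:
        service_resume('ntp')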


def service_available(service_name):
@ -146,8 +157,29 @@ def service_available(service_name):
        return True


def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system"""
SYSTEMD_SYSTEM = '/run/systemd/system'


def init_is_systemd():
    """Return True if the host system uses systemd, False otherwise."""
    return os.path.isdir(SYSTEMD_SYSTEM)


def adduser(username, password=None, shell='/bin/bash', system_user=False,
            primary_group=None, secondary_groups=None):
    """Add a user to the system.

    Will log but otherwise succeed if the user already exists.

    :param str username: Username to create
    :param str password: Password for user; if ``None``, create a system user
    :param str shell: The default shell for the user
    :param bool system_user: Whether to create a login or system user
    :param str primary_group: Primary group for user; defaults to username
    :param list secondary_groups: Optional list of additional groups

    :returns: The password database entry struct, as returned by `pwd.getpwnam`
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
@ -162,6 +194,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
            '--shell', shell,
            '--password', password,
        ])
        if not primary_group:
            try:
                grp.getgrnam(username)
                primary_group = username  # avoid "group exists" error
            except KeyError:
                pass
        if primary_group:
            cmd.extend(['-g', primary_group])
        if secondary_groups:
            cmd.extend(['-G', ','.join(secondary_groups)])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
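
An illustrative call (not part of this commit) showing the new group handling; '-g' sets the primary group only when it already exists or was requested, and '-G' adds the comma-joined secondaries:

    # Create a system user 'ceph' in primary group 'ceph' plus two extra groups.
    user = adduser('ceph', system_user=True,
                   primary_group='ceph', secondary_groups=['disk', 'kvm'])
    log('created uid {0}'.format(user.pw_uid))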

@ -259,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):


def fstab_remove(mp):
    """Remove the given mountpoint entry from /etc/fstab
    """
    """Remove the given mountpoint entry from /etc/fstab"""
    return Fstab.remove_by_mountpoint(mp)


def fstab_add(dev, mp, fs, options=None):
    """Adds the given device entry to the /etc/fstab file
    """
    """Adds the given device entry to the /etc/fstab file"""
    return Fstab.add(dev, mp, fs, options=options)


@ -322,8 +362,7 @@ def fstab_mount(mountpoint):


def file_hash(path, hash_type='md5'):
    """
    Generate a hash checksum of the contents of 'path' or None if not found.
    """Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
@ -338,10 +377,9 @@ def file_hash(path, hash_type='md5'):


def path_hash(path):
    """
    Generate a hash checksum of all files matching 'path'. Standard wildcards
    like '*' and '?' are supported, see documentation for the 'glob' module for
    more information.
    """Generate a hash checksum of all files matching 'path'. Standard
    wildcards like '*' and '?' are supported, see documentation for the 'glob'
    module for more information.

    :return: dict: A { filename: hash } dictionary for all matched files.
        Empty if none found.
@ -353,8 +391,7 @@ def path_hash(path):


def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.
    """Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
@ -369,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'):


class ChecksumError(ValueError):
    """A class derived from ValueError to indicate the checksum failed."""
    pass
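
A usage sketch for the checksum helpers (illustrative; the path and digest are assumptions):

    # Verify a downloaded payload; check_hash raises ChecksumError on mismatch.
    try:
        check_hash('/var/tmp/openstack.tar.gz',
                   'd41d8cd98f00b204e9800998ecf8427e', hash_type='md5')
    except ChecksumError:
        log('archive corrupt, refetching')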

@ -474,7 +512,7 @@ def get_bond_master(interface):


def list_nics(nic_type=None):
    '''Return a list of nics of given type(s)'''
    """Return a list of nics of given type(s)"""
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
@ -516,12 +554,13 @@ def list_nics(nic_type=None):


def set_nic_mtu(nic, mtu):
    '''Set MTU on a network interface'''
    """Set the Maximum Transmission Unit (MTU) on a network interface."""
    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
    subprocess.check_call(cmd)


def get_nic_mtu(nic):
    """Return the Maximum Transmission Unit (MTU) for a network interface."""
    cmd = ['ip', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
    mtu = ""
@ -533,6 +572,7 @@ def get_nic_mtu(nic):


def get_nic_hwaddr(nic):
    """Return the Media Access Control (MAC) for a network interface."""
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
    hwaddr = ""
@ -543,7 +583,7 @@ def get_nic_hwaddr(nic):


def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package
    """Compare supplied revno with the revno of the installed package

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
@ -552,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    '''
    """
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
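
Illustrative use of cmp_pkgrevno (a sketch; the version string is an assumption):

    # Branch on the installed ceph release: >= 0 means 0.94.0 or newer.
    if cmp_pkgrevno('ceph', '0.94.0') >= 0:
        log('hammer or later detected')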

@ -562,19 +602,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None):


@contextmanager
def chdir(d):
def chdir(directory):
    """Change the current working directory to a different directory for a code
    block and return the previous directory after the block exits. Useful to
    run commands from a specified directory.

    :param str directory: The directory path to change to for this context.
    """
    cur = os.getcwd()
    try:
        yield os.chdir(d)
        yield os.chdir(directory)
    finally:
        os.chdir(cur)


def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """
    Recursively change user and group ownership of files and directories
    """Recursively change user and group ownership of files and directories
    in given path. Doesn't chown path itself by default, only its children.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    :param bool follow_links: Also chown links if True
    :param bool chowntopdir: Also chown path itself if True
    """
@ -598,15 +646,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):


def lchownr(path, owner, group):
    """Recursively change user and group ownership of files and directories
    in a given path, not following symbolic links. See the documentation for
    'os.lchown' for more information.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    """
    chownr(path, owner, group, follow_links=False)


def get_total_ram():
    '''The total amount of system RAM in bytes.
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.
    '''
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f.readlines():
            if line:
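
A sketch of the chdir context manager in use (illustrative path):

    # Run a command from inside a charm-managed checkout, then return
    # to the previous working directory automatically.
    with chdir('/srv/app'):
        subprocess.check_call(['make', 'install'])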

@ -243,13 +243,15 @@ class TemplateCallback(ManagerCallback):
    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str target: The target to write the rendered template to
    :param str target: The target to write the rendered template to (or None)
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    :param partial on_change_action: functools partial to be executed when
                                     rendered file changes
    :param jinja2 loader template_loader: A jinja2 template loader

    :return str: The rendered template
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444,
@ -267,12 +269,14 @@ class TemplateCallback(ManagerCallback):
        if self.on_change_action and os.path.isfile(self.target):
            pre_checksum = host.file_hash(self.target)
        service = manager.get_service(service_name)
        context = {}
        context = {'ctx': {}}
        for ctx in service.get('required_data', []):
            context.update(ctx)
            context['ctx'].update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms,
                          template_loader=self.template_loader)

        result = templating.render(self.source, self.target, context,
                                   self.owner, self.group, self.perms,
                                   template_loader=self.template_loader)
        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log(
@ -281,6 +285,8 @@ class TemplateCallback(ManagerCallback):
            else:
                self.on_change_action()

        return result


# Convenience aliases for templates
render_template = template = TemplateCallback
@ -27,7 +27,8 @@ def render(source, target, context, owner='root', group='root',

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.
    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in the
    template.
@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
@ -67,9 +71,11 @@ def render(source, target, context, owner='root', group='root',
                level=hookenv.ERROR)
            raise e
    content = template.render(context)
    target_dir = os.path.dirname(target)
    if not os.path.exists(target_dir):
        # This is a terrible default directory permission, as the file
        # or its siblings will often contain secrets.
        host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
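
An illustrative render() call exercising the new target=None path (template name and context are assumptions):

    # Render to a string only; nothing is written because target is None.
    body = render('haproxy.cfg.j2', None, {'port': 8080})
    # Or render and install the file in one step, as before:
    render('haproxy.cfg.j2', '/etc/haproxy/haproxy.cfg', {'port': 8080},
           perms=0o440)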

@ -98,6 +98,14 @@ CLOUD_ARCHIVE_POCKETS = {
    'liberty/proposed': 'trusty-proposed/liberty',
    'trusty-liberty/proposed': 'trusty-proposed/liberty',
    'trusty-proposed/liberty': 'trusty-proposed/liberty',
    # Mitaka
    'mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka/updates': 'trusty-updates/mitaka',
    'trusty-updates/mitaka': 'trusty-updates/mitaka',
    'mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
}

# The order of this list is very important. Handlers should be listed in from
@ -411,7 +419,7 @@ def plugins(fetch_handlers=None):
                    importlib.import_module(package),
                    classname)
                plugin_list.append(handler_class())
            except (ImportError, AttributeError):
            except NotImplementedError:
                # Skip missing plugins so that they can be omitted from
                # installation if desired
                log("FetchHandler {} not found, skipping plugin".format(

@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        install_opener(opener)
        response = urlopen(source)
        try:
            with open(dest, 'w') as dest_file:
            with open(dest, 'wb') as dest_file:
                dest_file.write(response.read())
        except Exception as e:
            if os.path.isfile(dest):
@ -15,60 +15,50 @@
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import os
from subprocess import check_call
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource,
    filter_installed_packages,
    apt_install,
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('bzrlib does not support Python3')

try:
    from bzrlib.branch import Branch
    from bzrlib import bzrdir, workingtree, errors
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-bzrlib")
    from bzrlib.branch import Branch
    from bzrlib import bzrdir, workingtree, errors
if filter_installed_packages(['bzr']) != []:
    apt_install(['bzr'])
    if filter_installed_packages(['bzr']) != []:
        raise NotImplementedError('Unable to install bzr')


class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
            return False
        elif not url_parts.scheme:
            return os.path.exists(os.path.join(source, '.bzr'))
        else:
            return True

    def branch(self, source, dest):
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        try:
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            local_branch = Branch.open(dest)
        try:
            remote_branch = Branch.open(source)
            remote_branch.push(local_branch)
            tree = workingtree.WorkingTree.open(dest)
            tree.update()
        except Exception as e:
            raise e
        if os.path.exists(dest):
            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
        else:
            check_call(['bzr', 'branch', source, dest])

    def install(self, source):
    def install(self, source, dest=None):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)

        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
@ -15,24 +15,18 @@
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource,
    filter_installed_packages,
    apt_install,
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('GitPython does not support Python 3')

try:
    from git import Repo
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-git")
    from git import Repo

from git.exc import GitCommandError  # noqa E402
if filter_installed_packages(['git']) != []:
    apt_install(['git'])
    if filter_installed_packages(['git']) != []:
        raise NotImplementedError('Unable to install git')


class GitUrlFetchHandler(BaseFetchHandler):
@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git'):
        if url_parts.scheme not in ('http', 'https', 'git', ''):
            return False
        elif not url_parts.scheme:
            return os.path.exists(os.path.join(source, '.git'))
        else:
            return True

    def clone(self, source, dest, branch, depth=None):
    def clone(self, source, dest, branch="master", depth=None):
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        if depth:
            Repo.clone_from(source, dest, branch=branch, depth=depth)
        else:
            Repo.clone_from(source, dest, branch=branch)
        if os.path.exists(dest):
            cmd = ['git', '-C', dest, 'pull', source, branch]
        else:
            cmd = ['git', 'clone', source, dest, '--branch', branch]
            if depth:
                cmd.extend(['--depth', str(depth)])
        check_call(cmd)

    def install(self, source, branch="master", dest=None, depth=None):
        url_parts = self.parse_url(source)
@ -62,11 +61,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch, depth)
        except GitCommandError as e:
        except CalledProcessError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
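
An illustrative fetch (a sketch; the URL, CHARM_DIR layout and the return value of install() are assumptions). With the new plain-git backend a local working tree also qualifies via the empty-scheme branch:

    handler = GitUrlFetchHandler()
    if handler.can_handle('https://github.com/juju/charm-helpers'):
        # Shallow-clones into $CHARM_DIR/fetched/charm-helpers on first use,
        # then 'git pull's on subsequent calls.
        path = handler.install('https://github.com/juju/charm-helpers', depth=1)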

@ -121,11 +121,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']
                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
                     'cinder-backup']

        if self.openstack:
            for svc in services:
@ -225,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty) = range(12)
         self.wily_liberty, self.trusty_mitaka,
         self.xenial_mitaka) = range(14)

        releases = {
            ('precise', None): self.precise_essex,
@ -237,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty}
            ('wily', None): self.wily_liberty,
            ('xenial', None): self.xenial_mitaka}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
@ -256,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
            ('xenial', 'mitaka'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]