diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml
index f8185cbb..9f626c62 100644
--- a/charm-helpers-hooks.yaml
+++ b/charm-helpers-hooks.yaml
@@ -15,3 +15,4 @@ include:
- contrib.openstack.ip
- contrib.storage.linux
- contrib.python.packages
+ - contrib.charmsupport
diff --git a/config.yaml b/config.yaml
index 077341fc..114998cb 100644
--- a/config.yaml
+++ b/config.yaml
@@ -175,3 +175,19 @@ options:
If your prefix has a dash in it that will be used to split the prefix
into region and zone. Please read the documentation on federated rados
gateways for more information on region and zone.
+ nagios_context:
+ default: "juju"
+ type: string
+ description: |
+ Used by the nrpe-external-master subordinate charm.
+ A string that will be prepended to the instance name to set the host name
+ in nagios. So, for instance, the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them,
+ this allows you to differentiate between them.
+ nagios_servicegroups:
+ default: ""
+ type: string
+ description: |
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup.
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 00000000..d1400a02
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 00000000..2f246429
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,398 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Matthew Wedgwood
+
+import subprocess
+import pwd
+import grp
+import os
+import glob
+import shutil
+import re
+import shlex
+import yaml
+
+from charmhelpers.core.hookenv import (
+ config,
+ local_unit,
+ log,
+ relation_ids,
+ relation_set,
+ relations_of_type,
+)
+
+from charmhelpers.core.host import service
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+# provides:
+# (...)
+# nrpe-external-master:
+# interface: nrpe-external-master
+# scope: container
+#
+# and/or
+#
+# provides:
+# (...)
+# local-monitors:
+# interface: local-monitors
+# scope: container
+
+#
+# 2. Add the following to config.yaml
+#
+# nagios_context:
+# default: "juju"
+# type: string
+# description: |
+# Used by the nrpe subordinate charms.
+# A string that will be prepended to the instance name to set the host name
+# in nagios. So, for instance, the hostname would be something like:
+# juju-myservice-0
+# If you're running multiple environments with the same services in them,
+# this allows you to differentiate between them.
+# nagios_servicegroups:
+# default: ""
+# type: string
+# description: |
+# A comma-separated list of nagios servicegroups.
+# If left empty, the nagios_context will be used as the servicegroup.
+#
+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
+#
+# 4. Update your hooks.py with something like this:
+#
+# from charmsupport.nrpe import NRPE
+# (...)
+# def update_nrpe_config():
+# nrpe_compat = NRPE()
+# nrpe_compat.add_check(
+# shortname = "myservice",
+# description = "Check MyService",
+# check_cmd = "check_http -w 2 -c 10 http://localhost"
+# )
+# nrpe_compat.add_check(
+# "myservice_other",
+# "Check for widget failures",
+# check_cmd = "/srv/myapp/scripts/widget_check"
+# )
+# nrpe_compat.write()
+#
+# def config_changed():
+# (...)
+# update_nrpe_config()
+#
+# def nrpe_external_master_relation_changed():
+# update_nrpe_config()
+#
+# def local_monitors_relation_changed():
+# update_nrpe_config()
+#
+# 5. ln -s hooks.py nrpe-external-master-relation-changed
+# ln -s hooks.py local-monitors-relation-changed
+
+
+class CheckException(Exception):
+ pass
+
+
+class Check(object):
+ shortname_re = '[A-Za-z0-9-_]+$'
+ service_template = ("""
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {{
+ use active-service
+ host_name {nagios_hostname}
+ service_description {nagios_hostname}[{shortname}] """
+ """{description}
+ check_command check_nrpe!{command}
+ servicegroups {nagios_servicegroup}
+}}
+""")
+
+ def __init__(self, shortname, description, check_cmd):
+ super(Check, self).__init__()
+ # XXX: could be better to calculate this from the service name
+ if not re.match(self.shortname_re, shortname):
+ raise CheckException("shortname must match {}".format(
+ Check.shortname_re))
+ self.shortname = shortname
+ self.command = "check_{}".format(shortname)
+ # Note: a set of invalid characters is defined by the
+ # Nagios server config
+ # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
+ self.description = description
+ self.check_cmd = self._locate_cmd(check_cmd)
+
+ def _get_check_filename(self):
+ return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+ def _get_service_filename(self, hostname):
+ return os.path.join(NRPE.nagios_exportdir,
+ 'service__{}_{}.cfg'.format(hostname, self.command))
+
+ def _locate_cmd(self, check_cmd):
+ search_path = (
+ '/usr/lib/nagios/plugins',
+ '/usr/local/lib/nagios/plugins',
+ )
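+ # Resolve the check executable against the known Nagios plugin directories,
+ # preserving any arguments; an empty string is returned if no plugin is found.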
+ parts = shlex.split(check_cmd)
+ for path in search_path:
+ if os.path.exists(os.path.join(path, parts[0])):
+ command = os.path.join(path, parts[0])
+ if len(parts) > 1:
+ command += " " + " ".join(parts[1:])
+ return command
+ log('Check command not found: {}'.format(parts[0]))
+ return ''
+
+ def _remove_service_files(self):
+ if not os.path.exists(NRPE.nagios_exportdir):
+ return
+ for f in os.listdir(NRPE.nagios_exportdir):
+ if f.endswith('_{}.cfg'.format(self.command)):
+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+ def remove(self, hostname):
+ nrpe_check_file = self._get_check_filename()
+ if os.path.exists(nrpe_check_file):
+ os.remove(nrpe_check_file)
+ self._remove_service_files()
+
+ def write(self, nagios_context, hostname, nagios_servicegroups):
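+ # Write the NRPE command definition for this check; the nagios service
+ # export is only written when the export directory exists (i.e. the
+ # monitoring subordinate has set it up).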
+ nrpe_check_file = self._get_check_filename()
+ with open(nrpe_check_file, 'w') as nrpe_check_config:
+ nrpe_check_config.write("# check {}\n".format(self.shortname))
+ nrpe_check_config.write("command[{}]={}\n".format(
+ self.command, self.check_cmd))
+
+ if not os.path.exists(NRPE.nagios_exportdir):
+ log('Not writing service config as {} is not accessible'.format(
+ NRPE.nagios_exportdir))
+ else:
+ self.write_service_config(nagios_context, hostname,
+ nagios_servicegroups)
+
+ def write_service_config(self, nagios_context, hostname,
+ nagios_servicegroups):
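+ # Drop any stale service definitions for this check before writing the new one.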
+ self._remove_service_files()
+
+ templ_vars = {
+ 'nagios_hostname': hostname,
+ 'nagios_servicegroup': nagios_servicegroups,
+ 'description': self.description,
+ 'shortname': self.shortname,
+ 'command': self.command,
+ }
+ nrpe_service_text = Check.service_template.format(**templ_vars)
+ nrpe_service_file = self._get_service_filename(hostname)
+ with open(nrpe_service_file, 'w') as nrpe_service_config:
+ nrpe_service_config.write(str(nrpe_service_text))
+
+ def run(self):
+ subprocess.call(self.check_cmd)
+
+
+class NRPE(object):
+ nagios_logdir = '/var/log/nagios'
+ nagios_exportdir = '/var/lib/nagios/export'
+ nrpe_confdir = '/etc/nagios/nrpe.d'
+
+ def __init__(self, hostname=None):
+ super(NRPE, self).__init__()
+ self.config = config()
+ self.nagios_context = self.config['nagios_context']
+ if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
+ self.nagios_servicegroups = self.config['nagios_servicegroups']
+ else:
+ self.nagios_servicegroups = self.nagios_context
+ self.unit_name = local_unit().replace('/', '-')
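+ # Hostname preference: explicit argument, then the hostname advertised by
+ # the nrpe relation, then a "<context>-<unit>" fallback.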
+ if hostname:
+ self.hostname = hostname
+ else:
+ nagios_hostname = get_nagios_hostname()
+ if nagios_hostname:
+ self.hostname = nagios_hostname
+ else:
+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
+ self.checks = []
+
+ def add_check(self, *args, **kwargs):
+ self.checks.append(Check(*args, **kwargs))
+
+ def remove_check(self, *args, **kwargs):
+ if kwargs.get('shortname') is None:
+ raise ValueError('shortname of check must be specified')
+
+ # Use sensible defaults if they're not specified - these are not
+ # actually used during removal, but they're required for constructing
+ # the Check object; check_disk is chosen because it's part of the
+ # nagios-plugins-basic package.
+ if kwargs.get('check_cmd') is None:
+ kwargs['check_cmd'] = 'check_disk'
+ if kwargs.get('description') is None:
+ kwargs['description'] = ''
+
+ check = Check(*args, **kwargs)
+ check.remove(self.hostname)
+
+ def write(self):
+ try:
+ nagios_uid = pwd.getpwnam('nagios').pw_uid
+ nagios_gid = grp.getgrnam('nagios').gr_gid
+ except KeyError:
+ log("Nagios user not set up, nrpe checks not updated")
+ return
+
+ if not os.path.exists(NRPE.nagios_logdir):
+ os.mkdir(NRPE.nagios_logdir)
+ os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
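+ # Write each check locally and collect it so it can be advertised to the
+ # monitoring subordinate as YAML over the relation.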
+ nrpe_monitors = {}
+ monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+ for nrpecheck in self.checks:
+ nrpecheck.write(self.nagios_context, self.hostname,
+ self.nagios_servicegroups)
+ nrpe_monitors[nrpecheck.shortname] = {
+ "command": nrpecheck.command,
+ }
+
+ service('restart', 'nagios-nrpe-server')
+
+ monitor_ids = relation_ids("local-monitors") + \
+ relation_ids("nrpe-external-master")
+ for rid in monitor_ids:
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_host_context
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_host_context' in rel:
+ return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_hostname
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_hostname' in rel:
+ return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+ """
+ Return the nagios unit name prepended with host_context if needed
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ host_context = get_nagios_hostcontext(relation_name)
+ if host_context:
+ unit = "%s:%s" % (host_context, local_unit())
+ else:
+ unit = local_unit()
+ return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name):
+ """
+ Add a check for each service in the list
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param list services: List of services to check
+ :param str unit_name: Unit name to use in check description
+ """
+ for svc in services:
+ upstart_init = '/etc/init/%s.conf' % svc
+ sysv_init = '/etc/init.d/%s' % svc
+ if os.path.exists(upstart_init):
+ # Don't add a check for these services from neutron-gateway
+ if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_upstart_job %s' % svc
+ )
+ elif os.path.exists(sysv_init):
+ cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+ cron_file = ('*/5 * * * * root '
+ '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+ '-s /etc/init.d/%s status > '
+ '/var/lib/nagios/service-check-%s.txt\n' % (svc,
+ svc)
+ )
+ with open(cronpath, 'w') as f:
+ f.write(cron_file)
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_status_file.py -f '
+ '/var/lib/nagios/service-check-%s.txt' % svc,
+ )
+
+
+def copy_nrpe_checks():
+ """
+ Copy the nrpe checks into place
+
+ """
+ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+ nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
+ 'charmhelpers', 'contrib', 'openstack',
+ 'files')
+
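+ # Copy any bundled check_* plugin scripts shipped under
+ # charmhelpers/contrib/openstack/files into the local plugin directory.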
+ if not os.path.exists(NAGIOS_PLUGINS):
+ os.makedirs(NAGIOS_PLUGINS)
+ for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+ if os.path.isfile(fname):
+ shutil.copy2(fname,
+ os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+ """
+ Add checks for haproxy
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param str unit_name: Unit name to use in check description
+ """
+ nrpe.add_check(
+ shortname='haproxy_servers',
+ description='Check HAProxy {%s}' % unit_name,
+ check_cmd='check_haproxy.sh')
+ nrpe.add_check(
+ shortname='haproxy_queue',
+ description='Check HAProxy queue depth {%s}' % unit_name,
+ check_cmd='check_haproxy_queue_depth.sh')
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 00000000..320961b9
--- /dev/null
+++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,175 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+ volume-ephemeral:
+ type: boolean
+ default: true
+ description: >
+ If false, a volume is mounted as specified in "volume-map"
+ If true, ephemeral storage will be used, meaning that log data
+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
+ volume-map:
+ type: string
+ default: {}
+ description: >
+ YAML map of units to device names, e.g.:
+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+ Service units will raise a configure-error if volume-ephemeral
+ is 'false' and no volume-map value is set. Use 'juju set' to set a
+ value and 'juju resolved' to complete configuration.
+
+Usage::
+
+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
+ from charmsupport.hookenv import log, ERROR
+ def pre_mount_hook():
+ stop_service('myservice')
+ def post_mount_hook():
+ start_service('myservice')
+
+ if __name__ == '__main__':
+ try:
+ configure_volume(before_change=pre_mount_hook,
+ after_change=post_mount_hook)
+ except VolumeConfigurationError:
+ log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+ '''Volume configuration data is missing or invalid'''
+ pass
+
+
+def get_config():
+ '''Gather and sanity-check volume configuration data'''
+ volume_config = {}
+ config = hookenv.config()
+
+ errors = False
+
+ if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+ volume_config['ephemeral'] = True
+ else:
+ volume_config['ephemeral'] = False
+
+ try:
+ volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+ except yaml.YAMLError as e:
+ hookenv.log("Error parsing YAML volume-map: {}".format(e),
+ hookenv.ERROR)
+ errors = True
+ volume_map = {}
+ if volume_map is None:
+ # probably an empty string
+ volume_map = {}
+ elif not isinstance(volume_map, dict):
+ hookenv.log("Volume-map should be a dictionary, not {}".format(
+ type(volume_map)))
+ errors = True
+
+ volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+ if volume_config['device'] and volume_config['ephemeral']:
+ # asked for ephemeral storage but also defined a volume ID
+ hookenv.log('A volume is defined for this unit, but ephemeral '
+ 'storage was requested', hookenv.ERROR)
+ errors = True
+ elif not volume_config['device'] and not volume_config['ephemeral']:
+ # asked for permanent storage but did not define volume ID
+ hookenv.log('Persistent storage was requested, but there is no '
+ 'volume defined for this unit.', hookenv.ERROR)
+ errors = True
+
+ unit_mount_name = hookenv.local_unit().replace('/', '-')
+ volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+ if errors:
+ return None
+ return volume_config
+
+
+def mount_volume(config):
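+ # Create the mountpoint if needed, replace any existing mount, then mount
+ # the configured device with a persistent mount.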
+ if os.path.exists(config['mountpoint']):
+ if not os.path.isdir(config['mountpoint']):
+ hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+ raise VolumeConfigurationError()
+ else:
+ host.mkdir(config['mountpoint'])
+ if os.path.ismount(config['mountpoint']):
+ unmount_volume(config)
+ if not host.mount(config['device'], config['mountpoint'], persist=True):
+ raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+ if os.path.ismount(config['mountpoint']):
+ if not host.umount(config['mountpoint'], persist=True):
+ raise VolumeConfigurationError()
+
+
+def managed_mounts():
+ '''List of all mounted managed volumes'''
+ return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+ '''Set up storage (or don't) according to the charm's volume configuration.
+ Returns the mount point or "ephemeral". before_change and after_change
+ are optional functions to be called if the volume configuration changes.
+ '''
+
+ config = get_config()
+ if not config:
+ hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
+ raise VolumeConfigurationError()
+
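+ # Ephemeral storage: ensure nothing is mounted at the managed mountpoint.
+ # Persistent storage: mount the configured device, remounting if the
+ # device has changed.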
+ if config['ephemeral']:
+ if os.path.ismount(config['mountpoint']):
+ before_change()
+ unmount_volume(config)
+ after_change()
+ return 'ephemeral'
+ else:
+ # persistent storage
+ if os.path.ismount(config['mountpoint']):
+ mounts = dict(managed_mounts())
+ if mounts.get(config['mountpoint']) != config['device']:
+ before_change()
+ unmount_volume(config)
+ mount_volume(config)
+ after_change()
+ else:
+ before_change()
+ mount_volume(config)
+ after_change()
+ return config['mountpoint']
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 2995124d..ef3bdccf 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -31,7 +31,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
-import novaclient.v1_1.client as nova_client
+import novaclient.client as nova_client
import pika
import swiftclient
@@ -42,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
+NOVA_CLIENT_VERSION = "2"
+
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -157,12 +159,12 @@ class OpenStackAmuletUtils(AmuletUtils):
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'id': act.id}
- if api_version == 2:
- a['tenantId'] = act.tenantId
- else:
+ if api_version == 3:
a['default_project_id'] = getattr(act,
'default_project_id',
'none')
+ else:
+ a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@@ -249,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 68eb27e1..3fb67b10 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -137,7 +137,7 @@ SWIFT_CODENAMES = OrderedDict([
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
- ['2.5.0']),
+ ['2.5.0', '2.6.0']),
])
# >= Liberty version->codename mapping
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index f4582545..1b4b1de7 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -173,7 +173,7 @@ class Pool(object):
elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
# Flush the cache and wait for it to return
- check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+ check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
diff --git a/hooks/hooks.py b/hooks/hooks.py
index 2a0d558c..03aecfcb 100755
--- a/hooks/hooks.py
+++ b/hooks/hooks.py
@@ -68,7 +68,9 @@ from utils import (
REQUIRED_INTERFACES,
check_optional_relations,
setup_ipv6,
+ services,
)
+from charmhelpers.contrib.charmsupport import nrpe
hooks = Hooks()
CONFIGS = register_configs()
@@ -265,6 +267,7 @@ def config_changed():
# Ensure started but do a soft reload
subprocess.call(['service', 'apache2', 'start'])
subprocess.call(['service', 'apache2', 'reload'])
+ update_nrpe_config()
@hooks.hook('mon-relation-departed',
@@ -432,6 +435,20 @@ def ha_relation_changed():
identity_joined(relid=r_id)
+@hooks.hook('nrpe-external-master-relation-joined',
+ 'nrpe-external-master-relation-changed')
+def update_nrpe_config():
+ # python-dbus is used by check_upstart_job
+ apt_install('python-dbus')
+ hostname = nrpe.get_nagios_hostname()
+ current_unit = nrpe.get_nagios_unit_name()
+ nrpe_setup = nrpe.NRPE(hostname=hostname)
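+ # Copy bundled plugin scripts into place, then register per-service and
+ # haproxy checks before writing the NRPE config.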
+ nrpe.copy_nrpe_checks()
+ nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
+ nrpe.add_haproxy_checks(nrpe_setup, current_unit)
+ nrpe_setup.write()
+
+
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
diff --git a/hooks/nrpe-external-master-relation-changed b/hooks/nrpe-external-master-relation-changed
new file mode 120000
index 00000000..9416ca6a
--- /dev/null
+++ b/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1 @@
+hooks.py
\ No newline at end of file
diff --git a/hooks/nrpe-external-master-relation-joined b/hooks/nrpe-external-master-relation-joined
new file mode 120000
index 00000000..9416ca6a
--- /dev/null
+++ b/hooks/nrpe-external-master-relation-joined
@@ -0,0 +1 @@
+hooks.py
\ No newline at end of file
diff --git a/hooks/utils.py b/hooks/utils.py
index 08d53260..0e7f4c71 100644
--- a/hooks/utils.py
+++ b/hooks/utils.py
@@ -124,6 +124,14 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR):
return template.render(context)
+def services():
+ ''' Returns a list of services associated with this charm '''
+ _services = []
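+ # Collect the services declared in the resource map and de-duplicate them.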
+ for v in BASE_RESOURCE_MAP.values():
+ _services.extend(v.get('services', []))
+ return list(set(_services))
+
+
def enable_pocket(pocket):
apt_sources = "/etc/apt/sources.list"
with open(apt_sources, "r") as sources:
diff --git a/metadata.yaml b/metadata.yaml
index a442d5d8..6399b759 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -21,6 +21,9 @@ requires:
interface: hacluster
scope: container
provides:
+ nrpe-external-master:
+ interface: nrpe-external-master
+ scope: container
gateway:
interface: http
peers:
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 2995124d..ef3bdccf 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -31,7 +31,7 @@ from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
-import novaclient.v1_1.client as nova_client
+import novaclient.client as nova_client
import pika
import swiftclient
@@ -42,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG
ERROR = logging.ERROR
+NOVA_CLIENT_VERSION = "2"
+
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@@ -157,12 +159,12 @@ class OpenStackAmuletUtils(AmuletUtils):
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'id': act.id}
- if api_version == 2:
- a['tenantId'] = act.tenantId
- else:
+ if api_version == 3:
a['default_project_id'] = getattr(act,
'default_project_id',
'none')
+ else:
+ a['tenantId'] = act.tenantId
found = True
ret = self._validate_dict_data(e, a)
if ret:
@@ -249,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
diff --git a/unit_tests/test_hooks.py b/unit_tests/test_hooks.py
index 44fd7a87..737ee0d3 100644
--- a/unit_tests/test_hooks.py
+++ b/unit_tests/test_hooks.py
@@ -112,12 +112,14 @@ class CephRadosGWTests(CharmTestCase):
self.enable_pocket.assert_called_with('multiverse')
self.os.makedirs.called_with('/var/lib/ceph/nss')
+ @patch.object(ceph_hooks, 'update_nrpe_config')
@patch.object(ceph_hooks, 'mkdir', lambda *args: None)
- def test_config_changed(self):
+ def test_config_changed(self, update_nrpe_config):
_install_packages = self.patch('install_packages')
ceph_hooks.config_changed()
self.assertTrue(_install_packages.called)
self.CONFIGS.write_all.assert_called_with()
+ update_nrpe_config.assert_called_with()
@patch.object(ceph_hooks, 'is_request_complete',
lambda *args, **kwargs: True)