Sync charm-helpers to switch to using in memory apt cache
parent 812db701ea
commit 07537f85cb
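The heart of this sync is the new in-memory apt cache helper. As a standalone sketch (the body mirrors the apt_cache() function added to charmhelpers/fetch in the diff below, using python-apt's apt_pkg bindings):

import apt_pkg

def apt_cache(in_memory=True):
    """Build and return an apt package cache."""
    apt_pkg.init()
    if in_memory:
        # An empty path makes apt keep its binary caches in memory,
        # avoiding races with other processes writing the on-disk cache.
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()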
@@ -1,2 +1,3 @@
 .project
 .pydevproject
+bin
Makefile (10 lines changed)
@@ -1,11 +1,17 @@
 #!/usr/bin/make
+PYTHON := /usr/bin/env python
 
 lint:
 	@flake8 --exclude hooks/charmhelpers hooks
 	@charm proof
 
-sync:
-	@charm-helper-sync -c charm-helpers-sync.yaml
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+        > bin/charm_helpers_sync.py
+
+sync: bin/charm_helpers_sync.py
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
 
 publish: lint
 	bzr push lp:charms/ceph-radosgw
@@ -46,5 +46,8 @@ def is_device_mounted(device):
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
     out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))
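The hunk above makes is_device_mounted() partition-aware. A minimal sketch of just the matching logic (the helper name is illustrative, not part of the diff): a device name ending in digits is treated as a partition and matched literally in `mount` output, while a bare disk is matched with a partition-number suffix.

import re

def _mount_pattern(device):
    # '/dev/sdb1' ends in digits -> partition, match as-is;
    # '/dev/sdb' -> whole disk, match any of its partitions.
    is_partition = bool(re.search(r".*[0-9]+\b", device))
    return device + (r"\b" if is_partition else r"[0-9]+\b")

assert _mount_pattern('/dev/sdb1') == r'/dev/sdb1\b'
assert _mount_pattern('/dev/sdb') == r'/dev/sdb[0-9]+\b'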
@@ -156,12 +156,15 @@ def hook_name():
 
 
 class Config(dict):
-    """A Juju charm config dictionary that can write itself to
-    disk (as json) and track which values have changed since
-    the previous hook invocation.
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
 
-    Do not instantiate this object directly - instead call
-    ``hookenv.config()``
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
 
     Example usage::
 
@@ -170,8 +173,8 @@ class Config(dict):
         >>> config = hookenv.config()
         >>> config['foo']
         'bar'
+        >>> # store a new key/value for later use
         >>> config['mykey'] = 'myval'
-        >>> config.save()
 
 
         >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,34 @@ class Config(dict):
         >>> # keys/values that we add are preserved across hooks
         >>> config['mykey']
         'myval'
-        >>> # don't forget to save at the end of hook!
-        >>> config.save()
 
     """
     CONFIG_FILE_NAME = '.juju-persistent-config'
 
     def __init__(self, *args, **kw):
         super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
             self.load_previous()
 
+    def __getitem__(self, key):
+        """For regular dict lookups, check the current juju config first,
+        then the previous (saved) copy. This ensures that user-saved values
+        will be returned by a dict lookup.
+
+        """
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return (self._prev_dict or {})[key]
+
     def load_previous(self, path=None):
-        """Load previous copy of config from disk so that current values
-        can be compared to previous values.
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
 
         :param path:
 
@@ -218,8 +233,8 @@ class Config(dict):
             self._prev_dict = json.load(f)
 
     def changed(self, key):
-        """Return true if the value for this key has changed since
-        the last save.
+        """Return True if the current value for this key is different from
+        the previous value.
 
         """
         if self._prev_dict is None:
@@ -228,7 +243,7 @@ class Config(dict):
 
     def previous(self, key):
         """Return previous value for this key, or None if there
-        is no "previous" value.
+        is no previous value.
 
         """
         if self._prev_dict:
@@ -238,7 +253,13 @@ class Config(dict):
     def save(self):
         """Save this config to disk.
 
-        Preserves items in _prev_dict that do not exist in self.
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:'@hook <Hooks.hook>' decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
 
         """
         if self._prev_dict:
@@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None):
         raise
 
 
-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
@@ -477,6 +499,9 @@ class Hooks(object):
             hook_name = os.path.basename(args[0])
             if hook_name in self._hooks:
                 self._hooks[hook_name]()
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
             else:
                 raise UnregisteredHookError(hook_name)
 
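Taken together, the hookenv changes mean hook code no longer needs an explicit config().save(). A hedged sketch of how a charm hook would rely on this (the hook body is hypothetical):

from charmhelpers.core import hookenv

hooks = hookenv.Hooks()

@hooks.hook('config-changed')
def config_changed():
    cfg = hookenv.config()
    # Values stored on the Config dict persist to the next hook; with
    # cfg.implicit_save left True (the new default set in __init__),
    # Hooks.execute() calls cfg.save() after the hook returns successfully.
    cfg['mykey'] = 'myval'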
@@ -12,6 +12,8 @@ import random
 import string
 import subprocess
 import hashlib
+import shutil
+from contextlib import contextmanager
 
 from collections import OrderedDict
 
@@ -52,7 +54,7 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -62,6 +64,16 @@ def service_running(service):
         return False
 
 
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        return False
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -320,12 +332,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
     '''
     import apt_pkg
+    from charmhelpers.fetch import apt_cache
     if not pkgcache:
-        apt_pkg.init()
-        # Force Apt to build its cache in memory. That way we avoid race
-        # conditions with other applications building the cache in the same
-        # place.
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        pkgcache = apt_pkg.Cache()
+        pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)
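With cmp_pkgrevno() now delegating to charmhelpers.fetch.apt_cache(), version checks no longer touch the on-disk pkgcache. A usage sketch (the package name and version are placeholders):

from charmhelpers.core.host import cmp_pkgrevno

# Returns >0, 0 or <0, like cmp(); the apt cache is built in memory for
# each call unless an existing pkgcache is passed in.
if cmp_pkgrevno('ceph', '0.61') >= 0:
    print('installed ceph is 0.61 or newer')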
hooks/charmhelpers/core/services/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
+from .base import *
+from .helpers import *
hooks/charmhelpers/core/services/base.py (new file, 313 lines)
@@ -0,0 +1,313 @@
+import os
+import re
+import json
+from collections import Iterable
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the list
+        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = {}
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hook_name = hookenv.hook_name()
+        if hook_name == 'stop':
+            self.stop_services()
+        else:
+            self.provide_data()
+            self.reconfigure_services()
+        cfg = hookenv.config()
+        if cfg.implicit_save:
+            cfg.save()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+        """
+        hook_name = hookenv.hook_name()
+        for service in self.services.values():
+            for provider in service.get('provided_data', []):
+                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
+                    data = provider.provide_data()
+                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
+                    if _ready:
+                        hookenv.relation_set(None, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+        """
+        service = self.get_service(service_name)
+        reqs = service.get('required_data', [])
+        return all(bool(req) for req in reqs)
+
+    def _load_ready_file(self):
+        if self._ready is not None:
+            return
+        if os.path.exists(self._ready_file):
+            with open(self._ready_file) as fp:
+                self._ready = set(json.load(fp))
+        else:
+            self._ready = set()
+
+    def _save_ready_file(self):
+        if self._ready is None:
+            return
+        with open(self._ready_file, 'w') as fp:
+            json.dump(list(self._ready), fp)
+
+    def save_ready(self, service_name):
+        """
+        Save an indicator that the given service is now data_ready.
+        """
+        self._load_ready_file()
+        self._ready.add(service_name)
+        self._save_ready_file()
+
+    def save_lost(self, service_name):
+        """
+        Save an indicator that the given service is no longer data_ready.
+        """
+        self._load_ready_file()
+        self._ready.discard(service_name)
+        self._save_ready_file()
+
+    def was_ready(self, service_name):
+        """
+        Determine if the given service was previously data_ready.
+        """
+        self._load_ready_file()
+        return service_name in self._ready
+
+
+class ManagerCallback(object):
+    """
+    Special case of a callback that takes the `ServiceManager` instance
+    in addition to the service name.
+
+    Subclasses should implement `__call__` which should accept three parameters:
+
+        * `manager`       The `ServiceManager` instance
+        * `service_name`  The name of the service it's being triggered for
+        * `event_name`    The name of the event that this callback is handling
+    """
+    def __call__(self, manager, service_name, event_name):
+        raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+    """
+    Callback class that will open or close ports, for use as either
+    a start or stop action.
+    """
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        new_ports = service.get('ports', [])
+        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+        if os.path.exists(port_file):
+            with open(port_file) as fp:
+                old_ports = fp.read().split(',')
+            for old_port in old_ports:
+                if bool(old_port):
+                    old_port = int(old_port)
+                    if old_port not in new_ports:
+                        hookenv.close_port(old_port)
+        with open(port_file, 'w') as fp:
+            fp.write(','.join(str(port) for port in new_ports))
+        for port in new_ports:
+            if event_name == 'start':
+                hookenv.open_port(port)
+            elif event_name == 'stop':
+                hookenv.close_port(port)
+
+
+def service_stop(service_name):
+    """
+    Wrapper around host.service_stop to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_running(service_name):
+        host.service_stop(service_name)
+
+
+def service_restart(service_name):
+    """
+    Wrapper around host.service_restart to prevent spurious "unknown service"
+    messages in the logs.
+    """
+    if host.service_available(service_name):
+        if host.service_running(service_name):
+            host.service_restart(service_name)
+        else:
+            host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
hooks/charmhelpers/core/services/helpers.py (new file, 125 lines)
@@ -0,0 +1,125 @@
+from charmhelpers.core import hookenv
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+           'render_template', 'template']
+
+
+class RelationContext(dict):
+    """
+    Base class for a context generator that gets relation data from juju.
+
+    Subclasses must provide the attributes `name`, which is the name of the
+    interface of interest, `interface`, which is the type of the interface of
+    interest, and `required_keys`, which is the set of keys required for the
+    relation to be considered complete. The data for all interfaces matching
+    the `name` attribute that are complete will used to populate the dictionary
+    values (see `get_data`, below).
+
+    The generated context will be namespaced under the interface type, to prevent
+    potential naming conflicts.
+    """
+    name = None
+    interface = None
+    required_keys = []
+
+    def __init__(self, *args, **kwargs):
+        super(RelationContext, self).__init__(*args, **kwargs)
+        self.get_data()
+
+    def __bool__(self):
+        """
+        Returns True if all of the required_keys are available.
+        """
+        return self.is_ready()
+
+    __nonzero__ = __bool__
+
+    def __repr__(self):
+        return super(RelationContext, self).__repr__()
+
+    def is_ready(self):
+        """
+        Returns True if all of the `required_keys` are available from any units.
+        """
+        ready = len(self.get(self.name, [])) > 0
+        if not ready:
+            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+        return ready
+
+    def _is_ready(self, unit_data):
+        """
+        Helper method that tests a set of relation data and returns True if
+        all of the `required_keys` are present.
+        """
+        return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+    def get_data(self):
+        """
+        Retrieve the relation data for each unit involved in a relation and,
+        if complete, store it in a list under `self[self.name]`. This
+        is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a template, for use as a ready action.
+    """
+    def __init__(self, source, target, owner='root', group='root', perms=0444):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+
+    def __call__(self, manager, service_name, event_name):
+        service = manager.get_service(service_name)
+        context = {}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+        templating.render(self.source, self.target, context,
+                          self.owner, self.group, self.perms)
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
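A minimal sketch of how a charm would use the new RelationContext base class from helpers.py above (the subclass and its keys are illustrative, not part of this commit):

from charmhelpers.core.services.helpers import RelationContext

class DatabaseRelation(RelationContext):
    # Data from every 'db' relation unit that has published all of the
    # required_keys ends up in self['db'] as a list of dicts; bool(ctx)
    # tells the ServiceManager whether the relation is complete.
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']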
hooks/charmhelpers/core/templating.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+    """
+    Render a template.
+
+    The `source` path, if not absolute, is relative to the `templates_dir`.
+
+    The `target` path should be absolute.
+
+    The context should be a dict containing the values to be replaced in the
+    template.
+
+    The `owner`, `group`, and `perms` options will be passed to `write_file`.
+
+    If omitted, `templates_dir` defaults to the `templates` folder in the charm.
+
+    Note: Using this requires python-jinja2; if it is not installed, calling
+    this will attempt to use charmhelpers.fetch.apt_install to install it.
+    """
+    try:
+        from jinja2 import FileSystemLoader, Environment, exceptions
+    except ImportError:
+        try:
+            from charmhelpers.fetch import apt_install
+        except ImportError:
+            hookenv.log('Could not import jinja2, and could not import '
+                        'charmhelpers.fetch to install it',
+                        level=hookenv.ERROR)
+            raise
+        apt_install('python-jinja2', fatal=True)
+        from jinja2 import FileSystemLoader, Environment, exceptions
+
+    if templates_dir is None:
+        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+    loader = Environment(loader=FileSystemLoader(templates_dir))
+    try:
+        source = source
+        template = loader.get_template(source)
+    except exceptions.TemplateNotFound as e:
+        hookenv.log('Could not load template %s from %s.' %
+                    (source, templates_dir),
+                    level=hookenv.ERROR)
+        raise e
+    content = template.render(context)
+    host.mkdir(os.path.dirname(target))
+    host.write_file(target, content, owner, group, perms)
@@ -1,4 +1,5 @@
 import importlib
+from tempfile import NamedTemporaryFile
 import time
 from yaml import safe_load
 from charmhelpers.core.host import (
@@ -116,14 +117,7 @@ class BaseFetchHandler(object):
 
 def filter_installed_packages(packages):
     """Returns a list of packages that require installation"""
-    import apt_pkg
-    apt_pkg.init()
-
-    # Tell apt to build an in-memory cache to prevent race conditions (if
-    # another process is already building the cache).
-    apt_pkg.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt_pkg.Cache()
+    cache = apt_cache()
     _pkgs = []
     for package in packages:
         try:
@@ -136,6 +130,16 @@ def filter_installed_packages(packages):
     return _pkgs
 
 
+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    import apt_pkg
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
 def apt_install(packages, options=None, fatal=False):
     """Install one or more packages"""
     if options is None:
@@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False):
 
 
 def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples:
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automtically, so sould not be provided.
+    """
     if source is None:
         log('Source is not present. Skipping')
         return
@@ -225,10 +250,23 @@ def add_source(source, key=None):
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
     if key:
-        subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                               'hkp://keyserver.ubuntu.com:80', '--recv',
-                               key])
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
+                                   key])
 
 
 def configure_sources(update=False,
@@ -238,7 +276,8 @@
     Configure multiple sources from charm configuration.
 
     The lists are encoded as yaml fragments in the configuration.
-    The frament needs to be included as a string.
+    The frament needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().
 
     Example config:
         install_sources: |
@@ -1,6 +1,8 @@
 import os
 import urllib2
+from urllib import urlretrieve
 import urlparse
+import hashlib
 
 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -12,7 +14,17 @@ from charmhelpers.payload.archive import (
 )
 from charmhelpers.core.host import mkdir
 
-
+"""
+This class is a plugin for charmhelpers.fetch.install_remote.
+
+It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/.
+
+Example usage:
+install_remote("https://example.com/some/archive.tar.gz")
+# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/.
+
+See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types.
+"""
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """Handler for archives via generic URLs"""
     def can_handle(self, source):
@@ -61,3 +73,31 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return extract(dld_file)
+
+    # Mandatory file validation via Sha1 or MD5 hashing.
+    def download_and_validate(self, url, hashsum, validate="sha1"):
+        if validate == 'sha1' and len(hashsum) != 40:
+            raise ValueError("HashSum must be = 40 characters when using sha1"
+                             " validation")
+        if validate == 'md5' and len(hashsum) != 32:
+            raise ValueError("HashSum must be = 32 characters when using md5"
+                             " validation")
+        tempfile, headers = urlretrieve(url)
+        self.validate_file(tempfile, hashsum, validate)
+        return tempfile
+
+    # Predicate method that returns status of hash matching expected hash.
+    def validate_file(self, source, hashsum, vmethod='sha1'):
+        if vmethod != 'sha1' and vmethod != 'md5':
+            raise ValueError("Validation Method not supported")
+
+        if vmethod == 'md5':
+            m = hashlib.md5()
+        if vmethod == 'sha1':
+            m = hashlib.sha1()
+        with open(source) as f:
+            for line in f:
+                m.update(line)
+        if hashsum != m.hexdigest():
+            msg = "Hash Mismatch on {} expected {} got {}"
+            raise ValueError(msg.format(source, hashsum, m.hexdigest()))
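A hedged usage sketch of the checksum-validated download added above (the URL and digest are placeholders):

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

handler = ArchiveUrlFetchHandler()
# Fetches the URL to a temporary file and raises ValueError if the sha1
# digest of the downloaded file does not match the expected hashsum.
path = handler.download_and_validate(
    'http://example.com/files/archive.tar.gz',
    'da39a3ee5e6b4b0d3255bfef95601890afd80709',  # placeholder 40-char sha1
    validate='sha1')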