diff --git a/Makefile b/Makefile
deleted file mode 100644
index c772e4c1..00000000
--- a/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python3
-
-lint:
-	@tox -e pep8
-
-test:
-	@echo Starting unit tests...
-	@tox -e py27
-
-functional_test:
-	@echo Starting Amulet tests...
-	@tox -e func27
-
-bin/charm_helpers_sync.py:
-	@mkdir -p bin
-	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
-
-
-bin/git_sync.py:
-	@mkdir -p bin
-	@wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py
-
-ch-sync: bin/charm_helpers_sync.py
-	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
-
-ceph-sync: bin/git_sync.py
-	$(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git
-
-sync: ch-sync
-
-publish: lint test
-	bzr push lp:charms/ceph-mon
-	bzr push lp:charms/trusty/ceph-mon
diff --git a/TODO b/TODO
deleted file mode 100644
index 22e0889d..00000000
--- a/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-Ceph Charm
-==========
-
- * fix tunables (http://tracker.newdream.net/issues/2210)
- * more than 192 PGs
- * fixup data placement in crush to be host not osd driven
diff --git a/actions/__init__.py b/actions/__init__.py
index b7fe4e1b..9b088de8 100644
--- a/actions/__init__.py
+++ b/actions/__init__.py
@@ -11,6 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import sys
-sys.path.append('hooks')
diff --git a/actions/ceph_ops.py b/actions/ceph_ops.py
index 0e6eb7ac..a71c6869 100755
--- a/actions/ceph_ops.py
+++ b/actions/ceph_ops.py
@@ -14,9 +14,6 @@
 
 import json
 from subprocess import CalledProcessError, check_output
-import sys
-
-sys.path.append('hooks')
 
 from charmhelpers.core.hookenv import (
     action_get,
diff --git a/actions/change_osd_weight.py b/actions/change_osd_weight.py
index 9a517349..1732f010 100755
--- a/actions/change_osd_weight.py
+++ b/actions/change_osd_weight.py
@@ -16,11 +16,6 @@
 
 """Changes the crush weight of an OSD."""
 
-import sys
-
-sys.path.append("lib")
-sys.path.append("hooks")
-
 from charmhelpers.core.hookenv import function_fail, function_get, log
 from charms_ceph.utils import reweight_osd
 
diff --git a/actions/copy_pool.py b/actions/copy_pool.py
index 5112cf70..84723c8a 100755
--- a/actions/copy_pool.py
+++ b/actions/copy_pool.py
@@ -14,11 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import subprocess
 
-sys.path.append('hooks')
-
 import charmhelpers.core.hookenv as hookenv
 
 
diff --git a/actions/create_cache_tier.py b/actions/create_cache_tier.py
index 0ef212ed..cc68257e 100755
--- a/actions/create_cache_tier.py
+++ b/actions/create_cache_tier.py
@@ -15,9 +15,6 @@
 # limitations under the License.
 
 from subprocess import CalledProcessError
-import sys
-
-sys.path.append('hooks')
 
 from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
 from charmhelpers.core.hookenv import action_get, log, action_fail
diff --git a/actions/create_crush_rule.py b/actions/create_crush_rule.py
index 207b4f4f..65781132 100755
--- a/actions/create_crush_rule.py
+++ b/actions/create_crush_rule.py
@@ -14,11 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import subprocess
 
-sys.path.append('hooks')
-
 import charmhelpers.core.hookenv as hookenv
 
 
diff --git a/actions/create_erasure_profile.py b/actions/create_erasure_profile.py
index 5306baa6..40673d7e 100755
--- a/actions/create_erasure_profile.py
+++ b/actions/create_erasure_profile.py
@@ -15,9 +15,6 @@
 # limitations under the License.
 
 from subprocess import CalledProcessError
-import sys
-
-sys.path.append('hooks')
 
 from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
 from charmhelpers.core.hookenv import action_get, log, action_fail
diff --git a/actions/create_pool.py b/actions/create_pool.py
index f8faee1f..7b9582e2 100755
--- a/actions/create_pool.py
+++ b/actions/create_pool.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import CalledProcessError
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool
diff --git a/actions/crushmap_update.py b/actions/crushmap_update.py
index c4aa13f0..fbe188fc 100755
--- a/actions/crushmap_update.py
+++ b/actions/crushmap_update.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import base64
 from charmhelpers.core.hookenv import action_get, action_fail
 from subprocess import check_output, CalledProcessError, PIPE, Popen
diff --git a/actions/delete_erasure_profile.py b/actions/delete_erasure_profile.py
index 17dc2ef5..748ce5a6 100755
--- a/actions/delete_erasure_profile.py
+++ b/actions/delete_erasure_profile.py
@@ -17,9 +17,6 @@
 from subprocess import CalledProcessError
 
 __author__ = 'chris'
-import sys
-
-sys.path.append('hooks')
 
 from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
 from charmhelpers.core.hookenv import action_get, log, action_fail
diff --git a/actions/delete_pool.py b/actions/delete_pool.py
index d05078da..3d7460e3 100755
--- a/actions/delete_pool.py
+++ b/actions/delete_pool.py
@@ -14,11 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import subprocess
 
-sys.path.append('hooks')
-
 from charmhelpers.core.hookenv import action_get, log, action_fail
 
 
diff --git a/actions/delete_user.py b/actions/delete_user.py
index 93c6016c..4dc8283b 100755
--- a/actions/delete_user.py
+++ b/actions/delete_user.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from charmhelpers.core.hookenv import action_get, action_fail, action_set, log
 from subprocess import CalledProcessError, check_output, STDOUT
 
diff --git a/actions/get_erasure_profile.py b/actions/get_erasure_profile.py
index a259e748..9038f2b0 100755
--- a/actions/get_erasure_profile.py
+++ b/actions/get_erasure_profile.py
@@ -14,10 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
-
 from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
 from charmhelpers.core.hookenv import action_get, action_set
 
diff --git a/actions/get_or_create_user.py b/actions/get_or_create_user.py
index 000855d7..a841dd66 100755
--- a/actions/get_or_create_user.py
+++ b/actions/get_or_create_user.py
@@ -14,10 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import json
 
-sys.path.append("hooks")
 from charmhelpers.core.hookenv import action_get, action_fail, action_set, log
 from subprocess import CalledProcessError, check_output
 
diff --git a/actions/get_quorum_status.py b/actions/get_quorum_status.py
index 31f04890..a537bce3 100755
--- a/actions/get_quorum_status.py
+++ b/actions/get_quorum_status.py
@@ -16,12 +16,9 @@
 
 """Run action to collect Ceph quorum_status output."""
 
 import json
-import sys
 from subprocess import CalledProcessError
 
-sys.path.append('hooks')
-
 from ceph_ops import get_quorum_status
 from charmhelpers.core.hookenv import function_fail, function_get, function_set
 
diff --git a/actions/list_crush_rules.py b/actions/list_crush_rules.py
index a28fcc2b..6f57cc45 100755
--- a/actions/list_crush_rules.py
+++ b/actions/list_crush_rules.py
@@ -13,24 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import json
-import os
-import sys
 import yaml
 from subprocess import check_output, CalledProcessError
 
-_path = os.path.dirname(os.path.realpath(__file__))
-_hooks = os.path.abspath(os.path.join(_path, "../hooks"))
-
-
-def _add_path(path):
-    if path not in sys.path:
-        sys.path.insert(1, path)
-
-
-_add_path(_hooks)
-
-
 from charmhelpers.core.hookenv import (
     ERROR,
     log,
diff --git a/actions/list_erasure_profiles.py b/actions/list_erasure_profiles.py
index c26804ec..2c067583 100755
--- a/actions/list_erasure_profiles.py
+++ b/actions/list_erasure_profiles.py
@@ -14,11 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 from subprocess import check_output, CalledProcessError
 
-sys.path.append('hooks')
-
 from charmhelpers.core.hookenv import action_get, log, action_set, action_fail
 
 if __name__ == '__main__':
diff --git a/actions/list_inconsistent_objs.py b/actions/list_inconsistent_objs.py
index 6d8de5d0..5112166b 100755
--- a/actions/list_inconsistent_objs.py
+++ b/actions/list_inconsistent_objs.py
@@ -16,12 +16,9 @@
 
 import json
 import re
-import sys
 from subprocess import check_output, CalledProcessError
 import yaml
 
-sys.path.append('hooks')
-
 from charmhelpers.core.hookenv import function_fail, function_get, \
     function_set, log
 
diff --git a/actions/list_pools.py b/actions/list_pools.py
index 10c05611..4c1384a9 100755
--- a/actions/list_pools.py
+++ b/actions/list_pools.py
@@ -13,23 +13,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import json
-import os
-import sys
 from subprocess import check_output, CalledProcessError
 
-_path = os.path.dirname(os.path.realpath(__file__))
-_hooks = os.path.abspath(os.path.join(_path, "../hooks"))
-
-
-def _add_path(path):
-    if path not in sys.path:
-        sys.path.insert(1, path)
-
-
-_add_path(_hooks)
-
-
 from charmhelpers.core.hookenv import (
     log,
     function_fail,
diff --git a/actions/pg_repair.py b/actions/pg_repair.py
index 6dd17ecc..be440f5e 100755
--- a/actions/pg_repair.py
+++ b/actions/pg_repair.py
@@ -13,24 +13,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import json
-import os
-import sys
 from subprocess import check_output, CalledProcessError
 
-_path = os.path.dirname(os.path.realpath(__file__))
-_hooks = os.path.abspath(os.path.join(_path, "../hooks"))
-_lib = os.path.abspath(os.path.join(_path, "../lib"))
-
-
-def _add_path(path):
-    if path not in sys.path:
-        sys.path.insert(1, path)
-
-
-_add_path(_hooks)
-_add_path(_lib)
-
 from charmhelpers.core.hookenv import (
     log,
     function_fail,
diff --git a/actions/pool_get.py b/actions/pool_get.py
index 5073d8c3..b139d0dc 100755
--- a/actions/pool_get.py
+++ b/actions/pool_get.py
@@ -14,11 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 from subprocess import check_output, CalledProcessError
 
-sys.path.append('hooks')
-
 from charmhelpers.core.hookenv import log, action_set, action_get, action_fail
 
 if __name__ == '__main__':
diff --git a/actions/pool_set.py b/actions/pool_set.py
index 39ee9345..fafa6898 100755
--- a/actions/pool_set.py
+++ b/actions/pool_set.py
@@ -15,10 +15,6 @@
 # limitations under the License.
 
 from subprocess import CalledProcessError
-import sys
-
-sys.path.append('lib')
-sys.path.append('hooks')
 
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charms_ceph.broker import handle_set_pool_value
diff --git a/actions/pool_statistics.py b/actions/pool_statistics.py
index 30635fb3..e6e8e796 100755
--- a/actions/pool_statistics.py
+++ b/actions/pool_statistics.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import check_output, CalledProcessError
 from charmhelpers.core.hookenv import log, action_set, action_fail
 
diff --git a/actions/purge_osd.py b/actions/purge_osd.py
index 29328075..e884186f 100755
--- a/actions/purge_osd.py
+++ b/actions/purge_osd.py
@@ -26,16 +26,12 @@ from subprocess import (
     check_output,
     CalledProcessError,
 )
-import sys
-sys.path.append('lib')
-sys.path.append('hooks')
-
-
 from charmhelpers.core.hookenv import (
     function_get,
     log,
     function_fail
 )
+
 from charmhelpers.core.host import cmp_pkgrevno
 from charmhelpers.contrib.storage.linux import ceph
 from charms_ceph.utils import get_osd_weight
diff --git a/actions/remove_cache_tier.py b/actions/remove_cache_tier.py
index e0c3444f..18c816c5 100755
--- a/actions/remove_cache_tier.py
+++ b/actions/remove_cache_tier.py
@@ -15,9 +15,6 @@
 # limitations under the License.
 
 from subprocess import CalledProcessError
-import sys
-
-sys.path.append('hooks')
 
 from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
 from charmhelpers.core.hookenv import action_get, log, action_fail
diff --git a/actions/remove_pool_snapshot.py b/actions/remove_pool_snapshot.py
index b451b99e..065f6f67 100755
--- a/actions/remove_pool_snapshot.py
+++ b/actions/remove_pool_snapshot.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import CalledProcessError
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot
diff --git a/actions/rename_pool.py b/actions/rename_pool.py
index ba7f7ac2..7a759d15 100755
--- a/actions/rename_pool.py
+++ b/actions/rename_pool.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import CalledProcessError
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import rename_pool
diff --git a/actions/security_checklist.py b/actions/security_checklist.py
index 23b1caf1..8bc1b27b 100755
--- a/actions/security_checklist.py
+++ b/actions/security_checklist.py
@@ -16,8 +16,6 @@
 
 import sys
 
-sys.path.append('hooks')
-
 import charmhelpers.contrib.openstack.audits as audits
 from charmhelpers.contrib.openstack.audits import (
     openstack_security_guide,
diff --git a/actions/set_noout.py b/actions/set_noout.py
index 145c6988..47ebad80 100755
--- a/actions/set_noout.py
+++ b/actions/set_noout.py
@@ -14,10 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-sys.path.append('hooks')
 from charmhelpers.core.hookenv import action_set, action_fail
-sys.path.append('lib')
 from charms_ceph.utils import osd_noout
 
 if __name__ == '__main__':
diff --git a/actions/set_pool_max_bytes.py b/actions/set_pool_max_bytes.py
index d5893c73..7ffc662a 100755
--- a/actions/set_pool_max_bytes.py
+++ b/actions/set_pool_max_bytes.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import CalledProcessError
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import set_pool_quota
diff --git a/actions/show_disk_free.py b/actions/show_disk_free.py
index 1f38f094..1b372782 100755
--- a/actions/show_disk_free.py
+++ b/actions/show_disk_free.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import check_output, CalledProcessError
 from charmhelpers.core.hookenv import log, action_get, action_set, action_fail
 
diff --git a/actions/snapshot_pool.py b/actions/snapshot_pool.py
index a147b755..251d3fe1 100755
--- a/actions/snapshot_pool.py
+++ b/actions/snapshot_pool.py
@@ -14,9 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-
-sys.path.append('hooks')
 from subprocess import CalledProcessError
 from charmhelpers.core.hookenv import action_get, log, action_fail
 from charmhelpers.contrib.storage.linux.ceph import snapshot_pool
diff --git a/actions/unset_noout.py b/actions/unset_noout.py
index 36be4a69..30035cc9 100755
--- a/actions/unset_noout.py
+++ b/actions/unset_noout.py
@@ -14,10 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
-sys.path.append('hooks')
 from charmhelpers.core.hookenv import action_set, action_fail
-sys.path.append('lib')
 from charms_ceph.utils import osd_noout
 
 if __name__ == '__main__':
diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml
deleted file mode 100644
index df1e68a5..00000000
--- a/charm-helpers-hooks.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-repo: https://github.com/juju/charm-helpers
-destination: hooks/charmhelpers
-include:
-    - core
-    - osplatform
-    - cli
-    - fetch
-    - contrib.storage.linux
-    - payload.execd
-    - contrib.openstack
-    - contrib.network.ip
-    - contrib.hahelpers
-    - contrib.openstack:
-        - alternatives
-        - audits
-        - exceptions
-        - utils
-    - contrib.charmsupport
-    - contrib.hardening|inc=*
-    - fetch.python
-    - contrib.openstack.policyd
diff --git a/charmcraft.yaml b/charmcraft.yaml
index b3a85236..97af1335 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -2,23 +2,25 @@ type: charm
 
 parts:
   charm:
-    plugin: dump
-    source: .
     prime:
      - actions/*
-     - files/*
-     - hooks/*
      - lib/*
      - templates/*
-     - actions.yaml
-     - config.yaml
-     - copyright
-     - hardening.yaml
-     - icon.svg
-     - LICENSE
-     - Makefile
-     - metadata.yaml
-     - README.md
+    after:
+      - update-certificates
+    charm-python-packages:
+      # Use the updated version of setuptools (needed by jinja2).
+      - setuptools
+    build-packages:
+      - git
+
+  update-certificates:
+    # Ensure that certificates in the base image are up-to-date.
+    plugin: nil
+    override-build: |
+      apt update
+      apt install -y ca-certificates
+      update-ca-certificates
 
 bases:
   - build-on:
diff --git a/hooks/__init__.py b/hooks/__init__.py
deleted file mode 100644
index 9b088de8..00000000
--- a/hooks/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/admin-relation-changed b/hooks/admin-relation-changed
deleted file mode 120000
index 52d96630..00000000
--- a/hooks/admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/admin-relation-joined b/hooks/admin-relation-joined
deleted file mode 120000
index 52d96630..00000000
--- a/hooks/admin-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/bootstrap-source-relation-changed b/hooks/bootstrap-source-relation-changed
deleted file mode 120000
index 52d96630..00000000
--- a/hooks/bootstrap-source-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/bootstrap-source-relation-departed b/hooks/bootstrap-source-relation-departed
deleted file mode 120000
index 52d96630..00000000
--- a/hooks/bootstrap-source-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceph_hooks.py
\ No newline at end of file
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
deleted file mode 100644
index ddf30450..00000000
--- a/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import functools
-import inspect
-import subprocess
-
-
-try:
-    import yaml  # NOQA:F401
-except ImportError:
-    subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
-    import yaml  # NOQA:F401
-
-
-# Holds a list of mapping of mangled function names that have been deprecated
-# using the @deprecate decorator below.  This is so that the warning is only
-# printed once for each usage of the function.
-__deprecated_functions = {}
-
-
-def deprecate(warning, date=None, log=None):
-    """Add a deprecation warning the first time the function is used.
-
-    The date which is a string in semi-ISO8660 format indicates the year-month
-    that the function is officially going to be removed.
-
-    usage:
-
-    @deprecate('use core/fetch/add_source() instead', '2017-04')
-    def contributed_add_source_thing(...):
-        ...
-
-    And it then prints to the log ONCE that the function is deprecated.
-    The reason for passing the logging function (log) is so that hookenv.log
-    can be used for a charm if needed.
-
-    :param warning: String to indicate what is to be used instead.
-    :param date: Optional string in YYYY-MM format to indicate when the
-                 function will definitely (probably) be removed.
-    :param log: The log function to call in order to log.  If None, logs to
-        stdout
-    """
-    def wrap(f):
-
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            try:
-                module = inspect.getmodule(f)
-                file = inspect.getsourcefile(f)
-                lines = inspect.getsourcelines(f)
-                f_name = "{}-{}-{}..{}-{}".format(
-                    module.__name__, file, lines[0], lines[-1], f.__name__)
-            except (IOError, TypeError):
-                # assume it was local, so just use the name of the function
-                f_name = f.__name__
-            if f_name not in __deprecated_functions:
-                __deprecated_functions[f_name] = True
-                s = "DEPRECATION WARNING: Function {} is being removed".format(
-                    f.__name__)
-                if date:
-                    s = "{} on/around {}".format(s, date)
-                if warning:
-                    s = "{} : {}".format(s, warning)
-                if log:
-                    log(s)
-                else:
-                    print(s)
-            return f(*args, **kwargs)
-        return wrapped_f
-    return wrap
diff --git a/hooks/charmhelpers/cli/__init__.py b/hooks/charmhelpers/cli/__init__.py
deleted file mode 100644
index 2b0c4b7a..00000000
--- a/hooks/charmhelpers/cli/__init__.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-import argparse
-import sys
-
-import charmhelpers.core.unitdata
-
-
-class OutputFormatter(object):
-    def __init__(self, outfile=sys.stdout):
-        self.formats = (
-            "raw",
-            "json",
-            "py",
-            "yaml",
-            "csv",
-            "tab",
-        )
-        self.outfile = outfile
-
-    def add_arguments(self, argument_parser):
-        formatgroup = argument_parser.add_mutually_exclusive_group()
-        choices = self.supported_formats
-        formatgroup.add_argument("--format", metavar='FMT',
-                                 help="Select output format for returned data, "
-                                      "where FMT is one of: {}".format(choices),
-                                 choices=choices, default='raw')
-        for fmt in self.formats:
-            fmtfunc = getattr(self, fmt)
-            formatgroup.add_argument("-{}".format(fmt[0]),
-                                     "--{}".format(fmt), action='store_const',
-                                     const=fmt, dest='format',
-                                     help=fmtfunc.__doc__)
-
-    @property
-    def supported_formats(self):
-        return self.formats
-
-    def raw(self, output):
-        """Output data as raw string (default)"""
-        if isinstance(output, (list, tuple)):
-            output = '\n'.join(map(str, output))
-        self.outfile.write(str(output))
-
-    def py(self, output):
-        """Output data as a nicely-formatted python data structure"""
-        import pprint
-        pprint.pprint(output, stream=self.outfile)
-
-    def json(self, output):
-        """Output data in JSON format"""
-        import json
-        json.dump(output, self.outfile)
-
-    def yaml(self, output):
-        """Output data in YAML format"""
-        import yaml
-        yaml.safe_dump(output, self.outfile)
-
-    def csv(self, output):
-        """Output data as excel-compatible CSV"""
-        import csv
-        csvwriter = csv.writer(self.outfile)
-        csvwriter.writerows(output)
-
-    def tab(self, output):
-        """Output data in excel-compatible tab-delimited format"""
-        import csv
-        csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
-        csvwriter.writerows(output)
-
-    def format_output(self, output, fmt='raw'):
-        fmtfunc = getattr(self, fmt)
-        fmtfunc(output)
-
-
-class CommandLine(object):
-    argument_parser = None
-    subparsers = None
-    formatter = None
-    exit_code = 0
-
-    def __init__(self):
-        if not self.argument_parser:
-            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
-        if not self.formatter:
-            self.formatter = OutputFormatter()
-            self.formatter.add_arguments(self.argument_parser)
-        if not self.subparsers:
-            self.subparsers = self.argument_parser.add_subparsers(help='Commands')
-
-    def subcommand(self, command_name=None):
-        """
-        Decorate a function as a subcommand.  Use its arguments as the
-        command-line arguments"""
-        def wrapper(decorated):
-            cmd_name = command_name or decorated.__name__
-            subparser = self.subparsers.add_parser(cmd_name,
-                                                   description=decorated.__doc__)
-            for args, kwargs in describe_arguments(decorated):
-                subparser.add_argument(*args, **kwargs)
-            subparser.set_defaults(func=decorated)
-            return decorated
-        return wrapper
-
-    def test_command(self, decorated):
-        """
-        Subcommand is a boolean test function, so bool return values should be
-        converted to a 0/1 exit code.
-        """
-        decorated._cli_test_command = True
-        return decorated
-
-    def no_output(self, decorated):
-        """
-        Subcommand is not expected to return a value, so don't print a spurious None.
-        """
-        decorated._cli_no_output = True
-        return decorated
-
-    def subcommand_builder(self, command_name, description=None):
-        """
-        Decorate a function that builds a subcommand.  Builders should accept a
-        single argument (the subparser instance) and return the function to be
-        run as the command."""
-        def wrapper(decorated):
-            subparser = self.subparsers.add_parser(command_name)
-            func = decorated(subparser)
-            subparser.set_defaults(func=func)
-            subparser.description = description or func.__doc__
-        return wrapper
-
-    def run(self):
-        "Run cli, processing arguments and executing subcommands."
-        arguments = self.argument_parser.parse_args()
-        argspec = inspect.getfullargspec(arguments.func)
-        vargs = []
-        for arg in argspec.args:
-            vargs.append(getattr(arguments, arg))
-        if argspec.varargs:
-            vargs.extend(getattr(arguments, argspec.varargs))
-        output = arguments.func(*vargs)
-        if getattr(arguments.func, '_cli_test_command', False):
-            self.exit_code = 0 if output else 1
-            output = ''
-        if getattr(arguments.func, '_cli_no_output', False):
-            output = ''
-        self.formatter.format_output(output, arguments.format)
-        if charmhelpers.core.unitdata._KV:
-            charmhelpers.core.unitdata._KV.flush()
-
-
-cmdline = CommandLine()
-
-
-def describe_arguments(func):
-    """
-    Analyze a function's signature and return a data structure suitable for
-    passing in as arguments to an argparse parser's add_argument() method."""
-
-    argspec = inspect.getfullargspec(func)
-    # we should probably raise an exception somewhere if func includes **kwargs
-    if argspec.defaults:
-        positional_args = argspec.args[:-len(argspec.defaults)]
-        keyword_names = argspec.args[-len(argspec.defaults):]
-        for arg, default in zip(keyword_names, argspec.defaults):
-            yield ('--{}'.format(arg),), {'default': default}
-    else:
-        positional_args = argspec.args
-
-    for arg in positional_args:
-        yield (arg,), {}
-    if argspec.varargs:
-        yield (argspec.varargs,), {'nargs': '*'}
diff --git a/hooks/charmhelpers/cli/benchmark.py b/hooks/charmhelpers/cli/benchmark.py
deleted file mode 100644
index 303af14b..00000000
--- a/hooks/charmhelpers/cli/benchmark.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.contrib.benchmark import Benchmark
-
-
-@cmdline.subcommand(command_name='benchmark-start')
-def start():
-    Benchmark.start()
-
-
-@cmdline.subcommand(command_name='benchmark-finish')
-def finish():
-    Benchmark.finish()
-
-
-@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
-def service(subparser):
-    subparser.add_argument("value", help="The composite score.")
-    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
-    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
-    return Benchmark.set_composite_score
diff --git a/hooks/charmhelpers/cli/commands.py b/hooks/charmhelpers/cli/commands.py
deleted file mode 100644
index b9310565..00000000
--- a/hooks/charmhelpers/cli/commands.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This module loads sub-modules into the python runtime so they can be
-discovered via the inspect module.  In order to prevent flake8 from (rightfully)
-telling us these are unused modules, throw a ' # noqa' at the end of each import
-so that the warning is suppressed.
-"""
-
-from . import CommandLine  # noqa
-
-"""
-Import the sub-modules which have decorated subcommands to register with chlp.
-"""
-from . import host  # noqa
-from . import benchmark  # noqa
-from . import unitdata  # noqa
-from . import hookenv  # noqa
diff --git a/hooks/charmhelpers/cli/hookenv.py b/hooks/charmhelpers/cli/hookenv.py
deleted file mode 100644
index bd72f448..00000000
--- a/hooks/charmhelpers/cli/hookenv.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import hookenv
-
-
-cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
-cmdline.subcommand('service-name')(hookenv.service_name)
-cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
diff --git a/hooks/charmhelpers/cli/host.py b/hooks/charmhelpers/cli/host.py
deleted file mode 100644
index 40396849..00000000
--- a/hooks/charmhelpers/cli/host.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import host
-
-
-@cmdline.subcommand()
-def mounts():
-    "List mounts"
-    return host.mounts()
-
-
-@cmdline.subcommand_builder('service', description="Control system services")
-def service(subparser):
-    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
-    subparser.add_argument("service_name", help="Name of the service to control")
-    return host.service
diff --git a/hooks/charmhelpers/cli/unitdata.py b/hooks/charmhelpers/cli/unitdata.py
deleted file mode 100644
index acce846f..00000000
--- a/hooks/charmhelpers/cli/unitdata.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import unitdata
-
-
-@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
-def unitdata_cmd(subparser):
-    nested = subparser.add_subparsers()
-
-    get_cmd = nested.add_parser('get', help='Retrieve data')
-    get_cmd.add_argument('key', help='Key to retrieve the value of')
-    get_cmd.set_defaults(action='get', value=None)
-
-    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
-    getrange_cmd.add_argument('key', metavar='prefix',
-                              help='Prefix of the keys to retrieve')
-    getrange_cmd.set_defaults(action='getrange', value=None)
-
-    set_cmd = nested.add_parser('set', help='Store data')
-    set_cmd.add_argument('key', help='Key to set')
-    set_cmd.add_argument('value', help='Value to store')
-    set_cmd.set_defaults(action='set')
-
-    def _unitdata_cmd(action, key, value):
-        if action == 'get':
-            return unitdata.kv().get(key)
-        elif action == 'getrange':
-            return unitdata.kv().getrange(key)
-        elif action == 'set':
-            unitdata.kv().set(key, value)
-            unitdata.kv().flush()
-        return ''
-    return _unitdata_cmd
diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index bad7a533..00000000
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,560 +0,0 @@
-# Copyright 2012-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Compatibility with the nrpe-external-master charm"""
-#
-# Authors:
-#  Matthew Wedgwood
-
-import glob
-import grp
-import os
-import pwd
-import re
-import shlex
-import shutil
-import subprocess
-import yaml
-
-from charmhelpers.core.hookenv import (
-    application_name,
-    config,
-    hook_name,
-    local_unit,
-    log,
-    relation_get,
-    relation_ids,
-    relation_set,
-    relations_of_type,
-)
-
-from charmhelpers.core.host import service
-from charmhelpers.core import host
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-#   provides:
-#     (...)
-#     nrpe-external-master:
-#       interface: nrpe-external-master
-#       scope: container
-#
-#   and/or
-#
-#   provides:
-#     (...)
-#     local-monitors:
-#       interface: local-monitors
-#       scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-#    nagios_context:
-#      default: "juju"
-#      type: string
-#      description: |
-#        Used by the nrpe subordinate charms.
-#        A string that will be prepended to instance name to set the host name
-#        in nagios. So for instance the hostname would be something like:
-#            juju-myservice-0
-#        If you're running multiple environments with the same services in them
-#        this allows you to differentiate between them.
-#    nagios_servicegroups:
-#      default: ""
-#      type: string
-#      description: |
-#        A comma-separated list of nagios servicegroups.
-#        If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-#    from charmsupport.nrpe import NRPE
-#    (...)
-#    def update_nrpe_config():
-#        nrpe_compat = NRPE()
-#        nrpe_compat.add_check(
-#            shortname = "myservice",
-#            description = "Check MyService",
-#            check_cmd = "check_http -w 2 -c 10 http://localhost"
-#            )
-#        nrpe_compat.add_check(
-#            "myservice_other",
-#            "Check for widget failures",
-#            check_cmd = "/srv/myapp/scripts/widget_check"
-#            )
-#        nrpe_compat.write()
-#
-#    def config_changed():
-#        (...)
-#        update_nrpe_config()
-#
-#    def nrpe_external_master_relation_changed():
-#        update_nrpe_config()
-#
-#    def local_monitors_relation_changed():
-#        update_nrpe_config()
-#
-# 4.a If your charm is a subordinate charm set primary=False
-#
-#    from charmsupport.nrpe import NRPE
-#    (...)
-#    def update_nrpe_config():
-#        nrpe_compat = NRPE(primary=False)
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-#    ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
-    pass
-
-
-class Check(object):
-    shortname_re = '[A-Za-z0-9-_.@]+$'
-    service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
-    use                             active-service
-    host_name                       {nagios_hostname}
-    service_description             {nagios_hostname}[{shortname}] """
-                        """{description}
-    check_command                   check_nrpe!{command}
-    servicegroups                   {nagios_servicegroup}
-{service_config_overrides}
-}}
-""")
-
-    def __init__(self, shortname, description, check_cmd, max_check_attempts=None):
-        super(Check, self).__init__()
-        # XXX: could be better to calculate this from the service name
-        if not re.match(self.shortname_re, shortname):
-            raise CheckException("shortname must match {}".format(
-                Check.shortname_re))
-        self.shortname = shortname
-        self.command = "check_{}".format(shortname)
-        # Note: a set of invalid characters is defined by the
-        # Nagios server config
-        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
-        self.description = description
-        self.check_cmd = self._locate_cmd(check_cmd)
-        self.max_check_attempts = max_check_attempts
-
-    def _get_check_filename(self):
-        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
-    def _get_service_filename(self, hostname):
-        return os.path.join(NRPE.nagios_exportdir,
-                            'service__{}_{}.cfg'.format(hostname, self.command))
-
-    def _locate_cmd(self, check_cmd):
-        search_path = (
-            '/usr/lib/nagios/plugins',
-            '/usr/local/lib/nagios/plugins',
-        )
-        parts = shlex.split(check_cmd)
-        for path in search_path:
-            if os.path.exists(os.path.join(path, parts[0])):
-                command = os.path.join(path, parts[0])
-                if len(parts) > 1:
-                    safe_args = [shlex.quote(arg) for arg in parts[1:]]
-                    command += " " + " ".join(safe_args)
-                return command
-        log('Check command not found: {}'.format(parts[0]))
-        return ''
-
-    def _remove_service_files(self):
-        if not os.path.exists(NRPE.nagios_exportdir):
-            return
-        for f in os.listdir(NRPE.nagios_exportdir):
-            if f.endswith('_{}.cfg'.format(self.command)):
-                os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
-    def remove(self, hostname):
-        nrpe_check_file = self._get_check_filename()
-        if os.path.exists(nrpe_check_file):
-            os.remove(nrpe_check_file)
-        self._remove_service_files()
-
-    def write(self, nagios_context, hostname, nagios_servicegroups):
-        nrpe_check_file = self._get_check_filename()
-        with open(nrpe_check_file, 'w') as nrpe_check_config:
-            nrpe_check_config.write("# check {}\n".format(self.shortname))
-            if nagios_servicegroups:
-                nrpe_check_config.write(
-                    "# The following header was added automatically by juju\n")
-                nrpe_check_config.write(
-                    "# Modifying it will affect nagios monitoring and alerting\n")
-                nrpe_check_config.write(
-                    "# servicegroups: {}\n".format(nagios_servicegroups))
-            nrpe_check_config.write("command[{}]={}\n".format(
-                self.command, self.check_cmd))
-
-        if not os.path.exists(NRPE.nagios_exportdir):
-            log('Not writing service config as {} is not accessible'.format(
-                NRPE.nagios_exportdir))
-        else:
-            self.write_service_config(nagios_context, hostname,
-                                      nagios_servicegroups)
-
-    def write_service_config(self, nagios_context, hostname,
-                             nagios_servicegroups):
-        self._remove_service_files()
-
-        if self.max_check_attempts:
-            service_config_overrides = '    max_check_attempts              {}'.format(
-                self.max_check_attempts
-            )  # Note indentation is here rather than in the template to avoid trailing spaces
-        else:
-            service_config_overrides = ''  # empty string to avoid printing 'None'
-        templ_vars = {
-            'nagios_hostname': hostname,
-            'nagios_servicegroup': nagios_servicegroups,
-            'description': self.description,
-            'shortname': self.shortname,
-            'command': self.command,
-            'service_config_overrides': service_config_overrides,
-        }
-        nrpe_service_text = Check.service_template.format(**templ_vars)
-        nrpe_service_file = self._get_service_filename(hostname)
-        with open(nrpe_service_file, 'w') as nrpe_service_config:
-            nrpe_service_config.write(str(nrpe_service_text))
-
-    def run(self):
-        subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
-    nagios_logdir = '/var/log/nagios'
-    nagios_exportdir = '/var/lib/nagios/export'
-    nrpe_confdir = '/etc/nagios/nrpe.d'
-    homedir = '/var/lib/nagios'  # home dir provided by nagios-nrpe-server
-
-    def __init__(self, hostname=None, primary=True):
-        super(NRPE, self).__init__()
-        self.config = config()
-        self.primary = primary
-        self.nagios_context = self.config['nagios_context']
-        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
-            self.nagios_servicegroups = self.config['nagios_servicegroups']
-        else:
-            self.nagios_servicegroups = self.nagios_context
-        self.unit_name = local_unit().replace('/', '-')
-        if hostname:
-            self.hostname = hostname
-        else:
-            nagios_hostname = get_nagios_hostname()
-            if nagios_hostname:
-                self.hostname = nagios_hostname
-            else:
-                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
-        self.checks = []
-        # Iff in an nrpe-external-master relation hook, set primary status
-        relation = relation_ids('nrpe-external-master')
-        if relation:
-            log("Setting charm primary status {}".format(primary))
-            for rid in relation:
-                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
-        self.remove_check_queue = set()
-
-    @classmethod
-    def does_nrpe_conf_dir_exist(cls):
-        """Return True if th nrpe_confdif directory exists."""
-        return os.path.isdir(cls.nrpe_confdir)
-
-    def add_check(self, *args, **kwargs):
-        shortname = None
-        if kwargs.get('shortname') is None:
-            if len(args) > 0:
-                shortname = args[0]
-        else:
-            shortname = kwargs['shortname']
-
-        self.checks.append(Check(*args, **kwargs))
-        try:
-            self.remove_check_queue.remove(shortname)
-        except KeyError:
-            pass
-
-    def remove_check(self, *args, **kwargs):
-        if kwargs.get('shortname') is None:
-            raise ValueError('shortname of check must be specified')
-
-        # Use sensible defaults if they're not specified - these are not
-        # actually used during removal, but they're required for constructing
-        # the Check object; check_disk is chosen because it's part of the
-        # nagios-plugins-basic package.
-        if kwargs.get('check_cmd') is None:
-            kwargs['check_cmd'] = 'check_disk'
-        if kwargs.get('description') is None:
-            kwargs['description'] = ''
-
-        check = Check(*args, **kwargs)
-        check.remove(self.hostname)
-        self.remove_check_queue.add(kwargs['shortname'])
-
-    def write(self):
-        try:
-            nagios_uid = pwd.getpwnam('nagios').pw_uid
-            nagios_gid = grp.getgrnam('nagios').gr_gid
-        except Exception:
-            log("Nagios user not set up, nrpe checks not updated")
-            return
-
-        if not os.path.exists(NRPE.nagios_logdir):
-            os.mkdir(NRPE.nagios_logdir)
-            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
-        nrpe_monitors = {}
-        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
-
-        # check that the charm can write to the conf dir.  If not, then nagios
-        # probably isn't installed, and we can defer.
-        if not self.does_nrpe_conf_dir_exist():
-            return
-
-        for nrpecheck in self.checks:
-            nrpecheck.write(self.nagios_context, self.hostname,
-                            self.nagios_servicegroups)
-            nrpe_monitors[nrpecheck.shortname] = {
-                "command": nrpecheck.command,
-            }
-            # If we were passed max_check_attempts, add that to the relation data
-            if nrpecheck.max_check_attempts is not None:
-                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts
-
-        # update-status hooks are configured to firing every 5 minutes by
-        # default. When nagios-nrpe-server is restarted, the nagios server
-        # reports checks failing causing unnecessary alerts. Let's not restart
-        # on update-status hooks.
-        if not hook_name() == 'update-status':
-            service('restart', 'nagios-nrpe-server')
-
-        monitor_ids = relation_ids("local-monitors") + \
-            relation_ids("nrpe-external-master")
-        for rid in monitor_ids:
-            reldata = relation_get(unit=local_unit(), rid=rid)
-            if 'monitors' in reldata:
-                # update the existing set of monitors with the new data
-                old_monitors = yaml.safe_load(reldata['monitors'])
-                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
-                # remove keys that are in the remove_check_queue
-                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
-                                     if k not in self.remove_check_queue}
-                # update/add nrpe_monitors
-                old_nrpe_monitors.update(nrpe_monitors)
-                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
-                # write back to the relation
-                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
-            else:
-                # write a brand new set of monitors, as no existing ones.
-                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-        self.remove_check_queue.clear()
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
-    """
-    Query relation with nrpe subordinate, return the nagios_host_context
-
-    :param str relation_name: Name of relation nrpe sub joined to
-    """
-    for rel in relations_of_type(relation_name):
-        if 'nagios_host_context' in rel:
-            return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
-    """
-    Query relation with nrpe subordinate, return the nagios_hostname
-
-    :param str relation_name: Name of relation nrpe sub joined to
-    """
-    for rel in relations_of_type(relation_name):
-        if 'nagios_hostname' in rel:
-            return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
-    """
-    Return the nagios unit name prepended with host_context if needed
-
-    :param str relation_name: Name of relation nrpe sub joined to
-    """
-    host_context = get_nagios_hostcontext(relation_name)
-    if host_context:
-        unit = "%s:%s" % (host_context, local_unit())
-    else:
-        unit = local_unit()
-    return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
-    """
-    Add checks for each service in list
-
-    :param NRPE nrpe: NRPE object to add check to
-    :param list services: List of services to check
-    :param str unit_name: Unit name to use in check description
-    :param bool immediate_check: For sysv init, run the service check immediately
-    """
-    for svc in services:
-        # Don't add a check for these services from neutron-gateway
-        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
-            next
-
-        upstart_init = '/etc/init/%s.conf' % svc
-        sysv_init = '/etc/init.d/%s' % svc
-
-        if host.init_is_systemd(service_name=svc):
-            nrpe.add_check(
-                shortname=svc,
-                description='process check {%s}' % unit_name,
-                check_cmd='check_systemd.py %s' % svc
-            )
-        elif os.path.exists(upstart_init):
-            nrpe.add_check(
-                shortname=svc,
-                description='process check {%s}' % unit_name,
-                check_cmd='check_upstart_job %s' % svc
-            )
-        elif os.path.exists(sysv_init):
-            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
-            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
-            croncmd = (
-                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
-                '-e -s /etc/init.d/%s status' % svc
-            )
-            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
-            f = open(cronpath, 'w')
-            f.write(cron_file)
-            f.close()
-            nrpe.add_check(
-                shortname=svc,
-                description='service check {%s}' % unit_name,
-                check_cmd='check_status_file.py -f %s' % checkpath,
-            )
-            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
-            # (LP: #1670223).
-            if immediate_check and os.path.isdir(nrpe.homedir):
-                f = open(checkpath, 'w')
-                subprocess.call(
-                    croncmd.split(),
-                    stdout=f,
-                    stderr=subprocess.STDOUT
-                )
-                f.close()
-                os.chmod(checkpath, 0o644)
-
-
-def copy_nrpe_checks(nrpe_files_dir=None):
-    """
-    Copy the nrpe checks into place
-
-    """
-    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    if nrpe_files_dir is None:
-        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
-        for segment in ['.', 'hooks']:
-            nrpe_files_dir = os.path.abspath(os.path.join(
-                os.getenv('CHARM_DIR'),
-                segment,
-                'charmhelpers',
-                'contrib',
-                'openstack',
-                'files'))
-            if os.path.isdir(nrpe_files_dir):
-                break
-        else:
-            raise RuntimeError("Couldn't find charmhelpers directory")
-    if not os.path.exists(NAGIOS_PLUGINS):
-        os.makedirs(NAGIOS_PLUGINS)
-    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
-        if os.path.isfile(fname):
-            shutil.copy2(fname,
-                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
-    """
-    Add checks for each service in list
-
-    :param NRPE nrpe: NRPE object to add check to
-    :param str unit_name: Unit name to use in check description
-    """
-    nrpe.add_check(
-        shortname='haproxy_servers',
-        description='Check HAProxy {%s}' % unit_name,
-        check_cmd='check_haproxy.sh')
-    nrpe.add_check(
-        shortname='haproxy_queue',
-        description='Check HAProxy queue depth {%s}' % unit_name,
-        check_cmd='check_haproxy_queue_depth.sh')
-
-
-def remove_deprecated_check(nrpe, deprecated_services):
-    """
-    Remove checks for deprecated services in list
-
-    :param nrpe: NRPE object to remove check from
-    :type nrpe: NRPE
-    :param deprecated_services: List of deprecated services that are removed
-    :type deprecated_services: list
-    """
-    for dep_svc in deprecated_services:
-        log('Deprecated service: {}'.format(dep_svc))
-        nrpe.remove_check(shortname=dep_svc)
-
-
-def add_deferred_restarts_check(nrpe):
-    """
-    Add NRPE check for services with deferred restarts.
-
-    :param NRPE nrpe: NRPE object to add check to
-    """
-    unit_name = local_unit().replace('/', '-')
-    shortname = unit_name + '_deferred_restarts'
-    check_cmd = 'check_deferred_restarts.py --application {}'.format(
-        application_name())
-
-    log('Adding deferred restarts nrpe check: {}'.format(shortname))
-    nrpe.add_check(
-        shortname=shortname,
-        description='Check deferred service restarts {}'.format(unit_name),
-        check_cmd=check_cmd)
-
-
-def remove_deferred_restarts_check(nrpe):
-    """
-    Remove NRPE check for services with deferred service restarts.
-
-    :param NRPE nrpe: NRPE object to remove check from
-    """
-    unit_name = local_unit().replace('/', '-')
-    shortname = unit_name + '_deferred_restarts'
-    check_cmd = 'check_deferred_restarts.py --application {}'.format(
-        application_name())
-
-    log('Removing deferred restarts nrpe check: {}'.format(shortname))
-    nrpe.remove_check(
-        shortname=shortname,
-        description='Check deferred service restarts {}'.format(unit_name),
-        check_cmd=check_cmd)
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index f7c6fbdc..00000000
--- a/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
-  volume-ephemeral:
-    type: boolean
-    default: true
-    description: >
-      If false, a volume is mounted as specified in "volume-map"
-      If true, ephemeral storage will be used, meaning that log data
-      will only exist as long as the machine. YOU HAVE BEEN WARNED.
-  volume-map:
-    type: string
-    default: {}
-    description: >
-      YAML map of units to device names, e.g:
-        "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
-      Service units will raise a configure-error if volume-ephemeral
-      is 'true' and no volume-map value is set. Use 'juju set' to set a
-      value and 'juju resolved' to complete configuration.
- -Usage:: - - from charmsupport.volumes import configure_volume, VolumeConfigurationError - from charmsupport.hookenv import log, ERROR - def pre_mount_hook(): - stop_service('myservice') - def post_mount_hook(): - start_service('myservice') - - if __name__ == '__main__': - try: - configure_volume(before_change=pre_mount_hook, - after_change=post_mount_hook) - except VolumeConfigurationError: - log('Storage could not be configured', ERROR) - -''' - -# XXX: Known limitations -# - fstab is neither consulted nor updated - -import os -from charmhelpers.core import hookenv -from charmhelpers.core import host -import yaml - - -MOUNT_BASE = '/srv/juju/volumes' - - -class VolumeConfigurationError(Exception): - '''Volume configuration data is missing or invalid''' - pass - - -def get_config(): - '''Gather and sanity-check volume configuration data''' - volume_config = {} - config = hookenv.config() - - errors = False - - if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): - volume_config['ephemeral'] = True - else: - volume_config['ephemeral'] = False - - try: - volume_map = yaml.safe_load(config.get('volume-map', '{}')) - except yaml.YAMLError as e: - hookenv.log("Error parsing YAML volume-map: {}".format(e), - hookenv.ERROR) - errors = True - if volume_map is None: - # probably an empty string - volume_map = {} - elif not isinstance(volume_map, dict): - hookenv.log("Volume-map should be a dictionary, not {}".format( - type(volume_map))) - errors = True - - volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) - if volume_config['device'] and volume_config['ephemeral']: - # asked for ephemeral storage but also defined a volume ID - hookenv.log('A volume is defined for this unit, but ephemeral ' - 'storage was requested', hookenv.ERROR) - errors = True - elif not volume_config['device'] and not volume_config['ephemeral']: - # asked for permanent storage but did not define volume ID - hookenv.log('Persistent storage was requested, but there is no volume ' - 'defined for this unit.', hookenv.ERROR) - errors = True - - unit_mount_name = hookenv.local_unit().replace('/', '-') - volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) - - if errors: - return None - return volume_config - - -def mount_volume(config): - if os.path.exists(config['mountpoint']): - if not os.path.isdir(config['mountpoint']): - hookenv.log('Not a directory: {}'.format(config['mountpoint'])) - raise VolumeConfigurationError() - else: - host.mkdir(config['mountpoint']) - if os.path.ismount(config['mountpoint']): - unmount_volume(config) - if not host.mount(config['device'], config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def unmount_volume(config): - if os.path.ismount(config['mountpoint']): - if not host.umount(config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def managed_mounts(): - '''List of all mounted managed volumes''' - return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) - - -def configure_volume(before_change=lambda: None, after_change=lambda: None): - '''Set up storage (or don't) according to the charm's volume configuration. - Returns the mount point or "ephemeral". before_change and after_change - are optional functions to be called if the volume configuration changes.
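For concreteness, the mountpoint that get_config computes is simply MOUNT_BASE plus the dashed unit name; a self-contained sketch assuming a hypothetical rsyslog/0 unit:

    import os.path

    MOUNT_BASE = '/srv/juju/volumes'
    # the real code takes the unit name from hookenv.local_unit()
    unit_mount_name = 'rsyslog/0'.replace('/', '-')
    print(os.path.join(MOUNT_BASE, unit_mount_name))  # /srv/juju/volumes/rsyslog-0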
- ''' - - config = get_config() - if not config: - hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) - raise VolumeConfigurationError() - - if config['ephemeral']: - if os.path.ismount(config['mountpoint']): - before_change() - unmount_volume(config) - after_change() - return 'ephemeral' - else: - # persistent storage - if os.path.ismount(config['mountpoint']): - mounts = dict(managed_mounts()) - if mounts.get(config['mountpoint']) != config['device']: - before_change() - unmount_volume(config) - mount_volume(config) - after_change() - else: - before_change() - mount_volume(config) - after_change() - return config['mountpoint'] diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/hahelpers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py deleted file mode 100644 index a54702bc..00000000 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import os - -from charmhelpers.core import host -from charmhelpers.core.hookenv import ( - config as config_get, - relation_get, - relation_ids, - related_units as relation_list, - log, - INFO, -) - -# This file contains the CA cert from the charms ssl_ca configuration -# option, in future the file name should be updated reflect that. 
-CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert' - - -def get_cert(cn=None): - # TODO: deal with multiple https endpoints via charm config - cert = config_get('ssl_cert') - key = config_get('ssl_key') - if not (cert and key): - log("Inspecting identity-service relations for SSL certificate.", - level=INFO) - cert = key = None - if cn: - ssl_cert_attr = 'ssl_cert_{}'.format(cn) - ssl_key_attr = 'ssl_key_{}'.format(cn) - else: - ssl_cert_attr = 'ssl_cert' - ssl_key_attr = 'ssl_key' - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if not cert: - cert = relation_get(ssl_cert_attr, - rid=r_id, unit=unit) - if not key: - key = relation_get(ssl_key_attr, - rid=r_id, unit=unit) - return (cert, key) - - -def get_ca_cert(): - ca_cert = config_get('ssl_ca') - if ca_cert is None: - log("Inspecting identity-service relations for CA SSL certificate.", - level=INFO) - for r_id in (relation_ids('identity-service') + - relation_ids('identity-credentials')): - for unit in relation_list(r_id): - if ca_cert is None: - ca_cert = relation_get('ca_cert', - rid=r_id, unit=unit) - return ca_cert - - -def retrieve_ca_cert(cert_file): - cert = None - if os.path.isfile(cert_file): - with open(cert_file, 'rb') as crt: - cert = crt.read() - return cert - - -def install_ca_cert(ca_cert): - host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py deleted file mode 100644 index 146beba6..00000000 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2012 Canonical Ltd. -# -# Authors: -# James Page -# Adam Gandelman -# - -""" -Helpers for clustering and determining "cluster leadership" and other -clustering-related helpers. -""" - -import functools -import subprocess -import os -import time - -from socket import gethostname as get_unit_hostname - -from charmhelpers.core.hookenv import ( - log, - relation_ids, - related_units as relation_list, - relation_get, - config as config_get, - INFO, - DEBUG, - WARNING, - unit_get, - is_leader as juju_is_leader, - status_set, -) -from charmhelpers.core.host import ( - modulo_distribution, -) -from charmhelpers.core.decorators import ( - retry_on_exception, -) -from charmhelpers.core.strutils import ( - bool_from_string, -) - -DC_RESOURCE_NAME = 'DC' - - -class HAIncompleteConfig(Exception): - pass - - -class HAIncorrectConfig(Exception): - pass - - -class CRMResourceNotFound(Exception): - pass - - -class CRMDCNotFound(Exception): - pass - - -def is_elected_leader(resource): - """ - Returns True if the charm executing this is the elected cluster leader. - - It relies on two mechanisms to determine leadership: - 1. If juju is sufficiently new and leadership election is supported, - the is_leader command will be used. - 2. If the charm is part of a corosync cluster, call corosync to - determine leadership. - 3. 
If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit number". In - other words, the oldest surviving unit. - """ - try: - return juju_is_leader() - except NotImplementedError: - log('Juju leadership election feature not enabled' - ', using fallback support', - level=WARNING) - - if is_clustered(): - if not is_crm_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True - - -def is_clustered(): - for r_id in (relation_ids('ha') or []): - for unit in (relation_list(r_id) or []): - clustered = relation_get('clustered', - rid=r_id, - unit=unit) - if clustered: - return True - return False - - -def is_crm_dc(): - """ - Determine leadership by querying the pacemaker Designated Controller - """ - cmd = ['crm', 'status'] - try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') - except subprocess.CalledProcessError as ex: - raise CRMDCNotFound(str(ex)) - - current_dc = '' - for line in status.split('\n'): - if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - # - partition with quorum - current_dc = line.split(':')[1].split()[0] - if current_dc == get_unit_hostname(): - return True - elif current_dc == 'NONE': - raise CRMDCNotFound('Current DC: NONE') - - return False - - -@retry_on_exception(5, base_delay=2, - exc_type=(CRMResourceNotFound, CRMDCNotFound)) -def is_crm_leader(resource, retry=False): - """ - Returns True if the charm calling this is the elected corosync leader, - as returned by calling the external "crm" command. - - We allow this operation to be retried to avoid the possibility of getting a - false negative. See LP #1396246 for more info. - """ - if resource == DC_RESOURCE_NAME: - return is_crm_dc() - cmd = ['crm', 'resource', 'show', resource] - try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') - except subprocess.CalledProcessError: - status = None - - if status and get_unit_hostname() in status: - return True - - if status and "resource %s is NOT running" % (resource) in status: - raise CRMResourceNotFound("CRM resource %s not found" % (resource)) - - return False - - -def is_leader(resource): - log("is_leader is deprecated. Please consider using is_crm_leader " - "instead.", level=WARNING) - return is_crm_leader(resource) - - -def peer_units(peer_relation="cluster"): - peers = [] - for r_id in (relation_ids(peer_relation) or []): - for unit in (relation_list(r_id) or []): - peers.append(unit) - return peers - - -def peer_ips(peer_relation='cluster', addr_key='private-address'): - '''Return a dict of peers and their private-address''' - peers = {} - for r_id in relation_ids(peer_relation): - for unit in relation_list(r_id): - peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) - return peers - - -def oldest_peer(peers): - """Determines who the oldest peer is by comparing unit numbers.""" - local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) - for peer in peers: - remote_unit_no = int(peer.split('/')[1]) - if remote_unit_no < local_unit_no: - return False - return True - - -def eligible_leader(resource): - log("eligible_leader is deprecated. 
Please consider using " - "is_elected_leader instead.", level=WARNING) - return is_elected_leader(resource) - - -def https(): - ''' - Determines whether enough data has been provided in configuration - or relation data to configure HTTPS. - - returns: boolean - ''' - use_https = config_get('use-https') - if use_https and bool_from_string(use_https): - return True - if config_get('ssl_cert') and config_get('ssl_key'): - return True - for r_id in relation_ids('certificates'): - for unit in relation_list(r_id): - ca = relation_get('ca', rid=r_id, unit=unit) - if ca: - return True - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN - rel_state = [ - relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ca_cert', rid=r_id, unit=unit), - ] - # NOTE: works around (LP: #1203241) - if (None not in rel_state) and ('' not in rel_state): - return True - return False - - -def determine_api_port(public_port, singlenode_mode=False): - ''' - Determine correct API server listening port based on - existence of HTTPS reverse proxy and/or haproxy. - - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the API service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - if https(): - i += 1 - return public_port - (i * 10) - - -def determine_apache_port(public_port, singlenode_mode=False): - ''' - Determine correct apache listening port based on the public port and - state of the cluster. - - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the HAProxy service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - return public_port - (i * 10) - - -determine_apache_port_single = functools.partial( - determine_apache_port, singlenode_mode=True) - - -def get_hacluster_config(exclude_keys=None): - ''' - Obtains all relevant configuration from charm configuration required - for initiating a relation to hacluster: - - ha-bindiface, ha-mcastport, vip, os-internal-hostname, - os-admin-hostname, os-public-hostname, os-access-hostname - - param: exclude_keys: list of setting key(s) to be excluded. - returns: dict: A dict containing settings keyed by setting name. - raises: HAIncorrectConfig if settings are missing or incorrect. - ''' - settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', - 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname'] - conf = {} - for setting in settings: - if exclude_keys and setting in exclude_keys: - continue - - conf[setting] = config_get(setting) - - if not valid_hacluster_config(): - raise HAIncorrectConfig('Insufficient or incorrect config data to ' - 'configure hacluster.') - return conf - - -def valid_hacluster_config(): - ''' - Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname - must be set. - - Note: ha-bindiface and ha-mcastport both have defaults and will always - be set. We only care that either vip or dns-ha is set. - - :returns: boolean: valid config returns true. - raises: HAIncorrectConfig if settings conflict. - raises: HAIncompleteConfig if settings are missing.
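The two determine_*_port helpers above shift the listener down by 10 ports per fronting layer: one step when the unit is clustered or in single-node mode (haproxy in front), and, for the API port, one more step when HTTPS is enabled (apache terminating TLS). A self-contained illustration of that arithmetic with a hypothetical public port of 8080:

    def offset_port(public_port, layers):
        # each fronting layer moves the backend down 10 ports
        return public_port - (layers * 10)

    assert offset_port(8080, 1) == 8070  # apache port when clustered
    assert offset_port(8080, 2) == 8060  # API port when clustered with HTTPS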
- ''' - vip = config_get('vip') - dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): - msg = ('HA: Either vip or dns-ha must be set but not both in order to ' - 'use high availability') - status_set('blocked', msg) - raise HAIncorrectConfig(msg) - - # If dns-ha then one of os-*-hostname must be set - if dns: - dns_settings = ['os-internal-hostname', 'os-admin-hostname', - 'os-public-hostname', 'os-access-hostname'] - # At this point it is unknown if one or all of the possible - # network spaces are in HA. Validate at least one is set which is - # the minimum required. - for setting in dns_settings: - if config_get(setting): - log('DNS HA: At least one hostname is set {}: {}' - ''.format(setting, config_get(setting)), - level=DEBUG) - return True - - msg = ('DNS HA: At least one os-*-hostname(s) must be set to use ' - 'DNS HA') - status_set('blocked', msg) - raise HAIncompleteConfig(msg) - - log('VIP HA: VIP is set {}'.format(vip), level=DEBUG) - return True - - -def canonical_url(configs, vip_setting='vip'): - ''' - Returns the correct HTTP URL to this host given the state of HTTPS - configuration and hacluster. - - :configs : OSTemplateRenderer: A config tempating object to inspect for - a complete https context. - - :vip_setting: str: Setting in charm config that specifies - VIP address. - ''' - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' - if is_clustered(): - addr = config_get(vip_setting) - else: - addr = unit_get('private-address') - return '%s://%s' % (scheme, addr) - - -def distributed_wait(modulo=None, wait=None, operation_name='operation'): - ''' Distribute operations by waiting based on modulo_distribution - - If modulo and or wait are not set, check config_get for those values. - If config values are not set, default to modulo=3 and wait=30. - - :param modulo: int The modulo number creates the group distribution - :param wait: int The constant time wait value - :param operation_name: string Operation name for status message - i.e. 'restart' - :side effect: Calls config_get() - :side effect: Calls log() - :side effect: Calls status_set() - :side effect: Calls time.sleep() - ''' - if modulo is None: - modulo = config_get('modulo-nodes') or 3 - if wait is None: - wait = config_get('known-wait') or 30 - if juju_is_leader(): - # The leader should never wait - calculated_wait = 0 - else: - # non_zero_wait=True guarantees the non-leader who gets modulo 0 - # will still wait - calculated_wait = modulo_distribution(modulo=modulo, wait=wait, - non_zero_wait=True) - msg = "Waiting {} seconds for {} ...".format(calculated_wait, - operation_name) - log(msg, DEBUG) - status_set('maintenance', msg) - time.sleep(calculated_wait) - - -def get_managed_services_and_ports(services, external_ports, - external_services=None, - port_conv_f=determine_apache_port_single): - """Get the services and ports managed by this charm. - - Return only the services and corresponding ports that are managed by this - charm. This excludes haproxy when there is a relation with hacluster. This - is because this charm passes responsibility for stopping and starting - haproxy to hacluster. - - Similarly, if a relation with hacluster exists then the ports returned by - this method correspond to those managed by the apache server rather than - haproxy. - - :param services: List of services. - :type services: List[str] - :param external_ports: List of ports managed by external services. 
- :type external_ports: List[int] - :param external_services: List of services to be removed if ha relation is - present. - :type external_services: List[str] - :param port_conv_f: Function to apply to ports to calculate the ports - managed by services controlled by this charm. - :type port_convert_func: f() - :returns: A tuple containing a list of services first followed by a list of - ports. - :rtype: Tuple[List[str], List[int]] - """ - if external_services is None: - external_services = ['haproxy'] - if relation_ids('ha'): - for svc in external_services: - try: - services.remove(svc) - except ValueError: - pass - external_ports = [port_conv_f(p) for p in external_ports] - return services, external_ports diff --git a/hooks/charmhelpers/contrib/hardening/README.hardening.md b/hooks/charmhelpers/contrib/hardening/README.hardening.md deleted file mode 100644 index 91280c03..00000000 --- a/hooks/charmhelpers/contrib/hardening/README.hardening.md +++ /dev/null @@ -1,38 +0,0 @@ -# Juju charm-helpers hardening library - -## Description - -This library provides multiple implementations of system and application -hardening that conform to the standards of http://hardening.io/. - -Current implementations include: - - * OS - * SSH - * MySQL - * Apache - -## Requirements - -* Juju Charms - -## Usage - -1. Synchronise this library into your charm and add the harden() decorator - (from contrib.hardening.harden) to any functions or methods you want to use - to trigger hardening of your application/system. - -2. Add a config option called 'harden' to your charm config.yaml and set it to - a space-delimited list of hardening modules you want to run e.g. "os ssh" - -3. Override any config defaults (contrib.hardening.defaults) by adding a file - called hardening.yaml to your charm root containing the name(s) of the - modules whose settings you want override at root level and then any settings - with overrides e.g. - - os: - general: - desktop_enable: True - -4. Now just run your charm as usual and hardening will be applied each time the - hook runs. diff --git a/hooks/charmhelpers/contrib/hardening/__init__.py b/hooks/charmhelpers/contrib/hardening/__init__.py deleted file mode 100644 index 30a3e943..00000000 --- a/hooks/charmhelpers/contrib/hardening/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/hardening/apache/__init__.py b/hooks/charmhelpers/contrib/hardening/apache/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/hooks/charmhelpers/contrib/hardening/apache/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
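Step 1 of the README above translates to a one-line decorator on the charm's hook entry points. A minimal sketch, assuming the library has been synced into the charm under charmhelpers.contrib.hardening:

    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def config_changed():
        # hardening modules listed in the charm's 'harden' config option
        # (e.g. "os ssh") are applied before this hook logic runs
        pass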
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py b/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py deleted file mode 100644 index 3bc2ebd4..00000000 --- a/hooks/charmhelpers/contrib/hardening/apache/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.apache.checks import config - - -def run_apache_checks(): - log("Starting Apache hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("Apache hardening checks complete.", level=DEBUG) diff --git a/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/hooks/charmhelpers/contrib/hardening/apache/checks/config.py deleted file mode 100644 index e81a5f0b..00000000 --- a/hooks/charmhelpers/contrib/hardening/apache/checks/config.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -import subprocess - - -from charmhelpers.core.hookenv import ( - log, - INFO, -) -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - DirectoryPermissionAudit, - NoReadWriteForOther, - TemplatedFile, - DeletedFile -) -from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit -from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get Apache hardening config audits. 
- - :returns: dictionary of audits - """ - if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: - log("Apache server does not appear to be installed on this node - " - "skipping apache hardening", level=INFO) - return [] - - context = ApacheConfContext() - settings = utils.get_settings('apache') - audits = [ - FilePermissionAudit(paths=os.path.join( - settings['common']['apache_dir'], 'apache2.conf'), - user='root', group='root', mode=0o0640), - - TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'mods-available/alias.conf'), - context, - TEMPLATES_DIR, - mode=0o0640, - user='root', - service_actions=[{'service': 'apache2', - 'actions': ['restart']}]), - - TemplatedFile(os.path.join(settings['common']['apache_dir'], - 'conf-enabled/99-hardening.conf'), - context, - TEMPLATES_DIR, - mode=0o0640, - user='root', - service_actions=[{'service': 'apache2', - 'actions': ['restart']}]), - - DirectoryPermissionAudit(settings['common']['apache_dir'], - user='root', - group='root', - mode=0o0750), - - DisabledModuleAudit(settings['hardening']['modules_to_disable']), - - NoReadWriteForOther(settings['common']['apache_dir']), - - DeletedFile(['/var/www/html/index.html']) - ] - - return audits - - -class ApacheConfContext(object): - """Defines the set of key/value pairs to set in a apache config file. - - This context, when called, will return a dictionary containing the - key/value pairs of setting to specify in the - /etc/apache/conf-enabled/hardening.conf file. - """ - def __call__(self): - settings = utils.get_settings('apache') - ctxt = settings['hardening'] - - out = subprocess.check_output(['apache2', '-v']).decode('utf-8') - ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', - out).group(1) - ctxt['apache_icondir'] = '/usr/share/apache2/icons/' - return ctxt diff --git a/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf deleted file mode 100644 index 22b68041..00000000 --- a/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf +++ /dev/null @@ -1,32 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### - - - - # http://httpd.apache.org/docs/2.4/upgrading.html - {% if apache_version > '2.2' -%} - Require all granted - {% else -%} - Order Allow,Deny - Deny from all - {% endif %} - - - - - Options -Indexes -FollowSymLinks - AllowOverride None - - - - Options -Indexes -FollowSymLinks - AllowOverride None - - -TraceEnable {{ traceenable }} -ServerTokens {{ servertokens }} - -SSLHonorCipherOrder {{ honor_cipher_order }} -SSLCipherSuite {{ cipher_suite }} diff --git a/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py b/hooks/charmhelpers/contrib/hardening/apache/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf b/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf deleted file mode 100644 index e46a58a3..00000000 --- a/hooks/charmhelpers/contrib/hardening/apache/templates/alias.conf +++ /dev/null @@ -1,31 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. 
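The version probe in ApacheConfContext above is a single regex over `apache2 -v` output; a self-contained sketch using illustrative output:

    import re

    out = 'Server version: Apache/2.4.52 (Ubuntu)\nServer built:   2024-01-01'
    print(re.search(r'.+version: Apache/(.+?)\s.+', out).group(1))  # -> 2.4.52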
-############################################################################### - - # - # Aliases: Add here as many aliases as you need (with no limit). The format is - # Alias fakename realname - # - # Note that if you include a trailing / on fakename then the server will - # require it to be present in the URL. So "/icons" isn't aliased in this - # example, only "/icons/". If the fakename is slash-terminated, then the - # realname must also be slash terminated, and if the fakename omits the - # trailing slash, the realname must also omit it. - # - # We include the /icons/ alias for FancyIndexed directory listings. If - # you do not use FancyIndexing, you may comment this out. - # - Alias /icons/ "{{ apache_icondir }}/" - - - Options -Indexes -MultiViews -FollowSymLinks - AllowOverride None -{% if apache_version == '2.4' -%} - Require all granted -{% else -%} - Order allow,deny - Allow from all -{% endif %} - - diff --git a/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/hooks/charmhelpers/contrib/hardening/audits/__init__.py deleted file mode 100644 index 6dd5b05f..00000000 --- a/hooks/charmhelpers/contrib/hardening/audits/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class BaseAudit(object): # NO-QA - """Base class for hardening checks. - - The lifecycle of a hardening check is to first check to see if the system - is in compliance for the specified check. If it is not in compliance, the - check method will return a value which will be supplied to the. - """ - def __init__(self, *args, **kwargs): - self.unless = kwargs.get('unless', None) - super(BaseAudit, self).__init__() - - def ensure_compliance(self): - """Checks to see if the current hardening check is in compliance or - not. - - If the check that is performed is not in compliance, then an exception - should be raised. - """ - pass - - def _take_action(self): - """Determines whether to perform the action or not. - - Checks whether or not an action should be taken. This is determined by - the truthy value for the unless parameter. If unless is a callback - method, it will be invoked with no parameters in order to determine - whether or not the action should be taken. Otherwise, the truthy value - of the unless attribute will determine if the action should be - performed. - """ - # Do the action if there isn't an unless override. - if self.unless is None: - return True - - # Invoke the callback if there is one. - if hasattr(self.unless, '__call__'): - return not self.unless() - - return not self.unless diff --git a/hooks/charmhelpers/contrib/hardening/audits/apache.py b/hooks/charmhelpers/contrib/hardening/audits/apache.py deleted file mode 100644 index 31db8f62..00000000 --- a/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -import subprocess - -from charmhelpers.core.hookenv import ( - log, - INFO, - ERROR, -) - -from charmhelpers.contrib.hardening.audits import BaseAudit - - -class DisabledModuleAudit(BaseAudit): - """Audits Apache2 modules. - - Determines if the apache2 modules are enabled. If the modules are enabled - then they are removed in the ensure_compliance. - """ - def __init__(self, modules): - if modules is None: - self.modules = [] - elif isinstance(modules, str): - self.modules = [modules] - else: - self.modules = modules - - def ensure_compliance(self): - """Ensures that the modules are not loaded.""" - if not self.modules: - return - - try: - loaded_modules = self._get_loaded_modules() - non_compliant_modules = [] - for module in self.modules: - if module in loaded_modules: - log("Module '%s' is enabled but should not be." % - (module), level=INFO) - non_compliant_modules.append(module) - - if len(non_compliant_modules) == 0: - return - - for module in non_compliant_modules: - self._disable_module(module) - self._restart_apache() - except subprocess.CalledProcessError as e: - log('Error occurred auditing apache module compliance. ' - 'This may have been already reported. ' - 'Output is: %s' % e.output, level=ERROR) - - @staticmethod - def _get_loaded_modules(): - """Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') - modules = [] - for line in output.splitlines(): - # Each line of the enabled module output looks like: - # module_name (static|shared) - # Plus a header line at the top of the output which is stripped - # out by the regex. - matcher = re.search(r'^ (\S*)_module (\S*)', line) - if matcher: - modules.append(matcher.group(1)) - return modules - - @staticmethod - def _disable_module(module): - """Disables the specified module in Apache.""" - try: - subprocess.check_call(['a2dismod', module]) - except subprocess.CalledProcessError as e: - # Note: catch error here to allow the attempt of disabling - # multiple modules in one go rather than failing after the - # first module fails. - log('Error occurred disabling module %s. ' - 'Output is: %s' % (module, e.output), level=ERROR) - - @staticmethod - def _restart_apache(): - """Restarts the apache process""" - subprocess.check_output(['service', 'apache2', 'restart']) - - @staticmethod - def is_ssl_enabled(): - """Check if SSL module is enabled or not""" - return 'ssl' in DisabledModuleAudit._get_loaded_modules() diff --git a/hooks/charmhelpers/contrib/hardening/audits/apt.py b/hooks/charmhelpers/contrib/hardening/audits/apt.py deleted file mode 100644 index 1b22925b..00000000 --- a/hooks/charmhelpers/contrib/hardening/audits/apt.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
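A short sketch of driving the audit above; the module names mirror the modules_to_disable default used elsewhere in this library:

    # ensure_compliance() queries `apache2ctl -M`, disables any listed
    # module that is still enabled via a2dismod, then restarts apache2.
    audit = DisabledModuleAudit(['cgi', 'cgid'])
    audit.ensure_compliance()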
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.fetch import ( - apt_cache, - apt_purge -) -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, -) -from charmhelpers.contrib.hardening.audits import BaseAudit -from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg - - -class AptConfig(BaseAudit): - - def __init__(self, config, **kwargs): - self.config = config - - def verify_config(self): - apt_pkg.init() - for cfg in self.config: - value = apt_pkg.config.get(cfg['key'], cfg.get('default', '')) - if value and value != cfg['expected']: - log("APT config '%s' has unexpected value '%s' " - "(expected='%s')" % - (cfg['key'], value, cfg['expected']), level=WARNING) - - def ensure_compliance(self): - self.verify_config() - - -class RestrictedPackages(BaseAudit): - """Class used to audit restricted packages on the system.""" - - def __init__(self, pkgs, **kwargs): - super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): - self.pkgs = pkgs.split() - else: - self.pkgs = pkgs - - def ensure_compliance(self): - cache = apt_cache() - - for p in self.pkgs: - if p not in cache: - continue - - pkg = cache[p] - if not self.is_virtual_package(pkg): - if not pkg.current_ver: - log("Package '%s' is not installed." % pkg.name, - level=DEBUG) - continue - else: - log("Restricted package '%s' is installed" % pkg.name, - level=WARNING) - self.delete_package(cache, pkg) - else: - log("Checking restricted virtual package '%s' provides" % - pkg.name, level=DEBUG) - self.delete_package(cache, pkg) - - def delete_package(self, cache, pkg): - """Deletes the package from the system. - - Deletes the package form the system, properly handling virtual - packages. - - :param cache: the apt cache - :param pkg: the package to remove - """ - if self.is_virtual_package(pkg): - log("Package '%s' appears to be virtual - purging provides" % - pkg.name, level=DEBUG) - for _p in pkg.provides_list: - self.delete_package(cache, _p[2].parent_pkg) - elif not pkg.current_ver: - log("Package '%s' not installed" % pkg.name, level=DEBUG) - return - else: - log("Purging package '%s'" % pkg.name, level=DEBUG) - apt_purge(pkg.name) - - def is_virtual_package(self, pkg): - return (pkg.get('has_provides', False) and - not pkg.get('has_versions', False)) diff --git a/hooks/charmhelpers/contrib/hardening/audits/file.py b/hooks/charmhelpers/contrib/hardening/audits/file.py deleted file mode 100644 index 84cc2494..00000000 --- a/hooks/charmhelpers/contrib/hardening/audits/file.py +++ /dev/null @@ -1,549 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
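A hedged sketch of the two apt audits above in use; the APT option and package names here are illustrative only:

    # Log a warning when an APT option differs from the expected value.
    AptConfig([{'key': 'APT::Periodic::AutocleanInterval',
                'expected': '7'}]).ensure_compliance()

    # Purge packages that should not be present on a hardened node.
    RestrictedPackages(['telnet-server', 'rsh-server']).ensure_compliance()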
- -import grp -import os -import pwd -import re - -from subprocess import ( - CalledProcessError, - check_output, - check_call, -) -from traceback import format_exc -from stat import ( - S_ISGID, - S_ISUID -) - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core import unitdata -from charmhelpers.core.host import file_hash -from charmhelpers.contrib.hardening.audits import BaseAudit -from charmhelpers.contrib.hardening.templating import ( - get_template_path, - render_and_write, -) -from charmhelpers.contrib.hardening import utils - - -class BaseFileAudit(BaseAudit): - """Base class for file audits. - - Provides API stubs for the compliance check flow that must be implemented - by any class that inherits from this one. - """ - - def __init__(self, paths, always_comply=False, *args, **kwargs): - """ - :param paths: string path or list of paths of files we want to apply - compliance checks and criteria to. - :param always_comply: if true, compliance criteria are always applied; - otherwise compliance is skipped for non-existent - paths. - """ - super(BaseFileAudit, self).__init__(*args, **kwargs) - self.always_comply = always_comply - if isinstance(paths, str) or not hasattr(paths, '__iter__'): - self.paths = [paths] - else: - self.paths = paths - - def ensure_compliance(self): - """Ensure that all registered files comply with the registered criteria. - """ - for p in self.paths: - if os.path.exists(p): - if self.is_compliant(p): - continue - - log('File %s is not in compliance.' % p, level=INFO) - else: - if not self.always_comply: - log("Non-existent path '%s' - skipping compliance check" - % (p), level=INFO) - continue - - if self._take_action(): - log("Applying compliance criteria to '%s'" % (p), level=INFO) - self.comply(p) - - def is_compliant(self, path): - """Audits the path to see if it is in compliance. - - :param path: the path to the file that should be checked. - """ - raise NotImplementedError - - def comply(self, path): - """Enforces the compliance of a path. - - :param path: the path to the file that should be enforced. - """ - raise NotImplementedError - - @classmethod - def _get_stat(cls, path): - """Returns the POSIX st_stat information for the specified file path. - - :param path: the path to get the st_stat information for. - :returns: an os.stat_result object for the path; os.stat raises - OSError if the path doesn't exist. - """ - return os.stat(path) - - -class FilePermissionAudit(BaseFileAudit): - """Implements an audit for file permissions and ownership for a user. - - This class implements functionality that ensures that a specific user/group - will own the file(s) specified and that the permissions specified are - applied properly to the file. - """ - def __init__(self, paths, user, group=None, mode=0o600, **kwargs): - self.user = user - self.group = group - self.mode = mode - super(FilePermissionAudit, self).__init__(paths, **kwargs) - - @property - def user(self): - return self._user - - @user.setter - def user(self, name): - try: - user = pwd.getpwnam(name) - except KeyError: - log('Unknown user %s' % name, level=ERROR) - user = None - self._user = user - - @property - def group(self): - return self._group - - @group.setter - def group(self, name): - try: - group = None - if name: - group = grp.getgrnam(name) - else: - group = grp.getgrgid(self.user.pw_gid) - except KeyError: - log('Unknown group %s' % name, level=ERROR) - self._group = group - - def is_compliant(self, path): - """Checks if the path is in compliance.
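As an example, the audit above can pin the ownership and mode of a single file; the path and mode here are illustrative:

    audit = FilePermissionAudit('/etc/ssh/sshd_config',
                                user='root', group='root', mode=0o600)
    audit.ensure_compliance()  # re-chowns/chmods the file if it has drifted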
- - Used to determine if the path specified meets the necessary - requirements to be in compliance with the check itself. - - :param path: the file path to check - :returns: True if the path is compliant, False otherwise. - """ - stat = self._get_stat(path) - user = self.user - group = self.group - - compliant = True - if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: - log('File %s is not owned by %s:%s.' % (path, user.pw_name, - group.gr_name), - level=INFO) - compliant = False - - # POSIX refers to the st_mode bits as corresponding to both the - # file type and file permission bits, where the least significant 12 - # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the - # file permission bits (8-0) - perms = stat.st_mode & 0o7777 - if perms != self.mode: - log('File %s has incorrect permissions, currently set to %s' % - (path, oct(stat.st_mode & 0o7777)), level=INFO) - compliant = False - - return compliant - - def comply(self, path): - """Issues a chown and chmod to the file paths specified.""" - utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, - self.mode) - - -class DirectoryPermissionAudit(FilePermissionAudit): - """Performs a permission check for the specified directory path.""" - - def __init__(self, paths, user, group=None, mode=0o600, - recursive=True, **kwargs): - super(DirectoryPermissionAudit, self).__init__(paths, user, group, - mode, **kwargs) - self.recursive = recursive - - def is_compliant(self, path): - """Checks if the directory is compliant. - - Used to determine if the path specified and all of its children - directories are in compliance with the check itself. - - :param path: the directory path to check - :returns: True if the directory tree is compliant, otherwise False. - """ - if not os.path.isdir(path): - log('Path specified %s is not a directory.' % path, level=ERROR) - raise ValueError("%s is not a directory." % path) - - if not self.recursive: - return super(DirectoryPermissionAudit, self).is_compliant(path) - - compliant = True - for root, dirs, _ in os.walk(path): - if len(dirs) > 0: - continue - - if not super(DirectoryPermissionAudit, self).is_compliant(root): - compliant = False - continue - - return compliant - - def comply(self, path): - for root, dirs, _ in os.walk(path): - if len(dirs) > 0: - super(DirectoryPermissionAudit, self).comply(root) - - -class ReadOnly(BaseFileAudit): - """Audits that files and folders are read only.""" - def __init__(self, paths, *args, **kwargs): - super(ReadOnly, self).__init__(paths=paths, *args, **kwargs) - - def is_compliant(self, path): - try: - output = check_output(['find', path, '-perm', '-go+w', - '-type', 'f']).strip() - - # The find above will find any files which have permission sets - # which allow too broad of write access. As such, the path is - # compliant if there is no output. - if output: - return False - - return True - except CalledProcessError as e: - log('Error occurred while finding writable files for %s. ' - 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc()), level=ERROR) - return False - - def comply(self, path): - try: - check_output(['chmod', 'go-w', '-R', path]) - except CalledProcessError as e: - log('Error occurred removing writeable permissions for %s.
' - 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc()), level=ERROR) - - -class NoReadWriteForOther(BaseFileAudit): - """Ensures that the files found under the base path are not readable or - writable by anyone other than the owner or the group. - """ - def __init__(self, paths): - super(NoReadWriteForOther, self).__init__(paths) - - def is_compliant(self, path): - try: - cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o', - '-perm', '-o+w', '-type', 'f'] - output = check_output(cmd).strip() - - # The find above will find any files which have read or - # write permissions for other, meaning there is too broad of access - # to read/write the file. As such, the path is compliant if there's - # no output. - if output: - return False - - return True - except CalledProcessError as e: - log('Error occurred while finding files which are readable or ' - 'writable to the world in %s. ' - 'Command output is: %s.' % (path, e.output), level=ERROR) - - def comply(self, path): - try: - check_output(['chmod', '-R', 'o-rw', path]) - except CalledProcessError as e: - log('Error occurred attempting to change modes of files under ' - 'path %s. Output of command is: %s' % (path, e.output)) - - -class NoSUIDSGIDAudit(BaseFileAudit): - """Audits that specified files do not have SUID/SGID bits set.""" - def __init__(self, paths, *args, **kwargs): - super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs) - - def is_compliant(self, path): - stat = self._get_stat(path) - if (stat.st_mode & (S_ISGID | S_ISUID)) != 0: - return False - - return True - - def comply(self, path): - try: - log('Removing suid/sgid from %s.' % path, level=DEBUG) - check_output(['chmod', '-s', path]) - except CalledProcessError as e: - log('Error occurred removing suid/sgid from %s. ' - 'Error information is: command %s failed with returncode ' - '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, - format_exc()), level=ERROR) - - -class TemplatedFile(BaseFileAudit): - """The TemplatedFile audit checks the contents of a templated file. - - This audit renders a file from a template, sets the appropriate file - permissions, then generates a hashsum with which to check whether the - content has changed. - """ - def __init__(self, path, context, template_dir, mode, user='root', - group='root', service_actions=None, **kwargs): - self.context = context - self.user = user - self.group = group - self.mode = mode - self.template_dir = template_dir - self.service_actions = service_actions - super(TemplatedFile, self).__init__(paths=path, always_comply=True, - **kwargs) - - def is_compliant(self, path): - """Determines if the templated file is compliant. - - A templated file is only compliant if it has not changed (as - determined by its sha256 hashsum) AND its file permissions are set - appropriately. - - :param path: the path to check compliance.
- """ - same_templates = self.templates_match(path) - same_content = self.contents_match(path) - same_permissions = self.permissions_match(path) - - if same_content and same_permissions and same_templates: - return True - - return False - - def run_service_actions(self): - """Run any actions on services requested.""" - if not self.service_actions: - return - - for svc_action in self.service_actions: - name = svc_action['service'] - actions = svc_action['actions'] - log("Running service '%s' actions '%s'" % (name, actions), - level=DEBUG) - for action in actions: - cmd = ['service', name, action] - try: - check_call(cmd) - except CalledProcessError as exc: - log("Service name='%s' action='%s' failed - %s" % - (name, action, exc), level=WARNING) - - def comply(self, path): - """Ensures the contents and the permissions of the file. - - :param path: the path to correct - """ - dirname = os.path.dirname(path) - if not os.path.exists(dirname): - os.makedirs(dirname) - - self.pre_write() - render_and_write(self.template_dir, path, self.context()) - utils.ensure_permissions(path, self.user, self.group, self.mode) - self.run_service_actions() - self.save_checksum(path) - self.post_write() - - def pre_write(self): - """Invoked prior to writing the template.""" - pass - - def post_write(self): - """Invoked after writing the template.""" - pass - - def templates_match(self, path): - """Determines if the template files are the same. - - The template file equality is determined by the hashsum of the - template files themselves. If there is no hashsum, then the content - cannot be sure to be the same so treat it as if they changed. - Otherwise, return whether or not the hashsums are the same. - - :param path: the path to check - :returns: boolean - """ - template_path = get_template_path(self.template_dir, path) - key = 'hardening:template:%s' % template_path - template_checksum = file_hash(template_path) - kv = unitdata.kv() - stored_tmplt_checksum = kv.get(key) - if not stored_tmplt_checksum: - kv.set(key, template_checksum) - kv.flush() - log('Saved template checksum for %s.' % template_path, - level=DEBUG) - # Since we don't have a template checksum, then assume it doesn't - # match and return that the template is different. - return False - elif stored_tmplt_checksum != template_checksum: - kv.set(key, template_checksum) - kv.flush() - log('Updated template checksum for %s.' % template_path, - level=DEBUG) - return False - - # Here the template hasn't changed based upon the calculated - # checksum of the template and what was previously stored. - return True - - def contents_match(self, path): - """Determines if the file content is the same. - - This is determined by comparing hashsum of the file contents and - the saved hashsum. If there is no hashsum, then the content cannot - be sure to be the same so treat them as if they are not the same. - Otherwise, return True if the hashsums are the same, False if they - are not the same. - - :param path: the file to check. - """ - checksum = file_hash(path) - - kv = unitdata.kv() - stored_checksum = kv.get('hardening:%s' % path) - if not stored_checksum: - # If the checksum hasn't been generated, return False to ensure - # the file is written and the checksum stored. - log('Checksum for %s has not been calculated.' % path, level=DEBUG) - return False - elif stored_checksum != checksum: - log('Checksum mismatch for %s.' 
% path, level=DEBUG) - return False - - return True - - def permissions_match(self, path): - """Determines if the file owner and permissions match. - - :param path: the path to check. - """ - audit = FilePermissionAudit(path, self.user, self.group, self.mode) - return audit.is_compliant(path) - - def save_checksum(self, path): - """Calculates and saves the checksum for the path specified. - - :param path: the path of the file to save the checksum. - """ - checksum = file_hash(path) - kv = unitdata.kv() - kv.set('hardening:%s' % path, checksum) - kv.flush() - - -class DeletedFile(BaseFileAudit): - """Audit to ensure that a file is deleted.""" - def __init__(self, paths): - super(DeletedFile, self).__init__(paths) - - def is_compliant(self, path): - return not os.path.exists(path) - - def comply(self, path): - os.remove(path) - - -class FileContentAudit(BaseFileAudit): - """Audit the contents of a file.""" - def __init__(self, paths, cases, **kwargs): - # Cases we expect to pass - self.pass_cases = cases.get('pass', []) - # Cases we expect to fail - self.fail_cases = cases.get('fail', []) - super(FileContentAudit, self).__init__(paths, **kwargs) - - def is_compliant(self, path): - """ - Given a set of content matching cases, i.e. regexes expected to match - ('pass') or not to match ('fail'), check that all cases match as - expected against the contents of the file. - - :param path: Path of file to check. - :returns: Boolean value representing whether or not all cases are - found to be compliant. - """ - log("Auditing contents of file '%s'" % (path), level=DEBUG) - with open(path, 'r') as fd: - contents = fd.read() - - matches = 0 - for pattern in self.pass_cases: - key = re.compile(pattern, flags=re.MULTILINE) - results = re.search(key, contents) - if results: - matches += 1 - else: - log("Pattern '%s' was expected to pass but instead it failed" - % (pattern), level=WARNING) - - for pattern in self.fail_cases: - key = re.compile(pattern, flags=re.MULTILINE) - results = re.search(key, contents) - if not results: - matches += 1 - else: - log("Pattern '%s' was expected to fail but instead it passed" - % (pattern), level=WARNING) - - total = len(self.pass_cases) + len(self.fail_cases) - log("Checked %s cases and %s passed" % (total, matches), level=DEBUG) - return matches == total - - def comply(self, *args, **kwargs): - """NOOP since we just issue warnings. This is to avoid the - NotImplementedError. - """ - log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/hooks/charmhelpers/contrib/hardening/defaults/__init__.py b/hooks/charmhelpers/contrib/hardening/defaults/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml deleted file mode 100644 index 0f940d4c..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# NOTE: this file contains the default configuration for the 'apache' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'apache' as the root key followed by any of the following with new -# values.
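A sketch of the pass/fail case structure FileContentAudit consumes; the file and patterns are illustrative:

    audit = FileContentAudit('/etc/ssh/sshd_config', {
        'pass': [r'^UsePAM yes'],           # expected to match the contents
        'fail': [r'^PermitRootLogin yes'],  # expected not to match
    })
    audit.ensure_compliance()  # warns only; comply() is deliberately a no-op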
- -common: - apache_dir: '/etc/apache2' - -hardening: - traceenable: 'off' - allowed_http_methods: "GET POST" - modules_to_disable: [ cgi, cgid ] - servertokens: 'Prod' - honor_cipher_order: 'on' - cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema deleted file mode 100644 index c112137c..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema +++ /dev/null @@ -1,12 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. -common: - apache_dir: - traceenable: - -hardening: - allowed_http_methods: - modules_to_disable: - servertokens: - honor_cipher_order: - cipher_suite: diff --git a/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml b/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml deleted file mode 100644 index 682d22bf..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# NOTE: this file contains the default configuration for the 'mysql' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'mysql' as the root key followed by any of the following with new -# values. - -hardening: - mysql-conf: /etc/mysql/my.cnf - hardening-conf: /etc/mysql/conf.d/hardening.cnf - -security: - # @see http://www.symantec.com/connect/articles/securing-mysql-step-step - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot - chroot: None - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create - safe-user-create: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth - secure-auth: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links - skip-symbolic-links: 1 - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database - skip-show-database: True - - # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile - local-infile: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs - allow-suspicious-udfs: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges - automatic-sp-privileges: 0 - - # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv - secure-file-priv: /tmp diff --git a/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema deleted file mode 100644 index 2edf325c..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema +++ /dev/null @@ -1,15 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. 
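As the NOTE headers in these defaults files say, a charm overrides them by shipping a hardening.yaml in its root directory, and the .schema files exist so those overrides can be validated against the known keys. A hedged sketch of the round trip via the get_settings() helper used throughout these checks (the override value is invented):

    # hardening.yaml in the charm root might contain, e.g.:
    #
    #   mysql:
    #     security:
    #       secure-file-priv: /var/tmp
    #
    from charmhelpers.contrib.hardening import utils

    settings = utils.get_settings('mysql')  # defaults merged with overrides
    print(settings['security']['secure-file-priv'])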
-hardening: - mysql-conf: - hardening-conf: -security: - chroot: - safe-user-create: - secure-auth: - skip-symbolic-links: - skip-show-database: - local-infile: - allow-suspicious-udfs: - automatic-sp-privileges: - secure-file-priv: diff --git a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml deleted file mode 100644 index 9a8627b5..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# NOTE: this file contains the default configuration for the 'os' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'os' as the root key followed by any of the following with new -# values. - -general: - desktop_enable: False # (type:boolean) - -environment: - extra_user_paths: [] - umask: 027 - root_path: / - -auth: - pw_max_age: 60 - # discourage password cycling - pw_min_age: 7 - retries: 5 - lockout_time: 600 - timeout: 60 - allow_homeless: False # (type:boolean) - pam_passwdqc_enable: True # (type:boolean) - pam_passwdqc_options: 'min=disabled,disabled,16,12,8' - root_ttys: - console - tty1 - tty2 - tty3 - tty4 - tty5 - tty6 - uid_min: 1000 - gid_min: 1000 - sys_uid_min: 100 - sys_uid_max: 999 - sys_gid_min: 100 - sys_gid_max: 999 - chfn_restrict: - -security: - users_allow: [] - suid_sgid_enforce: True # (type:boolean) - # user-defined blacklist and whitelist - suid_sgid_blacklist: [] - suid_sgid_whitelist: [] - # if this is True, remove any suid/sgid bits from files that were not in the whitelist - suid_sgid_dry_run_on_unknown: False # (type:boolean) - suid_sgid_remove_from_unknown: False # (type:boolean) - # remove packages with known issues - packages_clean: True # (type:boolean) - packages_list: - xinetd - inetd - ypserv - telnet-server - rsh-server - rsync - kernel_enable_module_loading: True # (type:boolean) - kernel_enable_core_dump: False # (type:boolean) - ssh_tmout: 300 - -sysctl: - kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128 - kernel_enable_sysrq: False # (type:boolean) - forwarding: False # (type:boolean) - ipv6_enable: False # (type:boolean) - arp_restricted: True # (type:boolean) diff --git a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema deleted file mode 100644 index cc3b9c20..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema +++ /dev/null @@ -1,43 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. 
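The os.yaml defaults above are what the host checks later in this patch consume; a small sketch of reading them (key names are taken from the defaults file, the values shown are the documented defaults):

    from charmhelpers.contrib.hardening import utils

    settings = utils.get_settings('os')
    settings['auth']['pw_max_age']              # 60 unless overridden
    settings['security']['suid_sgid_enforce']   # True unless overridden
    settings['sysctl']['kernel_enable_sysrq']   # False unless overridden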
-general: - desktop_enable: -environment: - extra_user_paths: - umask: - root_path: -auth: - pw_max_age: - pw_min_age: - retries: - lockout_time: - timeout: - allow_homeless: - pam_passwdqc_enable: - pam_passwdqc_options: - root_ttys: - uid_min: - gid_min: - sys_uid_min: - sys_uid_max: - sys_gid_min: - sys_gid_max: - chfn_restrict: -security: - users_allow: - suid_sgid_enforce: - suid_sgid_blacklist: - suid_sgid_whitelist: - suid_sgid_dry_run_on_unknown: - suid_sgid_remove_from_unknown: - packages_clean: - packages_list: - kernel_enable_module_loading: - kernel_enable_core_dump: - ssh_tmout: -sysctl: - kernel_secure_sysrq: - kernel_enable_sysrq: - forwarding: - ipv6_enable: - arp_restricted: diff --git a/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml b/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml deleted file mode 100644 index cd529bca..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# NOTE: this file contains the default configuration for the 'ssh' hardening -# code. If you want to override any settings you must add them to a file -# called hardening.yaml in the root directory of your charm using the -# name 'ssh' as the root key followed by any of the following with new -# values. - -common: - service_name: 'ssh' - network_ipv6_enable: False # (type:boolean) - ports: [22] - remote_hosts: [] - -client: - package: 'openssh-client' - cbc_required: False # (type:boolean) - weak_hmac: False # (type:boolean) - weak_kex: False # (type:boolean) - roaming: False - password_authentication: 'no' - -server: - host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key', - '/etc/ssh/ssh_host_ecdsa_key'] - cbc_required: False # (type:boolean) - weak_hmac: False # (type:boolean) - weak_kex: False # (type:boolean) - allow_root_with_key: False # (type:boolean) - allow_tcp_forwarding: 'no' - allow_agent_forwarding: 'no' - allow_x11_forwarding: 'no' - use_privilege_separation: 'sandbox' - listen_to: ['0.0.0.0'] - use_pam: 'no' - package: 'openssh-server' - password_authentication: 'no' - alive_interval: '600' - alive_count: '3' - sftp_enable: False # (type:boolean) - sftp_group: 'sftponly' - sftp_chroot: '/home/%u' - deny_users: [] - allow_users: [] - deny_groups: [] - allow_groups: [] - print_motd: 'no' - print_last_log: 'no' - use_dns: 'no' - max_auth_tries: 2 - max_sessions: 10 diff --git a/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema deleted file mode 100644 index d05e054b..00000000 --- a/hooks/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema +++ /dev/null @@ -1,42 +0,0 @@ -# NOTE: this schema must contain all valid keys from it's associated defaults -# file. It is used to validate user-provided overrides. 
-common: - service_name: - network_ipv6_enable: - ports: - remote_hosts: -client: - package: - cbc_required: - weak_hmac: - weak_kex: - roaming: - password_authentication: -server: - host_key_files: - cbc_required: - weak_hmac: - weak_kex: - allow_root_with_key: - allow_tcp_forwarding: - allow_agent_forwarding: - allow_x11_forwarding: - use_privilege_separation: - listen_to: - use_pam: - package: - password_authentication: - alive_interval: - alive_count: - sftp_enable: - sftp_group: - sftp_chroot: - deny_users: - allow_users: - deny_groups: - allow_groups: - print_motd: - print_last_log: - use_dns: - max_auth_tries: - max_sessions: diff --git a/hooks/charmhelpers/contrib/hardening/harden.py b/hooks/charmhelpers/contrib/hardening/harden.py deleted file mode 100644 index 45ad076d..00000000 --- a/hooks/charmhelpers/contrib/hardening/harden.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict - -from charmhelpers.core.hookenv import ( - config, - log, - DEBUG, - WARNING, -) -from charmhelpers.contrib.hardening.host.checks import run_os_checks -from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks -from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks -from charmhelpers.contrib.hardening.apache.checks import run_apache_checks - -_DISABLE_HARDENING_FOR_UNIT_TEST = False - - -def harden(overrides=None): - """Hardening decorator. - - This is the main entry point for running the hardening stack. In order to - run modules of the stack you must add this decorator to charm hook(s) and - ensure that your charm config.yaml contains the 'harden' option set to - one or more of the supported modules. Setting these will cause the - corresponding hardening code to be run when the hook fires. - - This decorator can and should be applied to more than one hook or function - such that hardening modules are called multiple times. This is because - subsequent calls will perform auditing checks that will report any changes - to resources hardened by the first run (and possibly perform compliance - actions as a result of any detected infractions). - - :param overrides: Optional list of stack modules used to override those - provided with 'harden' config. - :returns: Returns value returned by decorated function once executed. - """ - if overrides is None: - overrides = [] - - def _harden_inner1(f): - _logged = False - - def _harden_inner2(*args, **kwargs): - # knock out hardening via a config var; normally it won't get - # disabled. 
- nonlocal _logged - if _DISABLE_HARDENING_FOR_UNIT_TEST: - return f(*args, **kwargs) - if not _logged: - log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged = True - RUN_CATALOG = OrderedDict([('os', run_os_checks), - ('ssh', run_ssh_checks), - ('mysql', run_mysql_checks), - ('apache', run_apache_checks)]) - - enabled = overrides[:] or (config("harden") or "").split() - if enabled: - modules_to_run = [] - # modules will always be performed in the following order - for module, func in RUN_CATALOG.items(): - if module in enabled: - enabled.remove(module) - modules_to_run.append(func) - - if enabled: - log("Unknown hardening modules '%s' - ignoring" % - (', '.join(enabled)), level=WARNING) - - for hardener in modules_to_run: - log("Executing hardening module '%s'" % - (hardener.__name__), level=DEBUG) - hardener() - else: - log("No hardening applied to '%s'" % (f.__name__), level=DEBUG) - - return f(*args, **kwargs) - return _harden_inner2 - - return _harden_inner1 diff --git a/hooks/charmhelpers/contrib/hardening/host/__init__.py b/hooks/charmhelpers/contrib/hardening/host/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py b/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py deleted file mode 100644 index 0e7e409f..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
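For context, a hedged sketch of how the harden decorator above is meant to be applied in a charm's hook code; the hook names are illustrative, and the 'harden' config option (e.g. harden: "os ssh") selects which modules run:

    from charmhelpers.contrib.hardening.harden import harden

    @harden()
    def config_changed():
        # enabled hardening modules run first, then the hook body
        pass

    @harden(overrides=['os'])
    def upgrade_charm():
        # bypasses the 'harden' config option and always runs the os checks
        pass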
- -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.host.checks import ( - apt, - limits, - login, - minimize_access, - pam, - profile, - securetty, - suid_sgid, - sysctl -) - - -def run_os_checks(): - log("Starting OS hardening checks.", level=DEBUG) - checks = apt.get_audits() - checks.extend(limits.get_audits()) - checks.extend(login.get_audits()) - checks.extend(minimize_access.get_audits()) - checks.extend(pam.get_audits()) - checks.extend(profile.get_audits()) - checks.extend(securetty.get_audits()) - checks.extend(suid_sgid.get_audits()) - checks.extend(sysctl.get_audits()) - - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("OS hardening checks complete.", level=DEBUG) diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/apt.py b/hooks/charmhelpers/contrib/hardening/host/checks/apt.py deleted file mode 100644 index 7ce41b00..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/apt.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.utils import get_settings -from charmhelpers.contrib.hardening.audits.apt import ( - AptConfig, - RestrictedPackages, -) - - -def get_audits(): - """Get OS hardening apt audits. - - :returns: dictionary of audits - """ - audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', - 'expected': 'false'}])] - - settings = get_settings('os') - clean_packages = settings['security']['packages_clean'] - if clean_packages: - security_packages = settings['security']['packages_list'] - if security_packages: - audits.append(RestrictedPackages(security_packages)) - - return audits diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/limits.py b/hooks/charmhelpers/contrib/hardening/host/checks/limits.py deleted file mode 100644 index e94f5ebe..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/limits.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import ( - DirectoryPermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening security limits audits. 
- - :returns: dictionary of audits - """ - audits = [] - settings = utils.get_settings('os') - - # Ensure that the /etc/security/limits.d directory is only writable - # by the root user, but others can execute and read. - audits.append(DirectoryPermissionAudit('/etc/security/limits.d', - user='root', group='root', - mode=0o755)) - - # If core dumps are not enabled, then don't allow core dumps to be - # created as they may contain sensitive information. - if not settings['security']['kernel_enable_core_dump']: - audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', - SecurityLimitsContext(), - template_dir=TEMPLATES_DIR, - user='root', group='root', mode=0o0440)) - return audits - - -class SecurityLimitsContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'disable_core_dump': - not settings['security']['kernel_enable_core_dump']} - return ctxt diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/login.py b/hooks/charmhelpers/contrib/hardening/host/checks/login.py deleted file mode 100644 index fd500c8b..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/login.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import TemplatedFile -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening login.defs audits. - - :returns: dictionary of audits - """ - audits = [TemplatedFile('/etc/login.defs', LoginContext(), - template_dir=TEMPLATES_DIR, - user='root', group='root', mode=0o0444)] - return audits - - -class LoginContext(object): - - def __call__(self): - settings = utils.get_settings('os') - - # Octal numbers in yaml end up being turned into decimal, - # so check if the umask is entered as a string (e.g. '027') - # or as an octal umask as we know it (e.g. 002). If its not - # a string assume it to be octal and turn it into an octal - # string. 
- umask = settings['environment']['umask'] - if not isinstance(umask, str): - umask = '%s' % oct(umask) - - ctxt = { - 'additional_user_paths': - settings['environment']['extra_user_paths'], - 'umask': umask, - 'pwd_max_age': settings['auth']['pw_max_age'], - 'pwd_min_age': settings['auth']['pw_min_age'], - 'uid_min': settings['auth']['uid_min'], - 'sys_uid_min': settings['auth']['sys_uid_min'], - 'sys_uid_max': settings['auth']['sys_uid_max'], - 'gid_min': settings['auth']['gid_min'], - 'sys_gid_min': settings['auth']['sys_gid_min'], - 'sys_gid_max': settings['auth']['sys_gid_max'], - 'login_retries': settings['auth']['retries'], - 'login_timeout': settings['auth']['timeout'], - 'chfn_restrict': settings['auth']['chfn_restrict'], - 'allow_login_without_home': settings['auth']['allow_homeless'] - } - - return ctxt diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py deleted file mode 100644 index 6e64be00..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/minimize_access.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - ReadOnly, -) -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening access audits. - - :returns: dictionary of audits - """ - audits = [] - settings = utils.get_settings('os') - - # Remove write permissions from $PATH folders for all regular users. - # This prevents changing system-wide commands from normal users. - path_folders = {'/usr/local/sbin', - '/usr/local/bin', - '/usr/sbin', - '/usr/bin', - '/bin'} - extra_user_paths = settings['environment']['extra_user_paths'] - path_folders.update(extra_user_paths) - audits.append(ReadOnly(path_folders)) - - # Only allow the root user to have access to the shadow file. - audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) - - if 'change_user' not in settings['security']['users_allow']: - # su should only be accessible to user and group root, unless it is - # expressly defined to allow users to change to root via the - # security_users_allow config option. - audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) - - return audits diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/pam.py b/hooks/charmhelpers/contrib/hardening/host/checks/pam.py deleted file mode 100644 index 9b38d5f0..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/pam.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
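Returning to the umask normalisation in LoginContext above: a YAML loader following the 1.1 spec (such as PyYAML) reads a bare 027 as the octal integer 23, so the context stringifies any non-string value with oct() before templating. A standalone toy illustration:

    umask = 0o27                    # what YAML '027' typically loads as (23)
    if not isinstance(umask, str):
        umask = '%s' % oct(umask)   # '027' on Python 2, '0o27' on Python 3
    print(umask)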
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from subprocess import ( - check_output, - CalledProcessError, -) - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) -from charmhelpers.fetch import ( - apt_install, - apt_purge, - apt_update, -) -from charmhelpers.contrib.hardening.audits.file import ( - TemplatedFile, - DeletedFile, -) -from charmhelpers.contrib.hardening import utils -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR - - -def get_audits(): - """Get OS hardening PAM authentication audits. - - :returns: dictionary of audits - """ - audits = [] - - settings = utils.get_settings('os') - - if settings['auth']['pam_passwdqc_enable']: - audits.append(PasswdqcPAM('/etc/passwdqc.conf')) - - if settings['auth']['retries']: - audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) - else: - audits.append(DeletedFile('/usr/share/pam-configs/tally2')) - - return audits - - -class PasswdqcPAMContext(object): - - def __call__(self): - ctxt = {} - settings = utils.get_settings('os') - - ctxt['auth_pam_passwdqc_options'] = \ - settings['auth']['pam_passwdqc_options'] - - return ctxt - - -class PasswdqcPAM(TemplatedFile): - """The PAM Audit verifies the linux PAM settings.""" - def __init__(self, path): - super(PasswdqcPAM, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=PasswdqcPAMContext(), - user='root', - group='root', - mode=0o0640) - - def pre_write(self): - # Always remove? - for pkg in ['libpam-ccreds', 'libpam-cracklib']: - log("Purging package '%s'" % pkg, level=DEBUG), - apt_purge(pkg) - - apt_update(fatal=True) - for pkg in ['libpam-passwdqc']: - log("Installing package '%s'" % pkg, level=DEBUG), - apt_install(pkg) - - def post_write(self): - """Updates the PAM configuration after the file has been written""" - try: - check_output(['pam-auth-update', '--package']) - except CalledProcessError as e: - log('Error calling pam-auth-update: %s' % e, level=ERROR) - - -class Tally2PAMContext(object): - - def __call__(self): - ctxt = {} - settings = utils.get_settings('os') - - ctxt['auth_lockout_time'] = settings['auth']['lockout_time'] - ctxt['auth_retries'] = settings['auth']['retries'] - - return ctxt - - -class Tally2PAM(TemplatedFile): - """The PAM Audit verifies the linux PAM settings.""" - def __init__(self, path): - super(Tally2PAM, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=Tally2PAMContext(), - user='root', - group='root', - mode=0o0640) - - def pre_write(self): - # Always remove? - apt_purge('libpam-ccreds') - apt_update(fatal=True) - apt_install('libpam-modules') - - def post_write(self): - """Updates the PAM configuration after the file has been written""" - try: - check_output(['pam-auth-update', '--package']) - except CalledProcessError as e: - log('Error calling pam-auth-update: %s' % e, level=ERROR) diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/hooks/charmhelpers/contrib/hardening/host/checks/profile.py deleted file mode 100644 index 2727428d..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/profile.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2016 Canonical Limited. 
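The PasswdqcPAM and Tally2PAM audits above illustrate the TemplatedFile extension points: pre_write() prepares the system before the template is rendered and post_write() refreshes whatever consumes the file. A minimal sketch of the same pattern, with the path, context and package names invented:

    from charmhelpers.contrib.hardening.audits.file import TemplatedFile
    from charmhelpers.fetch import apt_install

    class ExamplePAM(TemplatedFile):
        def __init__(self, path):
            super(ExamplePAM, self).__init__(
                path=path, template_dir='templates',
                context=lambda: {'retries': 3},  # called as self.context()
                user='root', group='root', mode=0o0640)

        def pre_write(self):
            apt_install('libpam-modules')  # ensure the module is present

        def post_write(self):
            pass  # e.g. re-run pam-auth-update here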
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import TemplatedFile -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening profile audits. - - :returns: dictionary of audits - """ - audits = [] - - settings = utils.get_settings('os') - # If core dumps are not enabled, then don't allow core dumps to be - # created as they may contain sensitive information. - if not settings['security']['kernel_enable_core_dump']: - audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', - ProfileContext(), - template_dir=TEMPLATES_DIR, - mode=0o0755, user='root', group='root')) - if settings['security']['ssh_tmout']: - audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', - ProfileContext(), - template_dir=TEMPLATES_DIR, - mode=0o0644, user='root', group='root')) - return audits - - -class ProfileContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'ssh_tmout': - settings['security']['ssh_tmout']} - return ctxt diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py b/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py deleted file mode 100644 index 34cd0217..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/securetty.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.contrib.hardening.audits.file import TemplatedFile -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get OS hardening Secure TTY audits. - - :returns: dictionary of audits - """ - audits = [] - audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), - template_dir=TEMPLATES_DIR, - mode=0o0400, user='root', group='root')) - return audits - - -class SecureTTYContext(object): - - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'ttys': settings['auth']['root_ttys']} - return ctxt diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py deleted file mode 100644 index bcbe3fde..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/suid_sgid.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2016 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess - -from charmhelpers.core.hookenv import ( - log, - INFO, -) -from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit -from charmhelpers.contrib.hardening import utils - - -BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh', - '/usr/libexec/openssh/ssh-keysign', - '/usr/lib/openssh/ssh-keysign', - '/sbin/netreport', - '/usr/sbin/usernetctl', - '/usr/sbin/userisdnctl', - '/usr/sbin/pppd', - '/usr/bin/lockfile', - '/usr/bin/mail-lock', - '/usr/bin/mail-unlock', - '/usr/bin/mail-touchlock', - '/usr/bin/dotlockfile', - '/usr/bin/arping', - '/usr/sbin/uuidd', - '/usr/bin/mtr', - '/usr/lib/evolution/camel-lock-helper-1.2', - '/usr/lib/pt_chown', - '/usr/lib/eject/dmcrypt-get-device', - '/usr/lib/mc/cons.saver'] - -WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount', - '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at', - '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp', - '/usr/bin/passwd', '/usr/bin/ssh-agent', - '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev', - '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry', - '/bin/ping6', '/usr/bin/traceroute6.iputils', - '/sbin/mount.nfs', '/sbin/umount.nfs', - '/sbin/mount.nfs4', '/sbin/umount.nfs4', - '/usr/bin/crontab', - '/usr/bin/wall', '/usr/bin/write', - '/usr/bin/screen', - '/usr/bin/mlocate', - '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh', - '/bin/fusermount', - '/usr/bin/pkexec', - '/usr/bin/sudo', '/usr/bin/sudoedit', - '/usr/sbin/postdrop', '/usr/sbin/postqueue', - '/usr/sbin/suexec', - '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth', - '/usr/kerberos/bin/ksu', - '/usr/sbin/ccreds_validate', - '/usr/bin/Xorg', - '/usr/bin/X', - '/usr/lib/dbus-1.0/dbus-daemon-launch-helper', - '/usr/lib/vte/gnome-pty-helper', - '/usr/lib/libvte9/gnome-pty-helper', - '/usr/lib/libvte-2.90-9/gnome-pty-helper'] - - -def get_audits(): - """Get OS hardening suid/sgid audits. - - :returns: dictionary of audits - """ - checks = [] - settings = utils.get_settings('os') - if not settings['security']['suid_sgid_enforce']: - log("Skipping suid/sgid hardening", level=INFO) - return checks - - # Build the blacklist and whitelist of files for suid/sgid checks. - # There are a total of 4 lists: - # 1. the system blacklist - # 2. the system whitelist - # 3. the user blacklist - # 4. the user whitelist - # - # The blacklist is the set of paths which should NOT have the suid/sgid bit - # set and the whitelist is the set of paths which MAY have the suid/sgid - # bit set. The user whitelist/blacklist effectively override the system - # whitelist/blacklist.
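A toy illustration of the set arithmetic implemented just below, showing how user-supplied entries are subtracted from both system lists (paths abbreviated from the lists above):

    BLACKLIST = {'/usr/bin/rcp', '/usr/bin/rsh'}
    WHITELIST = {'/bin/ping', '/usr/bin/passwd'}
    u_b = ['/bin/ping']      # user blacklist entry
    u_w = ['/usr/bin/rsh']   # user whitelist entry

    blacklist = set(BLACKLIST) - set(u_w + u_b)  # {'/usr/bin/rcp'}
    whitelist = set(WHITELIST) - set(u_b + u_w)  # {'/usr/bin/passwd'}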
- u_b = settings['security']['suid_sgid_blacklist'] - u_w = settings['security']['suid_sgid_whitelist'] - - blacklist = set(BLACKLIST) - set(u_w + u_b) - whitelist = set(WHITELIST) - set(u_b + u_w) - - checks.append(NoSUIDSGIDAudit(blacklist)) - - dry_run = settings['security']['suid_sgid_dry_run_on_unknown'] - - if settings['security']['suid_sgid_remove_from_unknown'] or dry_run: - # If the policy is a dry_run (e.g. complain only) or remove unknown - # suid/sgid bits then find all of the paths which have the suid/sgid - # bit set and then remove the whitelisted paths. - root_path = settings['environment']['root_path'] - unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist) - checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run)) - - return checks - - -def find_paths_with_suid_sgid(root_path): - """Finds all paths/files which have an suid/sgid bit enabled. - - Starting with the root_path, this will recursively find all paths which - have an suid or sgid bit set. - """ - cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', - '-type', 'f', '!', '-path', '/proc/*', '-print'] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, _ = p.communicate() - return set(out.split('\n')) diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py b/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py deleted file mode 100644 index 8a57d83d..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import platform -import re -import subprocess - -from charmhelpers.core.hookenv import ( - log, - INFO, - WARNING, -) -from charmhelpers.contrib.hardening import utils -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.host import TEMPLATES_DIR - - -SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s -net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s -net.ipv4.conf.all.rp_filter=1 -net.ipv4.conf.default.rp_filter=1 -net.ipv4.icmp_echo_ignore_broadcasts=1 -net.ipv4.icmp_ignore_bogus_error_responses=1 -net.ipv4.icmp_ratelimit=100 -net.ipv4.icmp_ratemask=88089 -net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s -net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s -net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s -net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s -net.ipv4.tcp_rfc1337=1 -net.ipv4.tcp_syncookies=1 -net.ipv4.conf.all.shared_media=1 -net.ipv4.conf.default.shared_media=1 -net.ipv4.conf.all.accept_source_route=0 -net.ipv4.conf.default.accept_source_route=0 -net.ipv4.conf.all.accept_redirects=0 -net.ipv4.conf.default.accept_redirects=0 -net.ipv6.conf.all.accept_redirects=0 -net.ipv6.conf.default.accept_redirects=0 -net.ipv4.conf.all.secure_redirects=0 -net.ipv4.conf.default.secure_redirects=0 -net.ipv4.conf.all.send_redirects=0 -net.ipv4.conf.default.send_redirects=0 -net.ipv4.conf.all.log_martians=0 -net.ipv6.conf.default.router_solicitations=0 -net.ipv6.conf.default.accept_ra_rtr_pref=0 -net.ipv6.conf.default.accept_ra_pinfo=0 -net.ipv6.conf.default.accept_ra_defrtr=0 -net.ipv6.conf.default.autoconf=0 -net.ipv6.conf.default.dad_transmits=0 -net.ipv6.conf.default.max_addresses=1 -net.ipv6.conf.all.accept_ra=0 -net.ipv6.conf.default.accept_ra=0 -kernel.modules_disabled=%(kernel_modules_disabled)s -kernel.sysrq=%(kernel_sysrq)s -fs.suid_dumpable=%(fs_suid_dumpable)s -kernel.randomize_va_space=2 -""" - - -def get_audits(): - """Get OS hardening sysctl audits. - - :returns: dictionary of audits - """ - audits = [] - settings = utils.get_settings('os') - - # Apply the sysctl settings which are configured to be applied. - audits.append(SysctlConf()) - # Make sure that only root has access to the sysctl.conf file, and - # that it is read-only. 
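The SYSCTL_DEFAULTS string above is later rendered by SysCtlHardeningContext using old-style %(name)s interpolation and split into key=value pairs, with each key mapped onto a /proc/sys path so keys the kernel does not expose can be skipped. A toy illustration of that parsing with a single key and an invented value:

    import os

    SYSCTL_DEFAULTS = "kernel.sysrq=%(kernel_sysrq)s\n"
    rendered = SYSCTL_DEFAULTS % {'kernel_sysrq': 0}
    for entry in rendered.split():
        key, _, value = entry.partition('=')
        path = os.path.join('/proc/sys', key.replace('.', '/'))
        # e.g. kernel.sysrq 0 True on a typical Linux host
        print(key, value, os.path.exists(path))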
- audits.append(FilePermissionAudit('/etc/sysctl.conf', - user='root', - group='root', mode=0o0440)) - # If module loading is not enabled, then ensure that the modules - # file has the appropriate permissions and rebuild the initramfs - if not settings['security']['kernel_enable_module_loading']: - audits.append(ModulesTemplate()) - - return audits - - -class ModulesContext(object): - - def __call__(self): - settings = utils.get_settings('os') - with open('/proc/cpuinfo', 'r') as fd: - cpuinfo = fd.readlines() - - for line in cpuinfo: - match = re.search(r"^vendor_id\s+:\s+(.+)", line) - if match: - vendor = match.group(1) - - if vendor == "GenuineIntel": - vendor = "intel" - elif vendor == "AuthenticAMD": - vendor = "amd" - - ctxt = {'arch': platform.processor(), - 'cpuVendor': vendor, - 'desktop_enable': settings['general']['desktop_enable']} - - return ctxt - - -class ModulesTemplate(object): - - def __init__(self): - super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules', - ModulesContext(), - templates_dir=TEMPLATES_DIR, - user='root', group='root', - mode=0o0440) - - def post_write(self): - subprocess.check_call(['update-initramfs', '-u']) - - -class SysCtlHardeningContext(object): - def __call__(self): - settings = utils.get_settings('os') - ctxt = {'sysctl': {}} - - log("Applying sysctl settings", level=INFO) - extras = {'net_ipv4_ip_forward': 0, - 'net_ipv6_conf_all_forwarding': 0, - 'net_ipv6_conf_all_disable_ipv6': 1, - 'net_ipv4_tcp_timestamps': 0, - 'net_ipv4_conf_all_arp_ignore': 0, - 'net_ipv4_conf_all_arp_announce': 0, - 'kernel_sysrq': 0, - 'fs_suid_dumpable': 0, - 'kernel_modules_disabled': 1} - - if settings['sysctl']['ipv6_enable']: - extras['net_ipv6_conf_all_disable_ipv6'] = 0 - - if settings['sysctl']['forwarding']: - extras['net_ipv4_ip_forward'] = 1 - extras['net_ipv6_conf_all_forwarding'] = 1 - - if settings['sysctl']['arp_restricted']: - extras['net_ipv4_conf_all_arp_ignore'] = 1 - extras['net_ipv4_conf_all_arp_announce'] = 2 - - if settings['security']['kernel_enable_module_loading']: - extras['kernel_modules_disabled'] = 0 - - if settings['sysctl']['kernel_enable_sysrq']: - sysrq_val = settings['sysctl']['kernel_secure_sysrq'] - extras['kernel_sysrq'] = sysrq_val - - if settings['security']['kernel_enable_core_dump']: - extras['fs_suid_dumpable'] = 1 - - settings.update(extras) - for d in (SYSCTL_DEFAULTS % settings).split(): - d = d.strip().partition('=') - key = d[0].strip() - path = os.path.join('/proc/sys', key.replace('.', '/')) - if not os.path.exists(path): - log("Skipping '%s' since '%s' does not exist" % (key, path), - level=WARNING) - continue - - ctxt['sysctl'][key] = d[2] or None - - return { - 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] - } - - -class SysctlConf(TemplatedFile): - """An audit check for sysctl settings.""" - def __init__(self): - self.conffile = '/etc/sysctl.d/99-juju-hardening.conf' - super(SysctlConf, self).__init__(self.conffile, - SysCtlHardeningContext(), - template_dir=TEMPLATES_DIR, - user='root', group='root', - mode=0o0440) - - def post_write(self): - try: - subprocess.check_call(['sysctl', '-p', self.conffile]) - except subprocess.CalledProcessError as e: - # NOTE: on some systems if sysctl cannot apply all settings it - # will return non-zero as well. 
- log("sysctl command returned an error (maybe some " - "keys could not be set) - %s" % (e), - level=WARNING) diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf deleted file mode 100644 index 0014191f..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -{% if disable_core_dump -%} -# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information. -* hard core 0 -{% endif %} \ No newline at end of file diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh deleted file mode 100644 index 616cef46..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh +++ /dev/null @@ -1,5 +0,0 @@ -TMOUT={{ tmout }} -readonly TMOUT -export TMOUT - -readonly HISTFILE diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf deleted file mode 100644 index 101f1e1d..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf +++ /dev/null @@ -1,7 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -{% for key, value in sysctl_settings -%} -{{ key }}={{ value }} -{% endfor -%} diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py b/hooks/charmhelpers/contrib/hardening/host/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/login.defs b/hooks/charmhelpers/contrib/hardening/host/templates/login.defs deleted file mode 100644 index 7d107637..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/login.defs +++ /dev/null @@ -1,349 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# -# /etc/login.defs - Configuration control definitions for the login package. -# -# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH. -# If unspecified, some arbitrary (and possibly incorrect) value will -# be assumed. All other items are optional - if not specified then -# the described action or option will be inhibited. -# -# Comment lines (lines beginning with "#") and blank lines are ignored. -# -# Modified for Linux. --marekm - -# REQUIRED for useradd/userdel/usermod -# Directory where mailboxes reside, _or_ name of file, relative to the -# home directory. If you _do_ define MAIL_DIR and MAIL_FILE, -# MAIL_DIR takes precedence. -# -# Essentially: -# - MAIL_DIR defines the location of users mail spool files -# (for mbox use) by appending the username to MAIL_DIR as defined -# below. 
-# - MAIL_FILE defines the location of the users mail spool files as the -# fully-qualified filename obtained by prepending the user home -# directory before $MAIL_FILE -# -# NOTE: This is no more used for setting up users MAIL environment variable -# which is, starting from shadow 4.0.12-1 in Debian, entirely the -# job of the pam_mail PAM modules -# See default PAM configuration files provided for -# login, su, etc. -# -# This is a temporary situation: setting these variables will soon -# move to /etc/default/useradd and the variables will then be -# no more supported -MAIL_DIR /var/mail -#MAIL_FILE .mail - -# -# Enable logging and display of /var/log/faillog login failure info. -# This option conflicts with the pam_tally PAM module. -# -FAILLOG_ENAB yes - -# -# Enable display of unknown usernames when login failures are recorded. -# -# WARNING: Unknown usernames may become world readable. -# See #290803 and #298773 for details about how this could become a security -# concern -LOG_UNKFAIL_ENAB no - -# -# Enable logging of successful logins -# -LOG_OK_LOGINS yes - -# -# Enable "syslog" logging of su activity - in addition to sulog file logging. -# SYSLOG_SG_ENAB does the same for newgrp and sg. -# -SYSLOG_SU_ENAB yes -SYSLOG_SG_ENAB yes - -# -# If defined, all su activity is logged to this file. -# -#SULOG_FILE /var/log/sulog - -# -# If defined, file which maps tty line to TERM environment parameter. -# Each line of the file is in a format something like "vt100 tty01". -# -#TTYTYPE_FILE /etc/ttytype - -# -# If defined, login failures will be logged here in a utmp format -# last, when invoked as lastb, will read /var/log/btmp, so... -# -FTMP_FILE /var/log/btmp - -# -# If defined, the command name to display when running "su -". For -# example, if this is defined as "su" then a "ps" will display the -# command is "-su". If not defined, then "ps" would display the -# name of the shell actually being run, e.g. something like "-sh". -# -SU_NAME su - -# -# If defined, file which inhibits all the usual chatter during the login -# sequence. If a full pathname, then hushed mode will be enabled if the -# user's name or shell are found in the file. If not a full pathname, then -# hushed mode will be enabled if the file exists in the user's home directory. -# -HUSHLOGIN_FILE .hushlogin -#HUSHLOGIN_FILE /etc/hushlogins - -# -# *REQUIRED* The default PATH settings, for superuser and normal users. -# -# (they are minimal, add the rest in the shell startup files) -ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %} - -# -# Terminal permissions -# -# TTYGROUP Login tty will be assigned this group ownership. -# TTYPERM Login tty will be set to this permission. -# -# If you have a "write" program which is "setgid" to a special group -# which owns the terminals, define TTYGROUP to the group number and -# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign -# TTYPERM to either 622 or 600. -# -# In Debian /usr/bin/bsd-write or similar programs are setgid tty -# However, the default and recommended value for TTYPERM is still 0600 -# to not allow anyone to write to anyone else console or terminal - -# Users can still allow other people to write them by issuing -# the "mesg y" command. - -TTYGROUP tty -TTYPERM 0600 - -# -# Login configuration initializations: -# -# ERASECHAR Terminal ERASE character ('\010' = backspace). 
-# KILLCHAR Terminal KILL character ('\025' = CTRL/U). -# UMASK Default "umask" value. -# -# The ERASECHAR and KILLCHAR are used only on System V machines. -# -# UMASK is the default umask value for pam_umask and is used by -# useradd and newusers to set the mode of the new home directories. -# 022 is the "historical" value in Debian for UMASK -# 027, or even 077, could be considered better for privacy -# There is no One True Answer here : each sysadmin must make up his/her -# mind. -# -# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value -# for private user groups, i. e. the uid is the same as gid, and username is -# the same as the primary group name: for these, the user permissions will be -# used as group permissions, e. g. 022 will become 002. -# -# Prefix these values with "0" to get octal, "0x" to get hexadecimal. -# -ERASECHAR 0177 -KILLCHAR 025 -UMASK {{ umask }} - -# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name. -# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user. -USERGROUPS_ENAB yes - -# -# Password aging controls: -# -# PASS_MAX_DAYS Maximum number of days a password may be used. -# PASS_MIN_DAYS Minimum number of days allowed between password changes. -# PASS_WARN_AGE Number of days warning given before a password expires. -# -PASS_MAX_DAYS {{ pwd_max_age }} -PASS_MIN_DAYS {{ pwd_min_age }} -PASS_WARN_AGE 7 - -# -# Min/max values for automatic uid selection in useradd -# -UID_MIN {{ uid_min }} -UID_MAX 60000 -# System accounts -SYS_UID_MIN {{ sys_uid_min }} -SYS_UID_MAX {{ sys_uid_max }} - -# Min/max values for automatic gid selection in groupadd -GID_MIN {{ gid_min }} -GID_MAX 60000 -# System accounts -SYS_GID_MIN {{ sys_gid_min }} -SYS_GID_MAX {{ sys_gid_max }} - -# -# Max number of login retries if password is bad. This will most likely be -# overridden by PAM, since the default pam_unix module has it's own built -# in of 3 retries. However, this is a safe fallback in case you are using -# an authentication module that does not enforce PAM_MAXTRIES. -# -LOGIN_RETRIES {{ login_retries }} - -# -# Max time in seconds for login -# -LOGIN_TIMEOUT {{ login_timeout }} - -# -# Which fields may be changed by regular users using chfn - use -# any combination of letters "frwh" (full name, room number, work -# phone, home phone). If not defined, no changes are allowed. -# For backward compatibility, "yes" = "rwh" and "no" = "frwh". -# -{% if chfn_restrict %} -CHFN_RESTRICT {{ chfn_restrict }} -{% endif %} - -# -# Should login be allowed if we can't cd to the home directory? -# Default in no. -# -DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %} - -# -# If defined, this command is run when removing a user. -# It should remove any at/cron/print jobs etc. owned by -# the user to be removed (passed as the first argument). -# -#USERDEL_CMD /usr/sbin/userdel_local - -# -# Enable setting of the umask group bits to be the same as owner bits -# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is -# the same as gid, and username is the same as the primary group name. -# -# If set to yes, userdel will remove the user´s group if it contains no -# more members, and useradd will create by default a group with the name -# of the user. 
-# -USERGROUPS_ENAB yes - -# -# Instead of the real user shell, the program specified by this parameter -# will be launched, although its visible name (argv[0]) will be the shell's. -# The program may do whatever it wants (logging, additional authentication, -# banner, ...) before running the actual shell. -# -# FAKE_SHELL /bin/fakeshell - -# -# If defined, either full pathname of a file containing device names or -# a ":" delimited list of device names. Root logins will be allowed only -# upon these devices. -# -# This variable is used by login and su. -# -#CONSOLE /etc/consoles -#CONSOLE console:tty01:tty02:tty03:tty04 - -# -# List of groups to add to the user's supplementary group set -# when logging in on the console (as determined by the CONSOLE -# setting). Default is none. -# -# Use with caution - it is possible for users to gain permanent -# access to these groups, even when not logged in on the console. -# How to do it is left as an exercise for the reader... -# -# This variable is used by login and su. -# -#CONSOLE_GROUPS floppy:audio:cdrom - -# -# If set to "yes", new passwords will be encrypted using the MD5-based -# algorithm compatible with the one used by recent releases of FreeBSD. -# It supports passwords of unlimited length and longer salt strings. -# Set to "no" if you need to copy encrypted passwords to other systems -# which don't understand the new algorithm. Default is "no". -# -# This variable is deprecated. You should use ENCRYPT_METHOD. -# -MD5_CRYPT_ENAB no - -# -# If set to MD5 , MD5-based algorithm will be used for encrypting password -# If set to SHA256, SHA256-based algorithm will be used for encrypting password -# If set to SHA512, SHA512-based algorithm will be used for encrypting password -# If set to DES, DES-based algorithm will be used for encrypting password (default) -# Overrides the MD5_CRYPT_ENAB option -# -# Note: It is recommended to use a value consistent with -# the PAM modules configuration. -# -ENCRYPT_METHOD SHA512 - -# -# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512. -# -# Define the number of SHA rounds. -# With a lot of rounds, it is more difficult to brute forcing the password. -# But note also that it more CPU resources will be needed to authenticate -# users. -# -# If not specified, the libc will choose the default number of rounds (5000). -# The values must be inside the 1000-999999999 range. -# If only one of the MIN or MAX values is set, then this value will be used. -# If MIN > MAX, the highest value will be used. -# -# SHA_CRYPT_MIN_ROUNDS 5000 -# SHA_CRYPT_MAX_ROUNDS 5000 - -################# OBSOLETED BY PAM ############## -# # -# These options are now handled by PAM. Please # -# edit the appropriate file in /etc/pam.d/ to # -# enable the equivelants of them. -# -############### - -#MOTD_FILE -#DIALUPS_CHECK_ENAB -#LASTLOG_ENAB -#MAIL_CHECK_ENAB -#OBSCURE_CHECKS_ENAB -#PORTTIME_CHECKS_ENAB -#SU_WHEEL_ONLY -#CRACKLIB_DICTPATH -#PASS_CHANGE_TRIES -#PASS_ALWAYS_WARN -#ENVIRON_FILE -#NOLOGINS_FILE -#ISSUE_FILE -#PASS_MIN_LEN -#PASS_MAX_LEN -#ULIMIT -#ENV_HZ -#CHFN_AUTH -#CHSH_AUTH -#FAIL_DELAY - -################# OBSOLETED ####################### -# # -# These options are no more handled by shadow. # -# # -# Shadow utilities will display a warning if they # -# still appear. 
# -# # -################################################### - -# CLOSE_SESSIONS -# LOGIN_STRING -# NO_PASSWORD_CONSOLE -# QMAIL_DIR - - - diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/modules b/hooks/charmhelpers/contrib/hardening/host/templates/modules deleted file mode 100644 index ef0354ee..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/modules +++ /dev/null @@ -1,117 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# /etc/modules: kernel modules to load at boot time. -# -# This file contains the names of kernel modules that should be loaded -# at boot time, one per line. Lines beginning with "#" are ignored. -# Parameters can be specified after the module name. - -# Arch -# ---- -# -# Modules for certains builds, contains support modules and some CPU-specific optimizations. - -{% if arch == "x86_64" -%} -# Optimize for x86_64 cryptographic features -twofish-x86_64-3way -twofish-x86_64 -aes-x86_64 -salsa20-x86_64 -blowfish-x86_64 -{% endif -%} - -{% if cpuVendor == "intel" -%} -# Intel-specific optimizations -ghash-clmulni-intel -aesni-intel -kvm-intel -{% endif -%} - -{% if cpuVendor == "amd" -%} -# AMD-specific optimizations -kvm-amd -{% endif -%} - -kvm - - -# Crypto -# ------ - -# Some core modules which comprise strong cryptography. -blowfish_common -blowfish_generic -ctr -cts -lrw -lzo -rmd160 -rmd256 -rmd320 -serpent -sha512_generic -twofish_common -twofish_generic -xts -zlib - - -# Drivers -# ------- - -# Basics -lp -rtc -loop - -# Filesystems -ext2 -btrfs - -{% if desktop_enable -%} -# Desktop -psmouse -snd -snd_ac97_codec -snd_intel8x0 -snd_page_alloc -snd_pcm -snd_timer -soundcore -usbhid -{% endif -%} - -# Lib -# --- -xz - - -# Net -# --- - -# All packets needed for netfilter rules (ie iptables, ebtables). -ip_tables -x_tables -iptable_filter -iptable_nat - -# Targets -ipt_LOG -ipt_REJECT - -# Modules -xt_connlimit -xt_tcpudp -xt_recent -xt_limit -xt_conntrack -nf_conntrack -nf_conntrack_ipv4 -nf_defrag_ipv4 -xt_state -nf_nat - -# Addons -xt_pknock \ No newline at end of file diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf deleted file mode 100644 index f98d14e5..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/passwdqc.conf +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -Name: passwdqc password strength enforcement -Default: yes -Priority: 1024 -Conflicts: cracklib -Password-Type: Primary -Password: - requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh deleted file mode 100644 index fd2de791..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh +++ /dev/null @@ -1,8 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. 
-############################################################################### -# Disable core dumps via soft limits for all users. Compliance to this setting -# is voluntary and can be modified by users up to a hard limit. This setting is -# a sane default. -ulimit -S -c 0 > /dev/null 2>&1 diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/securetty b/hooks/charmhelpers/contrib/hardening/host/templates/securetty deleted file mode 100644 index 15b18d4e..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/securetty +++ /dev/null @@ -1,11 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# A list of TTYs, from which root can log in -# see `man securetty` for reference -{% if ttys -%} -{% for tty in ttys -%} -{{ tty }} -{% endfor -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/tally2 b/hooks/charmhelpers/contrib/hardening/host/templates/tally2 deleted file mode 100644 index d9620299..00000000 --- a/hooks/charmhelpers/contrib/hardening/host/templates/tally2 +++ /dev/null @@ -1,14 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -Name: tally2 lockout after failed attempts enforcement -Default: yes -Priority: 1024 -Conflicts: cracklib -Auth-Type: Primary -Auth-Initial: - required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} -Account-Type: Primary -Account-Initial: - required pam_tally2.so diff --git a/hooks/charmhelpers/contrib/hardening/mysql/__init__.py b/hooks/charmhelpers/contrib/hardening/mysql/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/hooks/charmhelpers/contrib/hardening/mysql/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py deleted file mode 100644 index 1990d851..00000000 --- a/hooks/charmhelpers/contrib/hardening/mysql/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
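The deleted modules template above selects kernel modules with Jinja2 conditionals keyed on arch and cpuVendor. A trimmed-down sketch of how such a template renders (the context keys are taken from the template itself; the module names are a small subset):

```python
from jinja2 import Template

tmpl = Template(
    "{% if arch == 'x86_64' -%}\naes-x86_64\n{% endif -%}\n"
    "{% if cpuVendor == 'intel' -%}\nkvm-intel\n{% endif -%}\nkvm\n"
)
# Renders only the branches matching this host's context.
print(tmpl.render(arch='x86_64', cpuVendor='intel'))
# -> aes-x86_64, kvm-intel, kvm (one per line)
```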
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.mysql.checks import config - - -def run_mysql_checks(): - log("Starting MySQL hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("MySQL hardening checks complete.", level=DEBUG) diff --git a/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py b/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py deleted file mode 100644 index 8bf9f36c..00000000 --- a/hooks/charmhelpers/contrib/hardening/mysql/checks/config.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess - -from charmhelpers.core.hookenv import ( - log, - WARNING, -) -from charmhelpers.contrib.hardening.audits.file import ( - FilePermissionAudit, - DirectoryPermissionAudit, - TemplatedFile, -) -from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get MySQL hardening config audits. - - :returns: dictionary of audits - """ - if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: - log("MySQL does not appear to be installed on this node - " - "skipping mysql hardening", level=WARNING) - return [] - - settings = utils.get_settings('mysql') - hardening_settings = settings['hardening'] - my_cnf = hardening_settings['mysql-conf'] - - audits = [ - FilePermissionAudit(paths=[my_cnf], user='root', - group='root', mode=0o0600), - - TemplatedFile(hardening_settings['hardening-conf'], - MySQLConfContext(), - TEMPLATES_DIR, - mode=0o0750, - user='mysql', - group='root', - service_actions=[{'service': 'mysql', - 'actions': ['restart']}]), - - # MySQL and Percona charms do not allow configuration of the - # data directory, so use the default. - DirectoryPermissionAudit('/var/lib/mysql', - user='mysql', - group='mysql', - recursive=False, - mode=0o755), - - DirectoryPermissionAudit('/etc/mysql', - user='root', - group='root', - recursive=False, - mode=0o700), - ] - - return audits - - -class MySQLConfContext(object): - """Defines the set of key/value pairs to set in a mysql config file. - - This context, when called, will return a dictionary containing the - key/value pairs of setting to specify in the - /etc/mysql/conf.d/hardening.cnf file. 
- """ - def __call__(self): - settings = utils.get_settings('mysql') - return { - 'mysql_settings': [(k, v) for k, v in settings['security'].items()] - } diff --git a/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/hooks/charmhelpers/contrib/hardening/mysql/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf deleted file mode 100644 index 8242586c..00000000 --- a/hooks/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf +++ /dev/null @@ -1,12 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -[mysqld] -{% for setting, value in mysql_settings -%} -{% if value == 'True' -%} -{{ setting }} -{% elif value != 'None' and value != None -%} -{{ setting }} = {{ value }} -{% endif -%} -{% endfor -%} diff --git a/hooks/charmhelpers/contrib/hardening/ssh/__init__.py b/hooks/charmhelpers/contrib/hardening/ssh/__init__.py deleted file mode 100644 index 58bebd84..00000000 --- a/hooks/charmhelpers/contrib/hardening/ssh/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path - -TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py deleted file mode 100644 index edaf484b..00000000 --- a/hooks/charmhelpers/contrib/hardening/ssh/checks/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.contrib.hardening.ssh.checks import config - - -def run_ssh_checks(): - log("Starting SSH hardening checks.", level=DEBUG) - checks = config.get_audits() - for check in checks: - log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) - check.ensure_compliance() - - log("SSH hardening checks complete.", level=DEBUG) diff --git a/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py deleted file mode 100644 index 41bed2d1..00000000 --- a/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_iface_addr, - is_ip, -) -from charmhelpers.core.hookenv import ( - log, - DEBUG, -) -from charmhelpers.fetch import ( - apt_install, - apt_update, -) -from charmhelpers.core.host import ( - lsb_release, - CompareHostReleases, -) -from charmhelpers.contrib.hardening.audits.file import ( - TemplatedFile, - FileContentAudit, -) -from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR -from charmhelpers.contrib.hardening import utils - - -def get_audits(): - """Get SSH hardening config audits. 
- - :returns: dictionary of audits - """ - audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(), - SSHDConfigFileContentAudit()] - return audits - - -class SSHConfigContext(object): - - type = 'client' - - def get_macs(self, allow_weak_mac): - if allow_weak_mac: - weak_macs = 'weak' - else: - weak_macs = 'default' - - default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160' - macs = {'default': default, - 'weak': default + ',hmac-sha1'} - - default = ('hmac-sha2-512-etm@openssh.com,' - 'hmac-sha2-256-etm@openssh.com,' - 'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,' - 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160') - macs_66 = {'default': default, - 'weak': default + ',hmac-sha1'} - - # Use newer ciphers on Ubuntu Trusty and above - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG) - macs = macs_66 - - return macs[weak_macs] - - def get_kexs(self, allow_weak_kex): - if allow_weak_kex: - weak_kex = 'weak' - else: - weak_kex = 'default' - - default = 'diffie-hellman-group-exchange-sha256' - weak = (default + ',diffie-hellman-group14-sha1,' - 'diffie-hellman-group-exchange-sha1,' - 'diffie-hellman-group1-sha1') - kex = {'default': default, - 'weak': weak} - - default = ('curve25519-sha256@libssh.org,' - 'diffie-hellman-group-exchange-sha256') - weak = (default + ',diffie-hellman-group14-sha1,' - 'diffie-hellman-group-exchange-sha1,' - 'diffie-hellman-group1-sha1') - kex_66 = {'default': default, - 'weak': weak} - - # Use newer kex on Ubuntu Trusty and above - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - log('Detected Ubuntu 14.04 or newer, using new key exchange ' - 'algorithms', level=DEBUG) - kex = kex_66 - - return kex[weak_kex] - - def get_ciphers(self, cbc_required): - if cbc_required: - weak_ciphers = 'weak' - else: - weak_ciphers = 'default' - - default = 'aes256-ctr,aes192-ctr,aes128-ctr' - cipher = {'default': default, - 'weak': default + 'aes256-cbc,aes192-cbc,aes128-cbc'} - - default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,' - 'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr') - ciphers_66 = {'default': default, - 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'} - - # Use newer ciphers on ubuntu Trusty and above - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - log('Detected Ubuntu 14.04 or newer, using new ciphers', - level=DEBUG) - cipher = ciphers_66 - - return cipher[weak_ciphers] - - def get_listening(self, listen=['0.0.0.0']): - """Returns a list of addresses SSH can list on - - Turns input into a sensible list of IPs SSH can listen on. Input - must be a python list of interface names, IPs and/or CIDRs. 
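get_macs(), get_kexs() and get_ciphers() above all follow one pattern: pick the newer algorithm list on trusty or later, and append the weak legacy algorithms only when the operator opts in. A simplified restatement of that selection (the ordered codename list is an abridged stand-in for charm-helpers' CompareHostReleases):

```python
UBUNTU_ORDER = ['lucid', 'precise', 'trusty', 'xenial', 'bionic']  # abridged

DEFAULT_KEX = 'diffie-hellman-group-exchange-sha256'
MODERN_KEX = ('curve25519-sha256@libssh.org,'
              'diffie-hellman-group-exchange-sha256')
WEAK_SUFFIX = (',diffie-hellman-group14-sha1,'
               'diffie-hellman-group-exchange-sha1,'
               'diffie-hellman-group1-sha1')

def select_kexs(release, allow_weak_kex=False):
    # Newer list on trusty and later, as in get_kexs() above.
    modern = UBUNTU_ORDER.index(release) >= UBUNTU_ORDER.index('trusty')
    base = MODERN_KEX if modern else DEFAULT_KEX
    return base + WEAK_SUFFIX if allow_weak_kex else base

print(select_kexs('xenial'))  # modern list, no weak algorithms
```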
- - :param listen: list of IPs, CIDRs, interface names - - :returns: list of IPs available on the host - """ - if listen == ['0.0.0.0']: - return listen - - value = [] - for network in listen: - try: - ip = get_address_in_network(network=network, fatal=True) - except ValueError: - if is_ip(network): - ip = network - else: - try: - ip = get_iface_addr(iface=network, fatal=False)[0] - except IndexError: - continue - value.append(ip) - if value == []: - return ['0.0.0.0'] - return value - - def __call__(self): - settings = utils.get_settings('ssh') - if settings['common']['network_ipv6_enable']: - addr_family = 'any' - else: - addr_family = 'inet' - - ctxt = { - 'addr_family': addr_family, - 'remote_hosts': settings['common']['remote_hosts'], - 'password_auth_allowed': - settings['client']['password_authentication'], - 'ports': settings['common']['ports'], - 'ciphers': self.get_ciphers(settings['client']['cbc_required']), - 'macs': self.get_macs(settings['client']['weak_hmac']), - 'kexs': self.get_kexs(settings['client']['weak_kex']), - 'roaming': settings['client']['roaming'], - } - return ctxt - - -class SSHConfig(TemplatedFile): - def __init__(self): - path = '/etc/ssh/ssh_config' - super(SSHConfig, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=SSHConfigContext(), - user='root', - group='root', - mode=0o0644) - - def pre_write(self): - settings = utils.get_settings('ssh') - apt_update(fatal=True) - apt_install(settings['client']['package']) - if not os.path.exists('/etc/ssh'): - os.makedir('/etc/ssh') - # NOTE: don't recurse - utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, - maxdepth=0) - - def post_write(self): - # NOTE: don't recurse - utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, - maxdepth=0) - - -class SSHDConfigContext(SSHConfigContext): - - type = 'server' - - def __call__(self): - settings = utils.get_settings('ssh') - if settings['common']['network_ipv6_enable']: - addr_family = 'any' - else: - addr_family = 'inet' - - ctxt = { - 'ssh_ip': self.get_listening(settings['server']['listen_to']), - 'password_auth_allowed': - settings['server']['password_authentication'], - 'ports': settings['common']['ports'], - 'addr_family': addr_family, - 'ciphers': self.get_ciphers(settings['server']['cbc_required']), - 'macs': self.get_macs(settings['server']['weak_hmac']), - 'kexs': self.get_kexs(settings['server']['weak_kex']), - 'host_key_files': settings['server']['host_key_files'], - 'allow_root_with_key': settings['server']['allow_root_with_key'], - 'password_authentication': - settings['server']['password_authentication'], - 'use_priv_sep': settings['server']['use_privilege_separation'], - 'use_pam': settings['server']['use_pam'], - 'allow_x11_forwarding': settings['server']['allow_x11_forwarding'], - 'print_motd': settings['server']['print_motd'], - 'print_last_log': settings['server']['print_last_log'], - 'client_alive_interval': - settings['server']['alive_interval'], - 'client_alive_count': settings['server']['alive_count'], - 'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'], - 'allow_agent_forwarding': - settings['server']['allow_agent_forwarding'], - 'deny_users': settings['server']['deny_users'], - 'allow_users': settings['server']['allow_users'], - 'deny_groups': settings['server']['deny_groups'], - 'allow_groups': settings['server']['allow_groups'], - 'use_dns': settings['server']['use_dns'], - 'sftp_enable': settings['server']['sftp_enable'], - 'sftp_group': settings['server']['sftp_group'], - 'sftp_chroot': 
settings['server']['sftp_chroot'], - 'max_auth_tries': settings['server']['max_auth_tries'], - 'max_sessions': settings['server']['max_sessions'], - } - return ctxt - - -class SSHDConfig(TemplatedFile): - def __init__(self): - path = '/etc/ssh/sshd_config' - super(SSHDConfig, self).__init__(path=path, - template_dir=TEMPLATES_DIR, - context=SSHDConfigContext(), - user='root', - group='root', - mode=0o0600, - service_actions=[{'service': 'ssh', - 'actions': - ['restart']}]) - - def pre_write(self): - settings = utils.get_settings('ssh') - apt_update(fatal=True) - apt_install(settings['server']['package']) - if not os.path.exists('/etc/ssh'): - os.makedir('/etc/ssh') - # NOTE: don't recurse - utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, - maxdepth=0) - - def post_write(self): - # NOTE: don't recurse - utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755, - maxdepth=0) - - -class SSHConfigFileContentAudit(FileContentAudit): - def __init__(self): - self.path = '/etc/ssh/ssh_config' - super(SSHConfigFileContentAudit, self).__init__(self.path, {}) - - def is_compliant(self, *args, **kwargs): - self.pass_cases = [] - self.fail_cases = [] - settings = utils.get_settings('ssh') - - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - if not settings['server']['weak_hmac']: - self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['server']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa - - if settings['server']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - if not settings['client']['weak_hmac']: - self.fail_cases.append(r'^MACs.+,hmac-sha1$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['client']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - - if 
settings['client']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - - if settings['client']['roaming']: - self.pass_cases.append(r'^UseRoaming yes$') - else: - self.fail_cases.append(r'^UseRoaming yes$') - - return super(SSHConfigFileContentAudit, self).is_compliant(*args, - **kwargs) - - -class SSHDConfigFileContentAudit(FileContentAudit): - def __init__(self): - self.path = '/etc/ssh/sshd_config' - super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) - - def is_compliant(self, *args, **kwargs): - self.pass_cases = [] - self.fail_cases = [] - settings = utils.get_settings('ssh') - - _release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(_release) >= 'trusty': - if not settings['server']['weak_hmac']: - self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['server']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa - - if settings['server']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - if not settings['server']['weak_hmac']: - self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') - else: - self.pass_cases.append(r'^MACs.+,hmac-sha1$') - - if settings['server']['weak_kex']: - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - else: - self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa - self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa - - if settings['server']['cbc_required']: - self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - 
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - else: - self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') - self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') - - if settings['server']['sftp_enable']: - self.pass_cases.append(r'^Subsystem\ssftp') - else: - self.fail_cases.append(r'^Subsystem\ssftp') - - return super(SSHDConfigFileContentAudit, self).is_compliant(*args, - **kwargs) diff --git a/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/hooks/charmhelpers/contrib/hardening/ssh/templates/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config deleted file mode 100644 index 9742d8e2..00000000 --- a/hooks/charmhelpers/contrib/hardening/ssh/templates/ssh_config +++ /dev/null @@ -1,70 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# This is the ssh client system-wide configuration file. See -# ssh_config(5) for more information. This file provides defaults for -# users, and the values can be changed in per-user configuration files -# or on the command line. - -# Configuration data is parsed as follows: -# 1. command line options -# 2. user-specific file -# 3. system-wide file -# Any configuration value is only changed the first time it is set. -# Thus, host-specific definitions should be at the beginning of the -# configuration file, and defaults at the end. - -# Site-wide defaults for some commonly used options. For a comprehensive -# list of available options, their meanings and defaults, please see the -# ssh_config(5) man page. - -# Restrict the following configuration to be limited to this Host. 
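The two FileContentAudit subclasses above only assemble pass_cases and fail_cases regexes; the matching itself happens in the base class, which this diff does not show. A minimal sketch of that contract as implied by the audits, assuming every pass pattern must match some line and no fail pattern may match any line:

```python
import re

def is_compliant(path, pass_cases, fail_cases):
    # Sketch of the FileContentAudit contract assumed by the audits above;
    # the real base class lives in charmhelpers.contrib.hardening.audits.file.
    with open(path) as f:
        lines = f.read().splitlines()
    ok = all(any(re.search(p, ln) for ln in lines) for p in pass_cases)
    bad = any(any(re.search(p, ln) for ln in lines) for p in fail_cases)
    return ok and not bad

# e.g. is_compliant('/etc/ssh/sshd_config',
#                   pass_cases=[r'^Ciphers\s.*aes256-ctr[,\s]?'],
#                   fail_cases=[r'^Ciphers\s.*-cbc[,\s]?'])
```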
-{% if remote_hosts -%} -Host {{ ' '.join(remote_hosts) }} -{% endif %} -ForwardAgent no -ForwardX11 no -ForwardX11Trusted yes -RhostsRSAAuthentication no -RSAAuthentication yes -PasswordAuthentication {{ password_auth_allowed }} -HostbasedAuthentication no -GSSAPIAuthentication no -GSSAPIDelegateCredentials no -GSSAPIKeyExchange no -GSSAPITrustDNS no -BatchMode no -CheckHostIP yes -AddressFamily {{ addr_family }} -ConnectTimeout 0 -StrictHostKeyChecking ask -IdentityFile ~/.ssh/identity -IdentityFile ~/.ssh/id_rsa -IdentityFile ~/.ssh/id_dsa -# The port at the destination should be defined -{% for port in ports -%} -Port {{ port }} -{% endfor %} -Protocol 2 -Cipher 3des -{% if ciphers -%} -Ciphers {{ ciphers }} -{%- endif %} -{% if macs -%} -MACs {{ macs }} -{%- endif %} -{% if kexs -%} -KexAlgorithms {{ kexs }} -{%- endif %} -EscapeChar ~ -Tunnel no -TunnelDevice any:any -PermitLocalCommand no -VisualHostKey no -RekeyLimit 1G 1h -SendEnv LANG LC_* -HashKnownHosts yes -{% if roaming -%} -UseRoaming {{ roaming }} -{% endif %} diff --git a/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config deleted file mode 100644 index 5f87298a..00000000 --- a/hooks/charmhelpers/contrib/hardening/ssh/templates/sshd_config +++ /dev/null @@ -1,159 +0,0 @@ -############################################################################### -# WARNING: This configuration file is maintained by Juju. Local changes may -# be overwritten. -############################################################################### -# Package generated configuration file -# See the sshd_config(5) manpage for details - -# What ports, IPs and protocols we listen for -{% for port in ports -%} -Port {{ port }} -{% endfor -%} -AddressFamily {{ addr_family }} -# Use these options to restrict which interfaces/protocols sshd will bind to -{% if ssh_ip -%} -{% for ip in ssh_ip -%} -ListenAddress {{ ip }} -{% endfor %} -{%- else -%} -ListenAddress :: -ListenAddress 0.0.0.0 -{% endif -%} -Protocol 2 -{% if ciphers -%} -Ciphers {{ ciphers }} -{% endif -%} -{% if macs -%} -MACs {{ macs }} -{% endif -%} -{% if kexs -%} -KexAlgorithms {{ kexs }} -{% endif -%} -# HostKeys for protocol version 2 -{% for keyfile in host_key_files -%} -HostKey {{ keyfile }} -{% endfor -%} - -# Privilege Separation is turned on for security -{% if use_priv_sep -%} -UsePrivilegeSeparation {{ use_priv_sep }} -{% endif -%} - -# Lifetime and size of ephemeral version 1 server key -KeyRegenerationInterval 3600 -ServerKeyBits 1024 - -# Logging -SyslogFacility AUTH -LogLevel VERBOSE - -# Authentication: -LoginGraceTime 30s -{% if allow_root_with_key -%} -PermitRootLogin without-password -{% else -%} -PermitRootLogin no -{% endif %} -PermitTunnel no -PermitUserEnvironment no -StrictModes yes - -RSAAuthentication yes -PubkeyAuthentication yes -AuthorizedKeysFile %h/.ssh/authorized_keys - -# Don't read the user's ~/.rhosts and ~/.shosts files -IgnoreRhosts yes -# For this to work you will also need host keys in /etc/ssh_known_hosts -RhostsRSAAuthentication no -# similar for protocol version 2 -HostbasedAuthentication no -# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication -IgnoreUserKnownHosts yes - -# To enable empty passwords, change to yes (NOT RECOMMENDED) -PermitEmptyPasswords no - -# Change to yes to enable challenge-response passwords (beware issues with -# some PAM modules and threads) -ChallengeResponseAuthentication no - -# Change to no to disable tunnelled clear 
text passwords -PasswordAuthentication {{ password_authentication }} - -# Kerberos options -KerberosAuthentication no -KerberosGetAFSToken no -KerberosOrLocalPasswd no -KerberosTicketCleanup yes - -# GSSAPI options -GSSAPIAuthentication no -GSSAPICleanupCredentials yes - -X11Forwarding {{ allow_x11_forwarding }} -X11DisplayOffset 10 -X11UseLocalhost yes -GatewayPorts no -PrintMotd {{ print_motd }} -PrintLastLog {{ print_last_log }} -TCPKeepAlive no -UseLogin no - -ClientAliveInterval {{ client_alive_interval }} -ClientAliveCountMax {{ client_alive_count }} -AllowTcpForwarding {{ allow_tcp_forwarding }} -AllowAgentForwarding {{ allow_agent_forwarding }} - -MaxStartups 10:30:100 -#Banner /etc/issue.net - -# Allow client to pass locale environment variables -AcceptEnv LANG LC_* - -# Set this to 'yes' to enable PAM authentication, account processing, -# and session processing. If this is enabled, PAM authentication will -# be allowed through the ChallengeResponseAuthentication and -# PasswordAuthentication. Depending on your PAM configuration, -# PAM authentication via ChallengeResponseAuthentication may bypass -# the setting of "PermitRootLogin without-password". -# If you just want the PAM account and session checks to run without -# PAM authentication, then enable this but set PasswordAuthentication -# and ChallengeResponseAuthentication to 'no'. -UsePAM {{ use_pam }} - -{% if deny_users -%} -DenyUsers {{ deny_users }} -{% endif -%} -{% if allow_users -%} -AllowUsers {{ allow_users }} -{% endif -%} -{% if deny_groups -%} -DenyGroups {{ deny_groups }} -{% endif -%} -{% if allow_groups -%} -AllowGroups allow_groups -{% endif -%} -UseDNS {{ use_dns }} -MaxAuthTries {{ max_auth_tries }} -MaxSessions {{ max_sessions }} - -{% if sftp_enable -%} -# Configuration, in case SFTP is used -## override default of no subsystems -## Subsystem sftp /opt/app/openssh5/libexec/sftp-server -Subsystem sftp internal-sftp -l VERBOSE - -## These lines must appear at the *end* of sshd_config -Match Group {{ sftp_group }} -ForceCommand internal-sftp -l VERBOSE -ChrootDirectory {{ sftp_chroot }} -{% else -%} -# Configuration, in case SFTP is used -## override default of no subsystems -## Subsystem sftp /opt/app/openssh5/libexec/sftp-server -## These lines must appear at the *end* of sshd_config -Match Group sftponly -ForceCommand internal-sftp -l VERBOSE -ChrootDirectory /sftpchroot/home/%u -{% endif %} diff --git a/hooks/charmhelpers/contrib/hardening/templating.py b/hooks/charmhelpers/contrib/hardening/templating.py deleted file mode 100644 index 4dee5465..00000000 --- a/hooks/charmhelpers/contrib/hardening/templating.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, -) - -try: - from jinja2 import FileSystemLoader, Environment -except ImportError: - from charmhelpers.fetch import apt_install - from charmhelpers.fetch import apt_update - apt_update(fatal=True) - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment - - -# NOTE: function separated from main rendering code to facilitate easier -# mocking in unit tests. -def write(path, data): - with open(path, 'wb') as out: - out.write(data) - - -def get_template_path(template_dir, path): - """Returns the template file which would be used to render the path. - - The path to the template file is returned. - :param template_dir: the directory the templates are located in - :param path: the file path to be written to. - :returns: path to the template file - """ - return os.path.join(template_dir, os.path.basename(path)) - - -def render_and_write(template_dir, path, context): - """Renders the specified template into the file. - - :param template_dir: the directory to load the template from - :param path: the path to write the templated contents to - :param context: the parameters to pass to the rendering engine - """ - env = Environment(loader=FileSystemLoader(template_dir)) - template_file = os.path.basename(path) - template = env.get_template(template_file) - log('Rendering from template: %s' % template.name, level=DEBUG) - rendered_content = template.render(context) - if not rendered_content: - log("Render returned None - skipping '%s'" % path, - level=WARNING) - return - - write(path, rendered_content.encode('utf-8').strip()) - log('Wrote template %s' % path, level=DEBUG) diff --git a/hooks/charmhelpers/contrib/hardening/utils.py b/hooks/charmhelpers/contrib/hardening/utils.py deleted file mode 100644 index f93851a9..00000000 --- a/hooks/charmhelpers/contrib/hardening/utils.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2016-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import grp -import os -import pwd -import yaml - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - INFO, - WARNING, - ERROR, -) - - -# Global settings cache. Since each hook fire entails a fresh module import it -# is safe to hold this in memory and not risk missing config changes (since -# they will result in a new hook fire and thus re-import). -__SETTINGS__ = {} - - -def _get_defaults(modules): - """Load the default config for the provided modules. - - :param modules: stack modules config defaults to lookup. - :returns: modules default config dictionary. - """ - default = os.path.join(os.path.dirname(__file__), - 'defaults/%s.yaml' % (modules)) - return yaml.safe_load(open(default)) - - -def _get_schema(modules): - """Load the config schema for the provided modules. - - NOTE: this schema is intended to have 1-1 relationship with they keys in - the default config and is used a means to verify valid overrides provided - by the user. 
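render_and_write() above resolves the template by the basename of the destination path. Before this removal a caller would have invoked it roughly as follows; the context keys follow SSHConfigContext and the values are illustrative:

```python
from charmhelpers.contrib.hardening.templating import render_and_write

# Renders the 'ssh_config' template found in template_dir into path.
render_and_write(
    template_dir='hooks/charmhelpers/contrib/hardening/ssh/templates',
    path='/etc/ssh/ssh_config',
    context={'ports': [22], 'addr_family': 'any', 'remote_hosts': [],
             'password_auth_allowed': 'no', 'ciphers': None,
             'macs': None, 'kexs': None, 'roaming': 'no'},
)
```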
- - :param modules: stack modules config schema to lookup. - :returns: modules default schema dictionary. - """ - schema = os.path.join(os.path.dirname(__file__), - 'defaults/%s.yaml.schema' % (modules)) - return yaml.safe_load(open(schema)) - - -def _get_user_provided_overrides(modules): - """Load user-provided config overrides. - - :param modules: stack modules to lookup in user overrides yaml file. - :returns: overrides dictionary. - """ - overrides = os.path.join(os.environ['JUJU_CHARM_DIR'], - 'hardening.yaml') - if os.path.exists(overrides): - log("Found user-provided config overrides file '%s'" % - (overrides), level=DEBUG) - settings = yaml.safe_load(open(overrides)) - if settings and settings.get(modules): - log("Applying '%s' overrides" % (modules), level=DEBUG) - return settings.get(modules) - - log("No overrides found for '%s'" % (modules), level=DEBUG) - else: - log("No hardening config overrides file '%s' found in charm " - "root dir" % (overrides), level=DEBUG) - - return {} - - -def _apply_overrides(settings, overrides, schema): - """Get overrides config overlaid onto modules defaults. - - :param modules: require stack modules config. - :returns: dictionary of modules config with user overrides applied. - """ - if overrides: - for k, v in overrides.items(): - if k in schema: - if schema[k] is None: - settings[k] = v - elif type(schema[k]) is dict: - settings[k] = _apply_overrides(settings[k], overrides[k], - schema[k]) - else: - raise Exception("Unexpected type found in schema '%s'" % - type(schema[k]), level=ERROR) - else: - log("Unknown override key '%s' - ignoring" % (k), level=INFO) - - return settings - - -def get_settings(modules): - global __SETTINGS__ - if modules in __SETTINGS__: - return __SETTINGS__[modules] - - schema = _get_schema(modules) - settings = _get_defaults(modules) - overrides = _get_user_provided_overrides(modules) - __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema) - return __SETTINGS__[modules] - - -def ensure_permissions(path, user, group, permissions, maxdepth=-1): - """Ensure permissions for path. - - If path is a file, apply to file and return. If path is a directory, - apply recursively (if required) to directory contents and return. - - :param user: user name - :param group: group name - :param permissions: octal permissions - :param maxdepth: maximum recursion depth. A negative maxdepth allows - infinite recursion and maxdepth=0 means no recursion. - :returns: None - """ - if not os.path.exists(path): - log("File '%s' does not exist - cannot set permissions" % (path), - level=WARNING) - return - - _user = pwd.getpwnam(user) - os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) - os.chmod(path, permissions) - - if maxdepth == 0: - log("Max recursion depth reached - skipping further recursion", - level=DEBUG) - return - elif maxdepth > 0: - maxdepth -= 1 - - if os.path.isdir(path): - contents = glob.glob("%s/*" % (path)) - for c in contents: - ensure_permissions(c, user=user, group=group, - permissions=permissions, maxdepth=maxdepth) diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/network/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py deleted file mode 100644 index de56584d..00000000 --- a/hooks/charmhelpers/contrib/network/ip.py +++ /dev/null @@ -1,590 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import re -import subprocess -import socket - -from functools import partial - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import ( - config, - log, - network_get_primary_address, - unit_get, - WARNING, - NoNetworkBinding, -) - -from charmhelpers.core.host import ( - lsb_release, - CompareHostReleases, -) - -try: - import netifaces -except ImportError: - apt_update(fatal=True) - apt_install('python3-netifaces', fatal=True) - import netifaces - -try: - import netaddr -except ImportError: - apt_update(fatal=True) - apt_install('python3-netaddr', fatal=True) - import netaddr - - -def _validate_cidr(network): - try: - netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - -def no_ip_found_error_out(network): - errmsg = ("No IP address found in network(s): %s" % network) - raise ValueError(errmsg) - - -def _get_ipv6_network_from_address(address): - """Get an netaddr.IPNetwork for the given IPv6 address - :param address: a dict as returned by netifaces.ifaddresses - :returns netaddr.IPNetwork: None if the address is a link local or loopback - address - """ - if address['addr'].startswith('fe80') or address['addr'] == "::1": - return None - - prefix = address['netmask'].split("/") - if len(prefix) > 1: - netmask = prefix[1] - else: - netmask = address['netmask'] - return netaddr.IPNetwork("%s/%s" % (address['addr'], - netmask)) - - -def get_address_in_network(network, fallback=None, fatal=False): - """Get an IPv4 or IPv6 address within the network from the host. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. Supports multiple networks as a space-delimited list. - :param fallback (str): If no address is found, return fallback. - :param fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). 
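The CIDR matching used throughout this module reduces to netaddr containment tests; the same check in isolation:

```python
import netaddr

# An interface's addr/netmask pair becomes an IPNetwork; membership in the
# target network is then a simple containment test, exactly as in
# get_address_in_network() and _get_for_address() in this module.
network = netaddr.IPNetwork('192.168.1.0/24')
cidr = netaddr.IPNetwork('192.168.1.10/255.255.255.0')
if cidr in network:
    print(str(cidr.ip))  # -> 192.168.1.10
```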
- """ - if network is None: - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - else: - return None - - networks = network.split() or [network] - for network in networks: - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - try: - addresses = netifaces.ifaddresses(iface) - except ValueError: - # If an instance was deleted between - # netifaces.interfaces() run and now, its interfaces are gone - continue - if network.version == 4 and netifaces.AF_INET in addresses: - for addr in addresses[netifaces.AF_INET]: - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - cidr = _get_ipv6_network_from_address(addr) - if cidr and cidr in network: - return str(cidr.ip) - - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - - return None - - -def is_ipv6(address): - """Determine whether provided address is IPv6 or not.""" - try: - address = netaddr.IPAddress(address) - except netaddr.AddrFormatError: - # probably a hostname - so not an address at all! - return False - - return address.version == 6 - - -def is_address_in_network(network, address): - """ - Determine whether the provided address is within a network range. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - :param address: An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :returns boolean: Flag indicating whether address is in network. - """ - try: - network = netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - try: - address = netaddr.IPAddress(address) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Address (%s) is not in correct presentation format" % - address) - - if address in network: - return True - else: - return False - - -def _get_for_address(address, key): - """Retrieve an attribute of or the physical interface that - the IP address provided could be bound to. - - :param address (str): An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :param key: 'iface' for the physical interface name or an attribute - of the configured interface, for example 'netmask'. - :returns str: Requested attribute or None if address is not bindable. 
- """ - address = netaddr.IPAddress(address) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if address.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - else: - return addresses[netifaces.AF_INET][0][key] - - if address.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - network = _get_ipv6_network_from_address(addr) - if not network: - continue - - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - return None - - -get_iface_for_address = partial(_get_for_address, key='iface') - - -get_netmask_for_address = partial(_get_for_address, key='netmask') - - -def resolve_network_cidr(ip_address): - ''' - Resolves the full address cidr of an ip_address based on - configured network interfaces - ''' - netmask = get_netmask_for_address(ip_address) - return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) - - -def format_ipv6_addr(address): - """If address is IPv6, wrap it in '[]' otherwise return None. - - This is required by most configuration files when specifying IPv6 - addresses. - """ - if is_ipv6(address): - return "[%s]" % address - - return None - - -def is_ipv6_disabled(): - try: - result = subprocess.check_output( - ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], - stderr=subprocess.STDOUT, - universal_newlines=True) - except subprocess.CalledProcessError: - return True - - return "net.ipv6.conf.all.disable_ipv6 = 1" in result - - -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, - fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any. - - :param iface: network interface on which address(es) are expected to - be found. - :param inet_type: inet address family - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :return: list of ip addresses - """ - # Extract nic if passed /dev/ethX - if '/' in iface: - iface = iface.split('/')[-1] - - if not exc_list: - exc_list = [] - - try: - inet_num = getattr(netifaces, inet_type) - except AttributeError: - raise Exception("Unknown inet type '%s'" % str(inet_type)) - - interfaces = netifaces.interfaces() - if inc_aliases: - ifaces = [] - for _iface in interfaces: - if iface == _iface or _iface.split(':')[0] == iface: - ifaces.append(_iface) - - if fatal and not ifaces: - raise Exception("Invalid interface '%s'" % iface) - - ifaces.sort() - else: - if iface not in interfaces: - if fatal: - raise Exception("Interface '%s' not found " % (iface)) - else: - return [] - - else: - ifaces = [iface] - - addresses = [] - for netiface in ifaces: - net_info = netifaces.ifaddresses(netiface) - if inet_num in net_info: - for entry in net_info[inet_num]: - if 'addr' in entry and entry['addr'] not in exc_list: - addresses.append(entry['addr']) - - if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." 
% - (iface, inet_type)) - - return sorted(addresses) - - -get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - - -def get_iface_from_addr(addr): - """Work out on which interface the provided address is configured.""" - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - for inet_type in addresses: - for _addr in addresses[inet_type]: - _addr = _addr['addr'] - # link local - ll_key = re.compile("(.+)%.*") - raw = re.match(ll_key, _addr) - if raw: - _addr = raw.group(1) - - if _addr == addr: - log("Address '%s' is configured on iface '%s'" % - (addr, iface)) - return iface - - msg = "Unable to infer net iface on which '%s' is configured" % (addr) - raise Exception(msg) - - -def sniff_iface(f): - """Ensure decorated function is called with a value for iface. - - If no iface provided, inject net iface inferred from unit private address. - """ - def iface_sniffer(*args, **kwargs): - if not kwargs.get('iface', None): - kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) - - return f(*args, **kwargs) - - return iface_sniffer - - -@sniff_iface -def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, - dynamic_only=True): - """Get assigned IPv6 address for a given interface. - - Returns list of addresses found. If no address found, returns empty list. - - If iface is None, we infer the current primary interface by doing a reverse - lookup on the unit private-address. - - We currently only support scope global IPv6 addresses i.e. non-temporary - addresses. If no global IPv6 address is found, return the first one found - in the ipv6 address list. - - :param iface: network interface on which ipv6 address(es) are expected to - be found. - :param inc_aliases: include alias interfaces in search - :param fatal: if True, raise exception if address not found - :param exc_list: list of addresses to ignore - :param dynamic_only: only recognise dynamic addresses - :return: list of ipv6 addresses - """ - addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', - inc_aliases=inc_aliases, fatal=fatal, - exc_list=exc_list) - - if addresses: - global_addrs = [] - for addr in addresses: - key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") - m = re.match(key_scope_link_local, addr) - if m: - eui_64_mac = m.group(1) - iface = m.group(2) - else: - global_addrs.append(addr) - - if global_addrs: - # Make sure any found global addresses are not temporary - cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output( - cmd).decode('UTF-8', errors='replace') - if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") - else: - key = re.compile("inet6 (.+)/[0-9]+ scope global.*") - - addrs = [] - for line in out.split('\n'): - line = line.strip() - m = re.match(key, line) - if m and 'temporary' not in line: - # Return the first valid address we find - for addr in global_addrs: - if m.group(1) == addr: - if not dynamic_only or \ - m.group(1).endswith(eui_64_mac): - addrs.append(addr) - - if addrs: - return addrs - - if fatal: - raise Exception("Interface '%s' does not have a scope global " - "non-temporary ipv6 address." 
% iface) - - return [] - - -def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """Return a list of bridges on the system.""" - b_regex = "%s/*/bridge" % vnic_dir - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] - - -def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """Return a list of nics comprising a given bridge on the system.""" - brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_regex)] - - -def is_bridge_member(nic): - """Check if a given nic is a member of a bridge.""" - for bridge in get_bridges(): - if nic in get_bridge_nics(bridge): - return True - - return False - - -def is_ip(address): - """ - Returns True if address is a valid IP address. - """ - try: - # Test to see if already an IPv4/IPv6 address - address = netaddr.IPAddress(address) - return True - except (netaddr.AddrFormatError, ValueError): - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - apt_install('python3-dnspython', fatal=True) - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, str): - rtype = 'A' - else: - return None - - try: - answers = dns.resolver.query(address, rtype) - except dns.resolver.NXDOMAIN: - return None - - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname, fallback=None): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - ip_addr = ns_query(hostname) - if not ip_addr: - try: - ip_addr = socket.gethostbyname(hostname) - except Exception: - log("Failed to resolve hostname '%s'" % (hostname), - level=WARNING) - return fallback - return ip_addr - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - apt_install("python3-dnspython", fatal=True) - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - - if not result: - try: - result = socket.gethostbyaddr(address)[0] - except Exception: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] - - -def port_has_listener(address, port): - """ - Returns True if the address:port is open and being listened to, - else False. - - @param address: an IP address or hostname - @param port: integer port - - Note calls 'zc' via a subprocess shell - """ - cmd = ['nc', '-z', address, str(port)] - result = subprocess.call(cmd) - return not(bool(result)) - - -def assert_charm_supports_ipv6(): - """Check whether we are able to support charms ipv6.""" - release = lsb_release()['DISTRIB_CODENAME'].lower() - if CompareHostReleases(release) < "trusty": - raise Exception("IPv6 is not supported in the charms for Ubuntu " - "versions less than Trusty 14.04") - - -def get_relation_ip(interface, cidr_network=None): - """Return this unit's IP for the given interface. - - Allow for an arbitrary interface to use with network-get to select an IP. - Handle all address selection options including passed cidr network and - IPv6. - - Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8') - - @param interface: string name of the relation. - @param cidr_network: string CIDR Network to select an address from. 
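port_has_listener() above shells out to nc -z to test whether anything is listening on address:port. A pure-Python alternative with the same semantics (a sketch, not what the helper actually does):

```python
import socket

def port_open(address, port, timeout=3.0):
    # A TCP connect succeeds only if something is listening, mirroring
    # the `nc -z address port` probe used by port_has_listener() above.
    try:
        with socket.create_connection((address, port), timeout=timeout):
            return True
    except OSError:
        return False

print(port_open('127.0.0.1', 22))
```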
- @raises Exception if prefer-ipv6 is configured but IPv6 unsupported. - @returns IPv6 or IPv4 address - """ - # Select the interface address first - # For possible use as a fallback below with get_address_in_network - try: - # Get the interface specific IP - address = network_get_primary_address(interface) - except NotImplementedError: - # If network-get is not available - address = get_host_ip(unit_get('private-address')) - except NoNetworkBinding: - log("No network binding for {}".format(interface), WARNING) - address = get_host_ip(unit_get('private-address')) - - if config('prefer-ipv6'): - # Currently IPv6 has priority, eventually we want IPv6 to just be - # another network space. - assert_charm_supports_ipv6() - return get_ipv6_addr()[0] - elif cidr_network: - # If a specific CIDR network is passed get the address from that - # network. - return get_address_in_network(cidr_network, address) - - # Return the interface address - return address diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/openstack/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py deleted file mode 100644 index 547de09c..00000000 --- a/hooks/charmhelpers/contrib/openstack/alternatives.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -''' Helper for managing alternatives for file conflict resolution ''' - -import subprocess -import shutil -import os - - -def install_alternative(name, target, source, priority=50): - ''' Install alternative configuration ''' - if (os.path.exists(target) and not os.path.islink(target)): - # Move existing file/directory away before installing - shutil.move(target, '{}.bak'.format(target)) - cmd = [ - 'update-alternatives', '--force', '--install', - target, name, source, str(priority) - ] - subprocess.check_call(cmd) - - -def remove_alternative(name, source): - """Remove an installed alternative configuration file - - :param name: string name of the alternative to remove - :param source: string full path to alternative to remove - """ - cmd = [ - 'update-alternatives', '--remove', - name, source - ] - subprocess.check_call(cmd) diff --git a/hooks/charmhelpers/contrib/openstack/audits/__init__.py b/hooks/charmhelpers/contrib/openstack/audits/__init__.py deleted file mode 100644 index 7f7e5f79..00000000 --- a/hooks/charmhelpers/contrib/openstack/audits/__init__.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""OpenStack Security Audit code""" - -import collections -from enum import Enum -import traceback - -from charmhelpers.core.host import cmp_pkgrevno -import charmhelpers.contrib.openstack.utils as openstack_utils -import charmhelpers.core.hookenv as hookenv - - -class AuditType(Enum): - OpenStackSecurityGuide = 1 - - -_audits = {} - -Audit = collections.namedtuple('Audit', 'func filters') - - -def audit(*args): - """Decorator to register an audit. - - These are used to generate audits that can be run on a - deployed system that matches the given configuration - - :param args: List of functions to filter tests against - :type args: List[Callable[Dict]] - """ - def wrapper(f): - test_name = f.__name__ - if _audits.get(test_name): - raise RuntimeError( - "Test name '{}' used more than once" - .format(test_name)) - non_callables = [fn for fn in args if not callable(fn)] - if non_callables: - raise RuntimeError( - "Configuration includes non-callable filters: {}" - .format(non_callables)) - _audits[test_name] = Audit(func=f, filters=args) - return f - return wrapper - - -def is_audit_type(*args): - """This audit is included in the specified kinds of audits. - - :param *args: List of AuditTypes to include this audit in - :type args: List[AuditType] - :rtype: Callable[Dict] - """ - def _is_audit_type(audit_options): - if audit_options.get('audit_type') in args: - return True - else: - return False - return _is_audit_type - - -def since_package(pkg, pkg_version): - """This audit should be run after the specified package version (incl). 
- - :param pkg: Package name to compare - :type pkg: str - :param release: The package version - :type release: str - :rtype: Callable[Dict] - """ - def _since_package(audit_options=None): - return cmp_pkgrevno(pkg, pkg_version) >= 0 - - return _since_package - - -def before_package(pkg, pkg_version): - """This audit should be run before the specified package version (excl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The package version - :type release: str - :rtype: Callable[Dict] - """ - def _before_package(audit_options=None): - return not since_package(pkg, pkg_version)() - - return _before_package - - -def since_openstack_release(pkg, release): - """This audit should run after the specified OpenStack version (incl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The OpenStack release codename - :type release: str - :rtype: Callable[Dict] - """ - def _since_openstack_release(audit_options=None): - _release = openstack_utils.get_os_codename_package(pkg) - return openstack_utils.CompareOpenStackReleases(_release) >= release - - return _since_openstack_release - - -def before_openstack_release(pkg, release): - """This audit should run before the specified OpenStack version (excl). - - :param pkg: Package name to compare - :type pkg: str - :param release: The OpenStack release codename - :type release: str - :rtype: Callable[Dict] - """ - def _before_openstack_release(audit_options=None): - return not since_openstack_release(pkg, release)() - - return _before_openstack_release - - -def it_has_config(config_key): - """This audit should be run based on specified config keys. - - :param config_key: Config key to look for - :type config_key: str - :rtype: Callable[Dict] - """ - def _it_has_config(audit_options): - return audit_options.get(config_key) is not None - - return _it_has_config - - -def run(audit_options): - """Run the configured audits with the specified audit_options. - - :param audit_options: Configuration for the audit - :type audit_options: Config - - :rtype: Dict[str, str] - """ - errors = {} - results = {} - for name, audit in sorted(_audits.items()): - result_name = name.replace('_', '-') - if result_name in audit_options.get('excludes', []): - print( - "Skipping {} because it is" - "excluded in audit config" - .format(result_name)) - continue - if all(p(audit_options) for p in audit.filters): - try: - audit.func(audit_options) - print("{}: PASS".format(name)) - results[result_name] = { - 'success': True, - } - except AssertionError as e: - print("{}: FAIL ({})".format(name, e)) - results[result_name] = { - 'success': False, - 'message': e, - } - except Exception as e: - print("{}: ERROR ({})".format(name, e)) - errors[name] = e - results[result_name] = { - 'success': False, - 'message': e, - } - for name, error in errors.items(): - print("=" * 20) - print("Error in {}: ".format(name)) - traceback.print_tb(error.__traceback__) - print() - return results - - -def action_parse_results(result): - """Parse the result of `run` in the context of an action. 
- - :param result: The result of running the security-checklist - action on a unit - :type result: Dict[str, Dict[str, str]] - :rtype: int - """ - passed = True - for test, result in result.items(): - if result['success']: - hookenv.action_set({test: 'PASS'}) - else: - hookenv.action_set({test: 'FAIL - {}'.format(result['message'])}) - passed = False - if not passed: - hookenv.action_fail("One or more tests failed") - return 0 if passed else 1 diff --git a/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py deleted file mode 100644 index 79740ed0..00000000 --- a/hooks/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import configparser -import glob -import os.path -import subprocess - -from charmhelpers.contrib.openstack.audits import ( - audit, - AuditType, - # filters - is_audit_type, - it_has_config, -) - -from charmhelpers.core.hookenv import ( - cached, -) - -""" -The Security Guide suggests a specific list of files inside the -config directory for the service having 640 specifically, but -by ensuring the containing directory is 750, only the owner can -write, and only the group can read files within the directory. - -By restricting access to the containing directory, we can more -effectively ensure that there is no accidental leakage if a new -file is added to the service without being added to the security -guide, and to this check. 
-""" -FILE_ASSERTIONS = { - 'barbican': { - '/etc/barbican': {'group': 'barbican', 'mode': '750'}, - }, - 'ceph-mon': { - '/var/lib/charm/ceph-mon/ceph.conf': - {'owner': 'root', 'group': 'root', 'mode': '644'}, - '/etc/ceph/ceph.client.admin.keyring': - {'owner': 'ceph', 'group': 'ceph'}, - '/etc/ceph/rbdmap': {'mode': '644'}, - '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, - '/var/lib/ceph/bootstrap-*/ceph.keyring': - {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} - }, - 'ceph-osd': { - '/var/lib/charm/ceph-osd/ceph.conf': - {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, - '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, - '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, - '/var/lib/ceph/bootstrap-*/ceph.keyring': - {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, - '/var/lib/ceph/radosgw': - {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, - }, - 'cinder': { - '/etc/cinder': {'group': 'cinder', 'mode': '750'}, - }, - 'glance': { - '/etc/glance': {'group': 'glance', 'mode': '750'}, - }, - 'keystone': { - '/etc/keystone': - {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, - }, - 'manilla': { - '/etc/manila': {'group': 'manilla', 'mode': '750'}, - }, - 'neutron-gateway': { - '/etc/neutron': {'group': 'neutron', 'mode': '750'}, - }, - 'neutron-api': { - '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, - }, - 'nova-cloud-controller': { - '/etc/nova': {'group': 'nova', 'mode': '750'}, - }, - 'nova-compute': { - '/etc/nova/': {'group': 'nova', 'mode': '750'}, - }, - 'openstack-dashboard': { - # From security guide - '/etc/openstack-dashboard/local_settings.py': - {'group': 'horizon', 'mode': '640'}, - }, -} - -Ownership = collections.namedtuple('Ownership', 'owner group mode') - - -@cached -def _stat(file): - """ - Get the Ownership information from a file. - - :param file: The path to a file to stat - :type file: str - :returns: owner, group, and mode of the specified file - :rtype: Ownership - :raises subprocess.CalledProcessError: If the underlying stat fails - """ - out = subprocess.check_output( - ['stat', '-c', '%U %G %a', file]).decode('utf-8') - return Ownership(*out.strip().split(' ')) - - -@cached -def _config_ini(path): - """ - Parse an ini file - - :param path: The path to a file to parse - :type file: str - :returns: Configuration contained in path - :rtype: Dict - """ - # When strict is enabled, duplicate options are not allowed in the - # parsed INI; however, Oslo allows duplicate values. This change - # causes us to ignore the duplicate values which is acceptable as - # long as we don't validate any multi-value options - conf = configparser.ConfigParser(strict=False) - conf.read(path) - return dict(conf) - - -def _validate_file_ownership(owner, group, file_name, optional=False): - """ - Validate that a specified file is owned by `owner:group`. 
- - :param owner: Name of the owner - :type owner: str - :param group: Name of the group - :type group: str - :param file_name: Path to the file to verify - :type file_name: str - :param optional: Is this file optional, - ie: Should this test fail when it's missing - :type optional: bool - """ - try: - ownership = _stat(file_name) - except subprocess.CalledProcessError as e: - print("Error reading file: {}".format(e)) - if not optional: - assert False, "Specified file does not exist: {}".format(file_name) - assert owner == ownership.owner, \ - "{} has an incorrect owner: {} should be {}".format( - file_name, ownership.owner, owner) - assert group == ownership.group, \ - "{} has an incorrect group: {} should be {}".format( - file_name, ownership.group, group) - print("Validate ownership of {}: PASS".format(file_name)) - - -def _validate_file_mode(mode, file_name, optional=False): - """ - Validate that a specified file has the specified permissions. - - :param mode: file mode that is desires - :type owner: str - :param file_name: Path to the file to verify - :type file_name: str - :param optional: Is this file optional, - ie: Should this test fail when it's missing - :type optional: bool - """ - try: - ownership = _stat(file_name) - except subprocess.CalledProcessError as e: - print("Error reading file: {}".format(e)) - if not optional: - assert False, "Specified file does not exist: {}".format(file_name) - assert mode == ownership.mode, \ - "{} has an incorrect mode: {} should be {}".format( - file_name, ownership.mode, mode) - print("Validate mode of {}: PASS".format(file_name)) - - -@cached -def _config_section(config, section): - """Read the configuration file and return a section.""" - path = os.path.join(config.get('config_path'), config.get('config_file')) - conf = _config_ini(path) - return conf.get(section) - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide), - it_has_config('files')) -def validate_file_ownership(config): - """Verify that configuration files are owned by the correct user/group.""" - files = config.get('files', {}) - for file_name, options in files.items(): - for key in options.keys(): - if key not in ["owner", "group", "mode"]: - raise RuntimeError( - "Invalid ownership configuration: {}".format(key)) - owner = options.get('owner', config.get('owner', 'root')) - group = options.get('group', config.get('group', 'root')) - optional = options.get('optional', config.get('optional', False)) - if '*' in file_name: - for file in glob.glob(file_name): - if file not in files.keys(): - if os.path.isfile(file): - _validate_file_ownership(owner, group, file, optional) - else: - if os.path.isfile(file_name): - _validate_file_ownership(owner, group, file_name, optional) - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide), - it_has_config('files')) -def validate_file_permissions(config): - """Verify that permissions on configuration files are secure enough.""" - files = config.get('files', {}) - for file_name, options in files.items(): - for key in options.keys(): - if key not in ["owner", "group", "mode"]: - raise RuntimeError( - "Invalid ownership configuration: {}".format(key)) - mode = options.get('mode', config.get('permissions', '600')) - optional = options.get('optional', config.get('optional', False)) - if '*' in file_name: - for file in glob.glob(file_name): - if file not in files.keys(): - if os.path.isfile(file): - _validate_file_mode(mode, file, optional) - else: - if os.path.isfile(file_name): - _validate_file_mode(mode, file_name, optional) - - 
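The `_stat` helper these validators rely on shells out to `stat -c '%U %G %a'` and splits the output into an `Ownership` tuple. A minimal sketch of deriving the same owner/group/mode triple with only the standard library; `stat_file` is an illustrative name, not part of charm-helpers:

```python
import collections
import grp
import os
import pwd
import stat

Ownership = collections.namedtuple('Ownership', 'owner group mode')


def stat_file(path):
    """Stdlib equivalent of the removed _stat() helper."""
    st = os.stat(path)
    return Ownership(
        owner=pwd.getpwuid(st.st_uid).pw_name,
        group=grp.getgrgid(st.st_gid).gr_name,
        # Permission bits in octal without the '0o' prefix, e.g. '640',
        # matching the format stat's %a specifier produces.
        mode=oct(stat.S_IMODE(st.st_mode))[2:],
    )


# e.g. Ownership(owner='root', group='root', mode='644')
print(stat_file('/etc/hosts'))
```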
-@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_keystone(audit_options): - """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'api / DEFAULT'" - assert section.get('auth_strategy') == "keystone", \ - "Application is not using Keystone" - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_tls_for_keystone(audit_options): - """Verify that TLS is used to communicate with Keystone.""" - section = _config_section(audit_options, 'keystone_authtoken') - assert section is not None, "Missing section 'keystone_authtoken'" - assert not section.get('insecure') and \ - "https://" in section.get("auth_uri"), \ - "TLS is not used for Keystone" - - -@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) -def validate_uses_tls_for_glance(audit_options): - """Verify that TLS is used to communicate with Glance.""" - section = _config_section(audit_options, 'glance') - assert section is not None, "Missing section 'glance'" - assert not section.get('insecure') and \ - "https://" in section.get("api_servers"), \ - "TLS is not used for Glance" diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py deleted file mode 100644 index 5c961c58..00000000 --- a/hooks/charmhelpers/contrib/openstack/cert_utils.py +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Common python helper functions used for OpenStack charm certificates. 
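For orientation, here is a hedged sketch of how a charm could register its own check with the audit framework removed above and run it alongside the security-guide file checks. `my_custom_check` and the option values are hypothetical, and the sketch assumes charm-helpers is importable; audits signal failure by raising `AssertionError`, which `run()` folds into the results dict:

```python
from charmhelpers.contrib.openstack.audits import (
    AuditType,
    audit,
    is_audit_type,
    run,
)
from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
    FILE_ASSERTIONS,
)


@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def my_custom_check(audit_options):
    # A failed assertion becomes {'success': False, 'message': ...}.
    assert audit_options.get('config_path'), "config_path is not set"


results = run({
    'audit_type': AuditType.OpenStackSecurityGuide,
    'files': FILE_ASSERTIONS['ceph-mon'],
    'config_path': '/etc/ceph',
    # Keystone/TLS audits don't apply to ceph-mon; skip them by their
    # dashed result names.
    'excludes': [
        'validate-uses-keystone',
        'validate-uses-tls-for-keystone',
        'validate-uses-tls-for-glance',
    ],
})
```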
- -import os -import json -from base64 import b64decode - -from charmhelpers.contrib.network.ip import ( - get_hostname, - resolve_network_cidr, -) -from charmhelpers.core.hookenv import ( - local_unit, - network_get_primary_address, - config, - related_units, - relation_get, - relation_ids, - remote_service_name, - NoNetworkBinding, - log, - WARNING, - INFO, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - get_vip_in_network, - ADDRESS_MAP, - get_default_api_bindings, - local_address, -) -from charmhelpers.contrib.network.ip import ( - get_relation_ip, -) - -from charmhelpers.core.host import ( - ca_cert_absolute_path, - install_ca_cert, - mkdir, - write_file, -) - -from charmhelpers.contrib.hahelpers.apache import ( - CONFIG_CA_CERT_FILE, -) - - -class CertRequest(object): - - """Create a request for certificates to be generated - """ - - def __init__(self, json_encode=True): - self.entries = [] - self.hostname_entry = None - self.json_encode = json_encode - - def add_entry(self, net_type, cn, addresses): - """Add a request to the batch - - :param net_type: str network space name request is for - :param cn: str Canonical Name for certificate - :param addresses: [] List of addresses to be used as SANs - """ - self.entries.append({ - 'cn': cn, - 'addresses': addresses}) - - def add_hostname_cn(self): - """Add a request for the hostname of the machine""" - ip = local_address(unit_get_fallback='private-address') - addresses = [ip] - # If a vip is being used without os-hostname config or - # network spaces then we need to ensure the local units - # cert has the appropriate vip in the SAN list - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - addresses.append(vip) - self.hostname_entry = { - 'cn': get_hostname(ip), - 'addresses': addresses} - - def add_hostname_cn_ip(self, addresses): - """Add an address to the SAN list for the hostname request - - :param addr: [] List of address to be added - """ - for addr in addresses: - if addr not in self.hostname_entry['addresses']: - self.hostname_entry['addresses'].append(addr) - - def get_request(self): - """Generate request from the batched up entries - - """ - if self.hostname_entry: - self.entries.append(self.hostname_entry) - request = {} - for entry in self.entries: - sans = sorted(list(set(entry['addresses']))) - request[entry['cn']] = {'sans': sans} - if self.json_encode: - req = {'cert_requests': json.dumps(request, sort_keys=True)} - else: - req = {'cert_requests': request} - req['unit_name'] = local_unit().replace('/', '_') - return req - - -def get_certificate_request(json_encode=True, bindings=None): - """Generate a certificate requests based on the network configuration - - :param json_encode: Encode request in JSON or not. Used for setting - directly on a relation. - :type json_encode: boolean - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: CertRequest request as dictionary or JSON string. 
- :rtype: Union[dict, json] - """ - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - req = CertRequest(json_encode=json_encode) - req.add_hostname_cn() - # Add os-hostname entries - _sans = get_certificate_sans(bindings=bindings) - - # Handle specific hostnames per binding - for binding in bindings: - try: - hostname_override = config(ADDRESS_MAP[binding]['override']) - except KeyError: - hostname_override = None - try: - try: - net_addr = resolve_address(endpoint_type=binding) - except KeyError: - net_addr = None - ip = network_get_primary_address(binding) - addresses = [net_addr, ip] - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - addresses.append(vip) - - # Clear any Nones or duplicates - addresses = list(set([i for i in addresses if i])) - # Add hostname certificate request - if hostname_override: - req.add_entry( - binding, - hostname_override, - addresses) - # Remove hostname specific addresses from _sans - for addr in addresses: - try: - _sans.remove(addr) - except (ValueError, KeyError): - pass - - except NoNetworkBinding: - log("Skipping request for certificate for ip in {} space, no " - "local address found".format(binding), WARNING) - # Guarantee all SANs are covered - # These are network addresses with no corresponding hostname. - # Add the ips to the hostname cert to allow for this. - req.add_hostname_cn_ip(_sans) - return req.get_request() - - -def get_certificate_sans(bindings=None): - """Get all possible IP addresses for certificate SANs. - - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: List of binding string names - :rtype: List[str] - """ - _sans = [local_address(unit_get_fallback='private-address')] - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - for binding in bindings: - # Check for config override - try: - net_config = config(ADDRESS_MAP[binding]['config']) - except KeyError: - # There is no configuration network for this binding name - net_config = None - # Using resolve_address is likely redundant. Keeping it here in - # case there is an edge case it handles. - try: - net_addr = resolve_address(endpoint_type=binding) - except KeyError: - net_addr = None - ip = get_relation_ip(binding, cidr_network=net_config) - _sans = _sans + [net_addr, ip] - vip = get_vip_in_network(resolve_network_cidr(ip)) - if vip: - _sans.append(vip) - # Clear any Nones and duplicates - return list(set([i for i in _sans if i])) - - -def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None): - """Create symlinks for SAN records - - :param ssl_dir: str Directory to create symlinks in - :param custom_hostname_link: str Additional link to be created - :param bindings: List of bindings to check in addition to default api - bindings. 
- :type bindings: list of strings - """ - - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - # This includes the hostname cert and any specific bindng certs: - # admin, internal, public - req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"] - # Specific certs - for cert_req in req.keys(): - requested_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(cert_req)) - requested_key = os.path.join( - ssl_dir, - 'key_{}'.format(cert_req)) - for addr in req[cert_req]['sans']: - cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) - key = os.path.join(ssl_dir, 'key_{}'.format(addr)) - if os.path.isfile(requested_cert) and not os.path.isfile(cert): - os.symlink(requested_cert, cert) - os.symlink(requested_key, key) - - # Handle custom hostnames - hostname = get_hostname(local_address(unit_get_fallback='private-address')) - hostname_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(hostname)) - hostname_key = os.path.join( - ssl_dir, - 'key_{}'.format(hostname)) - if custom_hostname_link: - custom_cert = os.path.join( - ssl_dir, - 'cert_{}'.format(custom_hostname_link)) - custom_key = os.path.join( - ssl_dir, - 'key_{}'.format(custom_hostname_link)) - if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): - os.symlink(hostname_cert, custom_cert) - os.symlink(hostname_key, custom_key) - - -def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): - """Install the certs passed into the ssl dir and append the chain if - provided. - - :param ssl_dir: str Directory to create symlinks in - :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} - :param chain: str Chain to be appended to certs - :param user: (Optional) Owner of certificate files. Defaults to 'root' - :type user: str - :param group: (Optional) Group of certificate files. Defaults to 'root' - :type group: str - """ - for cn, bundle in certs.items(): - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - cert_data = bundle['cert'] - if chain: - # Append chain file so that clients that trust the root CA will - # trust certs signed by an intermediate in the chain - cert_data = cert_data + os.linesep + chain - write_file( - path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, - content=cert_data, perms=0o640) - write_file( - path=os.path.join(ssl_dir, key_filename), owner=user, group=group, - content=bundle['key'], perms=0o640) - - -def get_cert_relation_ca_name(cert_relation_id=None): - """Determine CA certificate name as provided by relation. - - The filename on disk depends on the name chosen for the application on the - providing end of the certificates relation. - - :param cert_relation_id: (Optional) Relation id providing the certs - :type cert_relation_id: str - :returns: CA certificate filename without path nor extension - :rtype: str - """ - if cert_relation_id is None: - try: - cert_relation_id = relation_ids('certificates')[0] - except IndexError: - return '' - return '{}_juju_ca_cert'.format( - remote_service_name(relid=cert_relation_id)) - - -def _manage_ca_certs(ca, cert_relation_id): - """Manage CA certs. - - :param ca: CA Certificate from certificate relation. 
- :type ca: str - :param cert_relation_id: Relation id providing the certs - :type cert_relation_id: str - """ - config_ssl_ca = config('ssl_ca') - config_cert_file = ca_cert_absolute_path(CONFIG_CA_CERT_FILE) - if config_ssl_ca: - log("Installing CA certificate from charm ssl_ca config to {}".format( - config_cert_file), INFO) - install_ca_cert( - b64decode(config_ssl_ca).rstrip(), - name=CONFIG_CA_CERT_FILE) - elif os.path.exists(config_cert_file): - log("Removing CA certificate {}".format(config_cert_file), INFO) - os.remove(config_cert_file) - log("Installing CA certificate from certificate relation", INFO) - install_ca_cert( - ca.encode(), - name=get_cert_relation_ca_name(cert_relation_id)) - - -def process_certificates(service_name, relation_id, unit, - custom_hostname_link=None, user='root', group='root', - bindings=None): - """Process the certificates supplied down the relation - - :param service_name: str Name of service the certificates are for. - :param relation_id: str Relation id providing the certs - :param unit: str Unit providing the certs - :param custom_hostname_link: str Name of custom link to create - :param user: (Optional) Owner of certificate files. Defaults to 'root' - :type user: str - :param group: (Optional) Group of certificate files. Defaults to 'root' - :type group: str - :param bindings: List of bindings to check in addition to default api - bindings. - :type bindings: list of strings - :returns: True if certificates processed for local unit or False - :rtype: bool - """ - if bindings: - # Add default API bindings to bindings list - bindings = list(bindings + get_default_api_bindings()) - else: - # Use default API bindings - bindings = get_default_api_bindings() - - data = relation_get(rid=relation_id, unit=unit) - ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) - mkdir(path=ssl_dir) - name = local_unit().replace('/', '_') - certs = data.get('{}.processed_requests'.format(name)) - chain = data.get('chain') - ca = data.get('ca') - if certs: - certs = json.loads(certs) - _manage_ca_certs(ca, relation_id) - install_certs(ssl_dir, certs, chain, user=user, group=group) - create_ip_cert_links( - ssl_dir, - custom_hostname_link=custom_hostname_link, - bindings=bindings) - return True - return False - - -def get_requests_for_local_unit(relation_name=None): - """Extract any certificates data targeted at this unit down relation_name. - - :param relation_name: str Name of relation to check for data. - :returns: List of bundles of certificates. - :rtype: List of dicts - """ - local_name = local_unit().replace('/', '_') - raw_certs_key = '{}.processed_requests'.format(local_name) - relation_name = relation_name or 'certificates' - bundles = [] - for rid in relation_ids(relation_name): - for unit in related_units(rid): - data = relation_get(rid=rid, unit=unit) - if data.get(raw_certs_key): - bundles.append({ - 'ca': data['ca'], - 'chain': data.get('chain'), - 'certs': json.loads(data[raw_certs_key])}) - return bundles - - -def get_bundle_for_cn(cn, relation_name=None): - """Extract certificates for the given cn. - - :param cn: str Canonical Name on certificate. - :param relation_name: str Relation to check for certificates down. - :returns: Dictionary of certificate data, - :rtype: dict. 
- """ - entries = get_requests_for_local_unit(relation_name) - cert_bundle = {} - for entry in entries: - for _cn, bundle in entry['certs'].items(): - if _cn == cn: - cert_bundle = { - 'cert': bundle['cert'], - 'key': bundle['key'], - 'chain': entry['chain'], - 'ca': entry['ca']} - break - if cert_bundle: - break - return cert_bundle diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py deleted file mode 100644 index 32c69ff7..00000000 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ /dev/null @@ -1,3361 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import copy -import enum -import glob -import hashlib -import json -import math -import os -import re -import socket -import time - -from base64 import b64decode -from subprocess import ( - check_call, - check_output, - CalledProcessError) - -import charmhelpers.contrib.storage.linux.ceph as ch_ceph - -from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( - _config_ini as config_ini -) - -from charmhelpers.fetch import ( - apt_install, - filter_installed_packages, -) -from charmhelpers.core.hookenv import ( - NoNetworkBinding, - config, - is_relation_made, - local_unit, - log, - relation_get, - relation_ids, - related_units, - relation_set, - unit_private_ip, - charm_name, - DEBUG, - INFO, - ERROR, - status_set, - network_get_primary_address, - WARNING, - service_name, -) - -from charmhelpers.core.sysctl import create as sysctl_create -from charmhelpers.core.strutils import bool_from_string -from charmhelpers.contrib.openstack.exceptions import OSContextError - -from charmhelpers.core.host import ( - get_bond_master, - is_phy_iface, - list_nics, - get_nic_hwaddr, - mkdir, - write_file, - pwgen, - lsb_release, - CompareHostReleases, -) -from charmhelpers.contrib.hahelpers.cluster import ( - determine_apache_port, - determine_api_port, - https, - is_clustered, -) -from charmhelpers.contrib.hahelpers.apache import ( - get_cert, - get_ca_cert, - install_ca_cert, -) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, - parse_data_port_mappings, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - INTERNAL, - ADMIN, - PUBLIC, - ADDRESS_MAP, - local_address, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv4_addr, - get_ipv6_addr, - get_netmask_for_address, - format_ipv6_addr, - is_bridge_member, - is_ipv6_disabled, - get_relation_ip, -) -from charmhelpers.contrib.openstack.utils import ( - config_flags_parser, - get_os_codename_install_source, - enable_memcache, - CompareOpenStackReleases, - os_release, -) -from charmhelpers.core.unitdata import kv - -from charmhelpers.contrib.hardware import pci - -try: - import psutil -except ImportError: - apt_install('python3-psutil', fatal=True) - import psutil - -CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' -ADDRESS_TYPES = ['admin', 
'internal', 'public'] -HAPROXY_RUN_DIR = '/var/run/haproxy/' -DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" - - -def ensure_packages(packages): - """Install but do not upgrade required plugin packages.""" - required = filter_installed_packages(packages) - if required: - apt_install(required, fatal=True) - - -def context_complete(ctxt): - _missing = [k for k, v in ctxt.items() if v is None or v == ''] - - if _missing: - log('Missing required data: %s' % ' '.join(_missing), level=INFO) - return False - - return True - - -class OSContextGenerator(object): - """Base class for all context generators.""" - interfaces = [] - related = False - complete = False - missing_data = [] - - def __call__(self): - raise NotImplementedError - - def context_complete(self, ctxt): - """Check for missing data for the required context data. - Set self.missing_data if it exists and return False. - Set self.complete if no missing data and return True. - """ - # Fresh start - self.complete = False - self.missing_data = [] - for k, v in ctxt.items(): - if v is None or v == '': - if k not in self.missing_data: - self.missing_data.append(k) - - if self.missing_data: - self.complete = False - log('Missing required data: %s' % ' '.join(self.missing_data), - level=INFO) - else: - self.complete = True - return self.complete - - def get_related(self): - """Check if any of the context interfaces have relation ids. - Set self.related and return True if one of the interfaces - has relation ids. - """ - # Fresh start - self.related = False - try: - for interface in self.interfaces: - if relation_ids(interface): - self.related = True - return self.related - except AttributeError as e: - log("{} {}" - "".format(self, e), 'INFO') - return self.related - - -class SharedDBContext(OSContextGenerator): - interfaces = ['shared-db'] - - def __init__(self, database=None, user=None, relation_prefix=None, - ssl_dir=None, relation_id=None): - """Allows inspecting relation for settings prefixed with - relation_prefix. This is useful for parsing access for multiple - databases returned via the shared-db interface (eg, nova_password, - quantum_password) - """ - self.relation_prefix = relation_prefix - self.database = database - self.user = user - self.ssl_dir = ssl_dir - self.rel_name = self.interfaces[0] - self.relation_id = relation_id - - def __call__(self): - self.database = self.database or config('database') - self.user = self.user or config('database-user') - if None in [self.database, self.user]: - log("Could not generate shared_db context. Missing required charm " - "config options. (database name and user)", level=ERROR) - raise OSContextError - - ctxt = {} - - # NOTE(jamespage) if mysql charm provides a network upon which - # access to the database should be made, reconfigure relation - # with the service units local address and defer execution - access_network = relation_get('access-network') - if access_network is not None: - if self.relation_prefix is not None: - hostname_key = "{}_hostname".format(self.relation_prefix) - else: - hostname_key = "hostname" - access_hostname = get_address_in_network( - access_network, - local_address(unit_get_fallback='private-address')) - set_hostname = relation_get(attribute=hostname_key, - unit=local_unit()) - if set_hostname != access_hostname: - relation_set(relation_settings={hostname_key: access_hostname}) - return None # Defer any further hook execution for now.... 
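Note the deferral pattern above: when the mysql charm advertises an `access-network`, the context publishes the unit's address within that network back on the relation and returns None, so rendering waits for a later hook. The address selection it leans on, `get_address_in_network`, amounts to picking the first local address inside the CIDR; a stdlib-only sketch of that logic (illustrative helper name, hypothetical addresses):

```python
import ipaddress


def address_in_network(cidr, addresses, fallback=None):
    """Return the first of `addresses` that falls inside `cidr`.

    Mirrors what get_address_in_network() does with the unit's
    locally configured addresses; returns `fallback` on no match.
    """
    network = ipaddress.ip_network(cidr)
    for addr in addresses:
        if ipaddress.ip_address(addr) in network:
            return addr
    return fallback


# mysql advertised access-network=10.5.0.0/16; publish the matching
# local address back on the relation as this unit's hostname.
print(address_in_network('10.5.0.0/16', ['192.168.1.4', '10.5.0.7']))
```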
- - password_setting = 'password' - if self.relation_prefix: - password_setting = self.relation_prefix + '_password' - - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.interfaces[0]) - - rel = (get_os_codename_install_source(config('openstack-origin')) or - 'icehouse') - for rid in rids: - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - host = rdata.get('db_host') - host = format_ipv6_addr(host) or host - ctxt = { - 'database_host': host, - 'database': self.database, - 'database_user': self.user, - 'database_password': rdata.get(password_setting), - 'database_type': 'mysql+pymysql' - } - # Port is being introduced with LP Bug #1876188 - # but it not currently required and may not be set in all - # cases, particularly in classic charms. - port = rdata.get('db_port') - if port: - ctxt['database_port'] = port - if CompareOpenStackReleases(rel) < 'queens': - ctxt['database_type'] = 'mysql' - if self.context_complete(ctxt): - db_ssl(rdata, ctxt, self.ssl_dir) - return ctxt - return {} - - -class PostgresqlDBContext(OSContextGenerator): - interfaces = ['pgsql-db'] - - def __init__(self, database=None): - self.database = database - - def __call__(self): - self.database = self.database or config('database') - if self.database is None: - log('Could not generate postgresql_db context. Missing required ' - 'charm config options. (database name)', level=ERROR) - raise OSContextError - - ctxt = {} - for rid in relation_ids(self.interfaces[0]): - self.related = True - for unit in related_units(rid): - rel_host = relation_get('host', rid=rid, unit=unit) - rel_user = relation_get('user', rid=rid, unit=unit) - rel_passwd = relation_get('password', rid=rid, unit=unit) - ctxt = {'database_host': rel_host, - 'database': self.database, - 'database_user': rel_user, - 'database_password': rel_passwd, - 'database_type': 'postgresql'} - if self.context_complete(ctxt): - return ctxt - - return {} - - -def db_ssl(rdata, ctxt, ssl_dir): - if 'ssl_ca' in rdata and ssl_dir: - ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_ca'])) - - ctxt['database_ssl_ca'] = ca_path - elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found", level=INFO) - return ctxt - - if 'ssl_cert' in rdata: - cert_path = os.path.join( - ssl_dir, 'db-client.cert') - if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity", level=INFO) - time.sleep(60) - - with open(cert_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_cert'])) - - ctxt['database_ssl_cert'] = cert_path - key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'wb') as fh: - fh.write(b64decode(rdata['ssl_key'])) - - ctxt['database_ssl_key'] = key_path - - return ctxt - - -class IdentityServiceContext(OSContextGenerator): - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-service'): - self.service = service - self.service_user = service_user - self.rel_name = rel_name - self.interfaces = [self.rel_name] - - def _setup_pki_cache(self): - if self.service and self.service_user: - # This is required for pki token signing if we don't want /tmp to - # be used. 
- cachedir = '/var/cache/%s' % (self.service) - if not os.path.isdir(cachedir): - log("Creating service cache dir %s" % (cachedir), level=DEBUG) - mkdir(path=cachedir, owner=self.service_user, - group=self.service_user, perms=0o700) - - return cachedir - return None - - def _get_pkg_name(self, python_name='keystonemiddleware'): - """Get corresponding distro installed package for python - package name. - - :param python_name: nameof the python package - :type: string - """ - pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) - - for pkg in pkg_names: - if not filter_installed_packages((pkg,)): - return pkg - - return None - - def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): - """Build Jinja2 context for full rendering of [keystone_authtoken] - section with variable names included. Re-constructed from former - template 'section-keystone-auth-mitaka'. - - :param ctxt: Jinja2 context returned from self.__call__() - :type: dict - :param keystonemiddleware_os_rel: OpenStack release name of - keystonemiddleware package installed - """ - c = collections.OrderedDict((('auth_type', 'password'),)) - - # 'www_authenticate_uri' replaced 'auth_uri' since Stein, - # see keystonemiddleware upstream sources for more info - if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': - c.update(( - ('www_authenticate_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) - else: - c.update(( - ('auth_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) - - c.update(( - ('auth_url', "{}://{}:{}/v3".format( - ctxt.get('auth_protocol', ''), - ctxt.get('auth_host', ''), - ctxt.get('auth_port', ''))), - ('project_domain_name', ctxt.get('admin_domain_name', '')), - ('user_domain_name', ctxt.get('admin_domain_name', '')), - ('project_name', ctxt.get('admin_tenant_name', '')), - ('username', ctxt.get('admin_user', '')), - ('password', ctxt.get('admin_password', '')), - ('signing_dir', ctxt.get('signing_dir', '')),)) - - if ctxt.get('service_type'): - c.update((('service_type', ctxt.get('service_type')),)) - - return c - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - keystonemiddleware_os_release = None - if self._get_pkg_name(): - keystonemiddleware_os_release = os_release(self._get_pkg_name()) - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') - serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - int_host = rdata.get('internal_host') - int_host = format_ipv6_addr(int_host) or int_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - int_protocol = rdata.get('internal_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'internal_host': int_host, - 'internal_port': rdata.get('internal_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': 
rdata.get('service_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'internal_protocol': int_protocol, - 'api_version': api_version}) - - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') - - if float(api_version) > 2: - ctxt.update({ - 'admin_domain_name': rdata.get('service_domain'), - 'service_project_id': rdata.get('service_tenant_id'), - 'service_domain_id': rdata.get('service_domain_id')}) - - # we keep all veriables in ctxt for compatibility and - # add nested dictionary for keystone_authtoken generic - # templating - if keystonemiddleware_os_release: - ctxt['keystone_authtoken'] = \ - self._get_keystone_authtoken_ctxt( - ctxt, keystonemiddleware_os_release) - - if self.context_complete(ctxt): - # NOTE(jamespage) this is required for >= icehouse - # so a missing value just indicates keystone needs - # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') - return ctxt - - return {} - - -class IdentityCredentialsContext(IdentityServiceContext): - '''Context for identity-credentials interface type''' - - def __init__(self, - service=None, - service_user=None, - rel_name='identity-credentials'): - super(IdentityCredentialsContext, self).__init__(service, - service_user, - rel_name) - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - cachedir = self._setup_pki_cache() - if cachedir: - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - self.related = True - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - credentials_host = rdata.get('credentials_host') - credentials_host = ( - format_ipv6_addr(credentials_host) or credentials_host - ) - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - svc_protocol = rdata.get('credentials_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({ - 'service_port': rdata.get('credentials_port'), - 'service_host': credentials_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('credentials_project'), - 'admin_tenant_id': rdata.get('credentials_project_id'), - 'admin_user': rdata.get('credentials_username'), - 'admin_password': rdata.get('credentials_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol, - 'api_version': api_version - }) - - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') - - if float(api_version) > 2: - ctxt.update({'admin_domain_name': - rdata.get('domain')}) - - if self.context_complete(ctxt): - return ctxt - - return {} - - -class NovaVendorMetadataContext(OSContextGenerator): - """Context used for configuring nova vendor metadata on nova.conf file.""" - - def __init__(self, os_release_pkg, interfaces=None): - """Initialize the NovaVendorMetadataContext object. - - :param os_release_pkg: the package name to extract the OpenStack - release codename from. - :type os_release_pkg: str - :param interfaces: list of string values to be used as the Context's - relation interfaces. 
- :type interfaces: List[str] - """ - self.os_release_pkg = os_release_pkg - if interfaces is not None: - self.interfaces = interfaces - - def __call__(self): - cmp_os_release = CompareOpenStackReleases( - os_release(self.os_release_pkg)) - ctxt = {'vendor_data': False} - - vdata_providers = [] - vdata = config('vendor-data') - vdata_url = config('vendor-data-url') - - if vdata: - try: - # validate the JSON. If invalid, we do not set anything here - json.loads(vdata) - except (TypeError, ValueError) as e: - log('Error decoding vendor-data. {}'.format(e), level=ERROR) - else: - ctxt['vendor_data'] = True - # Mitaka does not support DynamicJSON - # so vendordata_providers is not needed - if cmp_os_release > 'mitaka': - vdata_providers.append('StaticJSON') - - if vdata_url: - if cmp_os_release > 'mitaka': - ctxt['vendor_data_url'] = vdata_url - vdata_providers.append('DynamicJSON') - else: - log('Dynamic vendor data unsupported' - ' for {}.'.format(cmp_os_release), level=ERROR) - if vdata_providers: - ctxt['vendordata_providers'] = ','.join(vdata_providers) - - return ctxt - - -class NovaVendorMetadataJSONContext(OSContextGenerator): - """Context used for writing nova vendor metadata json file.""" - - def __init__(self, os_release_pkg): - """Initialize the NovaVendorMetadataJSONContext object. - - :param os_release_pkg: the package name to extract the OpenStack - release codename from. - :type os_release_pkg: str - """ - self.os_release_pkg = os_release_pkg - - def __call__(self): - ctxt = {'vendor_data_json': '{}'} - - vdata = config('vendor-data') - if vdata: - try: - # validate the JSON. If invalid, we return empty. - json.loads(vdata) - except (TypeError, ValueError) as e: - log('Error decoding vendor-data. {}'.format(e), level=ERROR) - else: - ctxt['vendor_data_json'] = vdata - - return ctxt - - -class AMQPContext(OSContextGenerator): - - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, - relation_id=None): - self.ssl_dir = ssl_dir - self.rel_name = rel_name - self.relation_prefix = relation_prefix - self.interfaces = [rel_name] - self.relation_id = relation_id - - def __call__(self): - log('Generating template context for amqp', level=DEBUG) - conf = config() - if self.relation_prefix: - user_setting = '%s-rabbit-user' % (self.relation_prefix) - vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) - else: - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' - - try: - username = conf[user_setting] - vhost = conf[vhost_setting] - except KeyError as e: - log('Could not generate shared_db context. Missing required charm ' - 'config options: %s.' 
% e, level=ERROR) - raise OSContextError - - ctxt = {} - if self.relation_id: - rids = [self.relation_id] - else: - rids = relation_ids(self.rel_name) - for rid in rids: - ha_vip_only = False - self.related = True - transport_hosts = None - rabbitmq_port = '5672' - for unit in related_units(rid): - if relation_get('clustered', rid=rid, unit=unit): - ctxt['clustered'] = True - vip = relation_get('vip', rid=rid, unit=unit) - vip = format_ipv6_addr(vip) or vip - ctxt['rabbitmq_host'] = vip - transport_hosts = [vip] - else: - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - ctxt['rabbitmq_host'] = host - transport_hosts = [host] - - ctxt.update({ - 'rabbitmq_user': username, - 'rabbitmq_password': relation_get('password', rid=rid, - unit=unit), - 'rabbitmq_virtual_host': vhost, - }) - - ssl_port = relation_get('ssl_port', rid=rid, unit=unit) - if ssl_port: - ctxt['rabbit_ssl_port'] = ssl_port - rabbitmq_port = ssl_port - - ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) - if ssl_ca: - ctxt['rabbit_ssl_ca'] = ssl_ca - - if relation_get('ha_queues', rid=rid, unit=unit) is not None: - ctxt['rabbitmq_ha_queues'] = True - - ha_vip_only = relation_get('ha-vip-only', - rid=rid, unit=unit) is not None - - if self.context_complete(ctxt): - if 'rabbit_ssl_ca' in ctxt: - if not self.ssl_dir: - log("Charm not setup for ssl support but ssl ca " - "found", level=INFO) - break - - ca_path = os.path.join( - self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'wb') as fh: - fh.write(b64decode(ctxt['rabbit_ssl_ca'])) - ctxt['rabbit_ssl_ca'] = ca_path - - # Sufficient information found = break out! - break - - # Used for active/active rabbitmq >= grizzly - if (('clustered' not in ctxt or ha_vip_only) and - len(related_units(rid)) > 1): - rabbitmq_hosts = [] - for unit in related_units(rid): - host = relation_get('private-address', rid=rid, unit=unit) - if not relation_get('password', rid=rid, unit=unit): - log( - ("Skipping {} password not sent which indicates " - "unit is not ready.".format(host)), - level=DEBUG) - continue - host = format_ipv6_addr(host) or host - rabbitmq_hosts.append(host) - - rabbitmq_hosts = sorted(rabbitmq_hosts) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) - transport_hosts = rabbitmq_hosts - - if transport_hosts: - transport_url_hosts = ','.join([ - "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], - ctxt['rabbitmq_password'], - host_, - rabbitmq_port) - for host_ in transport_hosts]) - ctxt['transport_url'] = "rabbit://{}/{}".format( - transport_url_hosts, vhost) - - oslo_messaging_flags = conf.get('oslo-messaging-flags', None) - if oslo_messaging_flags: - ctxt['oslo_messaging_flags'] = config_flags_parser( - oslo_messaging_flags) - - oslo_messaging_driver = conf.get( - 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) - if oslo_messaging_driver: - ctxt['oslo_messaging_driver'] = oslo_messaging_driver - - notification_format = conf.get('notification-format', None) - if notification_format: - ctxt['notification_format'] = notification_format - - notification_topics = conf.get('notification-topics', None) - if notification_topics: - ctxt['notification_topics'] = notification_topics - - send_notifications_to_logs = conf.get('send-notifications-to-logs', None) - if send_notifications_to_logs: - ctxt['send_notifications_to_logs'] = send_notifications_to_logs - - if not self.complete: - return {} - - return ctxt - - -class CephContext(OSContextGenerator): - """Generates context for /etc/ceph/ceph.conf templates.""" - 
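The AMQP context above assembles an oslo.messaging `transport_url` by joining a `user:password@host:port` pair for each sorted rabbitmq host and appending the virtual host. A standalone sketch of just that assembly step, with an illustrative function name and made-up credentials:

```python
def build_transport_url(user, password, hosts, vhost, port=5672):
    """Assemble a transport_url the way the removed AMQPContext does:
    comma-separated user:password@host:port pairs, then the vhost."""
    pairs = ','.join(
        '{}:{}@{}:{}'.format(user, password, host, port)
        for host in sorted(hosts)
    )
    return 'rabbit://{}/{}'.format(pairs, vhost)


print(build_transport_url('ceph', 's3cr3t', ['10.0.0.12', '10.0.0.11'],
                          'openstack'))
# rabbit://ceph:s3cr3t@10.0.0.11:5672,ceph:s3cr3t@10.0.0.12:5672/openstack
```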
interfaces = ['ceph'] - - def __call__(self): - if not relation_ids('ceph'): - return {} - - log('Generating template context for ceph', level=DEBUG) - mon_hosts = [] - ctxt = { - 'use_syslog': str(config('use-syslog')).lower() - } - for rid in relation_ids('ceph'): - for unit in related_units(rid): - if not ctxt.get('auth'): - ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) - if not ctxt.get('key'): - ctxt['key'] = relation_get('key', rid=rid, unit=unit) - if not ctxt.get('rbd_features'): - default_features = relation_get('rbd-features', rid=rid, unit=unit) - if default_features is not None: - ctxt['rbd_features'] = default_features - - ceph_addrs = relation_get('ceph-public-address', rid=rid, - unit=unit) - if ceph_addrs: - for addr in ceph_addrs.split(' '): - mon_hosts.append(format_ipv6_addr(addr) or addr) - else: - priv_addr = relation_get('private-address', rid=rid, - unit=unit) - mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) - - ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) - - if config('pool-type') and config('pool-type') == 'erasure-coded': - base_pool_name = config('rbd-pool') or config('rbd-pool-name') - if not base_pool_name: - base_pool_name = service_name() - ctxt['rbd_default_data_pool'] = base_pool_name - - if not os.path.isdir('/etc/ceph'): - os.mkdir('/etc/ceph') - - if not self.context_complete(ctxt): - return {} - - ensure_packages(['ceph-common']) - return ctxt - - def context_complete(self, ctxt): - """Overridden here to ensure the context is actually complete. - - We set `key` and `auth` to None here, by default, to ensure - that the context will always evaluate to incomplete until the - Ceph relation has actually sent these details; otherwise, - there is a potential race condition between the relation - appearing and the first unit actually setting this data on the - relation. - - :param ctxt: The current context members - :type ctxt: Dict[str, ANY] - :returns: True if the context is complete - :rtype: bool - """ - if 'auth' not in ctxt or 'key' not in ctxt: - return False - return super(CephContext, self).context_complete(ctxt) - - -class HAProxyContext(OSContextGenerator): - """Provides half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
- - :side effect: mkdir is called on HAPROXY_RUN_DIR - """ - interfaces = ['cluster'] - - def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): - self.address_types = address_types - self.singlenode_mode = singlenode_mode - - def __call__(self): - if not os.path.isdir(HAPROXY_RUN_DIR): - mkdir(path=HAPROXY_RUN_DIR) - if not relation_ids('cluster') and not self.singlenode_mode: - return {} - - l_unit = local_unit().replace('/', '-') - cluster_hosts = collections.OrderedDict() - - # NOTE(jamespage): build out map of configured network endpoints - # and associated backends - for addr_type in self.address_types: - cfg_opt = 'os-{}-network'.format(addr_type) - # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather - # than 'internal' - if addr_type == 'internal': - _addr_map_type = INTERNAL - else: - _addr_map_type = addr_type - # Network spaces aware - laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], - config(cfg_opt)) - if laddr: - netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = { - 'network': "{}/{}".format(laddr, - netmask), - 'backends': collections.OrderedDict([(l_unit, - laddr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set {addr_type}-address with - # get_relation_ip(addr_type) - _laddr = relation_get('{}-address'.format(addr_type), - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[laddr]['backends'][_unit] = _laddr - - # NOTE(jamespage) add backend based on get_relation_ip - this - # will either be the only backend or the fallback if no acls - # match in the frontend - # Network spaces aware - addr = get_relation_ip('cluster') - cluster_hosts[addr] = {} - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = { - 'network': "{}/{}".format(addr, netmask), - 'backends': collections.OrderedDict([(l_unit, - addr)]) - } - for rid in relation_ids('cluster'): - for unit in sorted(related_units(rid)): - # API Charms will need to set their private-address with - # get_relation_ip('cluster') - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr - - ctxt = { - 'frontends': cluster_hosts, - 'default_backend': addr - } - - if config('haproxy-server-timeout'): - ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') - - if config('haproxy-client-timeout'): - ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') - - if config('haproxy-queue-timeout'): - ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') - - if config('haproxy-connect-timeout'): - ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') - - if config('prefer-ipv6'): - ctxt['local_host'] = 'ip6-localhost' - ctxt['haproxy_host'] = '::' - else: - ctxt['local_host'] = '127.0.0.1' - ctxt['haproxy_host'] = '0.0.0.0' - - ctxt['ipv6_enabled'] = not is_ipv6_disabled() - - ctxt['stat_port'] = '8888' - - db = kv() - ctxt['stat_password'] = db.get('stat-password') - if not ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) - db.flush() - - for frontend in cluster_hosts: - if (len(cluster_hosts[frontend]['backends']) > 1 or - self.singlenode_mode): - # Enable haproxy when we have enough peers. 
-                log('Ensuring haproxy enabled in /etc/default/haproxy.',
-                    level=DEBUG)
-                with open('/etc/default/haproxy', 'w') as out:
-                    out.write('ENABLED=1\n')
-
-                return ctxt
-
-        log('HAProxy context is incomplete, this unit has no peers.',
-            level=INFO)
-        return {}
-
-
-class ImageServiceContext(OSContextGenerator):
-    interfaces = ['image-service']
-
-    def __call__(self):
-        """Obtains the glance API server from the image-service relation.
-        Useful in nova and cinder (currently).
-        """
-        log('Generating template context for image-service.', level=DEBUG)
-        rids = relation_ids('image-service')
-        if not rids:
-            return {}
-
-        for rid in rids:
-            for unit in related_units(rid):
-                api_server = relation_get('glance-api-server',
-                                          rid=rid, unit=unit)
-                if api_server:
-                    return {'glance_api_servers': api_server}
-
-        log("ImageService context is incomplete. Missing required relation "
-            "data.", level=INFO)
-        return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
-    """Generates a context for an apache vhost configuration that configures
-    HTTPS reverse proxying for one or many endpoints. Generated context
-    looks something like::
-
-        {
-            'namespace': 'cinder',
-            'private_address': 'iscsi.mycinderhost.com',
-            'endpoints': [(8776, 8766), (8777, 8767)]
-        }
-
-    The endpoints list consists of tuples mapping external ports
-    to internal ports.
-    """
-    interfaces = ['https']
-
-    # charms should inherit this context and set external ports
-    # and service namespace accordingly.
-    external_ports = []
-    service_namespace = None
-    user = group = 'root'
-
-    def enable_modules(self):
-        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
-        check_call(cmd)
-
-    def configure_cert(self, cn=None):
-        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
-        mkdir(path=ssl_dir)
-        cert, key = get_cert(cn)
-        if cert and key:
-            if cn:
-                cert_filename = 'cert_{}'.format(cn)
-                key_filename = 'key_{}'.format(cn)
-            else:
-                cert_filename = 'cert'
-                key_filename = 'key'
-
-            write_file(path=os.path.join(ssl_dir, cert_filename),
-                       content=b64decode(cert), owner=self.user,
-                       group=self.group, perms=0o640)
-            write_file(path=os.path.join(ssl_dir, key_filename),
-                       content=b64decode(key), owner=self.user,
-                       group=self.group, perms=0o640)
-
-    def configure_ca(self):
-        ca_cert = get_ca_cert()
-        if ca_cert:
-            install_ca_cert(b64decode(ca_cert))
-
-    def canonical_names(self):
-        """Figure out which canonical names clients will access this service.
-        """
-        cns = []
-        for r_id in relation_ids('identity-service'):
-            for unit in related_units(r_id):
-                rdata = relation_get(rid=r_id, unit=unit)
-                for k in rdata:
-                    if k.startswith('ssl_key_'):
-                        cns.append(k.lstrip('ssl_key_'))
-
-        return sorted(list(set(cns)))
-
-    def get_network_addresses(self):
-        """For each network configured, return corresponding address and
-        hostname or vip (if available).
-
-        Returns a list of tuples of the form:
-
-            [(address_in_net_a, hostname_in_net_a),
-             (address_in_net_b, hostname_in_net_b),
-             ...]
-
-        or, if no hostname(s) available:
-
-            [(address_in_net_a, vip_in_net_a),
-             (address_in_net_b, vip_in_net_b),
-             ...]
-
-        or, if no vip(s) available:
-
-            [(address_in_net_a, address_in_net_a),
-             (address_in_net_b, address_in_net_b),
-             ...]
-        """
-        addresses = []
-        for net_type in [INTERNAL, ADMIN, PUBLIC]:
-            net_config = config(ADDRESS_MAP[net_type]['config'])
-            # NOTE(jamespage): Fallback must always be private address
-            #                  as this is used to bind services on the
-            #                  local unit.
- fallback = local_address(unit_get_fallback="private-address") - if net_config: - addr = get_address_in_network(net_config, - fallback) - else: - try: - addr = network_get_primary_address( - ADDRESS_MAP[net_type]['binding'] - ) - except (NotImplementedError, NoNetworkBinding): - addr = fallback - - endpoint = resolve_address(net_type) - addresses.append((addr, endpoint)) - - # Log the set of addresses to have a trail log and capture if tuples - # change over time in the same unit (LP: #1952414). - sorted_addresses = sorted(set(addresses)) - log('get_network_addresses: {}'.format(sorted_addresses)) - return sorted_addresses - - def __call__(self): - if isinstance(self.external_ports, str): - self.external_ports = [self.external_ports] - - if not self.external_ports or not https(): - return {} - - use_keystone_ca = True - for rid in relation_ids('certificates'): - if related_units(rid): - use_keystone_ca = False - - if use_keystone_ca: - self.configure_ca() - - self.enable_modules() - - ctxt = {'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': []} - - if use_keystone_ca: - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - for net_type in (INTERNAL, ADMIN, PUBLIC): - cn = resolve_address(endpoint_type=net_type) - self.configure_cert(cn) - - addresses = self.get_network_addresses() - for address, endpoint in addresses: - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port, - singlenode_mode=True) - int_port = determine_api_port(api_port, singlenode_mode=True) - portmap = (address, endpoint, int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) - ctxt['ext_ports'].append(int(ext_port)) - - ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) - return ctxt - - -class NeutronContext(OSContextGenerator): - interfaces = [] - - @property - def plugin(self): - return None - - @property - def network_manager(self): - return None - - @property - def packages(self): - return neutron_plugin_attribute(self.plugin, 'packages', - self.network_manager) - - @property - def neutron_security_groups(self): - return None - - def _ensure_packages(self): - for pkgs in self.packages: - ensure_packages(pkgs) - - def ovs_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return ovs_ctxt - - def nuage_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nuage_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'vsp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nuage_ctxt - - def nvp_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nvp_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nvp_ctxt - - def n1kv_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 
'driver', - self.network_manager) - n1kv_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - n1kv_user_config_flags = config('n1kv-config-flags') - restrict_policy_profiles = config('n1kv-restrict-policy-profiles') - n1kv_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': restrict_policy_profiles} - - if n1kv_user_config_flags: - flags = config_flags_parser(n1kv_user_config_flags) - n1kv_ctxt['user_config_flags'] = flags - - return n1kv_ctxt - - def calico_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - calico_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'Calico', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return calico_ctxt - - def neutron_ctxt(self): - if https(): - proto = 'https' - else: - proto = 'http' - - if is_clustered(): - host = config('vip') - else: - host = local_address(unit_get_fallback='private-address') - - ctxt = {'network_manager': self.network_manager, - 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} - return ctxt - - def pg_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'plumgrid', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - return ovs_ctxt - - def midonet_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - midonet_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - mido_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'midonet', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': midonet_config} - - return mido_ctxt - - def __call__(self): - if self.network_manager not in ['quantum', 'neutron']: - return {} - - if not self.plugin: - return {} - - ctxt = self.neutron_ctxt() - - if self.plugin == 'ovs': - ctxt.update(self.ovs_ctxt()) - elif self.plugin in ['nvp', 'nsx']: - ctxt.update(self.nvp_ctxt()) - elif self.plugin == 'n1kv': - ctxt.update(self.n1kv_ctxt()) - elif self.plugin == 'Calico': - ctxt.update(self.calico_ctxt()) - elif self.plugin == 'vsp': - ctxt.update(self.nuage_ctxt()) - elif self.plugin == 'plumgrid': - ctxt.update(self.pg_ctxt()) - elif self.plugin == 'midonet': - ctxt.update(self.midonet_ctxt()) - - alchemy_flags = config('neutron-alchemy-flags') - if alchemy_flags: - flags = config_flags_parser(alchemy_flags) - ctxt['neutron_alchemy_flags'] = flags - - return ctxt - - -class NeutronPortContext(OSContextGenerator): - - def resolve_ports(self, ports): - """Resolve NICs not yet bound to bridge(s) - - If hwaddress provided then returns resolved hwaddress otherwise NIC. 
- """ - if not ports: - return None - - hwaddr_to_nic = {} - hwaddr_to_ip = {} - extant_nics = list_nics() - - for nic in extant_nics: - # Ignore virtual interfaces (bond masters will be identified from - # their slaves) - if not is_phy_iface(nic): - continue - - _nic = get_bond_master(nic) - if _nic: - log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), - level=DEBUG) - nic = _nic - - hwaddr = get_nic_hwaddr(nic) - hwaddr_to_nic[hwaddr] = nic - addresses = get_ipv4_addr(nic, fatal=False) - addresses += get_ipv6_addr(iface=nic, fatal=False) - hwaddr_to_ip[hwaddr] = addresses - - resolved = [] - mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) - for entry in ports: - if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT have an IP address - if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: - # If the nic is part of a bridge then don't use it - if is_bridge_member(hwaddr_to_nic[entry]): - continue - - # Entry is a MAC address for a valid interface that doesn't - # have an IP address assigned yet. - resolved.append(hwaddr_to_nic[entry]) - elif entry in extant_nics: - # If the passed entry is not a MAC address and the interface - # exists, assume it's a valid interface, and that the user put - # it there on purpose (we can trust it to be the real external - # network). - resolved.append(entry) - - # Ensure no duplicates - return list(set(resolved)) - - -class OSConfigFlagContext(OSContextGenerator): - """Provides support for user-defined config flags. - - Users can define a comma-seperated list of key=value pairs - in the charm configuration and apply them at any point in - any file by using a template flag. - - Sometimes users might want config flags inserted within a - specific section so this class allows users to specify the - template flag name, allowing for multiple template flags - (sections) within the same context. - - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ - - def __init__(self, charm_flag='config-flags', - template_flag='user_config_flags'): - """ - :param charm_flag: config flags in charm configuration. - :param template_flag: insert point for user-defined flags in template - file. - """ - super(OSConfigFlagContext, self).__init__() - self._charm_flag = charm_flag - self._template_flag = template_flag - - def __call__(self): - config_flags = config(self._charm_flag) - if not config_flags: - return {} - - return {self._template_flag: - config_flags_parser(config_flags)} - - -class LibvirtConfigFlagsContext(OSContextGenerator): - """ - This context provides support for extending - the libvirt section through user-defined flags. - """ - def __call__(self): - ctxt = {} - libvirt_flags = config('libvirt-flags') - if libvirt_flags: - ctxt['libvirt_flags'] = config_flags_parser( - libvirt_flags) - return ctxt - - -class SubordinateConfigContext(OSContextGenerator): - - """ - Responsible for inspecting relations to subordinates that - may be exporting required config via a json blob. - - The subordinate interface allows subordinates to export their - configuration requirements to the principle for multiple config - files and multiple services. 
-    I.e., a subordinate that has interfaces
-    to both glance and nova may export the following yaml blob as json::
-
-        glance:
-            /etc/glance/glance-api.conf:
-                sections:
-                    DEFAULT:
-                        - [key1, value1]
-            /etc/glance/glance-registry.conf:
-                MYSECTION:
-                    - [key2, value2]
-        nova:
-            /etc/nova/nova.conf:
-                sections:
-                    DEFAULT:
-                        - [key3, value3]
-
-    It is then up to the principal charms to subscribe this context to
-    the service+config file it is interested in. Configuration data will
-    be available in the template context, in glance's case, as::
-
-        ctxt = {
-            ... other context ...
-            'subordinate_configuration': {
-                'DEFAULT': {
-                    'key1': 'value1',
-                },
-                'MYSECTION': {
-                    'key2': 'value2',
-                },
-            }
-        }
-    """
-
-    def __init__(self, service, config_file, interface):
-        """
-        :param service: Service name key to query in any subordinate
-                        data found
-        :param config_file: Service's config file to query sections
-        :param interface: Subordinate interface to inspect
-        """
-        self.config_file = config_file
-        if isinstance(service, list):
-            self.services = service
-        else:
-            self.services = [service]
-        if isinstance(interface, list):
-            self.interfaces = interface
-        else:
-            self.interfaces = [interface]
-
-    def __call__(self):
-        ctxt = {'sections': {}}
-        rids = []
-        for interface in self.interfaces:
-            rids.extend(relation_ids(interface))
-        for rid in rids:
-            for unit in related_units(rid):
-                sub_config = relation_get('subordinate_configuration',
-                                          rid=rid, unit=unit)
-                if sub_config and sub_config != '':
-                    try:
-                        sub_config = json.loads(sub_config)
-                    except Exception:
-                        log('Could not parse JSON from '
-                            'subordinate_configuration setting from %s'
-                            % rid, level=ERROR)
-                        continue
-
-                    for service in self.services:
-                        if service not in sub_config:
-                            log('Found subordinate_configuration on %s but it '
-                                'contained nothing for %s service'
-                                % (rid, service), level=INFO)
-                            continue
-
-                        sub_config = sub_config[service]
-                        if self.config_file not in sub_config:
-                            log('Found subordinate_configuration on %s but it '
-                                'contained nothing for %s'
-                                % (rid, self.config_file), level=INFO)
-                            continue
-
-                        sub_config = sub_config[self.config_file]
-                        for k, v in sub_config.items():
-                            if k == 'sections':
-                                for section, config_list in v.items():
-                                    log("adding section '%s'" % (section),
-                                        level=DEBUG)
-                                    if ctxt[k].get(section):
-                                        ctxt[k][section].extend(config_list)
-                                    else:
-                                        ctxt[k][section] = config_list
-                            else:
-                                ctxt[k] = v
-        if self.context_complete(ctxt):
-            log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
-            return ctxt
-        else:
-            return {}
-
-    def context_complete(self, ctxt):
-        """Overridden here to ensure the context is actually complete.
-
-        :param ctxt: The current context members
-        :type ctxt: Dict[str, ANY]
-        :returns: True if the context is complete
-        :rtype: bool
-        """
-        if not ctxt.get('sections'):
-            return False
-        return super(SubordinateConfigContext, self).context_complete(ctxt)
-
-
-class LogLevelContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {}
-        ctxt['debug'] = \
-            False if config('debug') is None else config('debug')
-        ctxt['verbose'] = \
-            False if config('verbose') is None else config('verbose')
-
-        return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {'use_syslog': config('use-syslog')}
-        return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
-    def __call__(self):
-        if config('prefer-ipv6'):
-            return {'bind_host': '::'}
-        else:
-            return {'bind_host': '0.0.0.0'}
-
-
-MAX_DEFAULT_WORKERS = 4
-DEFAULT_MULTIPLIER = 2
-
-
-def _calculate_workers():
-    '''
-    Determine the number of worker processes based on the CPU
-    count of the unit containing the application.
-
-    Workers will be limited to MAX_DEFAULT_WORKERS in
-    container environments where no worker-multiplier configuration
-    option has been set.
-
-    @returns int: number of worker processes to use
-    '''
-    multiplier = config('worker-multiplier')
-
-    # distinguish between an unset config option (None) and an explicit
-    # config of 0.0
-    if multiplier is None:
-        multiplier = DEFAULT_MULTIPLIER
-
-    count = int(_num_cpus() * multiplier)
-    if count <= 0:
-        # assign at least one worker
-        count = 1
-
-    if config('worker-multiplier') is None:
-        # NOTE(jamespage): Limit unconfigured worker-multiplier
-        #                  to MAX_DEFAULT_WORKERS to avoid insane
-        #                  worker configuration on large servers
-        # Reference: https://pad.lv/1665270
-        count = min(count, MAX_DEFAULT_WORKERS)
-
-    return count
-
-
-def _num_cpus():
-    '''
-    Compatibility wrapper for calculating the number of CPUs
-    a unit has.
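To make the SubordinateConfigContext exchange above concrete, here is a sketch of both halves of the protocol; the relation interface, service, and keys are illustrative (hook-level code, not part of this module):

    # Subordinate side: publish the JSON blob on its relation.
    import json
    from charmhelpers.core.hookenv import relation_set
    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    relation_set(relation_settings={
        'subordinate_configuration': json.dumps({
            'glance': {
                '/etc/glance/glance-api.conf': {
                    'sections': {'DEFAULT': [['key1', 'value1']]}}}})})

    # Principal side: subscribe the context to the config file of interest.
    ctxt = SubordinateConfigContext(service='glance',
                                    config_file='/etc/glance/glance-api.conf',
                                    interface='glance-plugin')()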
-
-    @returns: int: number of CPU cores detected
-    '''
-    try:
-        return psutil.cpu_count()
-    except AttributeError:
-        return psutil.NUM_CPUS
-
-
-class WorkerConfigContext(OSContextGenerator):
-
-    def __call__(self):
-        ctxt = {"workers": _calculate_workers()}
-        return ctxt
-
-
-class WSGIWorkerConfigContext(WorkerConfigContext):
-
-    def __init__(self, name=None, script=None, admin_script=None,
-                 public_script=None, user=None, group=None,
-                 process_weight=1.00,
-                 admin_process_weight=0.25, public_process_weight=0.75):
-        self.service_name = name
-        self.user = user or name
-        self.group = group or name
-        self.script = script
-        self.admin_script = admin_script
-        self.public_script = public_script
-        self.process_weight = process_weight
-        self.admin_process_weight = admin_process_weight
-        self.public_process_weight = public_process_weight
-
-    def __call__(self):
-        total_processes = _calculate_workers()
-        ctxt = {
-            "service_name": self.service_name,
-            "user": self.user,
-            "group": self.group,
-            "script": self.script,
-            "admin_script": self.admin_script,
-            "public_script": self.public_script,
-            "processes": int(math.ceil(self.process_weight * total_processes)),
-            "admin_processes": int(math.ceil(self.admin_process_weight *
-                                             total_processes)),
-            "public_processes": int(math.ceil(self.public_process_weight *
-                                              total_processes)),
-            "threads": 1,
-        }
-        return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
-    interfaces = ['zeromq-configuration']
-
-    def __call__(self):
-        ctxt = {}
-        if is_relation_made('zeromq-configuration', 'host'):
-            for rid in relation_ids('zeromq-configuration'):
-                for unit in related_units(rid):
-                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
-                    ctxt['zmq_host'] = relation_get('host', unit, rid)
-                    ctxt['zmq_redis_address'] = relation_get(
-                        'zmq_redis_address', unit, rid)
-
-        return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
-    def __init__(self, zmq_relation='zeromq-configuration',
-                 amqp_relation='amqp'):
-        """
-        :param zmq_relation: Name of Zeromq relation to check
-        :param amqp_relation: Name of AMQP relation to check
-        """
-        self.zmq_relation = zmq_relation
-        self.amqp_relation = amqp_relation
-
-    def __call__(self):
-        ctxt = {'notifications': 'False'}
-        if is_relation_made(self.amqp_relation):
-            ctxt['notifications'] = "True"
-
-        return ctxt
-
-
-class SysctlContext(OSContextGenerator):
-    """This context checks if the 'sysctl' option exists in configuration
-    and, if so, creates a file with the loaded contents."""
-    def __call__(self):
-        sysctl_dict = config('sysctl')
-        if sysctl_dict:
-            sysctl_create(sysctl_dict,
-                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
-        return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
-    '''
-    Inspects current neutron-plugin-api relation for neutron settings. Return
-    defaults if it is not present.
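The worker arithmetic above is easiest to follow with numbers; a small worked example under assumed core counts (illustrative, no hook environment needed):

    # On a 24-core unit with no worker-multiplier set:
    #   count = int(24 * DEFAULT_MULTIPLIER) = 48, then capped to
    #   MAX_DEFAULT_WORKERS = 4 because the multiplier was unconfigured.
    # With worker-multiplier=0.25 set explicitly:
    #   count = int(24 * 0.25) = 6 and no cap applies.
    import math

    total_processes = 4  # e.g. the capped default above
    weights = {'processes': 1.00, 'admin': 0.25, 'public': 0.75}
    split = {k: int(math.ceil(w * total_processes))
             for k, w in weights.items()}
    # -> {'processes': 4, 'admin': 1, 'public': 3}, matching how
    #    WSGIWorkerConfigContext apportions admin/public workers.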
- ''' - interfaces = ['neutron-plugin-api'] - - def __call__(self): - self.neutron_defaults = { - 'l2_population': { - 'rel_key': 'l2-population', - 'default': False, - }, - 'overlay_network_type': { - 'rel_key': 'overlay-network-type', - 'default': 'gre', - }, - 'neutron_security_groups': { - 'rel_key': 'neutron-security-groups', - 'default': False, - }, - 'network_device_mtu': { - 'rel_key': 'network-device-mtu', - 'default': None, - }, - 'enable_dvr': { - 'rel_key': 'enable-dvr', - 'default': False, - }, - 'enable_l3ha': { - 'rel_key': 'enable-l3ha', - 'default': False, - }, - 'dns_domain': { - 'rel_key': 'dns-domain', - 'default': None, - }, - 'polling_interval': { - 'rel_key': 'polling-interval', - 'default': 2, - }, - 'rpc_response_timeout': { - 'rel_key': 'rpc-response-timeout', - 'default': 60, - }, - 'report_interval': { - 'rel_key': 'report-interval', - 'default': 30, - }, - 'enable_qos': { - 'rel_key': 'enable-qos', - 'default': False, - }, - 'enable_nsg_logging': { - 'rel_key': 'enable-nsg-logging', - 'default': False, - }, - 'enable_nfg_logging': { - 'rel_key': 'enable-nfg-logging', - 'default': False, - }, - 'enable_port_forwarding': { - 'rel_key': 'enable-port-forwarding', - 'default': False, - }, - 'enable_fwaas': { - 'rel_key': 'enable-fwaas', - 'default': False, - }, - 'global_physnet_mtu': { - 'rel_key': 'global-physnet-mtu', - 'default': 1500, - }, - 'physical_network_mtus': { - 'rel_key': 'physical-network-mtus', - 'default': None, - }, - } - ctxt = self.get_neutron_options({}) - for rid in relation_ids('neutron-plugin-api'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - # The l2-population key is used by the context as a way of - # checking if the api service on the other end is sending data - # in a recent format. - if 'l2-population' in rdata: - ctxt.update(self.get_neutron_options(rdata)) - - extension_drivers = [] - - if ctxt['enable_qos']: - extension_drivers.append('qos') - - if ctxt['enable_nsg_logging']: - extension_drivers.append('log') - - ctxt['extension_drivers'] = ','.join(extension_drivers) - - l3_extension_plugins = [] - - if ctxt['enable_port_forwarding']: - l3_extension_plugins.append('port_forwarding') - - if ctxt['enable_fwaas']: - l3_extension_plugins.append('fwaas_v2') - if ctxt['enable_nfg_logging']: - l3_extension_plugins.append('fwaas_v2_log') - - ctxt['l3_extension_plugins'] = l3_extension_plugins - - return ctxt - - def get_neutron_options(self, rdata): - settings = {} - for nkey in self.neutron_defaults.keys(): - defv = self.neutron_defaults[nkey]['default'] - rkey = self.neutron_defaults[nkey]['rel_key'] - if rkey in rdata.keys(): - if type(defv) is bool: - settings[nkey] = bool_from_string(rdata[rkey]) - else: - settings[nkey] = rdata[rkey] - else: - settings[nkey] = defv - return settings - - -class ExternalPortContext(NeutronPortContext): - - def __call__(self): - ctxt = {} - ports = config('ext-port') - if ports: - ports = [p.strip() for p in ports.split()] - ports = self.resolve_ports(ports) - if ports: - ctxt = {"ext_port": ports[0]} - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - if mtu: - ctxt['ext_port_mtu'] = mtu - - return ctxt - - -class DataPortContext(NeutronPortContext): - - def __call__(self): - ports = config('data-port') - if ports: - # Map of {bridge:port/mac} - portmap = parse_data_port_mappings(ports) - ports = portmap.keys() - # Resolve provided ports or mac addresses and filter out those - # already attached to a bridge. 
- resolved = self.resolve_ports(ports) - # Rebuild port index using resolved and filtered ports. - normalized = {get_nic_hwaddr(port): port for port in resolved - if port not in ports} - normalized.update({port: port for port in resolved - if port in ports}) - if resolved: - return { - normalized[port]: bridge - for port, bridge in portmap.items() - if port in normalized.keys() - } - - return None - - -class PhyNICMTUContext(DataPortContext): - - def __call__(self): - ctxt = {} - mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.keys(): - ports = sorted(mappings.keys()) - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - all_ports = set() - # If any of ports is a vlan device, its underlying device must have - # mtu applied first. - for port in ports: - for lport in glob.glob("/sys/class/net/%s/lower_*" % port): - lport = os.path.basename(lport) - all_ports.add(lport.split('_')[1]) - - all_ports = list(all_ports) - all_ports.extend(ports) - if mtu: - ctxt["devs"] = '\\n'.join(all_ports) - ctxt['mtu'] = mtu - - return ctxt - - -class NetworkServiceContext(OSContextGenerator): - - def __init__(self, rel_name='quantum-network-service'): - self.rel_name = rel_name - self.interfaces = [rel_name] - - def __call__(self): - for rid in relation_ids(self.rel_name): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - ctxt = { - 'keystone_host': rdata.get('keystone_host'), - 'service_port': rdata.get('service_port'), - 'auth_port': rdata.get('auth_port'), - 'service_tenant': rdata.get('service_tenant'), - 'service_username': rdata.get('service_username'), - 'service_password': rdata.get('service_password'), - 'quantum_host': rdata.get('quantum_host'), - 'quantum_port': rdata.get('quantum_port'), - 'quantum_url': rdata.get('quantum_url'), - 'region': rdata.get('region'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - 'api_version': - rdata.get('api_version') or '2.0', - } - if self.context_complete(ctxt): - return ctxt - return {} - - -class InternalEndpointContext(OSContextGenerator): - """Internal endpoint context. - - This context provides the endpoint type used for communication between - services e.g. between Nova and Cinder internally. Openstack uses Public - endpoints by default so this allows admins to optionally use internal - endpoints. - """ - def __call__(self): - return {'use_internal_endpoints': config('use-internal-endpoints')} - - -class VolumeAPIContext(InternalEndpointContext): - """Volume API context. - - This context provides information regarding the volume endpoint to use - when communicating between services. It determines which version of the - API is appropriate for use. - - This value will be determined in the resulting context dictionary - returned from calling the VolumeAPIContext object. Information provided - by this context is as follows: - - volume_api_version: the volume api version to use, currently - 'v2' or 'v3' - volume_catalog_info: the information to use for a cinder client - configuration that consumes API endpoints from the keystone - catalog. This is defined as the type:name:endpoint_type string. - """ - # FIXME(wolsen) This implementation is based on the provider being able - # to specify the package version to check but does not guarantee that the - # volume service api version selected is available. 
In practice, it is
-    # quite likely the volume service *is* providing the v3 volume service.
-    # This should be resolved when the service-discovery spec is implemented.
-    def __init__(self, pkg):
-        """
-        Creates a new VolumeAPIContext for use in determining which version
-        of the Volume API should be used for communication. A package codename
-        should be supplied for determining the currently installed OpenStack
-        version.
-
-        :param pkg: the package codename to use in order to determine the
-            component version (e.g. nova-common). See
-            charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
-        """
-        super(VolumeAPIContext, self).__init__()
-        self._ctxt = None
-        if not pkg:
-            raise ValueError('package name must be provided in order to '
-                             'determine current OpenStack version.')
-        self.pkg = pkg
-
-    @property
-    def ctxt(self):
-        if self._ctxt is not None:
-            return self._ctxt
-        self._ctxt = self._determine_ctxt()
-        return self._ctxt
-
-    def _determine_ctxt(self):
-        """Determines the Volume API endpoint information.
-
-        Determines the appropriate version of the API that should be used
-        as well as the catalog_info string that would be supplied. Returns
-        a dict containing the volume_api_version and the volume_catalog_info.
-        """
-        rel = os_release(self.pkg)
-        version = '2'
-        if CompareOpenStackReleases(rel) >= 'pike':
-            version = '3'
-
-        service_type = 'volumev{version}'.format(version=version)
-        service_name = 'cinderv{version}'.format(version=version)
-        endpoint_type = 'publicURL'
-        if config('use-internal-endpoints'):
-            endpoint_type = 'internalURL'
-        catalog_info = '{type}:{name}:{endpoint}'.format(
-            type=service_type, name=service_name, endpoint=endpoint_type)
-
-        return {
-            'volume_api_version': version,
-            'volume_catalog_info': catalog_info,
-        }
-
-    def __call__(self):
-        return self.ctxt
-
-
-class AppArmorContext(OSContextGenerator):
-    """Base class for apparmor contexts."""
-
-    def __init__(self, profile_name=None):
-        self._ctxt = None
-        self.aa_profile = profile_name
-        self.aa_utils_packages = ['apparmor-utils']
-
-    @property
-    def ctxt(self):
-        if self._ctxt is not None:
-            return self._ctxt
-        self._ctxt = self._determine_ctxt()
-        return self._ctxt
-
-    def _determine_ctxt(self):
-        """
-        Validate that the aa-profile-mode setting is disable, enforce, or
-        complain.
-
-        :return ctxt: Dictionary of the apparmor profile or None
-        """
-        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
-            ctxt = {'aa_profile_mode': config('aa-profile-mode'),
-                    'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
-            if self.aa_profile:
-                ctxt['aa_profile'] = self.aa_profile
-        else:
-            ctxt = None
-        return ctxt
-
-    def __call__(self):
-        return self.ctxt
-
-    def install_aa_utils(self):
-        """
-        Install packages required for apparmor configuration.
-        """
-        log("Installing apparmor utils.")
-        ensure_packages(self.aa_utils_packages)
-
-    def manually_disable_aa_profile(self):
-        """
-        Manually disable an apparmor profile.
-
-        If aa-profile-mode is set to disabled (default) this is required as the
-        template has been written but apparmor is yet unaware of the profile
-        and aa-disable aa-profile fails. Without this the profile would kick
-        into enforce mode on the next service restart.
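The version/catalog derivation in VolumeAPIContext above can be followed without a hook environment; a worked sketch (the release value is illustrative, and the real code uses CompareOpenStackReleases rather than plain string ordering):

    from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

    rel = 'queens'  # illustrative os_release() result
    version = '3' if CompareOpenStackReleases(rel) >= 'pike' else '2'
    catalog_info = 'volumev{v}:cinderv{v}:{ep}'.format(v=version,
                                                       ep='internalURL')
    # -> 'volumev3:cinderv3:internalURL' when use-internal-endpoints is set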
- - """ - profile_path = '/etc/apparmor.d' - disable_path = '/etc/apparmor.d/disable' - if not os.path.lexists(os.path.join(disable_path, self.aa_profile)): - os.symlink(os.path.join(profile_path, self.aa_profile), - os.path.join(disable_path, self.aa_profile)) - - def setup_aa_profile(self): - """ - Setup an apparmor profile. - The ctxt dictionary will contain the apparmor profile mode and - the apparmor profile name. - Makes calls out to aa-disable, aa-complain, or aa-enforce to setup - the apparmor profile. - """ - self() - if not self.ctxt: - log("Not enabling apparmor Profile") - return - self.install_aa_utils() - cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] - cmd.append(self.ctxt['aa_profile']) - log("Setting up the apparmor profile for {} in {} mode." - "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) - try: - check_call(cmd) - except CalledProcessError as e: - # If aa-profile-mode is set to disabled (default) manual - # disabling is required as the template has been written but - # apparmor is yet unaware of the profile and aa-disable aa-profile - # fails. If aa-disable learns to read profile files first this can - # be removed. - if self.ctxt['aa_profile_mode'] == 'disable': - log("Manually disabling the apparmor profile for {}." - "".format(self.ctxt['aa_profile'])) - self.manually_disable_aa_profile() - return - status_set('blocked', "Apparmor profile {} failed to be set to {}." - "".format(self.ctxt['aa_profile'], - self.ctxt['aa_profile_mode'])) - raise e - - -class MemcacheContext(OSContextGenerator): - """Memcache context - - This context provides options for configuring a local memcache client and - server for both IPv4 and IPv6 - """ - - def __init__(self, package=None): - """ - @param package: Package to examine to extrapolate OpenStack release. - Used when charms have no openstack-origin config - option (ie subordinates) - """ - self.package = package - - def __call__(self): - ctxt = {} - ctxt['use_memcache'] = enable_memcache(package=self.package) - if ctxt['use_memcache']: - # Trusty version of memcached does not support ::1 as a listen - # address so use host file entry instead - release = lsb_release()['DISTRIB_CODENAME'].lower() - if is_ipv6_disabled(): - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '127.0.0.1' - else: - ctxt['memcache_server'] = 'localhost' - ctxt['memcache_server_formatted'] = '127.0.0.1' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = '{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - else: - if CompareHostReleases(release) > 'trusty': - ctxt['memcache_server'] = '::1' - else: - ctxt['memcache_server'] = 'ip6-localhost' - ctxt['memcache_server_formatted'] = '[::1]' - ctxt['memcache_port'] = '11211' - ctxt['memcache_url'] = 'inet6:{}:{}'.format( - ctxt['memcache_server_formatted'], - ctxt['memcache_port']) - return ctxt - - -class EnsureDirContext(OSContextGenerator): - ''' - Serves as a generic context to create a directory as a side-effect. - - Useful for software that supports drop-in files (.d) in conjunction - with config option-based templates. Examples include: - * OpenStack oslo.policy drop-in files; - * systemd drop-in config files; - * other software that supports overriding defaults with .d files - - Another use-case is when a subordinate generates a configuration for - primary to render in a separate directory. - - Some software requires a user to create a target directory to be - scanned for drop-in files with a specific format. 
This is why this
-    context is needed to do that before rendering a template.
-    '''
-
-    def __init__(self, dirname, **kwargs):
-        '''Used merely to ensure that a given directory exists.'''
-        self.dirname = dirname
-        self.kwargs = kwargs
-
-    def __call__(self):
-        mkdir(self.dirname, **self.kwargs)
-        return {}
-
-
-class VersionsContext(OSContextGenerator):
-    """Context to return the openstack and operating system versions.
-
-    """
-    def __init__(self, pkg='python-keystone'):
-        """Initialise context.
-
-        :param pkg: Package to extrapolate openstack version from.
-        :type pkg: str
-        """
-        self.pkg = pkg
-
-    def __call__(self):
-        ostack = os_release(self.pkg)
-        osystem = lsb_release()['DISTRIB_CODENAME'].lower()
-        return {
-            'openstack_release': ostack,
-            'operating_system_release': osystem}
-
-
-class LogrotateContext(OSContextGenerator):
-    """Common context generator for logrotate."""
-
-    def __init__(self, location, interval, count):
-        """
-        :param location: Absolute path for the logrotate config file
-        :type location: str
-        :param interval: The interval for the rotations. Valid values are
-                         'daily', 'weekly', 'monthly', 'yearly'
-        :type interval: str
-        :param count: The logrotate count option configures the 'count' times
-                      the log files are being rotated before being removed
-        :type count: int
-        """
-        self.location = location
-        self.interval = interval
-        self.count = 'rotate {}'.format(count)
-
-    def __call__(self):
-        ctxt = {
-            'logrotate_logs_location': self.location,
-            'logrotate_interval': self.interval,
-            'logrotate_count': self.count,
-        }
-        return ctxt
-
-
-class HostInfoContext(OSContextGenerator):
-    """Context to provide host information."""
-
-    def __init__(self, use_fqdn_hint_cb=None):
-        """Initialize HostInfoContext
-
-        :param use_fqdn_hint_cb: Callback whose return value used to populate
-                                 `use_fqdn_hint`
-        :type use_fqdn_hint_cb: Callable[[], bool]
-        """
-        # Store callback used to get hint for whether FQDN should be used
-
-        # Depending on the workload a charm manages, the use of FQDN vs.
-        # shortname may be a deploy-time decision, i.e. behaviour can not
-        # change on charm upgrade or post-deployment configuration change.
-
-        # The hint is passed on as a flag in the context to allow the decision
-        # to be made in the Jinja2 configuration template.
-        self.use_fqdn_hint_cb = use_fqdn_hint_cb
-
-    def _get_canonical_name(self, name=None):
-        """Get the official FQDN of the host
-
-        The implementation of ``socket.getfqdn()`` in the standard Python
-        library does not exhaust all methods of getting the official name
-        of a host, ref Python issue https://bugs.python.org/issue5004
-
-        This function mimics the behaviour of a call to ``hostname -f`` to
-        get the official FQDN but returns an empty string if it is
-        unsuccessful.
-
-        :param name: Shortname to get FQDN on
-        :type name: Optional[str]
-        :returns: The official FQDN for host or empty string ('')
-        :rtype: str
-        """
-        name = name or socket.gethostname()
-        fqdn = ''
-
-        try:
-            addrs = socket.getaddrinfo(
-                name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
-        except OSError:
-            pass
-        else:
-            for addr in addrs:
-                if addr[3]:
-                    if '.' in addr[3]:
-                        fqdn = addr[3]
-                        break
-        return fqdn
-
-    def __call__(self):
-        name = socket.gethostname()
-        ctxt = {
-            'host_fqdn': self._get_canonical_name(name) or name,
-            'host': name,
-            'use_fqdn_hint': (
-                self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False)
-        }
-        return ctxt
-
-
-def validate_ovs_use_veth(*args, **kwargs):
-    """Validate OVS use veth setting for dhcp agents
-
-    The ovs_use_veth setting is considered immutable as it will break existing
-    deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It
-    turns out this is no longer necessary. Ideally, all new deployments would
-    have this set to False.
-
-    This function validates that the config value does not conflict with
-    previously deployed settings in dhcp_agent.ini.
-
-    See LP Bug#1831935 for details.
-
-    :returns: Status state and message
-    :rtype: Union[(None, None), (string, string)]
-    """
-    existing_ovs_use_veth = (
-        DHCPAgentContext.get_existing_ovs_use_veth())
-    config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth()
-
-    # Check settings are set and not None
-    if existing_ovs_use_veth is not None and config_ovs_use_veth is not None:
-        # Check for mismatch between existing config ini and juju config
-        if existing_ovs_use_veth != config_ovs_use_veth:
-            # Stop the line to avoid breakage
-            msg = (
-                "The existing setting for dhcp_agent.ini ovs_use_veth, {}, "
-                "does not match the juju config setting, {}. This may lead to "
-                "VMs being unable to receive a DHCP IP. Either change the "
-                "juju config setting or dhcp agents may need to be recreated."
-                .format(existing_ovs_use_veth, config_ovs_use_veth))
-            log(msg, ERROR)
-            return (
-                "blocked",
-                "Mismatched existing and configured ovs-use-veth. See log.")
-
-    # Everything is OK
-    return None, None
-
-
-class DHCPAgentContext(OSContextGenerator):
-
-    def __call__(self):
-        """Return the DHCPAgentContext.
-
-        Return all DHCP Agent INI related configuration for the ovs unit the
-        charm is attached to (as a subordinate), plus the 'dns_domain' from
-        the neutron-plugin-api relation (if one is set).
-
-        :returns: Dictionary context
-        :rtype: Dict
-        """
-
-        ctxt = {}
-        dnsmasq_flags = config('dnsmasq-flags')
-        if dnsmasq_flags:
-            ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags)
-        ctxt['dns_servers'] = config('dns-servers')
-
-        neutron_api_settings = NeutronAPIContext()()
-
-        ctxt['debug'] = config('debug')
-        ctxt['instance_mtu'] = config('instance-mtu')
-        ctxt['ovs_use_veth'] = self.get_ovs_use_veth()
-
-        ctxt['enable_metadata_network'] = config('enable-metadata-network')
-        ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata')
-
-        if neutron_api_settings.get('dns_domain'):
-            ctxt['dns_domain'] = neutron_api_settings.get('dns_domain')
-
-        # Override user supplied config for these plugins as these settings
-        # are mandatory
-        if config('plugin') in ['nvp', 'nsx', 'n1kv']:
-            ctxt['enable_metadata_network'] = True
-            ctxt['enable_isolated_metadata'] = True
-
-        ctxt['append_ovs_config'] = False
-        cmp_release = CompareOpenStackReleases(
-            os_release('neutron-common', base='icehouse'))
-        if cmp_release >= 'queens' and config('enable-dpdk'):
-            ctxt['append_ovs_config'] = True
-
-        return ctxt
-
-    @staticmethod
-    def get_existing_ovs_use_veth():
-        """Return existing ovs_use_veth setting from dhcp_agent.ini.
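validate_ovs_use_veth above is written to feed a charm's status assessment; a minimal sketch of that wiring (hypothetical hook code):

    from charmhelpers.core.hookenv import status_set
    from charmhelpers.contrib.openstack.context import validate_ovs_use_veth

    state, message = validate_ovs_use_veth()
    if state == 'blocked':
        # Surface the mismatch to the operator rather than breaking DHCP.
        status_set(state, message)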
-
-        :returns: Boolean value of existing ovs_use_veth setting or None
-        :rtype: Optional[Bool]
-        """
-        DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini"
-        existing_ovs_use_veth = None
-        # If there is a dhcp_agent.ini file read the current setting
-        if os.path.isfile(DHCP_AGENT_INI):
-            # config_ini does the right thing and returns None if the setting
-            # is commented.
-            existing_ovs_use_veth = (
-                config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
-        # Convert to Bool if necessary
-        if isinstance(existing_ovs_use_veth, str):
-            return bool_from_string(existing_ovs_use_veth)
-        return existing_ovs_use_veth
-
-    @staticmethod
-    def parse_ovs_use_veth():
-        """Parse the ovs-use-veth config setting.
-
-        Parse the string config setting for ovs-use-veth and return a boolean
-        or None.
-
-        bool_from_string will raise a ValueError if the string is not falsy
-        or truthy.
-
-        :raises: ValueError for invalid input
-        :returns: Boolean value of ovs-use-veth or None
-        :rtype: Optional[Bool]
-        """
-        _config = config("ovs-use-veth")
-        # An unset parameter returns None. Just in case we will also check
-        # for an empty string: "". Ironically (the problem we are trying to
-        # avoid), "False" returns True and "" returns False.
-        if _config is None or not _config:
-            # Return None
-            return
-        # bool_from_string handles many variations of true and false strings
-        # as well as upper and lowercases including:
-        # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off']
-        return bool_from_string(_config)
-
-    def get_ovs_use_veth(self):
-        """Return correct ovs_use_veth setting for use in dhcp_agent.ini.
-
-        Get the right value from config or existing dhcp_agent.ini file.
-        Existing has precedence. Attempt to default to "False" without
-        disrupting existing deployments. Handle existing deployments and
-        upgrades safely. See LP Bug#1831935
-
-        :returns: Value to use for ovs_use_veth setting
-        :rtype: Bool
-        """
-        _existing = self.get_existing_ovs_use_veth()
-        if _existing is not None:
-            return _existing
-
-        _config = self.parse_ovs_use_veth()
-        if _config is None:
-            # New better default
-            return False
-        else:
-            return _config
-
-
-EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])
-
-
-def resolve_pci_from_mapping_config(config_key):
-    """Resolve local PCI devices from MAC addresses in mapping config.
-
-    Note that this function keeps record of mac->PCI address lookups
-    in the local unit db as the devices will disappear from the system
-    once bound.
-
-    :param config_key: Configuration option key to parse data from
-    :type config_key: str
-    :returns: PCI device address to Tuple(entity, mac) map
-    :rtype: collections.OrderedDict[str,Tuple[str,str]]
-    """
-    devices = pci.PCINetDevices()
-    resolved_devices = collections.OrderedDict()
-    db = kv()
-    # Note that ``parse_data_port_mappings`` returns Dict regardless of input
-    for mac, entity in parse_data_port_mappings(config(config_key)).items():
-        pcidev = devices.get_device_from_mac(mac)
-        if pcidev:
-            # NOTE: store mac->pci allocation as post binding
-            #       it disappears from PCIDevices.
-            db.set(mac, pcidev.pci_address)
-            db.flush()
-
-        pci_address = db.get(mac)
-        if pci_address:
-            resolved_devices[pci_address] = EntityMac(entity, mac)
-
-    return resolved_devices
-
-
-class DPDKDeviceContext(OSContextGenerator):
-
-    def __init__(self, driver_key=None, bridges_key=None, bonds_key=None):
-        """Initialize DPDKDeviceContext.
-
-        :param driver_key: Key to use when retrieving driver config.
- :type driver_key: str - :param bridges_key: Key to use when retrieving bridge config. - :type bridges_key: str - :param bonds_key: Key to use when retrieving bonds config. - :type bonds_key: str - """ - self.driver_key = driver_key or 'dpdk-driver' - self.bridges_key = bridges_key or 'data-port' - self.bonds_key = bonds_key or 'dpdk-bond-mappings' - - def __call__(self): - """Populate context. - - :returns: context - :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]] - """ - driver = config(self.driver_key) - if driver is None: - return {} - # Resolve PCI devices for both directly used devices (_bridges) - # and devices for use in dpdk bonds (_bonds) - pci_devices = resolve_pci_from_mapping_config(self.bridges_key) - pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) - return {'devices': pci_devices, - 'driver': driver} - - -class OVSDPDKDeviceContext(OSContextGenerator): - - def __init__(self, bridges_key=None, bonds_key=None): - """Initialize OVSDPDKDeviceContext. - - :param bridges_key: Key to use when retrieving bridge config. - :type bridges_key: str - :param bonds_key: Key to use when retrieving bonds config. - :type bonds_key: str - """ - self.bridges_key = bridges_key or 'data-port' - self.bonds_key = bonds_key or 'dpdk-bond-mappings' - - @staticmethod - def _parse_cpu_list(cpulist): - """Parses a linux cpulist for a numa node - - :returns: list of cores - :rtype: List[int] - """ - cores = [] - ranges = cpulist.split(',') - for cpu_range in ranges: - if "-" in cpu_range: - cpu_min_max = cpu_range.split('-') - cores += range(int(cpu_min_max[0]), - int(cpu_min_max[1]) + 1) - else: - cores.append(int(cpu_range)) - return cores - - def _numa_node_cores(self): - """Get map of numa node -> cpu core - - :returns: map of numa node -> cpu core - :rtype: Dict[str,List[int]] - """ - nodes = {} - node_regex = '/sys/devices/system/node/node*' - for node in glob.glob(node_regex): - index = node.lstrip('/sys/devices/system/node/node') - with open(os.path.join(node, 'cpulist')) as cpulist: - nodes[index] = self._parse_cpu_list(cpulist.read().strip()) - return nodes - - def cpu_mask(self): - """Get hex formatted CPU mask - - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit. - :returns: hex formatted CPU mask - :rtype: str - """ - return self.cpu_masks()['dpdk_lcore_mask'] - - def cpu_masks(self): - """Get hex formatted CPU masks - - The mask is based on using the first config:dpdk-socket-cores - cores of each NUMA node in the unit, followed by the - next config:pmd-socket-cores - - :returns: Dict of hex formatted CPU masks - :rtype: Dict[str, str] - """ - num_lcores = config('dpdk-socket-cores') - pmd_cores = config('pmd-socket-cores') - lcore_mask = 0 - pmd_mask = 0 - for cores in self._numa_node_cores().values(): - for core in cores[:num_lcores]: - lcore_mask = lcore_mask | 1 << core - for core in cores[num_lcores:][:pmd_cores]: - pmd_mask = pmd_mask | 1 << core - return { - 'pmd_cpu_mask': format(pmd_mask, '#04x'), - 'dpdk_lcore_mask': format(lcore_mask, '#04x')} - - def socket_memory(self): - """Formatted list of socket memory configuration per socket. - - :returns: socket memory configuration per socket. - :rtype: str - """ - lscpu_out = check_output( - ['lscpu', '-p=socket']).decode('UTF-8').strip() - sockets = set() - for line in lscpu_out.split('\n'): - try: - sockets.add(int(line)) - except ValueError: - # lscpu output is headed by comments so ignore them. 
- pass - sm_size = config('dpdk-socket-memory') - mem_list = [str(sm_size) for _ in sockets] - if mem_list: - return ','.join(mem_list) - else: - return str(sm_size) - - def devices(self): - """List of PCI devices for use by DPDK - - :returns: List of PCI devices for use by DPDK - :rtype: collections.OrderedDict[str,str] - """ - pci_devices = resolve_pci_from_mapping_config(self.bridges_key) - pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key)) - return pci_devices - - def _formatted_whitelist(self, flag): - """Flag formatted list of devices to whitelist - - :param flag: flag format to use - :type flag: str - :rtype: str - """ - whitelist = [] - for device in self.devices(): - whitelist.append(flag.format(device=device)) - return ' '.join(whitelist) - - def device_whitelist(self): - """Formatted list of devices to whitelist for dpdk - - using the old style '-w' flag - - :returns: devices to whitelist prefixed by '-w ' - :rtype: str - """ - return self._formatted_whitelist('-w {device}') - - def pci_whitelist(self): - """Formatted list of devices to whitelist for dpdk - - using the new style '--pci-whitelist' flag - - :returns: devices to whitelist prefixed by '--pci-whitelist ' - :rtype: str - """ - return self._formatted_whitelist('--pci-whitelist {device}') - - def __call__(self): - """Populate context. - - :returns: context - :rtype: Dict[str,Union[bool,str]] - """ - ctxt = {} - whitelist = self.device_whitelist() - if whitelist: - ctxt['dpdk_enabled'] = config('enable-dpdk') - ctxt['device_whitelist'] = self.device_whitelist() - ctxt['socket_memory'] = self.socket_memory() - ctxt['cpu_mask'] = self.cpu_mask() - return ctxt - - -class BridgePortInterfaceMap(object): - """Build a map of bridge ports and interfaces from charm configuration. - - NOTE: the handling of this detail in the charm is pre-deprecated. - - The long term goal is for network connectivity detail to be modelled in - the server provisioning layer (such as MAAS) which in turn will provide - a Netplan YAML description that will be used to drive Open vSwitch. - - Until we get to that reality the charm will need to configure this - detail based on application level configuration options. - - There is a established way of mapping interfaces to ports and bridges - in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we - will carry that forward. - - The relationship between bridge, port and interface(s). - +--------+ - | bridge | - +--------+ - | - +----------------+ - | port aka. bond | - +----------------+ - | | - +-+ +-+ - |i| |i| - |n| |n| - |t| |t| - |0| |N| - +-+ +-+ - """ - class interface_type(enum.Enum): - """Supported interface types. - - Supported interface types can be found in the ``iface_types`` column - in the ``Open_vSwitch`` table on a running system. - """ - dpdk = 'dpdk' - internal = 'internal' - system = 'system' - - def __str__(self): - """Return string representation of value. - - :returns: string representation of value. - :rtype: str - """ - return self.value - - def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, - global_mtu=None): - """Initialize map. 
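The mask arithmetic in cpu_masks above is easiest to verify by hand; a worked example for a hypothetical two-node NUMA topology with dpdk-socket-cores=1 and pmd-socket-cores=1:

    # NUMA node0 has cores [0,1,2,3], node1 has cores [4,5,6,7].
    numa = {'0': [0, 1, 2, 3], '1': [4, 5, 6, 7]}
    lcore_mask = 0
    pmd_mask = 0
    for cores in numa.values():
        for core in cores[:1]:          # first core of each node -> lcore
            lcore_mask |= 1 << core
        for core in cores[1:][:1]:      # next core of each node -> PMD
            pmd_mask |= 1 << core
    print(format(lcore_mask, '#04x'))   # 0x11 (cores 0 and 4)
    print(format(pmd_mask, '#04x'))     # 0x22 (cores 1 and 5)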
- - :param bridges_key: Name of bridge:interface/port map config key - (default: 'data-port') - :type bridges_key: Optional[str] - :param bonds_key: Name of port-name:interface map config key - (default: 'dpdk-bond-mappings') - :type bonds_key: Optional[str] - :param enable_dpdk_key: Name of DPDK toggle config key - (default: 'enable-dpdk') - :type enable_dpdk_key: Optional[str] - :param global_mtu: Set a MTU on all interfaces at map initialization. - - The default is to have Open vSwitch get this from the underlying - interface as set up by bare metal provisioning. - - Note that you can augment the MTU on an individual interface basis - like this: - - ifdatamap = bpi.get_ifdatamap(bridge, port) - ifdatamap = { - port: { - **ifdata, - **{'mtu-request': my_individual_mtu_map[port]}, - } - for port, ifdata in ifdatamap.items() - } - :type global_mtu: Optional[int] - """ - bridges_key = bridges_key or 'data-port' - bonds_key = bonds_key or 'dpdk-bond-mappings' - enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' - self._map = collections.defaultdict( - lambda: collections.defaultdict(dict)) - self._ifname_mac_map = collections.defaultdict(list) - self._mac_ifname_map = {} - self._mac_pci_address_map = {} - - # First we iterate over the list of physical interfaces visible to the - # system and update interface name to mac and mac to interface name map - for ifname in list_nics(): - if not is_phy_iface(ifname): - continue - mac = get_nic_hwaddr(ifname) - self._ifname_mac_map[ifname] = [mac] - self._mac_ifname_map[mac] = ifname - - # check if interface is part of a linux bond - _bond_name = get_bond_master(ifname) - if _bond_name and _bond_name != ifname: - log('Add linux bond "{}" to map for physical interface "{}" ' - 'with mac "{}".'.format(_bond_name, ifname, mac), - level=DEBUG) - # for bonds we want to be able to get a list of the mac - # addresses for the physical interfaces the bond is made up of. - if self._ifname_mac_map.get(_bond_name): - self._ifname_mac_map[_bond_name].append(mac) - else: - self._ifname_mac_map[_bond_name] = [mac] - - # In light of the pre-deprecation notice in the docstring of this - # class we will expose the ability to configure OVS bonds as a - # DPDK-only feature, but generally use the data structures internally. - if config(enable_dpdk_key): - # resolve PCI address of interfaces listed in the bridges and bonds - # charm configuration options. Note that for already bound - # interfaces the helper will retrieve MAC address from the unit - # KV store as the information is no longer available in sysfs. 
- _pci_bridge_mac = resolve_pci_from_mapping_config( - bridges_key) - _pci_bond_mac = resolve_pci_from_mapping_config( - bonds_key) - - for pci_address, bridge_mac in _pci_bridge_mac.items(): - if bridge_mac.mac in self._mac_ifname_map: - # if we already have the interface name in our map it is - # visible to the system and therefore not bound to DPDK - continue - ifname = 'dpdk-{}'.format( - hashlib.sha1( - pci_address.encode('UTF-8')).hexdigest()[:7]) - self._ifname_mac_map[ifname] = [bridge_mac.mac] - self._mac_ifname_map[bridge_mac.mac] = ifname - self._mac_pci_address_map[bridge_mac.mac] = pci_address - - for pci_address, bond_mac in _pci_bond_mac.items(): - # for bonds we want to be able to get a list of macs from - # the bond name and also get at the interface name made up - # of the hash of the PCI address - ifname = 'dpdk-{}'.format( - hashlib.sha1( - pci_address.encode('UTF-8')).hexdigest()[:7]) - self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) - self._mac_ifname_map[bond_mac.mac] = ifname - self._mac_pci_address_map[bond_mac.mac] = pci_address - - config_bridges = config(bridges_key) or '' - for bridge, ifname_or_mac in ( - pair.split(':', 1) - for pair in config_bridges.split()): - if ':' in ifname_or_mac: - try: - ifname = self.ifname_from_mac(ifname_or_mac) - except KeyError: - # The interface is destined for a different unit in the - # deployment. - continue - macs = [ifname_or_mac] - else: - ifname = ifname_or_mac - macs = self.macs_from_ifname(ifname_or_mac) - - portname = ifname - for mac in macs: - try: - pci_address = self.pci_address_from_mac(mac) - iftype = self.interface_type.dpdk - ifname = self.ifname_from_mac(mac) - except KeyError: - pci_address = None - iftype = self.interface_type.system - - self.add_interface( - bridge, portname, ifname, iftype, pci_address, global_mtu) - - if not macs: - # We have not mapped the interface and it is probably some sort - # of virtual interface. Our user have put it in the config with - # a purpose so let's carry out their wish. LP: #1884743 - log('Add unmapped interface from config: name "{}" bridge "{}"' - .format(ifname, bridge), - level=DEBUG) - self.add_interface( - bridge, ifname, ifname, self.interface_type.system, None, - global_mtu) - - def __getitem__(self, key): - """Provide a Dict-like interface, get value of item. - - :param key: Key to look up value from. - :type key: any - :returns: Value - :rtype: any - """ - return self._map.__getitem__(key) - - def __iter__(self): - """Provide a Dict-like interface, iterate over keys. - - :returns: Iterator - :rtype: Iterator[any] - """ - return self._map.__iter__() - - def __len__(self): - """Provide a Dict-like interface, measure the length of internal map. - - :returns: Length - :rtype: int - """ - return len(self._map) - - def items(self): - """Provide a Dict-like interface, iterate over items. - - :returns: Key Value pairs - :rtype: Iterator[any, any] - """ - return self._map.items() - - def keys(self): - """Provide a Dict-like interface, iterate over keys. 
- - :returns: Iterator - :rtype: Iterator[any] - """ - return self._map.keys() - - def ifname_from_mac(self, mac): - """ - :returns: Name of interface - :rtype: str - :raises: KeyError - """ - return (get_bond_master(self._mac_ifname_map[mac]) or - self._mac_ifname_map[mac]) - - def macs_from_ifname(self, ifname): - """ - :returns: List of hardware address (MAC) of interface - :rtype: List[str] - :raises: KeyError - """ - return self._ifname_mac_map[ifname] - - def pci_address_from_mac(self, mac): - """ - :param mac: Hardware address (MAC) of interface - :type mac: str - :returns: PCI address of device associated with mac - :rtype: str - :raises: KeyError - """ - return self._mac_pci_address_map[mac] - - def add_interface(self, bridge, port, ifname, iftype, - pci_address, mtu_request): - """Add an interface to the map. - - :param bridge: Name of bridge on which the bond will be added - :type bridge: str - :param port: Name of port which will represent the bond on bridge - :type port: str - :param ifname: Name of interface that will make up the bonded port - :type ifname: str - :param iftype: Type of interface - :type iftype: BridgeBondMap.interface_type - :param pci_address: PCI address of interface - :type pci_address: Optional[str] - :param mtu_request: MTU to request for interface - :type mtu_request: Optional[int] - """ - self._map[bridge][port][ifname] = { - 'type': str(iftype), - } - if pci_address: - self._map[bridge][port][ifname].update({ - 'pci-address': pci_address, - }) - if mtu_request is not None: - self._map[bridge][port][ifname].update({ - 'mtu-request': str(mtu_request) - }) - - def get_ifdatamap(self, bridge, port): - """Get structure suitable for charmhelpers.contrib.network.ovs helpers. - - :param bridge: Name of bridge on which the port will be added - :type bridge: str - :param port: Name of port which will represent one or more interfaces - :type port: str - """ - for _bridge, _ports in self.items(): - for _port, _interfaces in _ports.items(): - if _bridge == bridge and _port == port: - ifdatamap = {} - for name, data in _interfaces.items(): - ifdatamap.update({ - name: { - 'type': data['type'], - }, - }) - if data.get('mtu-request') is not None: - ifdatamap[name].update({ - 'mtu_request': data['mtu-request'], - }) - if data.get('pci-address'): - ifdatamap[name].update({ - 'options': { - 'dpdk-devargs': data['pci-address'], - }, - }) - return ifdatamap - - -class BondConfig(object): - """Container and helpers for bond configuration options. - - Data is put into a dictionary and a convenient config get interface is - provided. - """ - - DEFAULT_LACP_CONFIG = { - 'mode': 'balance-tcp', - 'lacp': 'active', - 'lacp-time': 'fast' - } - ALL_BONDS = 'ALL_BONDS' - - BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] - BOND_LACP = ['active', 'passive', 'off'] - BOND_LACP_TIME = ['fast', 'slow'] - - def __init__(self, config_key=None): - """Parse specified configuration option. 
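A sketch of how the BridgePortInterfaceMap above is typically walked to drive the OVS helpers; it needs a unit environment (charm config, NICs), and the bridge and port names come from the 'data-port' style options:

    from charmhelpers.contrib.openstack.context import BridgePortInterfaceMap

    bpi = BridgePortInterfaceMap()
    for bridge in bpi:
        for port in bpi[bridge]:
            ifdatamap = bpi.get_ifdatamap(bridge, port)
            # Each entry carries 'type', plus optional 'mtu_request' and
            # 'options': {'dpdk-devargs': <pci-address>} for DPDK devices.
            print(bridge, port, ifdatamap)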
- - :param config_key: Configuration key to retrieve data from - (default: ``dpdk-bond-config``) - :type config_key: Optional[str] - """ - self.config_key = config_key or 'dpdk-bond-config' - - self.lacp_config = { - self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG) - } - - lacp_config = config(self.config_key) - if lacp_config: - lacp_config_map = lacp_config.split() - for entry in lacp_config_map: - bond, entry = entry.partition(':')[0:3:2] - if not bond: - bond = self.ALL_BONDS - - mode, entry = entry.partition(':')[0:3:2] - if not mode: - mode = self.DEFAULT_LACP_CONFIG['mode'] - assert mode in self.BOND_MODES, \ - "Bond mode {} is invalid".format(mode) - - lacp, entry = entry.partition(':')[0:3:2] - if not lacp: - lacp = self.DEFAULT_LACP_CONFIG['lacp'] - assert lacp in self.BOND_LACP, \ - "Bond lacp {} is invalid".format(lacp) - - lacp_time, entry = entry.partition(':')[0:3:2] - if not lacp_time: - lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time'] - assert lacp_time in self.BOND_LACP_TIME, \ - "Bond lacp-time {} is invalid".format(lacp_time) - - self.lacp_config[bond] = { - 'mode': mode, - 'lacp': lacp, - 'lacp-time': lacp_time - } - - def get_bond_config(self, bond): - """Get the LACP configuration for a bond - - :param bond: the bond name - :return: a dictionary with the configuration of the bond - :rtype: Dict[str,Dict[str,str]] - """ - return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS]) - - def get_ovs_portdata(self, bond): - """Get structure suitable for charmhelpers.contrib.network.ovs helpers. - - :param bond: the bond name - :return: a dictionary with the configuration of the bond - :rtype: Dict[str,Union[str,Dict[str,str]]] - """ - bond_config = self.get_bond_config(bond) - return { - 'bond_mode': bond_config['mode'], - 'lacp': bond_config['lacp'], - 'other_config': { - 'lacp-time': bond_config['lacp-time'], - }, - } - - -class SRIOVContext(OSContextGenerator): - """Provide context for configuring SR-IOV devices.""" - - class sriov_config_mode(enum.Enum): - """Mode in which SR-IOV is configured. - - The configuration option identified by the ``numvfs_key`` parameter - is overloaded and defines in which mode the charm should interpret - the other SR-IOV-related configuration options. - """ - auto = 'auto' - blanket = 'blanket' - explicit = 'explicit' - - PCIDeviceNumVFs = collections.namedtuple( - 'PCIDeviceNumVFs', ['device', 'numvfs']) - - def _determine_numvfs(self, device, sriov_numvfs): - """Determine number of Virtual Functions (VFs) configured for device. - - :param device: Object describing a PCI Network interface card (NIC)/ - :type device: contrib.hardware.pci.PCINetDevice - :param sriov_numvfs: Number of VFs requested for blanket configuration. - :type sriov_numvfs: int - :returns: Number of VFs to configure for device - :rtype: Optional[int] - """ - - def _get_capped_numvfs(requested): - """Get a number of VFs that does not exceed individual card limits. - - Depending and make and model of NIC the number of VFs supported - vary. Requesting more VFs than a card support would be a fatal - error, cap the requested number at the total number of VFs each - individual card supports. - - :param requested: Number of VFs requested - :type requested: int - :returns: Number of VFs allowed - :rtype: int - """ - actual = min(int(requested), int(device.sriov_totalvfs)) - if actual < int(requested): - log('Requested VFs ({}) too high for device {}. 
Falling back ' - 'to value supported by device: {}' - .format(requested, device.interface_name, - device.sriov_totalvfs), - level=WARNING) - return actual - - if self._sriov_config_mode == self.sriov_config_mode.auto: - # auto-mode - # - # If device mapping configuration is present, return information - # on cards with mapping. - # - # If no device mapping configuration is present, return information - # for all cards. - # - # The maximum number of VFs supported by card will be used. - if (self._sriov_mapped_devices and - device.interface_name not in self._sriov_mapped_devices): - log('SR-IOV configured in auto mode: No device mapping for {}' - .format(device.interface_name), - level=DEBUG) - return - return _get_capped_numvfs(device.sriov_totalvfs) - elif self._sriov_config_mode == self.sriov_config_mode.blanket: - # blanket-mode - # - # User has specified a number of VFs that should apply to all - # cards with support for VFs. - return _get_capped_numvfs(sriov_numvfs) - elif self._sriov_config_mode == self.sriov_config_mode.explicit: - # explicit-mode - # - # User has given a list of interface names and associated number of - # VFs - if device.interface_name not in self._sriov_config_devices: - log('SR-IOV configured in explicit mode: No device:numvfs ' - 'pair for device {}, skipping.' - .format(device.interface_name), - level=DEBUG) - return - return _get_capped_numvfs( - self._sriov_config_devices[device.interface_name]) - else: - raise RuntimeError('This should not be reached') - - def __init__(self, numvfs_key=None, device_mappings_key=None): - """Initialize map from PCI devices and configuration options. - - :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs') - :type numvfs_key: Optional[str] - :param device_mappings_key: Config key for device mappings - (default: 'sriov-device-mappings') - :type device_mappings_key: Optional[str] - :raises: RuntimeError - """ - numvfs_key = numvfs_key or 'sriov-numvfs' - device_mappings_key = device_mappings_key or 'sriov-device-mappings' - - devices = pci.PCINetDevices() - charm_config = config() - sriov_numvfs = charm_config.get(numvfs_key) or '' - sriov_device_mappings = charm_config.get(device_mappings_key) or '' - - # create list of devices from sriov_device_mappings config option - self._sriov_mapped_devices = [ - pair.split(':', 1)[1] - for pair in sriov_device_mappings.split() - ] - - # create map of device:numvfs from sriov_numvfs config option - self._sriov_config_devices = { - ifname: numvfs for ifname, numvfs in ( - pair.split(':', 1) for pair in sriov_numvfs.split() - if ':' in sriov_numvfs) - } - - # determine configuration mode from contents of sriov_numvfs - if sriov_numvfs == 'auto': - self._sriov_config_mode = self.sriov_config_mode.auto - elif sriov_numvfs.isdigit(): - self._sriov_config_mode = self.sriov_config_mode.blanket - elif ':' in sriov_numvfs: - self._sriov_config_mode = self.sriov_config_mode.explicit - else: - raise RuntimeError('Unable to determine mode of SR-IOV ' - 'configuration.') - - self._map = { - device.pci_address: self.PCIDeviceNumVFs( - device, self._determine_numvfs(device, sriov_numvfs)) - for device in devices.pci_devices - if device.sriov and - self._determine_numvfs(device, sriov_numvfs) is not None - } - - def __call__(self): - """Provide backward compatible SR-IOV context. - - :returns: Map interface name: min(configured, max) virtual functions. 
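# --- Editor's example --------------------------------------------------------
# Standalone sketch of how the overloaded sriov-numvfs value selects the
# configuration mode above: 'auto' enables auto mode, a bare number selects
# blanket mode, and 'ifname:numvfs' pairs select explicit mode.
def sriov_mode(sriov_numvfs):
    if sriov_numvfs == 'auto':
        return 'auto'
    elif sriov_numvfs.isdigit():
        return 'blanket'
    elif ':' in sriov_numvfs:
        return 'explicit'
    raise RuntimeError('Unable to determine mode of SR-IOV configuration.')

assert sriov_mode('auto') == 'auto'
assert sriov_mode('32') == 'blanket'
assert sriov_mode('eth0:16 eth1:32') == 'explicit'
# ------------------------------------------------------------------------------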
- Example: - { - 'eth0': 16, - 'eth1': 32, - 'eth2': 64, - } - :rtype: Dict[str,int] - """ - return { - pcidnvfs.device.interface_name: pcidnvfs.numvfs - for _, pcidnvfs in self._map.items() - } - - @property - def get_map(self): - """Provide map of configured SR-IOV capable PCI devices. - - :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs. - Example: - { - '0000:81:00.0': self.PCIDeviceNumVFs(, 32), - '0000:81:00.1': self.PCIDeviceNumVFs(, 32), - } - :rtype: Dict[str, self.PCIDeviceNumVFs] - """ - return self._map - - -class CephBlueStoreCompressionContext(OSContextGenerator): - """Ceph BlueStore compression options.""" - - # Tuple with Tuples that map configuration option name to CephBrokerRq op - # property name - options = ( - ('bluestore-compression-algorithm', - 'compression-algorithm'), - ('bluestore-compression-mode', - 'compression-mode'), - ('bluestore-compression-required-ratio', - 'compression-required-ratio'), - ('bluestore-compression-min-blob-size', - 'compression-min-blob-size'), - ('bluestore-compression-min-blob-size-hdd', - 'compression-min-blob-size-hdd'), - ('bluestore-compression-min-blob-size-ssd', - 'compression-min-blob-size-ssd'), - ('bluestore-compression-max-blob-size', - 'compression-max-blob-size'), - ('bluestore-compression-max-blob-size-hdd', - 'compression-max-blob-size-hdd'), - ('bluestore-compression-max-blob-size-ssd', - 'compression-max-blob-size-ssd'), - ) - - def __init__(self): - """Initialize context by loading values from charm config. - - We keep two maps, one suitable for use with CephBrokerRq's and one - suitable for template generation. - """ - charm_config = config() - - # CephBrokerRq op map - self.op = {} - # Context exposed for template generation - self.ctxt = {} - for config_key, op_key in self.options: - value = charm_config.get(config_key) - self.ctxt.update({config_key.replace('-', '_'): value}) - self.op.update({op_key: value}) - - def __call__(self): - """Get context. - - :returns: Context - :rtype: Dict[str,any] - """ - return self.ctxt - - def get_op(self): - """Get values for use in CephBrokerRq op. - - :returns: Context values with CephBrokerRq op property name as key. - :rtype: Dict[str,any] - """ - return self.op - - def get_kwargs(self): - """Get values for use as keyword arguments. - - :returns: Context values with key suitable for use as kwargs to - CephBrokerRq add_op_create_*_pool methods. - :rtype: Dict[str,any] - """ - return { - k.replace('-', '_'): v - for k, v in self.op.items() - } - - def validate(self): - """Validate options. - - :raises: AssertionError - """ - # We slip in a dummy name on class instantiation to allow validation of - # the other options. It will not affect further use. - # - # NOTE: once we retire Python 3.5 we can fold this into a in-line - # dictionary comprehension in the call to the initializer. - dummy_op = {'name': 'dummy-name'} - dummy_op.update(self.op) - pool = ch_ceph.BasePool('dummy-service', op=dummy_op) - pool.validate() diff --git a/hooks/charmhelpers/contrib/openstack/deferred_events.py b/hooks/charmhelpers/contrib/openstack/deferred_events.py deleted file mode 100644 index 94eacf6c..00000000 --- a/hooks/charmhelpers/contrib/openstack/deferred_events.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
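# --- Editor's example --------------------------------------------------------
# Standalone sketch of the two key translations in the BlueStore compression
# context above: charm config keys keep their dashes, minus the 'bluestore-'
# prefix, for CephBrokerRq ops, and switch to underscores for templates.
# The config value is illustrative.
charm_config = {'bluestore-compression-mode': 'aggressive'}
options = (('bluestore-compression-mode', 'compression-mode'),)

ctxt, op = {}, {}
for config_key, op_key in options:
    value = charm_config.get(config_key)
    ctxt[config_key.replace('-', '_')] = value
    op[op_key] = value

assert ctxt == {'bluestore_compression_mode': 'aggressive'}
assert op == {'compression-mode': 'aggressive'}
# ------------------------------------------------------------------------------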
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Module for managing deferred service events. - -This module is used to manage deferred service events from both charm actions -and package actions. -""" - -import datetime -import glob -import yaml -import os -import time -import uuid - -import charmhelpers.contrib.openstack.policy_rcd as policy_rcd -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as host -import charmhelpers.core.unitdata as unitdata - -import subprocess - - -# Deferred events generated from the charm are stored along side those -# generated from packaging. -DEFERRED_EVENTS_DIR = policy_rcd.POLICY_DEFERRED_EVENTS_DIR - - -class ServiceEvent(): - - def __init__(self, timestamp, service, reason, action, - policy_requestor_name=None, policy_requestor_type=None): - self.timestamp = timestamp - self.service = service - self.reason = reason - self.action = action - if policy_requestor_name: - self.policy_requestor_name = policy_requestor_name - else: - self.policy_requestor_name = hookenv.service_name() - if policy_requestor_type: - self.policy_requestor_type = policy_requestor_type - else: - self.policy_requestor_type = 'charm' - - def __eq__(self, other): - for attr in vars(self): - if getattr(self, attr) != getattr(other, attr): - return False - return True - - def matching_request(self, other): - for attr in ['service', 'action', 'reason']: - if getattr(self, attr) != getattr(other, attr): - return False - return True - - @classmethod - def from_dict(cls, data): - return cls( - data['timestamp'], - data['service'], - data['reason'], - data['action'], - data.get('policy_requestor_name'), - data.get('policy_requestor_type')) - - -def deferred_events_files(): - """Deferred event files - - Deferred event files that were generated by service_name() policy. - - :returns: Deferred event files - :rtype: List[str] - """ - return glob.glob('{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) - - -def read_event_file(file_name): - """Read a file and return the corresponding objects. - - :param file_name: Name of file to read. - :type file_name: str - :returns: ServiceEvent from file. - :rtype: ServiceEvent - """ - with open(file_name, 'r') as f: - contents = yaml.safe_load(f) - event = ServiceEvent( - contents['timestamp'], - contents['service'], - contents['reason'], - contents['action'], - policy_requestor_name=contents.get('policy_requestor_name'), - policy_requestor_type=contents.get('policy_requestor_type')) - return event - - -def deferred_events(): - """Get list of deferred events. - - List of deferred events. Events are represented by dicts of the form: - - { - action: restart, - policy_requestor_name: neutron-openvswitch, - policy_requestor_type: charm, - reason: 'Pkg update', - service: openvswitch-switch, - time: 1614328743} - - :returns: List of deferred events. - :rtype: List[ServiceEvent] - """ - events = [] - for defer_file in deferred_events_files(): - events.append((defer_file, read_event_file(defer_file))) - return events - - -def duplicate_event_files(event): - """Get list of event files that have equivalent deferred events. 
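# --- Editor's example --------------------------------------------------------
# Illustrative sketch of a .deferred file as consumed by read_event_file()
# above; the fields mirror ServiceEvent's attributes and the values here are
# examples only.
import yaml

SAMPLE = """
timestamp: 1614328743
service: openvswitch-switch
reason: Pkg update
action: restart
policy_requestor_type: charm
policy_requestor_name: neutron-openvswitch
"""
event = yaml.safe_load(SAMPLE)
assert event['action'] == 'restart'
assert event['policy_requestor_type'] == 'charm'
# ------------------------------------------------------------------------------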
- - :param event: Event to compare - :type event: ServiceEvent - :returns: List of event files - :rtype: List[str] - """ - duplicates = [] - for event_file, existing_event in deferred_events(): - if event.matching_request(existing_event): - duplicates.append(event_file) - return duplicates - - -def get_event_record_file(policy_requestor_type, policy_requestor_name): - """Generate filename for storing a new event. - - :param policy_requestor_type: System that blocked event - :type policy_requestor_type: str - :param policy_requestor_name: Name of application that blocked event - :type policy_requestor_name: str - :returns: File name - :rtype: str - """ - file_name = '{}/{}-{}-{}.deferred'.format( - DEFERRED_EVENTS_DIR, - policy_requestor_type, - policy_requestor_name, - uuid.uuid1()) - return file_name - - -def save_event(event): - """Write deferred events to backend. - - :param event: Event to save - :type event: ServiceEvent - """ - requestor_name = hookenv.service_name() - requestor_type = 'charm' - init_policy_log_dir() - if duplicate_event_files(event): - hookenv.log( - "Not writing new event, existing event found. {} {} {}".format( - event.service, - event.action, - event.reason), - level="DEBUG") - else: - record_file = get_event_record_file( - policy_requestor_type=requestor_type, - policy_requestor_name=requestor_name) - - with open(record_file, 'w') as f: - data = { - 'timestamp': event.timestamp, - 'service': event.service, - 'action': event.action, - 'reason': event.reason, - 'policy_requestor_type': requestor_type, - 'policy_requestor_name': requestor_name} - yaml.dump(data, f) - - -def clear_deferred_events(svcs, action): - """Remove any outstanding deferred events. - - Remove a deferred event if its service is in the services list and its - action matches. - - :param svcs: List of services to remove. - :type svcs: List[str] - :param action: Action to remove - :type action: str - """ - # XXX This function is not currently processing the action. It needs to - # match the action and also take account of try-restart and the - # equivalnce of stop-start and restart. - for defer_file in deferred_events_files(): - deferred_event = read_event_file(defer_file) - if deferred_event.service in svcs: - os.remove(defer_file) - - -def init_policy_log_dir(): - """Ensure directory to store events exists.""" - if not os.path.exists(DEFERRED_EVENTS_DIR): - os.mkdir(DEFERRED_EVENTS_DIR) - - -def get_deferred_events(): - """Return a list of deferred events requested by the charm and packages. - - :returns: List of deferred events - :rtype: List[ServiceEvent] - """ - events = [] - for _, event in deferred_events(): - events.append(event) - return events - - -def get_deferred_restarts(): - """List of deferred restart events requested by the charm and packages. - - :returns: List of deferred restarts - :rtype: List[ServiceEvent] - """ - return [e for e in get_deferred_events() if e.action == 'restart'] - - -def clear_deferred_restarts(services): - """Clear deferred restart events targeted at `services`. - - :param services: Services with deferred actions to clear. - :type services: List[str] - """ - clear_deferred_events(services, 'restart') - - -def process_svc_restart(service): - """Respond to a service restart having occurred. - - :param service: Services that the action was performed against. - :type service: str - """ - clear_deferred_restarts([service]) - - -def is_restart_permitted(): - """Check whether restarts are permitted. 
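# --- Editor's example --------------------------------------------------------
# Standalone sketch of the deduplication rule save_event() applies above: a
# new event is dropped when an existing record matches on service, action
# and reason, mirroring ServiceEvent.matching_request().
def matching_request(a, b):
    return all(a[k] == b[k] for k in ('service', 'action', 'reason'))

existing = {'service': 'ovs', 'action': 'restart', 'reason': 'Pkg update'}
incoming = {'service': 'ovs', 'action': 'restart', 'reason': 'Pkg update'}
assert matching_request(existing, incoming)  # duplicate: not written again
# ------------------------------------------------------------------------------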
- - :returns: Whether restarts are permitted - :rtype: bool - """ - if hookenv.config('enable-auto-restarts') is None: - return True - return hookenv.config('enable-auto-restarts') - - -def check_and_record_restart_request(service, changed_files): - """Check if restarts are permitted, if they are not log the request. - - :param service: Service to be restarted - :type service: str - :param changed_files: Files that have changed to trigger restarts. - :type changed_files: List[str] - :returns: Whether restarts are permitted - :rtype: bool - """ - changed_files = sorted(list(set(changed_files))) - permitted = is_restart_permitted() - if not permitted: - save_event(ServiceEvent( - timestamp=round(time.time()), - service=service, - reason='File(s) changed: {}'.format( - ', '.join(changed_files)), - action='restart')) - return permitted - - -def deferrable_svc_restart(service, reason=None): - """Restarts service if permitted, if not defer it. - - :param service: Service to be restarted - :type service: str - :param reason: Reason for restart - :type reason: Union[str, None] - """ - if is_restart_permitted(): - host.service_restart(service) - else: - save_event(ServiceEvent( - timestamp=round(time.time()), - service=service, - reason=reason, - action='restart')) - - -def configure_deferred_restarts(services): - """Setup deferred restarts. - - :param services: Services to block restarts of. - :type services: List[str] - """ - policy_rcd.install_policy_rcd() - if is_restart_permitted(): - policy_rcd.remove_policy_file() - else: - blocked_actions = ['stop', 'restart', 'try-restart'] - for svc in services: - policy_rcd.add_policy_block(svc, blocked_actions) - - -def get_service_start_time(service): - """Find point in time when the systemd unit transitioned to active state. - - :param service: Services to check timetsamp of. - :type service: str - """ - start_time = None - out = subprocess.check_output( - [ - 'systemctl', - 'show', - service, - '--property=ActiveEnterTimestamp']) - str_time = out.decode().rstrip().replace('ActiveEnterTimestamp=', '') - if str_time: - start_time = datetime.datetime.strptime( - str_time, - '%a %Y-%m-%d %H:%M:%S %Z') - return start_time - - -def check_restart_timestamps(): - """Check deferred restarts against systemd units start time. - - Check if a service has a deferred event and clear it if it has been - subsequently restarted. - """ - for event in get_deferred_restarts(): - start_time = get_service_start_time(event.service) - deferred_restart_time = datetime.datetime.fromtimestamp( - event.timestamp) - if start_time and start_time < deferred_restart_time: - hookenv.log( - ("Restart still required, {} was started at {}, restart was " - "requested after that at {}").format( - event.service, - start_time, - deferred_restart_time), - level='DEBUG') - else: - clear_deferred_restarts([event.service]) - - -def set_deferred_hook(hookname): - """Record that a hook has been deferred. - - :param hookname: Name of hook that was deferred. - :type hookname: str - """ - with unitdata.HookData()() as t: - kv = t[0] - deferred_hooks = kv.get('deferred-hooks', []) - if hookname not in deferred_hooks: - deferred_hooks.append(hookname) - kv.set('deferred-hooks', sorted(list(set(deferred_hooks)))) - - -def get_deferred_hooks(): - """Get a list of deferred hooks. - - :returns: List of hook names. 
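# --- Editor's example --------------------------------------------------------
# Standalone sketch of the timestamp parsing in get_service_start_time()
# above; the 'systemctl show' output line is illustrative.
import datetime

out = 'ActiveEnterTimestamp=Fri 2021-02-26 09:19:03 UTC'
str_time = out.rstrip().replace('ActiveEnterTimestamp=', '')
start_time = datetime.datetime.strptime(str_time, '%a %Y-%m-%d %H:%M:%S %Z')
assert start_time.year == 2021
# ------------------------------------------------------------------------------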
- :rtype: List[str] - """ - with unitdata.HookData()() as t: - kv = t[0] - return kv.get('deferred-hooks', []) - - -def clear_deferred_hooks(): - """Clear any deferred hooks.""" - with unitdata.HookData()() as t: - kv = t[0] - kv.set('deferred-hooks', []) - - -def clear_deferred_hook(hookname): - """Clear a specific deferred hooks. - - :param hookname: Name of hook to remove. - :type hookname: str - """ - with unitdata.HookData()() as t: - kv = t[0] - deferred_hooks = kv.get('deferred-hooks', []) - if hookname in deferred_hooks: - deferred_hooks.remove(hookname) - kv.set('deferred-hooks', deferred_hooks) diff --git a/hooks/charmhelpers/contrib/openstack/exceptions.py b/hooks/charmhelpers/contrib/openstack/exceptions.py deleted file mode 100644 index b2330637..00000000 --- a/hooks/charmhelpers/contrib/openstack/exceptions.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class OSContextError(Exception): - """Raised when an error occurs during context generation. - - This exception is principally used in contrib.openstack.context - """ - pass - - -class ServiceActionError(Exception): - """Raised when a service action (stop/start/ etc) failed.""" - pass diff --git a/hooks/charmhelpers/contrib/openstack/files/__init__.py b/hooks/charmhelpers/contrib/openstack/files/__init__.py deleted file mode 100644 index 9df5f746..00000000 --- a/hooks/charmhelpers/contrib/openstack/files/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module diff --git a/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py deleted file mode 100755 index 5f392b3c..00000000 --- a/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python3 - -# Copyright 2014-2022 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
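# --- Editor's example --------------------------------------------------------
# Standalone sketch of the 'deferred-hooks' bookkeeping above, with a plain
# dict standing in for the charm's unitdata key-value store.
kv = {'deferred-hooks': []}

def set_deferred_hook(hookname):
    deferred = kv.get('deferred-hooks', [])
    if hookname not in deferred:
        deferred.append(hookname)
    kv['deferred-hooks'] = sorted(set(deferred))

set_deferred_hook('config-changed')
set_deferred_hook('config-changed')  # idempotent: recorded once
assert kv['deferred-hooks'] == ['config-changed']
# ------------------------------------------------------------------------------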
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Checks for services with deferred restarts.
-
-This Nagios check will parse /var/lib/policy-rc.d/
-to find any restarts that are currently deferred.
-"""
-
-import argparse
-import glob
-import sys
-import yaml
-
-
-DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'
-
-
-def get_deferred_events():
-    """Return a list of deferred events dicts from policy-rc.d files.
-
-    Events are read from DEFERRED_EVENTS_DIR and are of the form:
-    {
-        action: restart,
-        policy_requestor_name: rabbitmq-server,
-        policy_requestor_type: charm,
-        reason: 'Pkg update',
-        service: rabbitmq-server,
-        time: 1614328743
-    }
-
-    :raises OSError: Raised in case of a system error while reading a policy file
-    :raises yaml.YAMLError: Raised if parsing a policy file fails
-
-    :returns: List of deferred event dictionaries
-    :rtype: list
-    """
-    deferred_events_files = glob.glob(
-        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))
-
-    deferred_events = []
-    for event_file in deferred_events_files:
-        with open(event_file, 'r') as f:
-            event = yaml.safe_load(f)
-            deferred_events.append(event)
-
-    return deferred_events
-
-
-def get_deferred_restart_services(application=None):
-    """Return a list of services with deferred restarts.
-
-    :param str application: Name of the application that blocked the service restart.
-                            If application is None, all services with deferred restarts
-                            are returned. Services which are blocked by a non-charm
-                            requestor are always returned.
-
-    :raises OSError: Raised in case of a system error while reading a policy file
-    :raises yaml.YAMLError: Raised if parsing a policy file fails
-
-    :returns: List of services with deferred restarts belonging to application.
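# --- Editor's example --------------------------------------------------------
# Standalone sketch of the filter implemented below. Python's operator
# precedence ('and' binds tighter than 'or') makes the condition read: keep
# events from non-charm requestors unconditionally, plus charm events whose
# requestor name matches the given application.
def keep(event, application):
    return (event['policy_requestor_type'] != 'charm' or
            (event['policy_requestor_type'] == 'charm' and
             event['policy_requestor_name'] == application))

e = {'policy_requestor_type': 'charm',
     'policy_requestor_name': 'rabbitmq-server'}
assert keep(e, 'rabbitmq-server')
assert not keep(e, 'nova-compute')
# ------------------------------------------------------------------------------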
- :rtype: list - """ - - deferred_restart_events = filter( - lambda e: e['action'] == 'restart', get_deferred_events()) - - deferred_restart_services = set() - for restart_event in deferred_restart_events: - if application: - if ( - restart_event['policy_requestor_type'] != 'charm' or - restart_event['policy_requestor_type'] == 'charm' and - restart_event['policy_requestor_name'] == application - ): - deferred_restart_services.add(restart_event['service']) - else: - deferred_restart_services.add(restart_event['service']) - - return list(deferred_restart_services) - - -def main(): - """Check for services with deferred restarts.""" - parser = argparse.ArgumentParser( - description='Check for services with deferred restarts') - parser.add_argument( - '--application', help='Check services belonging to this application only') - - args = parser.parse_args() - - services = set(get_deferred_restart_services(args.application)) - - if len(services) == 0: - print('OK: No deferred service restarts.') - sys.exit(0) - else: - print( - 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) - sys.exit(1) - - -if __name__ == '__main__': - try: - main() - except OSError as e: - print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) - sys.exit(1) - except yaml.YAMLError as e: - print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) - sys.exit(1) - except Exception as e: - print('CRITICAL: An unknown error occurred: {}'.format(str(e))) - sys.exit(1) diff --git a/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py b/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py deleted file mode 100755 index 431e972b..00000000 --- a/hooks/charmhelpers/contrib/openstack/files/policy_rc_d_script.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python3 - -"""This script is an implementation of policy-rc.d - -For further information on policy-rc.d see *1 - -*1 https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt -""" -import collections -import glob -import os -import logging -import sys -import time -import uuid -import yaml - - -SystemPolicy = collections.namedtuple( - 'SystemPolicy', - [ - 'policy_requestor_name', - 'policy_requestor_type', - 'service', - 'blocked_actions']) - -DEFAULT_POLICY_CONFIG_DIR = '/etc/policy-rc.d' -DEFAULT_POLICY_LOG_DIR = '/var/lib/policy-rc.d' - - -def read_policy_file(policy_file): - """Return system policies from given file. - - :param file_name: Name of file to read. - :type file_name: str - :returns: Policy - :rtype: List[SystemPolicy] - """ - policies = [] - if os.path.exists(policy_file): - with open(policy_file, 'r') as f: - policy = yaml.safe_load(f) - for service, actions in policy['blocked_actions'].items(): - service = service.replace('.service', '') - policies.append(SystemPolicy( - policy_requestor_name=policy['policy_requestor_name'], - policy_requestor_type=policy['policy_requestor_type'], - service=service, - blocked_actions=actions)) - return policies - - -def get_policies(policy_config_dir): - """Return all system policies in policy_config_dir. - - :param policy_config_dir: Name of file to read. 
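# --- Editor's example --------------------------------------------------------
# Illustrative sketch of a .policy file as parsed by read_policy_file()
# above; field values are examples only. Note the '.service' suffix is
# stripped before service names are matched.
import yaml

SAMPLE = """
policy_requestor_name: neutron-openvswitch
policy_requestor_type: charm
blocked_actions:
  openvswitch-switch.service: [stop, restart, try-restart]
"""
policy = yaml.safe_load(SAMPLE)
assert policy['blocked_actions']['openvswitch-switch.service'] == [
    'stop', 'restart', 'try-restart']
# ------------------------------------------------------------------------------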
-    :type policy_config_dir: str
-    :returns: Policy
-    :rtype: List[SystemPolicy]
-    """
-    _policy = []
-    for f in glob.glob('{}/*.policy'.format(policy_config_dir)):
-        _policy.extend(read_policy_file(f))
-    return _policy
-
-
-def record_blocked_action(service, action, blocking_policies, policy_log_dir):
-    """Record that an action was requested but denied.
-
-    :param service: Service that was blocked
-    :type service: str
-    :param action: Action that was blocked.
-    :type action: str
-    :param blocking_policies: Policies that blocked the action on the service.
-    :type blocking_policies: List[SystemPolicy]
-    :param policy_log_dir: Directory to place the blocking action record.
-    :type policy_log_dir: str
-    """
-    if not os.path.exists(policy_log_dir):
-        os.mkdir(policy_log_dir)
-    seconds = round(time.time())
-    for policy in blocking_policies:
-        file_name = '{}/{}-{}-{}.deferred'.format(
-            policy_log_dir,
-            policy.policy_requestor_type,
-            policy.policy_requestor_name,
-            uuid.uuid1())
-        with open(file_name, 'w') as f:
-            data = {
-                'timestamp': seconds,
-                'service': service,
-                'action': action,
-                'reason': 'Package update',
-                'policy_requestor_type': policy.policy_requestor_type,
-                'policy_requestor_name': policy.policy_requestor_name}
-            yaml.dump(data, f)
-
-
-def get_blocking_policies(service, action, policy_config_dir):
-    """Return the policies that block the requested action on the service.
-
-    :param service: Service that action is requested against.
-    :type service: str
-    :param action: Action that is requested.
-    :type action: str
-    :param policy_config_dir: Directory that stores policy files.
-    :type policy_config_dir: str
-    :returns: Policies
-    :rtype: List[SystemPolicy]
-    """
-    service = service.replace('.service', '')
-    blocking_policies = [
-        policy
-        for policy in get_policies(policy_config_dir)
-        if policy.service == service and action in policy.blocked_actions]
-    return blocking_policies
-
-
-def process_action_request(service, action, policy_config_dir, policy_log_dir):
-    """Take the requested action against service and check if it is permitted.
-
-    :param service: Service that action is requested against.
-    :type service: str
-    :param action: Action that is requested.
-    :type action: str
-    :param policy_config_dir: Directory that stores policy files.
-    :type policy_config_dir: str
-    :param policy_log_dir: Directory that stores deferred event records.
-    :type policy_log_dir: str
-    :returns: Tuple of whether the action is permitted and explanation.
- :rtype: (boolean, str) - """ - blocking_policies = get_blocking_policies( - service, - action, - policy_config_dir) - if blocking_policies: - policy_msg = [ - '{} {}'.format(p.policy_requestor_type, p.policy_requestor_name) - for p in sorted(blocking_policies)] - message = '{} of {} blocked by {}'.format( - action, - service, - ', '.join(policy_msg)) - record_blocked_action( - service, - action, - blocking_policies, - policy_log_dir) - action_permitted = False - else: - message = "Permitting {} {}".format(service, action) - action_permitted = True - return action_permitted, message - - -def main(): - logging.basicConfig( - filename='/var/log/policy-rc.d.log', - level=logging.DEBUG, - format='%(asctime)s %(message)s') - - service = sys.argv[1] - action = sys.argv[2] - - permitted, message = process_action_request( - service, - action, - DEFAULT_POLICY_CONFIG_DIR, - DEFAULT_POLICY_LOG_DIR) - logging.info(message) - - # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt - # Exit status codes: - # 0 - action allowed - # 1 - unknown action (therefore, undefined policy) - # 100 - unknown initscript id - # 101 - action forbidden by policy - # 102 - subsystem error - # 103 - syntax error - # 104 - [reserved] - # 105 - behaviour uncertain, policy undefined. - # 106 - action not allowed. Use the returned fallback actions - # (which are implied to be "allowed") instead. - - if permitted: - return 0 - else: - return 101 - - -if __name__ == "__main__": - rc = main() - sys.exit(rc) diff --git a/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/hooks/charmhelpers/contrib/openstack/ha/__init__.py deleted file mode 100644 index 9b088de8..00000000 --- a/hooks/charmhelpers/contrib/openstack/ha/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py deleted file mode 100644 index a5cbdf53..00000000 --- a/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ /dev/null @@ -1,348 +0,0 @@ -# Copyright 2014-2016 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Copyright 2016 Canonical Ltd. -# -# Authors: -# Openstack Charmers < -# - -""" -Helpers for high availability. 
-""" - -import hashlib -import json - -import re - -from charmhelpers.core.hookenv import ( - expected_related_units, - log, - relation_set, - charm_name, - config, - status_set, - DEBUG, -) - -from charmhelpers.core.host import ( - lsb_release -) - -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - is_ipv6, -) - -from charmhelpers.contrib.network.ip import ( - get_iface_for_address, - get_netmask_for_address, -) - -from charmhelpers.contrib.hahelpers.cluster import ( - get_hacluster_config -) - -JSON_ENCODE_OPTIONS = dict( - sort_keys=True, - allow_nan=False, - indent=None, - separators=(',', ':'), -) - -VIP_GROUP_NAME = 'grp_{service}_vips' -DNSHA_GROUP_NAME = 'grp_{service}_hostnames' - - -class DNSHAException(Exception): - """Raised when an error occurs setting up DNS HA - """ - - pass - - -def update_dns_ha_resource_params(resources, resource_params, - relation_id=None, - crm_ocf='ocf:maas:dns'): - """ Configure DNS-HA resources based on provided configuration and - update resource dictionaries for the HA relation. - - @param resources: Pointer to dictionary of resources. - Usually instantiated in ha_joined(). - @param resource_params: Pointer to dictionary of resource parameters. - Usually instantiated in ha_joined() - @param relation_id: Relation ID of the ha relation - @param crm_ocf: Corosync Open Cluster Framework resource agent to use for - DNS HA - """ - _relation_data = {'resources': {}, 'resource_params': {}} - update_hacluster_dns_ha(charm_name(), - _relation_data, - crm_ocf) - resources.update(_relation_data['resources']) - resource_params.update(_relation_data['resource_params']) - relation_set(relation_id=relation_id, groups=_relation_data['groups']) - - -def assert_charm_supports_dns_ha(): - """Validate prerequisites for DNS HA - The MAAS client is only available on Xenial or greater - - :raises DNSHAException: if release is < 16.04 - """ - if lsb_release().get('DISTRIB_RELEASE') < '16.04': - msg = ('DNS HA is only supported on 16.04 and greater ' - 'versions of Ubuntu.') - status_set('blocked', msg) - raise DNSHAException(msg) - return True - - -def expect_ha(): - """ Determine if the unit expects to be in HA - - Check juju goal-state if ha relation is expected, check for VIP or dns-ha - settings which indicate the unit should expect to be related to hacluster. - - @returns boolean - """ - ha_related_units = [] - try: - ha_related_units = list(expected_related_units(reltype='ha')) - except (NotImplementedError, KeyError): - pass - return len(ha_related_units) > 0 or config('vip') or config('dns-ha') - - -def generate_ha_relation_data(service, - extra_settings=None, - haproxy_enabled=True): - """ Generate relation data for ha relation - - Based on configuration options and unit interfaces, generate a json - encoded dict of relation data items for the hacluster relation, - providing configuration for DNS HA or VIP's + haproxy clone sets. 
- - Example of supplying additional settings:: - - COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' - AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' - AGENT_CA_PARAMS = 'op monitor interval="5s"' - - ha_console_settings = { - 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, - 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, - 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, - 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) - generate_ha_relation_data('nova', extra_settings=ha_console_settings) - - - @param service: Name of the service being configured - @param extra_settings: Dict of additional resource data - @returns dict: json encoded data for use with relation_set - """ - _relation_data = {'resources': {}, 'resource_params': {}} - - if haproxy_enabled: - _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' - _haproxy_res = 'res_{}_haproxy'.format(service) - _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} - _relation_data['resource_params'] = { - _haproxy_res: '{} op monitor interval="5s"'.format(_meta) - } - _relation_data['init_services'] = {_haproxy_res: 'haproxy'} - _relation_data['clones'] = { - 'cl_{}_haproxy'.format(service): _haproxy_res - } - - if extra_settings: - for k, v in extra_settings.items(): - if _relation_data.get(k): - _relation_data[k].update(v) - else: - _relation_data[k] = v - - if config('dns-ha'): - update_hacluster_dns_ha(service, _relation_data) - else: - update_hacluster_vip(service, _relation_data) - - return { - 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS) - for k, v in _relation_data.items() if v - } - - -def update_hacluster_dns_ha(service, relation_data, - crm_ocf='ocf:maas:dns'): - """ Configure DNS-HA resources based on provided configuration - - @param service: Name of the service being configured - @param relation_data: Pointer to dictionary of relation data. - @param crm_ocf: Corosync Open Cluster Framework resource agent to use for - DNS HA - """ - # Validate the charm environment for DNS HA - assert_charm_supports_dns_ha() - - settings = ['os-admin-hostname', 'os-internal-hostname', - 'os-public-hostname', 'os-access-hostname'] - - # Check which DNS settings are set and update dictionaries - hostname_group = [] - for setting in settings: - hostname = config(setting) - if hostname is None: - log('DNS HA: Hostname setting {} is None. Ignoring.' - ''.format(setting), - DEBUG) - continue - m = re.search('os-(.+?)-hostname', setting) - if m: - endpoint_type = m.group(1) - # resolve_address's ADDRESS_MAP uses 'int' not 'internal' - if endpoint_type == 'internal': - endpoint_type = 'int' - else: - msg = ('Unexpected DNS hostname setting: {}. ' - 'Cannot determine endpoint_type name' - ''.format(setting)) - status_set('blocked', msg) - raise DNSHAException(msg) - - hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type) - if hostname_key in hostname_group: - log('DNS HA: Resource {}: {} already exists in ' - 'hostname group - skipping'.format(hostname_key, hostname), - DEBUG) - continue - - hostname_group.append(hostname_key) - relation_data['resources'][hostname_key] = crm_ocf - relation_data['resource_params'][hostname_key] = ( - 'params fqdn="{}" ip_address="{}"' - .format(hostname, resolve_address(endpoint_type=endpoint_type, - override=False))) - - if len(hostname_group) >= 1: - log('DNS HA: Hostname group is set with {} as members. 
' - 'Informing the ha relation'.format(' '.join(hostname_group)), - DEBUG) - relation_data['groups'] = { - DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) - } - else: - msg = 'DNS HA: Hostname group has no members.' - status_set('blocked', msg) - raise DNSHAException(msg) - - -def get_vip_settings(vip): - """Calculate which nic is on the correct network for the given vip. - - If nic or netmask discovery fail then fallback to using charm supplied - config. If fallback is used this is indicated via the fallback variable. - - @param vip: VIP to lookup nic and cidr for. - @returns (str, str, bool): eg (iface, netmask, fallback) - """ - iface = get_iface_for_address(vip) - netmask = get_netmask_for_address(vip) - fallback = False - if iface is None: - iface = config('vip_iface') - fallback = True - if netmask is None: - netmask = config('vip_cidr') - fallback = True - return iface, netmask, fallback - - -def update_hacluster_vip(service, relation_data): - """ Configure VIP resources based on provided configuration - - @param service: Name of the service being configured - @param relation_data: Pointer to dictionary of relation data. - """ - cluster_config = get_hacluster_config() - vip_group = [] - vips_to_delete = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' - - iface, netmask, fallback = get_vip_settings(vip) - - vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' - if iface is not None: - # NOTE(jamespage): Delete old VIP resources - # Old style naming encoding iface in name - # does not work well in environments where - # interface/subnet wiring is not consistent - vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vips_to_delete: - vip_key = '{}_{}'.format(vip_key, vip_params) - vips_to_delete.append(vip_key) - - vip_key = 'res_{}_{}_vip'.format( - service, - hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) - - relation_data['resources'][vip_key] = res_vip - # NOTE(jamespage): - # Use option provided vip params if these where used - # instead of auto-detected values - if fallback: - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}" {vip_monitoring}'.format( - ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask, - vip_monitoring=vip_monitoring)) - else: - # NOTE(jamespage): - # let heartbeat figure out which interface and - # netmask to configure, which works nicely - # when network interface naming is not - # consistent across units. - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" {vip_monitoring}'.format( - ip=vip_params, - vip=vip, - vip_monitoring=vip_monitoring)) - - vip_group.append(vip_key) - - if vips_to_delete: - try: - relation_data['delete_resources'].extend(vips_to_delete) - except KeyError: - relation_data['delete_resources'] = vips_to_delete - - if len(vip_group) >= 1: - key = VIP_GROUP_NAME.format(service=service) - try: - relation_data['groups'][key] = ' '.join(vip_group) - except KeyError: - relation_data['groups'] = { - key: ' '.join(vip_group) - } diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py deleted file mode 100644 index b8c94c56..00000000 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
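# --- Editor's example --------------------------------------------------------
# Standalone sketch of the VIP resource naming in update_hacluster_vip()
# above: the interface name was dropped from the resource key in favour of a
# short SHA1 of the VIP itself, so keys stay stable when NIC naming differs
# between units. The service name and VIP are illustrative.
import hashlib

def vip_key(service, vip):
    return 'res_{}_{}_vip'.format(
        service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

print(vip_key('nova', '10.5.100.1'))  # e.g. 'res_nova_<7 hex digits>_vip'
# ------------------------------------------------------------------------------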
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.core.hookenv import ( - NoNetworkBinding, - config, - unit_get, - service_name, - network_get_primary_address, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - is_address_in_network, - is_ipv6, - get_ipv6_addr, - resolve_network_cidr, -) -from charmhelpers.contrib.hahelpers.cluster import is_clustered - -PUBLIC = 'public' -INTERNAL = 'int' -ADMIN = 'admin' -ACCESS = 'access' - -# TODO: reconcile 'int' vs 'internal' binding names -ADDRESS_MAP = { - PUBLIC: { - 'binding': 'public', - 'config': 'os-public-network', - 'fallback': 'public-address', - 'override': 'os-public-hostname', - }, - INTERNAL: { - 'binding': 'internal', - 'config': 'os-internal-network', - 'fallback': 'private-address', - 'override': 'os-internal-hostname', - }, - ADMIN: { - 'binding': 'admin', - 'config': 'os-admin-network', - 'fallback': 'private-address', - 'override': 'os-admin-hostname', - }, - ACCESS: { - 'binding': 'access', - 'config': 'access-network', - 'fallback': 'private-address', - 'override': 'os-access-hostname', - }, - # Note (thedac) bridge to begin the reconciliation between 'int' vs - # 'internal' binding names - 'internal': { - 'binding': 'internal', - 'config': 'os-internal-network', - 'fallback': 'private-address', - 'override': 'os-internal-hostname', - }, -} - - -def canonical_url(configs, endpoint_type=PUBLIC): - """Returns the correct HTTP URL to this host given the state of HTTPS - configuration, hacluster and charm configuration. - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param endpoint_type: str endpoint type to resolve. - :param returns: str base URL for services on the current service unit. - """ - scheme = _get_scheme(configs) - - address = resolve_address(endpoint_type) - if is_ipv6(address): - address = "[{}]".format(address) - - return '%s://%s' % (scheme, address) - - -def _get_scheme(configs): - """Returns the scheme to use for the url (either http or https) - depending upon whether https is in the configs value. - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :returns: either 'http' or 'https' depending on whether https is - configured within the configs context. - """ - scheme = 'http' - if configs and 'https' in configs.complete_contexts(): - scheme = 'https' - return scheme - - -def _get_address_override(endpoint_type=PUBLIC): - """Returns any address overrides that the user has defined based on the - endpoint type. - - Note: this function allows for the service name to be inserted into the - address if the user specifies {service_name}.somehost.org. - - :param endpoint_type: the type of endpoint to retrieve the override - value for. - :returns: any endpoint address or hostname that the user has overridden - or None if an override is not present. 
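# --- Editor's example --------------------------------------------------------
# Standalone sketch of the URL assembly in canonical_url() above; IPv6
# addresses are wrapped in brackets before being embedded in the URL. The
# ':' test is a crude stand-in for the charmhelpers is_ipv6() helper used by
# the real code.
def to_url(scheme, address):
    if ':' in address:
        address = '[{}]'.format(address)
    return '{}://{}'.format(scheme, address)

assert to_url('https', '2001:db8::1') == 'https://[2001:db8::1]'
assert to_url('http', '10.5.0.10') == 'http://10.5.0.10'
# ------------------------------------------------------------------------------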
- """ - override_key = ADDRESS_MAP[endpoint_type]['override'] - addr_override = config(override_key) - if not addr_override: - return None - else: - return addr_override.format(service_name=service_name()) - - -def local_address(unit_get_fallback='public-address'): - """Return a network address for this unit. - - Attempt to retrieve a 'default' IP address for this unit - from network-get. If this is running with an old version of Juju then - fallback to unit_get. - - Note on juju < 2.9 the binding to juju-info may not exist, so fall back to - the unit-get. - - :param unit_get_fallback: Either 'public-address' or 'private-address'. - Only used with old versions of Juju. - :type unit_get_fallback: str - :returns: IP Address - :rtype: str - """ - try: - return network_get_primary_address('juju-info') - except (NotImplementedError, NoNetworkBinding): - return unit_get(unit_get_fallback) - - -def resolve_address(endpoint_type=PUBLIC, override=True): - """Return unit address depending on net config. - - If unit is clustered with vip(s) and has net splits defined, return vip on - correct network. If clustered with no nets defined, return primary vip. - - If not clustered, return unit address ensuring address is on configured net - split if one is configured, or a Juju 2.0 extra-binding has been used. - - :param endpoint_type: Network endpoing type - :param override: Accept hostname overrides or not - """ - resolved_address = None - if override: - resolved_address = _get_address_override(endpoint_type) - if resolved_address: - return resolved_address - - vips = config('vip') - if vips: - vips = vips.split() - - net_type = ADDRESS_MAP[endpoint_type]['config'] - net_addr = config(net_type) - net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] - binding = ADDRESS_MAP[endpoint_type]['binding'] - clustered = is_clustered() - - if clustered and vips: - if net_addr: - for vip in vips: - if is_address_in_network(net_addr, vip): - resolved_address = vip - break - else: - # NOTE: endeavour to check vips against network space - # bindings - try: - bound_cidr = resolve_network_cidr( - network_get_primary_address(binding) - ) - for vip in vips: - if is_address_in_network(bound_cidr, vip): - resolved_address = vip - break - except (NotImplementedError, NoNetworkBinding): - # If no net-splits configured and no support for extra - # bindings/network spaces so we expect a single vip - resolved_address = vips[0] - else: - if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr(exc_list=vips)[0] - else: - fallback_addr = local_address(unit_get_fallback=net_fallback) - - if net_addr: - resolved_address = get_address_in_network(net_addr, fallback_addr) - else: - # NOTE: only try to use extra bindings if legacy network - # configuration is not in use - try: - resolved_address = network_get_primary_address(binding) - except (NotImplementedError, NoNetworkBinding): - resolved_address = fallback_addr - - if resolved_address is None: - raise ValueError("Unable to resolve a suitable IP address based on " - "charm state and configuration. 
(net_type=%s, " - "clustered=%s)" % (net_type, clustered)) - - return resolved_address - - -def get_vip_in_network(network): - matching_vip = None - vips = config('vip') - if vips: - for vip in vips.split(): - if is_address_in_network(network, vip): - matching_vip = vip - return matching_vip - - -def get_default_api_bindings(): - _default_bindings = [] - for binding in [INTERNAL, ADMIN, PUBLIC]: - _default_bindings.append(ADDRESS_MAP[binding]['binding']) - return _default_bindings diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py deleted file mode 100644 index 5775aa44..00000000 --- a/hooks/charmhelpers/contrib/openstack/keystone.py +++ /dev/null @@ -1,170 +0,0 @@ -# -# Copyright 2017 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from charmhelpers.fetch import apt_install -from charmhelpers.contrib.openstack.context import IdentityServiceContext -from charmhelpers.core.hookenv import ( - log, - ERROR, -) - - -def get_api_suffix(api_version): - """Return the formatted api suffix for the given version - @param api_version: version of the keystone endpoint - @returns the api suffix formatted according to the given api - version - """ - return 'v2.0' if api_version in (2, "2", "2.0") else 'v3' - - -def format_endpoint(schema, addr, port, api_version): - """Return a formatted keystone endpoint - @param schema: http or https - @param addr: ipv4/ipv6 host of the keystone service - @param port: port of the keystone service - @param api_version: 2 or 3 - @returns a fully formatted keystone endpoint - """ - return '{}://{}:{}/{}/'.format(schema, addr, port, - get_api_suffix(api_version)) - - -def get_keystone_manager(endpoint, api_version, **kwargs): - """Return a keystonemanager for the correct API version - - @param endpoint: the keystone endpoint to point client at - @param api_version: version of the keystone api the client should use - @param kwargs: token or username/tenant/password information - @returns keystonemanager class used for interrogating keystone - """ - if api_version == 2: - return KeystoneManager2(endpoint, **kwargs) - if api_version == 3: - return KeystoneManager3(endpoint, **kwargs) - raise ValueError('No manager found for api version {}'.format(api_version)) - - -def get_keystone_manager_from_identity_service_context(): - """Return a keystonmanager generated from a - instance of charmhelpers.contrib.openstack.context.IdentityServiceContext - @returns keystonamenager instance - """ - context = IdentityServiceContext()() - if not context: - msg = "Identity service context cannot be generated" - log(msg, level=ERROR) - raise ValueError(msg) - - endpoint = format_endpoint(context['service_protocol'], - context['service_host'], - context['service_port'], - context['api_version']) - - if context['api_version'] in (2, "2.0"): - api_version = 2 - else: - api_version = 3 - - return get_keystone_manager(endpoint, api_version, - username=context['admin_user'], - password=context['admin_password'], - 
tenant_name=context['admin_tenant_name']) - - -class KeystoneManager(object): - - def resolve_service_id(self, service_name=None, service_type=None): - """Find the service_id of a given service""" - services = [s._info for s in self.api.services.list()] - - service_name = service_name.lower() - for s in services: - name = s['name'].lower() - if service_type and service_name: - if (service_name == name and service_type == s['type']): - return s['id'] - elif service_name and service_name == name: - return s['id'] - elif service_type and service_type == s['type']: - return s['id'] - return None - - def service_exists(self, service_name=None, service_type=None): - """Determine if the given service exists on the service list""" - return self.resolve_service_id(service_name, service_type) is not None - - -class KeystoneManager2(KeystoneManager): - - def __init__(self, endpoint, **kwargs): - try: - from keystoneclient.v2_0 import client - from keystoneclient.auth.identity import v2 - from keystoneclient import session - except ImportError: - apt_install(["python3-keystoneclient"], fatal=True) - - from keystoneclient.v2_0 import client - from keystoneclient.auth.identity import v2 - from keystoneclient import session - - self.api_version = 2 - - token = kwargs.get("token", None) - if token: - api = client.Client(endpoint=endpoint, token=token) - else: - auth = v2.Password(username=kwargs.get("username"), - password=kwargs.get("password"), - tenant_name=kwargs.get("tenant_name"), - auth_url=endpoint) - sess = session.Session(auth=auth) - api = client.Client(session=sess) - - self.api = api - - -class KeystoneManager3(KeystoneManager): - - def __init__(self, endpoint, **kwargs): - try: - from keystoneclient.v3 import client - from keystoneclient.auth import token_endpoint - from keystoneclient import session - from keystoneclient.auth.identity import v3 - except ImportError: - apt_install(["python3-keystoneclient"], fatal=True) - - from keystoneclient.v3 import client - from keystoneclient.auth import token_endpoint - from keystoneclient import session - from keystoneclient.auth.identity import v3 - - self.api_version = 3 - - token = kwargs.get("token", None) - if token: - auth = token_endpoint.Token(endpoint=endpoint, - token=token) - sess = session.Session(auth=auth) - else: - auth = v3.Password(auth_url=endpoint, - user_id=kwargs.get("username"), - password=kwargs.get("password"), - project_id=kwargs.get("tenant_name")) - sess = session.Session(auth=auth) - - self.api = client.Client(session=sess) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py deleted file mode 100644 index 47772467..00000000 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Various utilities for dealing with Neutron and the renaming from Quantum. 
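# --- Editor's example --------------------------------------------------------
# Standalone sketch of the kernel version comparison used by
# determine_dkms_package() below: kernels at or above 3.13 ship GRE and
# VXLAN support natively, so no DKMS datapath package is required. The
# 'uname -r' values are illustrative.
def kernel_tuple(kver):
    parts = kver.split('.')
    return (int(parts[0]), int(parts[1]))

assert kernel_tuple('3.13.0-170-generic') >= (3, 13)  # no DKMS needed
assert kernel_tuple('3.2.0-23-generic') < (3, 13)     # DKMS package needed
# ------------------------------------------------------------------------------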
- -from subprocess import check_output - -from charmhelpers.core.hookenv import ( - config, - log, - ERROR, -) - -from charmhelpers.contrib.openstack.utils import ( - os_release, - CompareOpenStackReleases, -) - - -def headers_package(): - """Ensures correct linux-headers for running kernel are installed, - for building DKMS package""" - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - return 'linux-headers-%s' % kver - - -QUANTUM_CONF_DIR = '/etc/quantum' - - -def kernel_version(): - """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - kver = kver.split('.') - return (int(kver[0]), int(kver[1])) - - -def determine_dkms_package(): - """ Determine which DKMS package should be used based on kernel version """ - # NOTE: 3.13 kernels have support for GRE and VXLAN native - if kernel_version() >= (3, 13): - return [] - else: - return [headers_package(), 'openvswitch-datapath-dkms'] - - -# legacy - - -def quantum_plugins(): - return { - 'ovs': { - 'config': '/etc/quantum/plugins/openvswitch/' - 'ovs_quantum_plugin.ini', - 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' - 'OVSQuantumPluginV2', - 'contexts': [], - 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['quantum-plugin-openvswitch-agent']], - 'server_packages': ['quantum-server', - 'quantum-plugin-openvswitch'], - 'server_services': ['quantum-server'] - }, - 'nvp': { - 'config': '/etc/quantum/plugins/nicira/nvp.ini', - 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' - 'QuantumPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['quantum-server', - 'quantum-plugin-nicira'], - 'server_services': ['quantum-server'] - } - } - - -NEUTRON_CONF_DIR = '/etc/neutron' - - -def neutron_plugins(): - release = os_release('nova-common') - plugins = { - 'ovs': { - 'config': '/etc/neutron/plugins/openvswitch/' - 'ovs_neutron_plugin.ini', - 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' - 'OVSNeutronPluginV2', - 'contexts': [], - 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [determine_dkms_package(), - ['neutron-plugin-openvswitch-agent']], - 'server_packages': ['neutron-server', - 'neutron-plugin-openvswitch'], - 'server_services': ['neutron-server'] - }, - 'nvp': { - 'config': '/etc/neutron/plugins/nicira/nvp.ini', - 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
- 'NeutronPlugin.NvpPluginV2', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-nicira'], - 'server_services': ['neutron-server'] - }, - 'nsx': { - 'config': '/etc/neutron/plugins/vmware/nsx.ini', - 'driver': 'vmware', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-vmware'], - 'server_services': ['neutron-server'] - }, - 'n1kv': { - 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', - 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package(), - ['neutron-plugin-cisco']], - 'server_packages': ['neutron-server', - 'neutron-plugin-cisco'], - 'server_services': ['neutron-server'] - }, - 'Calico': { - 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [], - 'services': ['calico-felix', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd'], - 'packages': [determine_dkms_package(), - ['calico-compute', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata', - 'etcd']], - 'server_packages': ['neutron-server', 'calico-control', 'etcd'], - 'server_services': ['neutron-server', 'etcd'] - }, - 'vsp': { - 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', - 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], - 'server_services': ['neutron-server'] - }, - 'plumgrid': { - 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' - '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [], - 'services': [], - 'packages': ['plumgrid-lxc', - 'iovisor-dkms'], - 'server_packages': ['neutron-server', - 'neutron-plugin-plumgrid'], - 'server_services': ['neutron-server'] - }, - 'midonet': { - 'config': '/etc/neutron/plugins/midonet/midonet.ini', - 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [], - 'services': [], - 'packages': [determine_dkms_package()], - 'server_packages': ['neutron-server', - 'python-neutron-plugin-midonet'], - 'server_services': ['neutron-server'] - } - } - if CompareOpenStackReleases(release) >= 'icehouse': - # NOTE: patch in ml2 plugin for icehouse onwards - plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['ovs']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - # NOTE: patch in vmware renames nvp->nsx for icehouse onwards - plugins['nvp'] = plugins['nsx'] - if CompareOpenStackReleases(release) >= 'kilo': - plugins['midonet']['driver'] = ( - 'neutron.plugins.midonet.plugin.MidonetPluginV2') - if CompareOpenStackReleases(release) >= 'liberty': - plugins['midonet']['driver'] = ( - 'midonet.neutron.plugin_v1.MidonetPluginV2') - plugins['midonet']['server_packages'].remove( - 'python-neutron-plugin-midonet') - plugins['midonet']['server_packages'].append( - 'python-networking-midonet') - plugins['plumgrid']['driver'] = ( - 'networking_plumgrid.neutron.plugins' - '.plugin.NeutronPluginPLUMgridV2') - plugins['plumgrid']['server_packages'].remove( - 'neutron-plugin-plumgrid') - if CompareOpenStackReleases(release) >= 'mitaka': - plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') - plugins['nsx']['server_packages'].append('python-vmware-nsx') - plugins['nsx']['config'] = 
'/etc/neutron/nsx.ini' - plugins['vsp']['driver'] = ( - 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') - if CompareOpenStackReleases(release) >= 'newton': - plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['vsp']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - return plugins - - -def neutron_plugin_attribute(plugin, attr, net_manager=None): - manager = net_manager or network_manager() - if manager == 'quantum': - plugins = quantum_plugins() - elif manager == 'neutron': - plugins = neutron_plugins() - else: - log("Network manager '%s' does not support plugins." % (manager), - level=ERROR) - raise Exception - - try: - _plugin = plugins[plugin] - except KeyError: - log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise Exception - - try: - return _plugin[attr] - except KeyError: - return None - - -def network_manager(): - ''' - Deals with the renaming of Quantum to Neutron in H and any situations - that require compatibility (eg, deploying H with network-manager=quantum, - upgrading from G). - ''' - release = os_release('nova-common') - manager = config('network-manager').lower() - - if manager not in ['quantum', 'neutron']: - return manager - - if release in ['essex']: - # E does not support neutron - log('Neutron networking not supported in Essex.', level=ERROR) - raise Exception - elif release in ['folsom', 'grizzly']: - # neutron is named quantum in F and G - return 'quantum' - else: - # ensure accurate naming for all releases post-H - return 'neutron' - - -def parse_mappings(mappings, key_rvalue=False): - """By default mappings are lvalue keyed. - - If key_rvalue is True, the mapping will be reversed to allow multiple - configs for the same lvalue. - """ - parsed = {} - if mappings: - mappings = mappings.split() - for m in mappings: - p = m.partition(':') - - if key_rvalue: - key_index = 2 - val_index = 0 - # if there is no rvalue skip to next - if not p[1]: - continue - else: - key_index = 0 - val_index = 2 - - key = p[key_index].strip() - parsed[key] = p[val_index].strip() - - return parsed - - -def parse_bridge_mappings(mappings): - """Parse bridge mappings. - - Mappings must be a space-delimited list of provider:bridge mappings. - - Returns dict of the form {provider:bridge}. - """ - return parse_mappings(mappings) - - -def parse_data_port_mappings(mappings, default_bridge='br-data'): - """Parse data port mappings. - - Mappings must be a space-delimited list of bridge:port. - - Returns dict of the form {port:bridge} where ports may be mac addresses or - interface names. - """ - - # NOTE(dosaboy): we use rvalue for key to allow multiple values to be - # proposed for <key> since it may be a mac address, which will differ - # across units, thus allowing first-known-good to be chosen. - _mappings = parse_mappings(mappings, key_rvalue=True) - if not _mappings or list(_mappings.values()) == ['']: - if not mappings: - return {} - - # For backwards-compatibility we need to support port-only provided in - # config. - _mappings = {mappings.split()[0]: default_bridge} - - ports = _mappings.keys() - if len(set(ports)) != len(ports): - raise Exception("It is not allowed to have the same port configured " - "on more than one bridge") - - return _mappings - - -def parse_vlan_range_mappings(mappings): - """Parse vlan range mappings. - - Mappings must be a space-delimited list of provider:start:end mappings. - - The start:end range is optional and may be omitted. 
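For example, 'physnet1:1000:2000' is parsed as {'physnet1': ('1000', '2000')}, while a bare 'physnet1' yields {'physnet1': ('',)}.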
- - Returns dict of the form {provider: (start, end)}. - """ - _mappings = parse_mappings(mappings) - return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/hooks/charmhelpers/contrib/openstack/policy_rcd.py b/hooks/charmhelpers/contrib/openstack/policy_rcd.py deleted file mode 100644 index ecffbc68..00000000 --- a/hooks/charmhelpers/contrib/openstack/policy_rcd.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Module for managing policy-rc.d script and associated files. - -This module manages the installation of /usr/sbin/policy-rc.d, the -policy files and the event files. When a package update occurs the -packaging system calls: - -policy-rc.d [options] <initscript ID> <actions> - -The return code of the script determines if the packaging system -will perform that action on the given service. The policy-rc.d -implementation installed by this module checks if an action is -permitted by checking policy files placed in /etc/policy-rc.d. -If a policy file exists which denies the requested action then -this is recorded in an event file which is placed in -/var/lib/policy-rc.d. -""" - -import os -import shutil -import tempfile -import yaml - -import charmhelpers.contrib.openstack.files as os_files -import charmhelpers.contrib.openstack.alternatives as alternatives -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as host - -POLICY_HEADER = """# Managed by juju\n""" -POLICY_DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' -POLICY_CONFIG_DIR = '/etc/policy-rc.d' - - -def get_policy_file_name(): - """Get the name of the policy file for this application. - - :returns: Policy file name - :rtype: str - """ - application_name = hookenv.service_name() - return '{}/charm-{}.policy'.format(POLICY_CONFIG_DIR, application_name) - - -def read_default_policy_file(): - """Return the policy file. - - A policy is in the form: - blocked_actions: - neutron-dhcp-agent: [restart, stop, try-restart] - neutron-l3-agent: [restart, stop, try-restart] - neutron-metadata-agent: [restart, stop, try-restart] - neutron-openvswitch-agent: [restart, stop, try-restart] - openvswitch-switch: [restart, stop, try-restart] - ovs-vswitchd: [restart, stop, try-restart] - ovs-vswitchd-dpdk: [restart, stop, try-restart] - ovsdb-server: [restart, stop, try-restart] - policy_requestor_name: neutron-openvswitch - policy_requestor_type: charm - - :returns: Policy - :rtype: Dict[str, Union[str, Dict[str, List[str]]]] - """ - policy = {} - policy_file = get_policy_file_name() - if os.path.exists(policy_file): - with open(policy_file, 'r') as f: - policy = yaml.safe_load(f) - return policy - - -def write_policy_file(policy_file, policy): - """Write policy to disk. 
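The policy document is dumped to a temporary file which is then moved into place over the target path.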
- - :param policy_file: Name of policy file - :type policy_file: str - :param policy: Policy - :type policy: Dict[str, Union[str, Dict[str, List[str]]]] - """ - with tempfile.NamedTemporaryFile('w', delete=False) as f: - f.write(POLICY_HEADER) - yaml.dump(policy, f) - tmp_file_name = f.name - shutil.move(tmp_file_name, policy_file) - - -def remove_policy_file(): - """Remove policy file.""" - try: - os.remove(get_policy_file_name()) - except FileNotFoundError: - pass - - -def install_policy_rcd(): - """Install policy-rc.d components.""" - source_file_dir = os.path.dirname(os.path.abspath(os_files.__file__)) - policy_rcd_exec = "/var/lib/charm/{}/policy-rc.d".format( - hookenv.service_name()) - host.mkdir(os.path.dirname(policy_rcd_exec)) - shutil.copy2( - '{}/policy_rc_d_script.py'.format(source_file_dir), - policy_rcd_exec) - # policy-rc.d must be installed via the alternatives system: - # https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt - if not os.path.exists('/usr/sbin/policy-rc.d'): - alternatives.install_alternative( - 'policy-rc.d', - '/usr/sbin/policy-rc.d', - policy_rcd_exec) - host.mkdir(POLICY_CONFIG_DIR) - - -def get_default_policy(): - """Return the default policy structure. - - :returns: Policy - :rtype: Dict[str, Union[str, Dict[str, List[str]]] - """ - policy = { - 'policy_requestor_name': hookenv.service_name(), - 'policy_requestor_type': 'charm', - 'blocked_actions': {}} - return policy - - -def add_policy_block(service, blocked_actions): - """Update a policy file with new list of actions. - - :param service: Service name - :type service: str - :param blocked_actions: Action to block - :type blocked_actions: List[str] - """ - policy = read_default_policy_file() or get_default_policy() - policy_file = get_policy_file_name() - if policy['blocked_actions'].get(service): - policy['blocked_actions'][service].extend(blocked_actions) - else: - policy['blocked_actions'][service] = blocked_actions - policy['blocked_actions'][service] = sorted( - list(set(policy['blocked_actions'][service]))) - write_policy_file(policy_file, policy) - - -def remove_policy_block(service, unblocked_actions): - """Remove list of actions from policy file. - - :param service: Service name - :type service: str - :param unblocked_actions: Action to unblock - :type unblocked_actions: List[str] - """ - policy_file = get_policy_file_name() - policy = read_default_policy_file() - for action in unblocked_actions: - try: - policy['blocked_actions'][service].remove(action) - except (KeyError, ValueError): - continue - write_policy_file(policy_file, policy) diff --git a/hooks/charmhelpers/contrib/openstack/policyd.py b/hooks/charmhelpers/contrib/openstack/policyd.py deleted file mode 100644 index 767943c2..00000000 --- a/hooks/charmhelpers/contrib/openstack/policyd.py +++ /dev/null @@ -1,763 +0,0 @@ -# Copyright 2019-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
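Taken together, the policy-rc.d helpers removed above were normally wired into a charm along these lines (a sketch, assuming a charm hook context; the service name and actions are illustrative)::

    from charmhelpers.contrib.openstack.policy_rcd import (
        add_policy_block,
        install_policy_rcd,
        remove_policy_block,
    )

    # Install /usr/sbin/policy-rc.d via the alternatives system
    # (typically once, from the install hook).
    install_policy_rcd()

    # Before a package upgrade: deny disruptive actions for the managed
    # service so they can be deferred.
    add_policy_block('neutron-l3-agent', ['restart', 'stop', 'try-restart'])

    # Once the charm has dealt with the upgrade: permit them again.
    remove_policy_block('neutron-l3-agent',
                        ['restart', 'stop', 'try-restart'])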
- -import collections -import contextlib -import os -import shutil -import yaml -import zipfile - -import charmhelpers -import charmhelpers.core.hookenv as hookenv -import charmhelpers.core.host as ch_host - -# Features provided by this module: - -""" -Policy.d helper functions -========================= - -The functions in this module are designed, as a set, to provide an easy-to-use -set of hooks for classic charms to add in /etc/<service-name>/policy.d/ -directory override YAML files. - -(For charms.openstack charms, a mixin class is provided for this -functionality). - -In order to "hook" this functionality into a (classic) charm, two functions are -provided: - - maybe_do_policyd_overrides(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None) - - maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None) - -(See the docstrings for details on the parameters) - -The functions should be called from the install and upgrade hooks in the charm. -The `maybe_do_policyd_overrides_on_config_changed` function is designed to be -called on the config-changed hook, in that it does an additional check to -ensure that a policy.d override already performed in an install or upgrade hook -isn't repeated. - -In order to *enable* this functionality, the charm's install, config_changed, -and upgrade_charm hooks need to be modified, and a new config option (see -below) needs to be added. The README for the charm should also be updated. - -Examples from the keystone charm are: - -@hooks.hook('install.real') -@harden() -def install(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides(os_release('keystone'), 'keystone') - - -@hooks.hook('config-changed') -@restart_on_change(restart_map(), restart_functions=restart_function_map()) -@harden() -def config_changed(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides_on_config_changed(os_release('keystone'), - 'keystone') - -@hooks.hook('upgrade-charm') -@restart_on_change(restart_map(), stopstart=True) -@harden() -def upgrade_charm(): - ... - # call the policy overrides handler which will install any policy overrides - maybe_do_policyd_overrides(os_release('keystone'), 'keystone') - -Status Line -=========== - -The workload status code in charm-helpers has been modified to detect if -policy.d override code has been incorporated into the charm by checking for the -new config variable (in the config.yaml). If it has been, then the workload -status line will automatically show "PO:" at the beginning of the workload -status for that unit/service if the config option is set. If the policy -override is broken, then "PO (broken):" will be shown. No changes to the charm -(apart from those already mentioned) are needed to enable this functionality. -(charms.openstack charms also get this functionality, but please see that -library for further details). -""" - -# The config.yaml for the charm should contain the following for the config -# option: - -""" - use-policyd-override: - type: boolean - default: False - description: | - If True then use the resource file named 'policyd-override' to install - override YAML files in the service's policy.d directory. The resource - file should be a ZIP file containing at least one yaml file with a .yaml - or .yml extension. 
If False then remove the overrides. -""" - -# The metadata.yaml for the charm should contain the following: -""" -resources: - policyd-override: - type: file - filename: policyd-override.zip - description: The policy.d overrides file -""" - -# The README for the charm should contain the following: -""" -Policy Overrides ----------------- - -This feature allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the OpenStack service supports -should be clearly and unambiguously understood before trying to override, or -add to, the default policies that the service uses. The charm also has some -policy defaults. They should also be understood before being overridden. - -> **Caution**: It is possible to break the system (for tenants and other - services) if policies are incorrectly applied to the service. - -Policy overrides are YAML files that contain rules that will add to, or -override, existing policy rules in the service. The `policy.d` directory is -a place to put the YAML override files. This charm owns the -`/etc/keystone/policy.d` directory, and as such, any manual changes to it will -be overwritten on charm upgrades. - -Overrides are provided to the charm using a Juju resource called -`policyd-override`. The resource is a ZIP file. This file, say -`overrides.zip`, is attached to the charm by: - - - juju attach-resource policyd-override=overrides.zip - -The policy override is enabled in the charm using: - - juju config use-policyd-override=true - -When `use-policyd-override` is `True` the status line of the charm will be -prefixed with `PO:` indicating that policies have been overridden. If the -installation of the policy override YAML files failed for any reason then the -status line will be prefixed with `PO (broken):`. The log file for the charm -will indicate the reason. No policy override files are installed if the `PO -(broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed. The policy will be the defaults -for the charm and service. - -Policy overrides on one service may affect the functionality of another -service. Therefore, it may be necessary to provide policy overrides for -multiple service charms to achieve a consistent set of policies across the -OpenStack system. The charms for the other services that may need overrides -should be checked to ensure that they support overrides before proceeding. -""" - -POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] -POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] -POLICYD_RESOURCE_NAME = "policyd-override" -POLICYD_CONFIG_NAME = "use-policyd-override" -POLICYD_SUCCESS_FILENAME = "policyd-override-success" -POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO -POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") - - -class BadPolicyZipFile(Exception): - - def __init__(self, log_message): - self.log_message = log_message - - def __str__(self): - return self.log_message - - -class BadPolicyYamlFile(Exception): - - def __init__(self, log_message): - self.log_message = log_message - - def __str__(self): - return self.log_message - - -def is_policyd_override_valid_on_this_release(openstack_release): - """Check that the charm is running on at least Ubuntu Xenial, and at - least the queens release. - - :param openstack_release: the release codename that is installed. - :type openstack_release: str - :returns: True if okay - :rtype: bool - """ - # NOTE(ajkavanagh) circular import! 
This is because the status message - # generation code in utils has to call into this module, but this function - # needs the CompareOpenStackReleases() function. The only way to solve - # this is either to put ALL of this module into utils, or refactor one or - # other of the CompareOpenStackReleases or status message generation code - # into a 3rd module. - import charmhelpers.contrib.openstack.utils as ch_utils - return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' - - -def maybe_do_policyd_overrides(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None, - user=None, - group=None, - config_changed=False): - """If the config option is set, get the resource file and process it to - enable the policy.d overrides for the service passed. - - The param `openstack_release` is required as the policyd overrides feature - is only supported on openstack_release "queens" or later, and on ubuntu - "xenial" or later. Prior to these versions, this feature is a NOP. - - The optional template_function is a function that accepts a string and has - an opportunity to modify the loaded file prior to it being read by - yaml.safe_load(). This allows the charm to perform "templating" using - charm derived data. - - The param blacklist_paths are paths (that are in the service's policy.d - directory that should not be touched). - - The param blacklist_keys are keys that must not appear in the yaml file. - If they do, then the whole policy.d file fails. - - The yaml file extracted from the resource_file (which is a zipped file) has - its file path reconstructed. This, also, must not match any path in the - black list. - - The param restart_handler is an optional Callable that is called to perform - the service restart if the policy.d file is changed. This should normally - be None as oslo.policy automatically picks up changes in the policy.d - directory. However, for any services where this is buggy then a - restart_handler can be used to force the policy.d files to be read. - - If the config_changed param is True, then the handling is slightly - different: It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. - - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. - :type restart_handler: Union[None, Callable[]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - :param config_changed: Set to True for config_changed hook. 
- :type config_changed: bool - """ - _user = service if user is None else user - _group = service if group is None else group - if not is_policyd_override_valid_on_this_release(openstack_release): - return - hookenv.log("Running maybe_do_policyd_overrides", - level=POLICYD_LOG_LEVEL_DEFAULT) - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - if (os.path.isfile(_policy_success_file()) and - restart_handler is not None and - callable(restart_handler)): - restart_handler() - remove_policy_success_file() - return - except Exception as e: - hookenv.log("... ERROR: Exception is: {}".format(str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - import traceback - hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) - return - # if the policyd overrides have been performed when doing config_changed - # just return - if config_changed and is_policy_success_file_set(): - hookenv.log("... already setup, so skipping.", - level=POLICYD_LOG_LEVEL_DEFAULT) - return - # from now on it should succeed; if it doesn't then status line will show - # broken. - resource_filename = get_policy_resource_filename() - restart = process_policy_resource_file( - resource_filename, service, blacklist_paths, blacklist_keys, - template_function) - if restart and restart_handler is not None and callable(restart_handler): - restart_handler() - - -@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead") -def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): - """This function is designed to be called from the config changed hook. - - DEPRECATED: please use maybe_do_policyd_overrides() with the param - `config_changed` as `True`. - - See maybe_do_policyd_overrides() for more details on the params. - """ - if 'config_changed' not in kwargs.keys(): - kwargs['config_changed'] = True - return maybe_do_policyd_overrides(*args, **kwargs) - - -def get_policy_resource_filename(): - """Function to extract the policy resource filename - - :returns: The filename of the resource, if set, otherwise, if an error - occurs, then None is returned. - :rtype: Union[str, None] - """ - try: - return hookenv.resource_get(POLICYD_RESOURCE_NAME) - except Exception: - return None - - -@contextlib.contextmanager -def open_and_filter_yaml_files(filepath, has_subdirs=False): - """Validate that the filepath provided is a zip file and contains at least - one (.yaml|.yml) file, and that the files are not duplicated when the zip - file is flattened. Note that the yaml files are not checked. This is the - first stage in validating the policy zipfile; individual yaml files are not - checked for validity or black listed keys. - - If the has_subdirs param is True, then the files are flattened to the first - directory, and the files in the root are ignored. - - An example of use is: - - with open_and_filter_yaml_files(some_path) as (zfp, g): - for zipinfo in g: - # do something with zipinfo ... - - :param filepath: a filepath object that can be opened by zipfile - :type filepath: Union[AnyStr, os.PathLike[AnyStr]] - :param has_subdirs: Keep first level of subdirectories in yaml file. - :type has_subdirs: bool - :returns: (zfp handle, - a generator of the (name, filename, ZipInfo object) tuples) as a - tuple. 
- :rtype: ContextManager[(zipfile.ZipFile, - Generator[(name, str, str, zipfile.ZipInfo)])] - :raises: zipfile.BadZipFile - :raises: BadPolicyZipFile if duplicated yaml or missing - :raises: IOError if the filepath is not found - """ - with zipfile.ZipFile(filepath, 'r') as zfp: - # first pass through; check for duplicates and at least one yaml file. - names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp, has_subdirs) - for name, _, _, _ in yamlfiles: - names[name] += 1 - # There must be at least 1 yaml file. - if len(names.keys()) == 0: - raise BadPolicyZipFile("contains no yaml files with {} extensions." - .format(", ".join(POLICYD_VALID_EXTS))) - # There must be no duplicates - duplicates = [n for n, c in names.items() if c > 1] - if duplicates: - raise BadPolicyZipFile("{} have duplicates in the zip file." - .format(", ".join(duplicates))) - # Finally, let's yield the generator - yield (zfp, yamlfiles) - - -def _yamlfiles(zipfile, has_subdirs=False): - """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) - and the infolist item from a zipfile. - - If the `has_subdirs` param is True, then only yaml files that have a - directory component are read, and the first part of the directory - component is kept, along with the filename in the name. e.g. an entry with - a filename of: - - compute/someotherdir/override.yaml - - is returned as: - - compute/override, yaml, override.yaml, - - This is to help with the special, additional, processing that the dashboard - charm requires. - - :param zipfile: the zipfile to read zipinfo items from - :type zipfile: zipfile.ZipFile - :param has_subdirs: Keep first level of subdirectories in yaml file. - :type has_subdirs: bool - :returns: generator of (name, ext, filename, info item) for each - self-identified yaml file. - :rtype: List[(str, str, str, zipfile.ZipInfo)] - """ - files = [] - for infolist_item in zipfile.infolist(): - try: - if infolist_item.is_dir(): - continue - except AttributeError: - # fallback to "old" way to determine dir entry for pre-py36 - if infolist_item.filename.endswith('/'): - continue - _dir, name_ext = os.path.split(infolist_item.filename) - name, ext = os.path.splitext(name_ext) - if has_subdirs and _dir != "": - name = os.path.join(_dir.split(os.path.sep)[0], name) - ext = ext.lower() - if ext and ext in POLICYD_VALID_EXTS: - files.append((name, ext, name_ext, infolist_item)) - return files - - -def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): - """Read, validate and return the (first) yaml document from the stream. - - The doc is read, and checked for a yaml file. Then the top-level keys are - checked against the blacklist_keys provided. If there are problems then an - Exception is raised. Otherwise the yaml document is returned as a Python - object that can be dumped back as a yaml file on the system. - - The yaml file must only consist of a str:str mapping, and if not then the - yaml file is rejected. - - :param stream_or_doc: the file object to read the yaml from - :type stream_or_doc: Union[AnyStr, IO[AnyStr]] - :param blacklist_keys: Any keys, which if in the yaml file, should cause - an error. - :type blacklist_keys: Union[None, List[str]] - :returns: the yaml file as a python document - :rtype: Dict[str, str] - :raises: yaml.YAMLError if there is a problem with the document - :raises: BadPolicyYamlFile if file doesn't look right or there are - blacklisted keys in the file. 
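For example (illustrative), a minimal valid override document::

        read_and_validate_yaml('"compute:get": "rule:admin_or_owner"')
        # -> {'compute:get': 'rule:admin_or_owner'}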
- """ - blacklist_keys = blacklist_keys or [] - blacklist_keys.append(POLICYD_ALWAYS_BLACKLISTED_KEYS) - doc = yaml.safe_load(stream_or_doc) - if not isinstance(doc, dict): - raise BadPolicyYamlFile("doesn't look like a policy file?") - keys = set(doc.keys()) - blacklisted_keys_present = keys.intersection(blacklist_keys) - if blacklisted_keys_present: - raise BadPolicyYamlFile("blacklisted keys {} present." - .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, str) for k in keys): - raise BadPolicyYamlFile("keys in yaml aren't all strings?") - # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, str) for v in doc.values()): - raise BadPolicyYamlFile("values in yaml aren't all strings?") - return doc - - -def policyd_dir_for(service): - """Return the policy directory for the named service. - - :param service: str - :returns: the policy.d override directory. - :rtype: os.PathLike[str] - """ - return os.path.join("/", "etc", service, "policy.d") - - -def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): - """Clean out the policyd directory except for items that should be kept. - - The keep_paths, if used, should be set to the full path of the files that - should be kept in the policyd directory for the service. Note that the - service name is passed in, and then the policyd_dir_for() function is used. - This is so that a coding error doesn't result in a sudden deletion of the - charm (say). - - :param service: the service name to use to construct the policy.d dir. - :type service: str - :param keep_paths: optional list of paths to not delete. - :type keep_paths: Union[None, List[str]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - """ - _user = service if user is None else user - _group = service if group is None else group - keep_paths = keep_paths or [] - path = policyd_dir_for(service) - hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) - if not os.path.exists(path): - ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - for direntry in os.scandir(path): - # see if the path should be kept. - if direntry.path in keep_paths: - continue - # we remove any directories; it's ours and there shouldn't be any - if direntry.is_dir(): - shutil.rmtree(direntry.path) - else: - os.remove(direntry.path) - - -def maybe_create_directory_for(path, user, group): - """For the filename 'path', ensure that the directory for that path exists. - - Note that if the directory already exists then the permissions are NOT - changed. - - :param path: the filename including the path to it. - :type path: str - :param user: the user to create the directory as - :param group: the group to create the directory as - """ - _dir, _ = os.path.split(path) - if not os.path.exists(_dir): - ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) - - -def path_for_policy_file(service, name): - """Return the full path for a policy.d file that will be written to the - service's policy.d directory. - - It is constructed using policyd_dir_for(), the name and the ".yaml" - extension. - - For horizon, for example, it's a bit more complicated. The name param is - actually "override_service_dir/a_name", where target_service needs to be - one the allowed horizon override services. This translation and check is - done in the _yamlfiles() function. 
- - :param service: the service name - :type service: str - :param name: the name for the policy override - :type name: str - :returns: the full path name for the file - :rtype: os.PathLike[str] - """ - return os.path.join(policyd_dir_for(service), name + ".yaml") - - -def _policy_success_file(): - """Return the file name for a successful drop of policy.d overrides - - :returns: the path name for the file. - :rtype: str - """ - return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) - - -def remove_policy_success_file(): - """Remove the file that indicates successful policyd override.""" - try: - os.remove(_policy_success_file()) - except Exception: - pass - - -def set_policy_success_file(): - """Set the file that indicates successful policyd override.""" - open(_policy_success_file(), "w").close() - - -def is_policy_success_file_set(): - """Returns True if the policy success file has been set. - - This indicates that policies are overridden and working properly. - - :returns: True if the policy file is set - :rtype: bool - """ - return os.path.isfile(_policy_success_file()) - - -def policyd_status_message_prefix(): - """Return the prefix str for the status line. - - "PO:" indicating that the policy overrides are in place, or "PO (broken):" - if the policy is supposed to be working but there is no success file. - - :returns: the prefix - :rtype: str - """ - if is_policy_success_file_set(): - return "PO:" - return "PO (broken):" - - -def process_policy_resource_file(resource_file, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - preserve_topdir=False, - preprocess_filename=None, - user=None, - group=None): - """Process the resource file (which should contain at least one yaml file) - and write those files to the service's policy.d directory. - - The optional template_function is a function that accepts a python - string and has an opportunity to modify the document - prior to it being read by the yaml.safe_load() function and written to - disk. Note that this function does *not* say how the templating is done - - this is up to the charm to implement its chosen method. - - The param blacklist_paths are paths (that are in the service's policy.d - directory that should not be touched). - - The param blacklist_keys are keys that must not appear in the yaml file. - If they do, then the whole policy.d file fails. - - The yaml file extracted from the resource_file (which is a zipped file) has - its file path reconstructed. This, also, must not match any path in the - black list. - - The yaml filename can be modified in two ways. If the `preserve_topdir` - param is True, then files will be flattened to the top dir. This allows - for creating sets of files that can be grouped into a single level tree - structure. - - Secondly, if the `preprocess_filename` param is not None and callable() - then the name is passed to that function for preprocessing before being - converted to the end location. This is to allow munging of the filename - prior to being tested for a blacklist path. - - If any error occurs, then the policy.d directory is cleared, the error is - written to the log, and the status line will eventually show as failed. - - :param resource_file: The zipped file to open and extract yaml files form. - :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] - :param service: the service name to construct the policy.d directory for. 
- :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the yaml - document. - :type template_function: Union[None, Callable[[AnyStr], AnyStr]] - :param preserve_topdir: Keep the toplevel subdir - :type preserve_topdir: bool - :param preprocess_filename: Optional function to use to process filenames - extracted from the resource file. - :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] - :param user: The user to create/write files/directories as - :type user: Union[None, str] - :param group: the group to create/write files/directories as - :type group: Union[None, str] - :returns: True if the processing was successful, False if not. - :rtype: boolean - """ - hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) - blacklist_paths = blacklist_paths or [] - completed = False - _preprocess = None - if preprocess_filename is not None and callable(preprocess_filename): - _preprocess = preprocess_filename - _user = service if user is None else user - _group = service if group is None else group - try: - with open_and_filter_yaml_files( - resource_file, preserve_topdir) as (zfp, gen): - # first clear out the policy.d directory and clear success - remove_policy_success_file() - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - for name, ext, filename, zipinfo in gen: - # See if the name should be preprocessed. - if _preprocess is not None: - name = _preprocess(name) - # construct a name for the output file. - yaml_filename = path_for_policy_file(service, name) - if yaml_filename in blacklist_paths: - raise BadPolicyZipFile("policy.d name {} is blacklisted" - .format(yaml_filename)) - with zfp.open(zipinfo) as fp: - doc = fp.read() - # if template_function is not None, then offer the document - # to the template function - if ext in POLICYD_TEMPLATE_EXTS: - if (template_function is None or not - callable(template_function)): - raise BadPolicyZipFile( - "Template {} but no template_function is " - "available".format(filename)) - doc = template_function(doc) - yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - # we may have to create the directory - maybe_create_directory_for(yaml_filename, _user, _group) - ch_host.write_file(yaml_filename, - yaml.dump(yaml_doc).encode('utf-8'), - _user, - _group) - # Every thing worked, so we mark up a success. - completed = True - except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: - hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - except IOError as e: - # technically this shouldn't happen; it would be a programming error as - # the filename comes from Juju and thus, should exist. - hookenv.log( - "File {} failed with IOError. 
This really shouldn't happen" - " -- error: {}".format(resource_file, str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - except Exception as e: - import traceback - hookenv.log("General Exception({}) during policyd processing" - .format(str(e)), - level=POLICYD_LOG_LEVEL_DEFAULT) - hookenv.log(traceback.format_exc()) - finally: - if not completed: - hookenv.log("Processing {} failed: cleaning policy.d directory" - .format(resource_file), - level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, - blacklist_paths, - user=_user, - group=_group) - else: - # touch the success filename - hookenv.log("policy.d overrides installed.", - level=POLICYD_LOG_LEVEL_DEFAULT) - set_policy_success_file() - return completed diff --git a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py deleted file mode 100644 index 96b9f71d..00000000 --- a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright 2018 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess - -from charmhelpers.core.hookenv import ( - ERROR, - log, - relation_get, -) -from charmhelpers.contrib.network.ip import ( - is_ipv6, - ns_query, -) -from charmhelpers.contrib.openstack.utils import ( - get_hostname, - get_host_ip, - is_ip, -) - -NOVA_SSH_DIR = '/etc/nova/compute_ssh/' - - -def ssh_directory_for_unit(application_name, user=None): - """Return the directory used to store ssh assets for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified directory path. - :rtype: str - """ - if user: - application_name = "{}_{}".format(application_name, user) - _dir = os.path.join(NOVA_SSH_DIR, application_name) - for d in [NOVA_SSH_DIR, _dir]: - if not os.path.isdir(d): - os.mkdir(d) - for f in ['authorized_keys', 'known_hosts']: - f = os.path.join(_dir, f) - if not os.path.isfile(f): - open(f, 'w').close() - return _dir - - -def known_hosts(application_name, user=None): - """Return the known hosts file for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified path to file. - :rtype: str - """ - return os.path.join( - ssh_directory_for_unit(application_name, user), - 'known_hosts') - - -def authorized_keys(application_name, user=None): - """Return the authorized keys file for the application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Fully qualified path to file. 
- :rtype: str - """ - return os.path.join( - ssh_directory_for_unit(application_name, user), - 'authorized_keys') - - -def ssh_known_host_key(host, application_name, user=None): - """Return the first entry in known_hosts for host. - - :param host: hostname to lookup in file. - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Host key - :rtype: str or None - """ - cmd = [ - 'ssh-keygen', - '-f', known_hosts(application_name, user), - '-H', - '-F', - host] - try: - # The first line of output is like '# Host xx found: line 1 type RSA', - # which should be excluded. - output = subprocess.check_output(cmd) - except subprocess.CalledProcessError as e: - # RC of 1 seems to be legitimate for most ssh-keygen -F calls. - if e.returncode == 1: - output = e.output - else: - raise - output = output.strip() - - if output: - # Bug #1500589 cmd has 0 rc on precise if entry not present - lines = output.split('\n') - if len(lines) >= 1: - return lines[0] - - return None - - -def remove_known_host(host, application_name, user=None): - """Remove the entry in known_hosts for host. - - :param host: hostname to lookup in file. - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - log('Removing SSH known host entry for compute host at %s' % host) - cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host] - subprocess.check_call(cmd) - - -def is_same_key(key_1, key_2): - """Extract the key from two host entries and compare them. - - :param key_1: Host key - :type key_1: str - :param key_2: Host key - :type key_2: str - """ - # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' - # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare - # the part start with 'ssh-rsa' followed with '= ', because the hash - # value in the beginning will change each time. - k_1 = key_1.split('= ')[1] - k_2 = key_2.split('= ')[1] - return k_1 == k_2 - - -def add_known_host(host, application_name, user=None): - """Add the given host key to the known hosts file. - - :param host: host name - :type host: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] - try: - remote_key = subprocess.check_output(cmd).strip() - except Exception as e: - log('Could not obtain SSH host key from %s' % host, level=ERROR) - raise e - - current_key = ssh_known_host_key(host, application_name, user) - if current_key and remote_key: - if is_same_key(remote_key, current_key): - log('Known host key for compute host %s up to date.' % host) - return - else: - remove_known_host(host, application_name, user) - - log('Adding SSH host key to known hosts for compute node at %s.' % host) - with open(known_hosts(application_name, user), 'a') as out: - out.write("{}\n".format(remote_key)) - - -def ssh_authorized_key_exists(public_key, application_name, user=None): - """Check if given key is in the authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. 
- :type user: str - :returns: Whether given key is in the authorized_key file. - :rtype: boolean - """ - with open(authorized_keys(application_name, user)) as keys: - return ('%s' % public_key) in keys.read() - - -def add_authorized_key(public_key, application_name, user=None): - """Add given key to the authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - with open(authorized_keys(application_name, user), 'a') as keys: - keys.write("{}\n".format(public_key)) - - -def ssh_compute_add_host_and_key(public_key, hostname, private_address, - application_name, user=None): - """Add a compute nodes ssh details to local cache. - - Collect various hostname variations and add the corresponding host keys to - the local known hosts file. Finally, add the supplied public key to the - authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param hostname: Hostname to collect host keys from. - :type hostname: str - :param private_address:aCorresponding private address for hostname - :type private_address: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - # If remote compute node hands us a hostname, ensure we have a - # known hosts entry for its IP, hostname and FQDN. - hosts = [private_address] - - if not is_ipv6(private_address): - if hostname: - hosts.append(hostname) - - if is_ip(private_address): - hn = get_hostname(private_address) - if hn: - hosts.append(hn) - short = hn.split('.')[0] - if ns_query(short): - hosts.append(short) - else: - hosts.append(get_host_ip(private_address)) - short = private_address.split('.')[0] - if ns_query(short): - hosts.append(short) - - for host in list(set(hosts)): - add_known_host(host, application_name, user) - - if not ssh_authorized_key_exists(public_key, application_name, user): - log('Saving SSH authorized key for compute host at %s.' % - private_address) - add_authorized_key(public_key, application_name, user) - - -def ssh_compute_add(public_key, application_name, rid=None, unit=None, - user=None): - """Add a compute nodes ssh details to local cache. - - Collect various hostname variations and add the corresponding host keys to - the local known hosts file. Finally, add the supplied public key to the - authorized_key file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param rid: Relation id of the relation between this charm and the app. If - none is supplied it is assumed its the relation relating to - the current hook context. - :type rid: str - :param unit: Unit to add ssh asserts for if none is supplied it is assumed - its the unit relating to the current hook context. - :type unit: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - relation_data = relation_get(rid=rid, unit=unit) - ssh_compute_add_host_and_key( - public_key, - relation_data.get('hostname'), - relation_data.get('private-address'), - application_name, - user=user) - - -def ssh_known_hosts_lines(application_name, user=None): - """Return contents of known_hosts file for given application. 
- - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - known_hosts_list = [] - with open(known_hosts(application_name, user)) as hosts: - for hosts_line in hosts: - if hosts_line.rstrip(): - known_hosts_list.append(hosts_line.rstrip()) - return(known_hosts_list) - - -def ssh_authorized_keys_lines(application_name, user=None): - """Return contents of authorized_keys file for given application. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - authorized_keys_list = [] - - with open(authorized_keys(application_name, user)) as keys: - for authkey_line in keys: - if authkey_line.rstrip(): - authorized_keys_list.append(authkey_line.rstrip()) - return(authorized_keys_list) - - -def ssh_compute_remove(public_key, application_name, user=None): - """Remove given public key from authorized_keys file. - - :param public_key: Public key. - :type public_key: str - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - """ - if not (os.path.isfile(authorized_keys(application_name, user)) or - os.path.isfile(known_hosts(application_name, user))): - return - - keys = ssh_authorized_keys_lines(application_name, user=None) - keys = [k.strip() for k in keys] - - if public_key not in keys: - return - - [keys.remove(key) for key in keys if key == public_key] - - with open(authorized_keys(application_name, user), 'w') as _keys: - keys = '\n'.join(keys) - if not keys.endswith('\n'): - keys += '\n' - _keys.write(keys) - - -def get_ssh_settings(application_name, user=None): - """Retrieve the known host entries and public keys for application - - Retrieve the known host entries and public keys for application for all - units of the given application related to this application for the - app + user combination. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :param user: The user that the ssh asserts are for. - :type user: str - :returns: Public keys + host keys for all units for app + user combination. - :rtype: dict - """ - settings = {} - keys = {} - prefix = '' - if user: - prefix = '{}_'.format(user) - - for i, line in enumerate(ssh_known_hosts_lines( - application_name=application_name, user=user)): - settings['{}known_hosts_{}'.format(prefix, i)] = line - if settings: - settings['{}known_hosts_max_index'.format(prefix)] = len( - settings.keys()) - - for i, line in enumerate(ssh_authorized_keys_lines( - application_name=application_name, user=user)): - keys['{}authorized_keys_{}'.format(prefix, i)] = line - if keys: - keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys()) - settings.update(keys) - return settings - - -def get_all_user_ssh_settings(application_name): - """Retrieve the known host entries and public keys for application - - Retrieve the known host entries and public keys for application for all - units of the given application related to this application for root user - and nova user. - - :param application_name: Name of application eg nova-compute-something - :type application_name: str - :returns: Public keys + host keys for all units for app + user combination. 
- :rtype: dict - """ - settings = get_ssh_settings(application_name) - settings.update(get_ssh_settings(application_name, user='nova')) - return settings diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py deleted file mode 100644 index 9df5f746..00000000 --- a/hooks/charmhelpers/contrib/openstack/templates/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py deleted file mode 100644 index 3b7c6a9f..00000000 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ /dev/null @@ -1,370 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import ( - log, - ERROR, - INFO, - TRACE -) -from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES - -try: - from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions -except ImportError: - apt_update(fatal=True) - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions - - -class OSConfigException(Exception): - pass - - -def get_loader(templates_dir, os_release): - """ - Create a jinja2.ChoiceLoader containing template dirs up to - and including os_release. If directory template directory - is missing at templates_dir, it will be omitted from the loader. - templates_dir is added to the bottom of the search list as a base - loading dir. - - A charm may also ship a templates dir with this module - and it will be appended to the bottom of the search list, eg:: - - hooks/charmhelpers/contrib/openstack/templates - - :param templates_dir (str): Base template directory containing release - sub-directories. - :param os_release (str): OpenStack release codename to construct template - loader. - :returns: jinja2.ChoiceLoader constructed with a list of - jinja2.FilesystemLoaders, ordered in descending - order by OpenStack release. - """ - tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.values()] - - if not os.path.isdir(templates_dir): - log('Templates directory not found @ %s.' 
% templates_dir,
-            level=ERROR)
-        raise OSConfigException
-
-    # the bottom contains templates_dir and possibly a common templates dir
-    # shipped with the helper.
-    loaders = [FileSystemLoader(templates_dir)]
-    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
-    if os.path.isdir(helper_templates):
-        loaders.append(FileSystemLoader(helper_templates))
-
-    for rel, tmpl_dir in tmpl_dirs:
-        if os.path.isdir(tmpl_dir):
-            loaders.insert(0, FileSystemLoader(tmpl_dir))
-        if rel == os_release:
-            break
-    # demote this log to the lowest level; we don't really need to see these
-    # logs in production even when debugging.
-    log('Creating choice loader with dirs: %s' %
-        [l.searchpath for l in loaders], level=TRACE)
-    return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
-    """
-    Associates a config file template with a list of context generators.
-    Responsible for constructing a template context based on those generators.
-    """
-
-    def __init__(self, config_file, contexts, config_template=None):
-        self.config_file = config_file
-
-        if callable(contexts):
-            self.contexts = [contexts]
-        else:
-            self.contexts = contexts
-
-        self._complete_contexts = []
-
-        self.config_template = config_template
-
-    def context(self):
-        ctxt = {}
-        for context in self.contexts:
-            _ctxt = context()
-            if _ctxt:
-                ctxt.update(_ctxt)
-                # track interfaces for every complete context.
-                for interface in context.interfaces:
-                    if interface not in self._complete_contexts:
-                        self._complete_contexts.append(interface)
-        return ctxt
-
-    def complete_contexts(self):
-        '''
-        Return a list of interfaces that have satisfied contexts.
-        '''
-        if self._complete_contexts:
-            return self._complete_contexts
-        self.context()
-        return self._complete_contexts
-
-    @property
-    def is_string_template(self):
-        """:returns: True if this instance was initialised with a template
-                     string rather than a template file."""
-        return self.config_template is not None
-
-
-class OSConfigRenderer(object):
-    """
-    This class provides a common templating system to be used by OpenStack
-    charms. It is intended to help charms share common code and templates,
-    and ease the burden of managing config templates across multiple OpenStack
-    releases.
-
-    Basic usage::
-
-        # import some common context generators from charmhelpers
-        from charmhelpers.contrib.openstack import context
-
-        # Create a renderer object for a specific OS release.
-        configs = OSConfigRenderer(templates_dir='/tmp/templates',
-                                   openstack_release='folsom')
-        # register some config files with context generators.
-        configs.register(config_file='/etc/nova/nova.conf',
-                         contexts=[context.SharedDBContext(),
-                                   context.AMQPContext()])
-        configs.register(config_file='/etc/nova/api-paste.ini',
-                         contexts=[context.IdentityServiceContext()])
-        configs.register(config_file='/etc/haproxy/haproxy.conf',
-                         contexts=[context.HAProxyContext()])
-        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
-                         contexts=[context.ExtraPolicyContext(),
-                                   context.KeystoneContext()],
-                         config_template=hookenv.config('extra-policy'))
-        # write out a single config
-        configs.write('/etc/nova/nova.conf')
-        # write out all registered configs
-        configs.write_all()
-
-    **OpenStack Releases and template loading**
-
-    When the object is instantiated, it is associated with a specific OS
-    release. This dictates how the template loader will be constructed.
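# A self-contained sketch of the search-order construction performed by
# get_loader() above; the directory layout is hypothetical and the release
# list stands in for OPENSTACK_CODENAMES.values().
import os
from jinja2 import ChoiceLoader, FileSystemLoader

def sketch_loader(templates_dir, os_release, releases):
    loaders = [FileSystemLoader(templates_dir)]  # base dir is the fallback
    for rel in releases:
        tmpl_dir = os.path.join(templates_dir, rel)
        if os.path.isdir(tmpl_dir):
            # newer release dirs are pushed to the front of the search path
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    return ChoiceLoader(loaders)

# sketch_loader('/tmp/templates', 'havana', ['grizzly', 'havana', 'icehouse'])
# searches /tmp/templates/havana, then /tmp/templates/grizzly, then the base
# /tmp/templates directory, and never considers icehouse.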
- - The constructed loader attempts to load the template from several places - in the following order: - - from the most recent OS release-specific template dir (if one exists) - - the base templates_dir - - a template directory shipped in the charm with this helper file. - - For the example above, '/tmp/templates' contains the following structure:: - - /tmp/templates/nova.conf - /tmp/templates/api-paste.ini - /tmp/templates/grizzly/api-paste.ini - /tmp/templates/havana/api-paste.ini - - Since it was registered with the grizzly release, it first searches - the grizzly directory for nova.conf, then the templates dir. - - When writing api-paste.ini, it will find the template in the grizzly - directory. - - If the object were created with folsom, it would fall back to the - base templates dir for its api-paste.ini template. - - This system should help manage changes in config files through - openstack releases, allowing charms to fall back to the most recently - updated config template for a given release - - The haproxy.conf, since it is not shipped in the templates dir, will - be loaded from the module directory's template directory, eg - $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows - us to ship common templates (haproxy, apache) with the helpers. - - **Context generators** - - Context generators are used to generate template contexts during hook - execution. Doing so may require inspecting service relations, charm - config, etc. When registered, a config file is associated with a list - of generators. When a template is rendered and written, all context - generates are called in a chain to generate the context dictionary - passed to the jinja2 template. See context.py for more info. - """ - def __init__(self, templates_dir, openstack_release): - if not os.path.isdir(templates_dir): - log('Could not locate templates dir %s' % templates_dir, - level=ERROR) - raise OSConfigException - - self.templates_dir = templates_dir - self.openstack_release = openstack_release - self.templates = {} - self._tmpl_env = None - - if None in [Environment, ChoiceLoader, FileSystemLoader]: - # if this code is running, the object is created pre-install hook. - # jinja2 shouldn't get touched until the module is reloaded on next - # hook execution, with proper jinja2 bits successfully imported. - apt_install('python3-jinja2') - - def register(self, config_file, contexts, config_template=None): - """ - Register a config file with a list of context generators to be called - during rendering. - config_template can be used to load a template from a string instead of - using template loaders and template files. - :param config_file (str): a path where a config file will be rendered - :param contexts (list): a list of context dictionaries with kv pairs - :param config_template (str): an optional template string to use - """ - self.templates[config_file] = OSConfigTemplate( - config_file=config_file, - contexts=contexts, - config_template=config_template - ) - log('Registered config file: {}'.format(config_file), - level=INFO) - - def _get_tmpl_env(self): - if not self._tmpl_env: - loader = get_loader(self.templates_dir, self.openstack_release) - self._tmpl_env = Environment(loader=loader) - - def _get_template(self, template): - self._get_tmpl_env() - template = self._tmpl_env.get_template(template) - log('Loaded template from {}'.format(template.filename), - level=INFO) - return template - - def _get_template_from_string(self, ostmpl): - ''' - Get a jinja2 template object from a string. 
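# An illustration of the fallback template-name munging that render() applies
# below when a template cannot be found by basename; the config path is an
# invented example.
config_file = '/etc/apache2/apache2.conf'
munged = '_'.join(config_file.split('/')[1:])
assert munged == 'etc_apache2_apache2.conf'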
- :param ostmpl: OSConfigTemplate to use as a data source. - ''' - self._get_tmpl_env() - template = self._tmpl_env.from_string(ostmpl.config_template) - log('Loaded a template from a string for {}'.format( - ostmpl.config_file), - level=INFO) - return template - - def render(self, config_file): - if config_file not in self.templates: - log('Config not registered: {}'.format(config_file), level=ERROR) - raise OSConfigException - - ostmpl = self.templates[config_file] - ctxt = ostmpl.context() - - if ostmpl.is_string_template: - template = self._get_template_from_string(ostmpl) - log('Rendering from a string template: ' - '{}'.format(config_file), - level=INFO) - else: - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking - # for it using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from {} by {} or {}.' - ''.format( - self.templates_dir, - os.path.basename(config_file), - _tmpl - ), - level=ERROR) - raise e - - log('Rendering from template: {}'.format(config_file), - level=INFO) - return template.render(ctxt) - - def write(self, config_file): - """ - Write a single config file, raises if config file is not registered. - """ - if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) - raise OSConfigException - - _out = self.render(config_file).encode('UTF-8') - - with open(config_file, 'wb') as out: - out.write(_out) - - log('Wrote template %s.' % config_file, level=INFO) - - def write_all(self): - """ - Write out all registered config files. - """ - for k in self.templates.keys(): - self.write(k) - - def set_release(self, openstack_release): - """ - Resets the template environment and generates a new template loader - based on a the new openstack release. - """ - self._tmpl_env = None - self.openstack_release = openstack_release - self._get_tmpl_env() - - def complete_contexts(self): - ''' - Returns a list of context interfaces that yield a complete context. - ''' - interfaces = [] - for i in self.templates.values(): - interfaces.extend(i.complete_contexts()) - return interfaces - - def get_incomplete_context_data(self, interfaces): - ''' - Return dictionary of relation status of interfaces and any missing - required context data. 
Example: - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}} - ''' - incomplete_context_data = {} - - for i in self.templates.values(): - for context in i.contexts: - for interface in interfaces: - related = False - if interface in context.interfaces: - related = context.get_related() - missing_data = context.missing_data - if missing_data: - incomplete_context_data[interface] = {'missing_data': missing_data} - if related: - if incomplete_context_data.get(interface): - incomplete_context_data[interface].update({'related': True}) - else: - incomplete_context_data[interface] = {'related': True} - else: - incomplete_context_data[interface] = {'related': False} - return incomplete_context_data diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py deleted file mode 100644 index c8747c16..00000000 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ /dev/null @@ -1,2694 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Common python helper functions used for OpenStack charms. -from collections import OrderedDict, namedtuple -from functools import partial, wraps - -import subprocess -import json -import operator -import os -import sys -import re -import itertools -import functools - -import traceback -import uuid -import yaml - -from charmhelpers import deprecate - -from charmhelpers.contrib.network import ip - -from charmhelpers.core import decorators, unitdata - -import charmhelpers.contrib.openstack.deferred_events as deferred_events - -from charmhelpers.core.hookenv import ( - WORKLOAD_STATES, - action_fail, - action_get, - action_set, - config, - expected_peer_units, - expected_related_units, - log as juju_log, - charm_dir, - INFO, - ERROR, - metadata, - related_units, - relation_get, - relation_id, - relation_ids, - relation_set, - service_name as ch_service_name, - status_set, - hook_name, - application_version_set, - cached, - leader_set, - leader_get, - local_unit, -) - -from charmhelpers.core.strutils import ( - BasicStringComparator, - bool_from_string, -) - -from charmhelpers.contrib.storage.linux.lvm import ( - deactivate_lvm_volume_group, - is_lvm_physical_volume, - remove_lvm_physical_volume, -) - -from charmhelpers.contrib.network.ip import ( - get_ipv6_addr, - is_ipv6, - port_has_listener, -) - -from charmhelpers.core.host import ( - lsb_release, - mounts, - umount, - service_running, - service_pause, - service_resume, - service_stop, - service_start, - restart_on_change_helper, -) - -from charmhelpers.fetch import ( - apt_cache, - apt_install, - import_key as fetch_import_key, - add_source as fetch_add_source, - SourceConfigError, - GPGKeyError, - get_upstream_version, - filter_installed_packages, - filter_missing_packages, - ubuntu_apt_pkg as apt, - OPENSTACK_RELEASES, - UBUNTU_OPENSTACK_RELEASE, -) - -from charmhelpers.fetch.snap import ( - snap_install, - snap_refresh, - valid_snap_channel, -) - -from 
charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk -from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device -from charmhelpers.contrib.openstack.exceptions import OSContextError, ServiceActionError -from charmhelpers.contrib.openstack.policyd import ( - policyd_status_message_prefix, - POLICYD_CONFIG_NAME, -) - -from charmhelpers.contrib.openstack.ha.utils import ( - expect_ha, -) - -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' - -DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' - 'restricted main multiverse universe') - -OPENSTACK_CODENAMES = OrderedDict([ - # NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version - # number. This just means the i-th version of the year yyyy. - ('2011.2', 'diablo'), - ('2012.1', 'essex'), - ('2012.2', 'folsom'), - ('2013.1', 'grizzly'), - ('2013.2', 'havana'), - ('2014.1', 'icehouse'), - ('2014.2', 'juno'), - ('2015.1', 'kilo'), - ('2015.2', 'liberty'), - ('2016.1', 'mitaka'), - ('2016.2', 'newton'), - ('2017.1', 'ocata'), - ('2017.2', 'pike'), - ('2018.1', 'queens'), - ('2018.2', 'rocky'), - ('2019.1', 'stein'), - ('2019.2', 'train'), - ('2020.1', 'ussuri'), - ('2020.2', 'victoria'), - ('2021.1', 'wallaby'), - ('2021.2', 'xena'), - ('2022.1', 'yoga'), -]) - -# The ugly duckling - must list releases oldest to newest -SWIFT_CODENAMES = OrderedDict([ - ('diablo', - ['1.4.3']), - ('essex', - ['1.4.8']), - ('folsom', - ['1.7.4']), - ('grizzly', - ['1.7.6', '1.7.7', '1.8.0']), - ('havana', - ['1.9.0', '1.9.1', '1.10.0']), - ('icehouse', - ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), - ('juno', - ['2.0.0', '2.1.0', '2.2.0']), - ('kilo', - ['2.2.1', '2.2.2']), - ('liberty', - ['2.3.0', '2.4.0', '2.5.0']), - ('mitaka', - ['2.5.0', '2.6.0', '2.7.0']), - ('newton', - ['2.8.0', '2.9.0', '2.10.0']), - ('ocata', - ['2.11.0', '2.12.0', '2.13.0']), - ('pike', - ['2.13.0', '2.15.0']), - ('queens', - ['2.16.0', '2.17.0']), - ('rocky', - ['2.18.0', '2.19.0']), - ('stein', - ['2.20.0', '2.21.0']), - ('train', - ['2.22.0', '2.23.0']), - ('ussuri', - ['2.24.0', '2.25.0']), - ('victoria', - ['2.25.0', '2.26.0']), -]) - -# >= Liberty version->codename mapping -PACKAGE_CODENAMES = { - 'nova-common': OrderedDict([ - ('12', 'liberty'), - ('13', 'mitaka'), - ('14', 'newton'), - ('15', 'ocata'), - ('16', 'pike'), - ('17', 'queens'), - ('18', 'rocky'), - ('19', 'stein'), - ('20', 'train'), - ('21', 'ussuri'), - ('22', 'victoria'), - ]), - 'neutron-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ('14', 'stein'), - ('15', 'train'), - ('16', 'ussuri'), - ('17', 'victoria'), - ]), - 'cinder-common': OrderedDict([ - ('7', 'liberty'), - ('8', 'mitaka'), - ('9', 'newton'), - ('10', 'ocata'), - ('11', 'pike'), - ('12', 'queens'), - ('13', 'rocky'), - ('14', 'stein'), - ('15', 'train'), - ('16', 'ussuri'), - ('17', 'victoria'), - ]), - 'keystone': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('17', 'ussuri'), - ('18', 'victoria'), - ]), - 'horizon-common': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('18', 'ussuri'), # Note this was actually 17.0 - 18.3 
- ('19', 'victoria'), # Note this is really 18.6 - ]), - 'ceilometer-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ('12', 'stein'), - ('13', 'train'), - ('14', 'ussuri'), - ('15', 'victoria'), - ]), - 'heat-common': OrderedDict([ - ('5', 'liberty'), - ('6', 'mitaka'), - ('7', 'newton'), - ('8', 'ocata'), - ('9', 'pike'), - ('10', 'queens'), - ('11', 'rocky'), - ('12', 'stein'), - ('13', 'train'), - ('14', 'ussuri'), - ('15', 'victoria'), - ]), - 'glance-common': OrderedDict([ - ('11', 'liberty'), - ('12', 'mitaka'), - ('13', 'newton'), - ('14', 'ocata'), - ('15', 'pike'), - ('16', 'queens'), - ('17', 'rocky'), - ('18', 'stein'), - ('19', 'train'), - ('20', 'ussuri'), - ('21', 'victoria'), - ]), - 'openstack-dashboard': OrderedDict([ - ('8', 'liberty'), - ('9', 'mitaka'), - ('10', 'newton'), - ('11', 'ocata'), - ('12', 'pike'), - ('13', 'queens'), - ('14', 'rocky'), - ('15', 'stein'), - ('16', 'train'), - ('18', 'ussuri'), - ('19', 'victoria'), - ]), -} - -DEFAULT_LOOPBACK_SIZE = '5G' - -DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' - -DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] - - -class CompareOpenStackReleases(BasicStringComparator): - """Provide comparisons of OpenStack releases. - - Use in the form of - - if CompareOpenStackReleases(release) > 'mitaka': - # do something with mitaka - """ - _list = OPENSTACK_RELEASES - - -def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') - sys.exit(1) - - -def get_installed_semantic_versioned_packages(): - '''Get a list of installed packages which have OpenStack semantic versioning - - :returns List of installed packages - :rtype: [pkg1, pkg2, ...] - ''' - return filter_missing_packages(PACKAGE_CODENAMES.keys()) - - -def get_os_codename_install_source(src): - '''Derive OpenStack release codename from a given installation source.''' - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = '' - if src is None: - return rel - if src in OPENSTACK_RELEASES: - return src - if src in ['distro', 'distro-proposed', 'proposed']: - try: - rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] - except KeyError: - e = 'Could not derive openstack release for '\ - 'this Ubuntu release: %s' % ubuntu_rel - error_out(e) - return rel - - if src.startswith('cloud:'): - ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('-')[1].split('/')[0] - return ca_rel - - # Best guess match based on deb string provided - if (src.startswith('deb') or - src.startswith('ppa') or - src.startswith('snap')): - for v in OPENSTACK_CODENAMES.values(): - if v in src: - return v - - -def get_os_version_install_source(src): - codename = get_os_codename_install_source(src) - return get_os_version_codename(codename) - - -def get_os_codename_version(vers): - '''Determine OpenStack codename from version number.''' - try: - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): - '''Determine OpenStack version number from codename.''' - for k, v in version_map.items(): - if v == codename: - return k - e = 'Could not derive OpenStack version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_os_version_codename_swift(codename): - '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): - if k == codename: - 
return v[-1] - e = 'Could not derive swift version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_swift_codename(version): - '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] - - if len(codenames) > 1: - # If more than one release codename contains this version we determine - # the actual codename based on the highest available install source. - for codename in reversed(codenames): - releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in releases.items() if codename in v] - ret = (subprocess - .check_output(['apt-cache', 'policy', 'swift']) - .decode('UTF-8')) - if codename in ret or release[0] in ret: - return codename - elif len(codenames) == 1: - return codenames[0] - - # NOTE: fallback - attempt to match with just major.minor version - match = re.match(r'^(\d+)\.(\d+)', version) - if match: - major_minor_version = match.group(0) - for codename, versions in SWIFT_CODENAMES.items(): - for release_version in versions: - if release_version.startswith(major_minor_version): - return codename - - return None - - -def get_os_codename_package(package, fatal=True): - """Derive OpenStack release codename from an installed package. - - Initially, see if the openstack-release pkg is available (by trying to - install it) and use it instead. - - If it isn't then it falls back to the existing method of checking the - version of the package passed and then resolving the version from that - using lookup tables. - - Note: if possible, charms should use get_installed_os_version() to - determine the version of the "openstack-release" pkg. - - :param package: the package to test for version information. - :type package: str - :param fatal: If True (default), then die via error_out() - :type fatal: bool - :returns: the OpenStack release codename (e.g. ussuri) - :rtype: str - """ - - codename = get_installed_os_version() - if codename: - return codename - - if snap_install_requested(): - cmd = ['snap', 'list', package] - try: - out = subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return None - lines = out.split('\n') - for line in lines: - if package in line: - # Second item in list is Version - return line.split()[1] - - cache = apt_cache() - - try: - pkg = cache[package] - except Exception: - if not fatal: - return None - # the package is unknown to the current apt cache. - e = 'Could not determine version of package with no installation '\ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - if not fatal: - return None - # package is known, but no version is currently installed. 
-        e = 'Could not determine version of uninstalled package: %s' % package
-        error_out(e)
-
-    vers = apt.upstream_version(pkg.current_ver.ver_str)
-    if 'swift' in pkg.name:
-        # Full x.y.z match for swift versions
-        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
-    else:
-        # x.y match only for 20XX.X
-        # and ignore patch level for other packages
-        match = re.match(r'^(\d+)\.(\d+)', vers)
-
-    if match:
-        vers = match.group(0)
-
-    # Generate a major version number for newer semantic
-    # versions of openstack projects
-    major_vers = vers.split('.')[0]
-    # >= Liberty independent project versions
-    if (package in PACKAGE_CODENAMES and
-            major_vers in PACKAGE_CODENAMES[package]):
-        return PACKAGE_CODENAMES[package][major_vers]
-    else:
-        # < Liberty co-ordinated project versions
-        try:
-            if 'swift' in pkg.name:
-                return get_swift_codename(vers)
-            else:
-                return OPENSTACK_CODENAMES[vers]
-        except KeyError:
-            if not fatal:
-                return None
-            e = 'Could not determine OpenStack codename for version %s' % vers
-            error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
-    '''Derive OpenStack version number from an installed package.'''
-    codename = get_os_codename_package(pkg, fatal=fatal)
-
-    if not codename:
-        return None
-
-    if 'swift' in pkg:
-        vers_map = SWIFT_CODENAMES
-        for cname, version in vers_map.items():
-            if cname == codename:
-                return version[-1]
-    else:
-        vers_map = OPENSTACK_CODENAMES
-        for version, cname in vers_map.items():
-            if cname == codename:
-                return version
-
-
-def get_installed_os_version():
-    """Determine the OpenStack release codename from the openstack-release
-    package.
-
-    This uses the "openstack-release" pkg (if it exists) to return the
-    OpenStack release codename (e.g. ussuri, mitaka, ocata, etc.)
-
-    Note, it caches the result so that it is only done once per hook.
-
-    :returns: the OpenStack release codename, if available
-    :rtype: Optional[str]
-    """
-    @cached
-    def _do_install():
-        apt_install(filter_installed_packages(['openstack-release']),
-                    fatal=False, quiet=True)
-
-    _do_install()
-    return openstack_release().get('OPENSTACK_CODENAME')
-
-
-@cached
-def openstack_release():
-    """Return /etc/openstack-release in a dict."""
-    d = {}
-    try:
-        with open('/etc/openstack-release', 'r') as lsb:
-            for l in lsb:
-                s = l.split('=')
-                if len(s) != 2:
-                    continue
-                d[s[0].strip()] = s[1].strip()
-    except FileNotFoundError:
-        pass
-    return d
-
-
-# Module local cache variable for the os_release.
-_os_rel = None
-
-
-def reset_os_release():
-    '''Unset the cached os_release version'''
-    global _os_rel
-    _os_rel = None
-
-
-def os_release(package, base=None, reset_cache=False, source_key=None):
-    """Return the OpenStack release codename from a cached global.
-
-    If reset_cache is True, unset the cached os_release version and return
-    the freshly determined version.
-
-    If the codename cannot be determined from either an installed package or
-    the installation source, the earliest release supported by the charm
-    should be returned.
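# A standalone illustration of the KEY=value parsing that openstack_release()
# applies above; the file body here is invented rather than read from
# /etc/openstack-release.
sample = "OPENSTACK_CODENAME=ussuri\nOPENSTACK_RELEASE=20.04\nnot-a-pair\n"
parsed = {}
for line in sample.splitlines():
    parts = line.split('=')
    if len(parts) != 2:
        continue  # ignore anything that is not a simple KEY=value pair
    parsed[parts[0].strip()] = parts[1].strip()
assert parsed['OPENSTACK_CODENAME'] == 'ussuri'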
-
-    :param package: Name of package to determine release from
-    :type package: str
-    :param base: Fallback codename if attempts to determine from the package
-                 fail
-    :type base: Optional[str]
-    :param reset_cache: Reset any cached codename value
-    :type reset_cache: bool
-    :param source_key: Name of source configuration option
-                       (default: 'openstack-origin')
-    :type source_key: Optional[str]
-    :returns: OpenStack release codename
-    :rtype: str
-    """
-    source_key = source_key or 'openstack-origin'
-    if not base:
-        base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']]
-    global _os_rel
-    if reset_cache:
-        reset_os_release()
-    if _os_rel:
-        return _os_rel
-    _os_rel = (
-        get_os_codename_package(package, fatal=False) or
-        get_os_codename_install_source(config(source_key)) or
-        base)
-    return _os_rel
-
-
-@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
-def import_key(keyid):
-    """Import a key, either ASCII armored, or a GPG key id.
-
-    @param keyid: the key in ASCII armor format, or a GPG key id.
-    @raises SystemExit() via sys.exit() on failure.
-    """
-    try:
-        return fetch_import_key(keyid)
-    except GPGKeyError as e:
-        error_out("Could not import key: {}".format(str(e)))
-
-
-def get_source_and_pgp_key(source_and_key):
-    """Look for a pgp key ID or ascii-armor key in the given input.
-
-    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
-        optional.
-    :returns: (source_spec, key_id OR None) as a tuple. Returns None for
-        key_id if there was no '|' in the source_and_key string.
-    """
-    try:
-        # maxsplit=1 so that a key which itself contains '|' is kept intact.
-        source, key = source_and_key.split('|', 1)
-        return source, key or None
-    except ValueError:
-        return source_and_key, None
-
-
-@deprecate("use charmhelpers.fetch.add_source() instead.",
-           "2017-07", log=juju_log)
-def configure_installation_source(source_plus_key):
-    """Configure an installation source.
-
-    The functionality is provided by charmhelpers.fetch.add_source().
-    The difference between the two functions is that the add_source()
-    signature requires the key to be passed directly, whereas this function
-    passes an optional key by appending '|<key>' to the end of the source
-    specification.
-
-    Another difference from add_source() is that this function calls
-    sys.exit(1) if the configuration fails, whereas add_source() raises
-    SourceConfigError(). Also, add_source() silently fails (with a juju_log
-    message) if there is no matching source to configure, whereas this
-    function fails with a sys.exit(1).
-
-    :param source_plus_key: String of the form "source_spec|keyid" -- see
-        above for details.
-
-    Note that the behaviour on error is to log the error to the juju log and
-    then call sys.exit(1).
-    """
-    if source_plus_key.startswith('snap'):
-        # Do nothing for snap installs
-        return
-    # extract the key if there is one, denoted by a '|' in the rel
-    source, key = get_source_and_pgp_key(source_plus_key)
-
-    # handle the ordinary sources via add_source
-    try:
-        fetch_add_source(source, key, fail_invalid=True)
-    except SourceConfigError as se:
-        error_out(str(se))
-
-
-def config_value_changed(option):
-    """
-    Determine if config value changed since last call to this function.
-    """
-    hook_data = unitdata.HookData()
-    with hook_data():
-        db = unitdata.kv()
-        current = config(option)
-        saved = db.get(option)
-        db.set(option, current)
-        if saved is None:
-            return False
-        return current != saved
-
-
-def get_endpoint_key(service_name, relation_id, unit_name):
-    """Return the key used to refer to an ep changed notification from a unit.
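# A quick demonstration of get_source_and_pgp_key() above, assuming the
# function is importable from this module; the source strings and key id
# are invented.
assert get_source_and_pgp_key('cloud:focal-victoria|DEADBEEF') == \
    ('cloud:focal-victoria', 'DEADBEEF')
assert get_source_and_pgp_key('distro') == ('distro', None)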
- - :param service_name: Service name eg nova, neutron, placement etc - :type service_name: str - :param relation_id: The id of the relation the unit is on. - :type relation_id: str - :param unit_name: The name of the unit publishing the notification. - :type unit_name: str - :returns: The key used to refer to an ep changed notification from a unit - :rtype: str - """ - return '{}-{}-{}'.format( - service_name, - relation_id.replace(':', '_'), - unit_name.replace('/', '_')) - - -def get_endpoint_notifications(service_names, rel_name='identity-service'): - """Return all notifications for the given services. - - :param service_names: List of service name. - :type service_name: List - :param rel_name: Name of the relation to query - :type rel_name: str - :returns: A dict containing the source of the notification and its nonce. - :rtype: Dict[str, str] - """ - notifications = {} - for rid in relation_ids(rel_name): - for unit in related_units(relid=rid): - ep_changed_json = relation_get( - rid=rid, - unit=unit, - attribute='ep_changed') - if ep_changed_json: - ep_changed = json.loads(ep_changed_json) - for service in service_names: - if ep_changed.get(service): - key = get_endpoint_key(service, rid, unit) - notifications[key] = ep_changed[service] - return notifications - - -def endpoint_changed(service_name, rel_name='identity-service'): - """Whether a new notification has been received for an endpoint. - - :param service_name: Service name eg nova, neutron, placement etc - :type service_name: str - :param rel_name: Name of the relation to query - :type rel_name: str - :returns: Whether endpoint has changed - :rtype: bool - """ - changed = False - with unitdata.HookData()() as t: - db = t[0] - notifications = get_endpoint_notifications( - [service_name], - rel_name=rel_name) - for key, nonce in notifications.items(): - if db.get(key) != nonce: - juju_log(('New endpoint change notification found: ' - '{}={}').format(key, nonce), - 'INFO') - changed = True - break - return changed - - -def save_endpoint_changed_triggers(service_names, rel_name='identity-service'): - """Save the endpoint triggers in db so it can be tracked if they changed. - - :param service_names: List of service name. - :type service_name: List - :param rel_name: Name of the relation to query - :type rel_name: str - """ - with unitdata.HookData()() as t: - db = t[0] - notifications = get_endpoint_notifications( - service_names, - rel_name=rel_name) - for key, nonce in notifications.items(): - db.set(key, nonce) - - -def save_script_rc(script_path="scripts/scriptrc", **env_vars): - """ - Write an rc file in the charm-delivered directory containing - exported environment variables provided by env_vars. Any charm scripts run - outside the juju hook environment can source this scriptrc to obtain - updated config information necessary to perform health checks or - service changes. - """ - juju_rc_path = "%s/%s" % (charm_dir(), script_path) - if not os.path.exists(os.path.dirname(juju_rc_path)): - os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wt') as rc_script: - rc_script.write("#!/bin/bash\n") - for u, p in env_vars.items(): - if u != "script_path": - rc_script.write('export %s=%s\n' % (u, p)) - - -def openstack_upgrade_available(package): - """ - Determines if an OpenStack upgrade is available from installation - source, based on version of installed package. - - :param package: str: Name of installed package. 
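# A rough sketch of the version comparison that openstack_upgrade_available()
# performs below; the version strings are invented. apt here is
# charmhelpers.fetch.ubuntu_apt_pkg, as imported at the top of this module.
from charmhelpers.fetch import ubuntu_apt_pkg as apt

apt.init()
# version_compare() returns 1 when the first version is newer.
upgrade_available = apt.version_compare('2:21.0.0', '2:20.0.0') >= 1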
- - :returns: bool: : Returns True if configured installation source offers - a newer version of package. - """ - - src = config('openstack-origin') - cur_vers = get_os_version_package(package) - if not cur_vers: - # The package has not been installed yet do not attempt upgrade - return False - if "swift" in package: - codename = get_os_codename_install_source(src) - avail_vers = get_os_version_codename_swift(codename) - else: - try: - avail_vers = get_os_version_install_source(src) - except Exception: - avail_vers = cur_vers - apt.init() - return apt.version_compare(avail_vers, cur_vers) >= 1 - - -def ensure_block_device(block_device): - ''' - Confirm block_device, create as loopback if necessary. - - :param block_device: str: Full path of block device to ensure. - - :returns: str: Full path of ensured block device. - ''' - _none = ['None', 'none', None] - if (block_device in _none): - error_out('prepare_storage(): Missing required input: block_device=%s.' - % block_device) - - if block_device.startswith('/dev/'): - bdev = block_device - elif block_device.startswith('/'): - _bd = block_device.split('|') - if len(_bd) == 2: - bdev, size = _bd - else: - bdev = block_device - size = DEFAULT_LOOPBACK_SIZE - bdev = ensure_loopback_device(bdev, size) - else: - bdev = '/dev/%s' % block_device - - if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev) - - return bdev - - -def clean_storage(block_device): - ''' - Ensures a block device is clean. That is: - - unmounted - - any lvm volume groups are deactivated - - any lvm physical device signatures removed - - partition table wiped - - :param block_device: str: Full path to block device to clean. - ''' - for mp, d in mounts(): - if d == block_device: - juju_log('clean_storage(): %s is mounted @ %s, unmounting.' 
% - (d, mp), level=INFO) - umount(mp, persist=True) - - if is_lvm_physical_volume(block_device): - deactivate_lvm_volume_group(block_device) - remove_lvm_physical_volume(block_device) - else: - zap_disk(block_device) - - -is_ip = ip.is_ip -ns_query = ip.ns_query -get_host_ip = ip.get_host_ip -get_hostname = ip.get_hostname - - -def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): - mm_map = {} - if os.path.isfile(mm_file): - with open(mm_file, 'r') as f: - mm_map = json.load(f) - return mm_map - - -def sync_db_with_multi_ipv6_addresses(database, database_user, - relation_prefix=None): - hosts = get_ipv6_addr(dynamic_only=False) - - if config('vip'): - vips = config('vip').split() - for vip in vips: - if vip and is_ipv6(vip): - hosts.append(vip) - - kwargs = {'database': database, - 'username': database_user, - 'hostname': json.dumps(hosts)} - - if relation_prefix: - for key in list(kwargs.keys()): - kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] - del kwargs[key] - - for rid in relation_ids('shared-db'): - relation_set(relation_id=rid, **kwargs) - - -def os_requires_version(ostack_release, pkg): - """ - Decorator for hook to specify minimum supported release - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args): - if os_release(pkg) < ostack_release: - raise Exception("This hook is not supported on releases" - " before %s" % ostack_release) - f(*args) - return wrapped_f - return wrap - - -def os_workload_status(configs, required_interfaces, charm_func=None): - """ - Decorator to set workload status based on complete contexts - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args, **kwargs): - # Run the original function first - f(*args, **kwargs) - # Set workload status now that contexts have been - # acted on - set_os_workload_status(configs, required_interfaces, charm_func) - return wrapped_f - return wrap - - -def set_os_workload_status(configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Set the state of the workload status for the charm. - - This calls _determine_os_workload_status() to get the new state, message - and sets the status using status_set() - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _determine_os_workload_status( - configs, required_interfaces, charm_func, services, ports) - status_set(state, message) - - -def _determine_os_workload_status( - configs, required_interfaces, charm_func=None, - services=None, ports=None): - """Determine the state of the workload status for the charm. - - This function returns the new workload status for the charm based - on the state of the interfaces, the paused state and whether the - services are actually running and any specified ports are open. - - This checks: - - 1. if the unit should be paused, that it is actually paused. If so the - state is 'maintenance' + message, else 'broken'. - 2. that the interfaces/relations are complete. If they are not then - it sets the state to either 'broken' or 'waiting' and an appropriate - message. - 3. If all the relation data is set, then it checks that the actual - services really are running. If not it sets the state to 'broken'. 
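# A compressed sketch of the decision chain documented above: each probe
# returns (state, message) or (None, None), the first firm state wins, and
# otherwise the unit is active. The probes here are stand-ins for the real
# paused/interface/service checks.
def sketch_status(checks):
    for check in checks:
        state, message = check()
        if state is not None:
            return state, message
    return 'active', 'Unit is ready'

state, message = sketch_status([
    lambda: (None, None),                                   # not paused
    lambda: ('waiting', 'Incomplete relations: identity'),  # relation check
])
assert state == 'waiting'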
- - If everything is okay then the state returns 'active'. - - @param configs: a templating.OSConfigRenderer() object - @param required_interfaces: {generic: [specific, specific2, ...]} - @param charm_func: a callable function that returns state, message. The - signature is charm_func(configs) -> (state, message) - @param services: list of strings OR dictionary specifying services/ports - @param ports: OPTIONAL list of port numbers. - @returns state, message: the new workload status, user message - """ - state, message = _ows_check_if_paused(services, ports) - - if state is None: - state, message = _ows_check_generic_interfaces( - configs, required_interfaces) - - if state != 'maintenance' and charm_func: - # _ows_check_charm_func() may modify the state, message - state, message = _ows_check_charm_func( - state, message, lambda: charm_func(configs)) - - if state is None: - state, message = ows_check_services_running(services, ports) - - if state is None: - state = 'active' - message = "Unit is ready" - juju_log(message, 'INFO') - - try: - if config(POLICYD_CONFIG_NAME): - message = "{} {}".format(policyd_status_message_prefix(), message) - # Get deferred restarts events that have been triggered by a policy - # written by this charm. - deferred_restarts = list(set( - [e.service - for e in deferred_events.get_deferred_restarts() - if e.policy_requestor_name == ch_service_name()])) - if deferred_restarts: - svc_msg = "Services queued for restart: {}".format( - ', '.join(sorted(deferred_restarts))) - message = "{}. {}".format(message, svc_msg) - deferred_hooks = deferred_events.get_deferred_hooks() - if deferred_hooks: - svc_msg = "Hooks skipped due to disabled auto restarts: {}".format( - ', '.join(sorted(deferred_hooks))) - message = "{}. {}".format(message, svc_msg) - - except Exception: - pass - - return state, message - - -def _ows_check_if_paused(services=None, ports=None): - """Check if the unit is supposed to be paused, and if so check that the - services/ports (if passed) are actually stopped/not being listened to. - - If the unit isn't supposed to be paused, just return None, None - - If the unit is performing a series upgrade, return a message indicating - this. - - @param services: OPTIONAL services spec or list of service names. - @param ports: OPTIONAL list of port numbers. - @returns state, message or None, None - """ - if is_unit_upgrading_set(): - state, message = check_actually_paused(services=services, - ports=ports) - if state is None: - # we're paused okay, so set maintenance and return - state = "blocked" - message = ("Ready for do-release-upgrade and reboot. " - "Set complete when finished.") - return state, message - - if is_unit_paused_set(): - state, message = check_actually_paused(services=services, - ports=ports) - if state is None: - # we're paused okay, so set maintenance and return - state = "maintenance" - message = "Paused. Use 'resume' action to resume normal service." - return state, message - return None, None - - -def _ows_check_generic_interfaces(configs, required_interfaces): - """Check the complete contexts to determine the workload status. - - - Checks for missing or incomplete contexts - - juju log details of missing required data. - - determines the correct workload status - - creates an appropriate message for status_set(...) 
-    - if there are no problems then the function returns None, None
-
-    @param configs: a templating.OSConfigRenderer() object
-    @param required_interfaces: {generic_interface: [specific_interface], }
-    @returns state, message or None, None
-    """
-    incomplete_rel_data = incomplete_relation_data(configs,
-                                                   required_interfaces)
-    state = None
-    message = None
-    missing_relations = set()
-    incomplete_relations = set()
-
-    for generic_interface, relations_states in incomplete_rel_data.items():
-        related_interface = None
-        missing_data = {}
-        # Related or not?
-        for interface, relation_state in relations_states.items():
-            if relation_state.get('related'):
-                related_interface = interface
-                missing_data = relation_state.get('missing_data')
-                break
-        # No relation ID for the generic_interface?
-        if not related_interface:
-            juju_log("{} relation is missing and must be related for "
-                     "functionality. ".format(generic_interface), 'WARN')
-            state = 'blocked'
-            missing_relations.add(generic_interface)
-        else:
-            # Relation ID exists but no related unit
-            if not missing_data:
-                # Edge case - relation ID exists but departing
-                _hook_name = hook_name()
-                if (('departed' in _hook_name or 'broken' in _hook_name) and
-                        related_interface in _hook_name):
-                    state = 'blocked'
-                    missing_relations.add(generic_interface)
-                    juju_log("{} relation's interface, {}, "
-                             "relationship is departed or broken "
-                             "and is required for functionality."
-                             "".format(generic_interface, related_interface),
-                             "WARN")
-                # Normal case relation ID exists but no related unit
-                # (joining)
-                else:
-                    juju_log("{} relation's interface, {}, is related but has"
-                             " no units in the relation."
-                             "".format(generic_interface, related_interface),
-                             "INFO")
-            # Related unit exists and data missing on the relation
-            else:
-                juju_log("{} relation's interface, {}, is related awaiting "
-                         "the following data from the relationship: {}. "
-                         "".format(generic_interface, related_interface,
-                                   ", ".join(missing_data)), "INFO")
-            if state != 'blocked':
-                state = 'waiting'
-            if generic_interface not in missing_relations:
-                incomplete_relations.add(generic_interface)
-
-    if missing_relations:
-        message = "Missing relations: {}".format(", ".join(missing_relations))
-        if incomplete_relations:
-            message += "; incomplete relations: {}" \
-                       "".format(", ".join(incomplete_relations))
-        state = 'blocked'
-    elif incomplete_relations:
-        message = "Incomplete relations: {}" \
-                  "".format(", ".join(incomplete_relations))
-        state = 'waiting'
-
-    return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
-    """Run a custom check function for the charm to see if it wants to
-    change the state. This is only run if not in 'maintenance' and
-    tests to see if the new state is more important than the previous
-    one determined by the interfaces/relations check.
-
-    @param state: the previously determined state so far.
-    @param message: the user orientated message so far.
-    @param charm_func: a callable function that returns state, message
-    @returns state, message strings.
- """ - if charm_func_with_configs: - charm_state, charm_message = charm_func_with_configs() - if (charm_state != 'active' and - charm_state != 'unknown' and - charm_state is not None): - state = workload_state_compare(state, charm_state) - if message: - charm_message = charm_message.replace("Incomplete relations: ", - "") - message = "{}, {}".format(message, charm_message) - else: - message = charm_message - return state, message - - -@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) -def _ows_check_services_running(services, ports): - return ows_check_services_running(services, ports) - - -def ows_check_services_running(services, ports): - """Check that the services that should be running are actually running - and that any ports specified are being listened to. - - @param services: list of strings OR dictionary specifying services/ports - @param ports: list of ports - @returns state, message: strings or None, None - """ - messages = [] - state = None - if services is not None: - services = _extract_services_list_helper(services) - services_running, running = _check_running_services(services) - if not all(running): - messages.append( - "Services not running that should be: {}" - .format(", ".join(_filter_tuples(services_running, False)))) - state = 'blocked' - # also verify that the ports that should be open are open - # NB, that ServiceManager objects only OPTIONALLY have ports - map_not_open, ports_open = ( - _check_listening_on_services_ports(services)) - if not all(ports_open): - # find which service has missing ports. They are in service - # order which makes it a bit easier. - message_parts = {service: ", ".join([str(v) for v in open_ports]) - for service, open_ports in map_not_open.items()} - message = ", ".join( - ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) - messages.append( - "Services with ports not open that should be: {}" - .format(message)) - state = 'blocked' - - if ports is not None: - # and we can also check ports which we don't know the service for - ports_open, ports_open_bools = _check_listening_on_ports_list(ports) - if not all(ports_open_bools): - messages.append( - "Ports which should be open, but are not: {}" - .format(", ".join([str(p) for p, v in ports_open - if not v]))) - state = 'blocked' - - if state is not None: - message = "; ".join(messages) - return state, message - - return None, None - - -def _extract_services_list_helper(services): - """Extract a OrderedDict of {service: [ports]} of the supplied services - for use by the other functions. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param services: see above - @returns OrderedDict(service: [ports], ...) - """ - if services is None: - return {} - if isinstance(services, dict): - services = services.values() - # either extract the list of services from the dictionary, or if - # it is a simple string, use that. i.e. works with mixed lists. - _s = OrderedDict() - for s in services: - if isinstance(s, dict) and 'service' in s: - _s[s['service']] = s.get('ports', []) - if isinstance(s, str): - _s[s] = [] - return _s - - -def _check_running_services(services): - """Check that the services dict provided is actually running and provide - a list of (service, boolean) tuples for each service. 
-
-    Returns both a zipped list of (service, boolean) and a list of booleans
-    in the same order as the services.
-
-    @param services: OrderedDict of strings: [ports], one for each service to
-                     check.
-    @returns [(service, boolean), ...], : results for checks
-             [boolean]                  : just the result of the service checks
-    """
-    services_running = [service_running(s) for s in services]
-    return list(zip(services, services_running)), services_running
-
-
-def _check_listening_on_services_ports(services, test=False):
-    """Check that the unit is actually listening (has the port open) on the
-    ports that the service specifies are open. If test is True then the
-    function returns the services with ports that are open rather than
-    closed.
-
-    Returns an OrderedDict of service: ports and a list of booleans
-
-    @param services: OrderedDict(service: [port, ...], ...)
-    @param test: default=False, if False, test for closed, otherwise open.
-    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
-    """
-    test = bool(test)  # ensure test is True or False
-    all_ports = list(itertools.chain(*services.values()))
-    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
-    map_ports = OrderedDict()
-    matched_ports = [p for p, opened in zip(all_ports, ports_states)
-                     if opened == test]  # keep ports whose state matches test
-    for service, ports in services.items():
-        set_ports = set(ports).intersection(matched_ports)
-        if set_ports:
-            map_ports[service] = set_ports
-    return map_ports, ports_states
-
-
-def _check_listening_on_ports_list(ports):
-    """Check that the ports list given are being listened to
-
-    Returns a list of ports being listened to and a list of the
-    booleans.
-
-    @param ports: LIST of port numbers.
-    @returns [(port_num, boolean), ...], [boolean]
-    """
-    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
-    return zip(ports, ports_open), ports_open
-
-
-def _filter_tuples(services_states, state):
-    """Return a simple list from a list of tuples according to the condition
-
-    @param services_states: LIST of (string, boolean): service and running
-                            state.
-    @param state: Boolean to match the tuple against.
-    @returns [LIST of strings] that matched the tuple RHS.
-    """
-    return [s for s, b in services_states if b == state]
-
-
-def workload_state_compare(current_workload_state, workload_state):
-    """Return highest priority of two states."""
-    hierarchy = {'unknown': -1,
-                 'active': 0,
-                 'maintenance': 1,
-                 'waiting': 2,
-                 'blocked': 3,
-                 }
-
-    if hierarchy.get(workload_state) is None:
-        workload_state = 'unknown'
-    if hierarchy.get(current_workload_state) is None:
-        current_workload_state = 'unknown'
-
-    # Set workload_state based on hierarchy of statuses
-    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
-        return current_workload_state
-    else:
-        return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
-    """Check complete contexts against required_interfaces
-    Return dictionary of incomplete relation data.
-
-    configs is an OSConfigRenderer object with configs registered
-
-    required_interfaces is a dictionary of required general interfaces
-    with dictionary values of possible specific interfaces.
-    Example:
-        required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
-    The interface is said to be satisfied if any one of the interfaces in the
-    list has a complete context.
-
-    Return dictionary of incomplete or missing required contexts with relation
-    status of interfaces and any missing data points.
Example: - {'message': - {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, - 'zeromq-configuration': {'related': False}}, - 'identity': - {'identity-service': {'related': False}}, - 'database': - {'pgsql-db': {'related': False}, - 'shared-db': {'related': True}}} - """ - complete_ctxts = configs.complete_contexts() - incomplete_relations = [ - svc_type - for svc_type, interfaces in required_interfaces.items() - if not set(interfaces).intersection(complete_ctxts)] - return { - i: configs.get_incomplete_context_data(required_interfaces[i]) - for i in incomplete_relations} - - -def do_action_openstack_upgrade(package, upgrade_callback, configs): - """Perform action-managed OpenStack upgrade. - - Upgrades packages to the configured openstack-origin version and sets - the corresponding action status as a result. - - For backwards compatibility a config flag (action-managed-upgrade) must - be set for this code to run, otherwise a full service level upgrade will - fire on config-changed. - - @param package: package name for determining if openstack upgrade available - @param upgrade_callback: function callback to charm's upgrade function - @param configs: templating object derived from OSConfigRenderer class - - @return: True if upgrade successful; False if upgrade failed or skipped - """ - ret = False - - if openstack_upgrade_available(package): - if config('action-managed-upgrade'): - juju_log('Upgrading OpenStack release') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('upgrade callback resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade'}) - else: - action_set({'outcome': 'no upgrade available'}) - - return ret - - -def do_action_package_upgrade(package, upgrade_callback, configs): - """Perform package upgrade within the current OpenStack release. - - Upgrades packages only if there is not an openstack upgrade available, - and sets the corresponding action status as a result. 
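# A condensed outline of the gate implemented by do_action_openstack_upgrade()
# above; 'nova-common' is a placeholder package name and upgrade_callback is
# the charm-provided routine. Error handling is omitted for brevity.
def sketch_action_upgrade(configs, upgrade_callback):
    if not openstack_upgrade_available('nova-common'):
        action_set({'outcome': 'no upgrade available'})
        return False
    if not config('action-managed-upgrade'):
        action_set({'outcome': 'action-managed-upgrade config is False, '
                               'skipped upgrade'})
        return False
    upgrade_callback(configs=configs)
    action_set({'outcome': 'success, upgrade completed'})
    return True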
-
-    @param package: package name for determining if openstack upgrade available
-    @param upgrade_callback: function callback to charm's upgrade function
-    @param configs: templating object derived from OSConfigRenderer class
-
-    @return: True if upgrade successful; False if upgrade failed or skipped
-    """
-    ret = False
-
-    if not openstack_upgrade_available(package):
-        juju_log('Upgrading packages')
-
-        try:
-            upgrade_callback(configs=configs)
-            action_set({'outcome': 'success, upgrade completed'})
-            ret = True
-        except Exception:
-            action_set({'outcome': 'upgrade failed, see traceback'})
-            action_set({'traceback': traceback.format_exc()})
-            action_fail('upgrade callback resulted in an '
-                        'unexpected error')
-    else:
-        action_set({'outcome': 'upgrade skipped because an openstack upgrade '
-                               'is available'})
-
-    return ret
-
-
-def remote_restart(rel_name, remote_service=None):
-    trigger = {
-        'restart-trigger': str(uuid.uuid4()),
-    }
-    if remote_service:
-        trigger['remote-service'] = remote_service
-    for rid in relation_ids(rel_name):
-        # This subordinate can be related to two separate services using
-        # different subordinate relations, so only issue the restart if
-        # the principal is connected down the relation we think it is.
-        if related_units(relid=rid):
-            relation_set(relation_id=rid,
-                         relation_settings=trigger,
-                         )
-
-
-def check_actually_paused(services=None, ports=None):
-    """Check that services listed in the services object and ports
-    are actually closed (not listened to), to verify that the unit is
-    properly paused.
-
-    @param services: See _extract_services_list_helper
-    @returns (status, message): status is None if okay, otherwise 'blocked';
-             message is a string describing the problem for status_set
-    """
-    state = None
-    message = None
-    messages = []
-    if services is not None:
-        services = _extract_services_list_helper(services)
-        services_running, services_states = _check_running_services(services)
-        if any(services_states):
-            # there shouldn't be any running so this is a problem
-            messages.append("these services running: {}"
-                            .format(", ".join(
-                                _filter_tuples(services_running, True))))
-            state = "blocked"
-        ports_open, ports_open_bools = (
-            _check_listening_on_services_ports(services, True))
-        if any(ports_open_bools):
-            message_parts = {service: ", ".join([str(v) for v in open_ports])
-                             for service, open_ports in ports_open.items()}
-            message = ", ".join(
-                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
-            messages.append(
-                "these service:ports are open: {}".format(message))
-            state = 'blocked'
-    if ports is not None:
-        ports_open, bools = _check_listening_on_ports_list(ports)
-        if any(bools):
-            messages.append(
-                "these ports which should be closed, but are open: {}"
-                .format(", ".join([str(p) for p, v in ports_open if v])))
-            state = 'blocked'
-    if messages:
-        message = ("Services should be paused but {}"
-                   .format(", ".join(messages)))
-    return state, message
-
-
-def set_unit_paused():
-    """Set the unit to a paused state in the local kv() store.
-    This does NOT actually pause the unit.
-    """
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        kv.set('unit-paused', True)
-
-
-def clear_unit_paused():
-    """Clear the unit from a paused state in the local kv() store.
-    This does NOT actually restart any services - it only clears the
-    local state.
-    """
-    with unitdata.HookData()() as t:
-        kv = t[0]
-        kv.set('unit-paused', False)
-
-
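A minimal sketch of how the pause flag above round-trips through the local kv() store (illustrative only, not part of the deleted file; it assumes a context where unitdata.HookData() is usable, e.g. a unit test with hookenv stubbed out):

from charmhelpers.contrib.openstack.utils import (
    set_unit_paused,
    clear_unit_paused,
    is_unit_paused_set,
)

set_unit_paused()                     # records unit-paused=True locally
assert is_unit_paused_set() is True   # does NOT prove services are stopped
clear_unit_paused()                   # clears the flag; starts nothing
assert is_unit_paused_set() is False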
-def is_unit_paused_set():
-    """Return the state of the kv().get('unit-paused').
-    This does NOT verify that the unit really is paused.
-
-    To help with units that don't have HookData() (testing),
-    if it raises an exception, return False.
-    """
-    try:
-        with unitdata.HookData()() as t:
-            kv = t[0]
-            # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-paused')))
-    except Exception:
-        return False
-
-
-def is_hook_allowed(hookname, check_deferred_restarts=True):
-    """Check if hook can run.
-
-    :param hookname: Name of hook to check.
-    :type hookname: str
-    :param check_deferred_restarts: Whether to check deferred restarts.
-    :type check_deferred_restarts: bool
-    """
-    permitted = True
-    reasons = []
-    if is_unit_paused_set():
-        reasons.append(
-            "Unit is paused or upgrading. Skipping {}".format(hookname))
-        permitted = False
-
-    if check_deferred_restarts:
-        if deferred_events.is_restart_permitted():
-            permitted = True
-            deferred_events.clear_deferred_hook(hookname)
-        else:
-            if not config().changed('enable-auto-restarts'):
-                deferred_events.set_deferred_hook(hookname)
-            reasons.append("auto restarts are disabled")
-            permitted = False
-    return permitted, " and ".join(reasons)
-
-
-def manage_payload_services(action, services=None, charm_func=None):
-    """Run an action against all services.
-
-    An optional charm_func() can be called. It should raise an Exception to
-    indicate that the function failed. If it was successful it should return
-    None or an optional message.
-
-    The signature for charm_func is:
-    charm_func() -> message: str
-
-    charm_func() is executed after any services are stopped, if supplied.
-
-    The services object can either be:
-      - None : no services were passed (an empty dict is returned)
-      - a list of strings
-      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
-      - An array of [{'service': service_name, ...}, ...]
-
-    :param action: Action to run: pause, resume, start or stop.
-    :type action: str
-    :param services: See above
-    :type services: See above
-    :param charm_func: function to run for custom charm pausing.
-    :type charm_func: f()
-    :returns: Status boolean and list of messages
-    :rtype: (bool, [])
-    :raises: RuntimeError
-    """
-    actions = {
-        'pause': service_pause,
-        'resume': service_resume,
-        'start': service_start,
-        'stop': service_stop}
-    action = action.lower()
-    if action not in actions.keys():
-        raise RuntimeError(
-            "action: {} must be one of: {}".format(action,
-                                                   ', '.join(actions.keys())))
-    services = _extract_services_list_helper(services)
-    messages = []
-    success = True
-    if services:
-        for service in services.keys():
-            rc = actions[action](service)
-            if not rc:
-                success = False
-                messages.append("{} didn't {} cleanly.".format(service,
-                                                               action))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            success = False
-            messages.append(str(e))
-    return success, messages
-
-
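A short usage sketch for manage_payload_services() (illustrative only; the service names are hypothetical):

from charmhelpers.contrib.openstack.utils import manage_payload_services

ok, messages = manage_payload_services('stop',
                                       services=['apache2', 'haproxy'])
if not ok:
    # messages holds entries such as "apache2 didn't stop cleanly."
    raise RuntimeError('; '.join(messages))
ok, messages = manage_payload_services('start',
                                       services=['apache2', 'haproxy'])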
- - """ - @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1) - def retry_port_check(): - _, ports_states = _check_listening_on_ports_list(ports) - juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG") - return any(ports_states) - return retry_port_check - - -def pause_unit(assess_status_func, services=None, ports=None, - charm_func=None): - """Pause a unit by stopping the services and setting 'unit-paused' - in the local kv() store. - - Also checks that the services have stopped and ports are no longer - being listened to. - - An optional charm_func() can be called that can either raise an - Exception or return non None, None to indicate that the unit - didn't pause cleanly. - - The signature for charm_func is: - charm_func() -> message: string - - charm_func() is executed after any services are stopped, if supplied. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param assess_status_func: (f() -> message: string | None) or None - @param services: OPTIONAL see above - @param ports: OPTIONAL list of port - @param charm_func: function to run for custom charm pausing. - @returns None - @raises Exception(message) on an error for action_fail(). - """ - _, messages = manage_payload_services( - 'pause', - services=services, - charm_func=charm_func) - set_unit_paused() - - if assess_status_func: - message = assess_status_func() - if message: - messages.append(message) - if messages and not is_unit_upgrading_set(): - raise Exception("Couldn't pause: {}".format("; ".join(messages))) - - -def resume_unit(assess_status_func, services=None, ports=None, - charm_func=None): - """Resume a unit by starting the services and clearning 'unit-paused' - in the local kv() store. - - Also checks that the services have started and ports are being listened to. - - An optional charm_func() can be called that can either raise an - Exception or return non None to indicate that the unit - didn't resume cleanly. - - The signature for charm_func is: - charm_func() -> message: string - - charm_func() is executed after any services are started, if supplied. - - The services object can either be: - - None : no services were passed (an empty dict is returned) - - a list of strings - - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} - - An array of [{'service': service_name, ...}, ...] - - @param assess_status_func: (f() -> message: string | None) or None - @param services: OPTIONAL see above - @param ports: OPTIONAL list of port - @param charm_func: function to run for custom charm resuming. - @returns None - @raises Exception(message) on an error for action_fail(). - """ - _, messages = manage_payload_services( - 'resume', - services=services, - charm_func=charm_func) - clear_unit_paused() - if assess_status_func: - message = assess_status_func() - if message: - messages.append(message) - if messages: - raise Exception("Couldn't resume: {}".format("; ".join(messages))) - - -def restart_services_action(services=None, when_all_stopped_func=None, - deferred_only=None): - """Manage a service restart request via charm action. - - :param services: Services to be restarted - :type model_name: List[str] - :param when_all_stopped_func: Function to call when all services are - stopped. 
-def restart_services_action(services=None, when_all_stopped_func=None,
-                            deferred_only=None):
-    """Manage a service restart request via charm action.
-
-    :param services: Services to be restarted
-    :type services: List[str]
-    :param when_all_stopped_func: Function to call when all services are
-                                  stopped.
-    :type when_all_stopped_func: Callable[[], None]
-    :param deferred_only: Only restart services which have a deferred restart
-                          event.
-    :type deferred_only: bool
-    """
-    if services and deferred_only:
-        raise ValueError(
-            "services and deferred_only are mutually exclusive")
-    if deferred_only:
-        services = list(set(
-            [a.service for a in deferred_events.get_deferred_restarts()]))
-    _, messages = manage_payload_services(
-        'stop',
-        services=services,
-        charm_func=when_all_stopped_func)
-    if messages:
-        raise ServiceActionError(
-            "Error processing service stop request: {}".format(
-                "; ".join(messages)))
-    _, messages = manage_payload_services(
-        'start',
-        services=services)
-    if messages:
-        raise ServiceActionError(
-            "Error processing service start request: {}".format(
-                "; ".join(messages)))
-    deferred_events.clear_deferred_restarts(services)
-
-
-def make_assess_status_func(*args, **kwargs):
-    """Creates an assess_status_func() suitable for handing to pause_unit()
-    and resume_unit().
-
-    This uses the _determine_os_workload_status(...) function to determine
-    what the workload_status should be for the unit. If the unit is
-    not in maintenance or active states, then the message is returned to
-    the caller. This is so an action that doesn't result in either a
-    complete pause or complete resume can signal failure with an
-    action_fail().
-    """
-    def _assess_status_func():
-        state, message = _determine_os_workload_status(*args, **kwargs)
-        status_set(state, message)
-        if state not in ['maintenance', 'active']:
-            return message
-        return None
-
-    return _assess_status_func
-
-
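A sketch of combining make_assess_status_func() with pause_unit(); its arguments are passed straight through to _determine_os_workload_status(), so configs, REQUIRED_INTERFACES and SERVICES below are hypothetical stand-ins for a real charm's values:

from charmhelpers.contrib.openstack.utils import (
    make_assess_status_func,
    pause_unit,
)

configs = None  # stand-in: a real charm passes its OSConfigRenderer
REQUIRED_INTERFACES = {'database': ['shared-db']}  # hypothetical
SERVICES = ['keystone']                            # hypothetical

assess_status_func = make_assess_status_func(
    configs, REQUIRED_INTERFACES, services=SERVICES, ports=None)
pause_unit(assess_status_func, services=SERVICES)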
-def pausable_restart_on_change(restart_map, stopstart=False,
-                               restart_functions=None,
-                               can_restart_now_f=None,
-                               post_svc_restart_f=None,
-                               pre_restarts_wait_f=None):
-    """A restart_on_change decorator that checks to see if the unit is
-    paused. If it is paused then the decorated function doesn't fire.
-
-    This is provided as a helper, as the @restart_on_change(...) decorator
-    is in core.host, yet the openstack specific helpers are in this file
-    (contrib.openstack.utils). Thus, this needs to be an optional feature
-    for openstack charms (or charms that wish to use the openstack
-    pause/resume type features).
-
-    It is used as follows:
-
-        from contrib.openstack.utils import (
-            pausable_restart_on_change as restart_on_change)
-
-        @restart_on_change(restart_map, stopstart=<boolean>)
-        def some_hook(...):
-            pass
-
-    see core.host.restart_on_change() for more details.
-
-    Note that restart_map can be a callable, in which case restart_map is only
-    evaluated at runtime. This means that it is lazy and the underlying
-    function won't be called if the decorated function is never called. Note
-    that it retains backwards compatibility for passing a non-callable
-    dictionary.
-
-    :param restart_map: Optionally callable, which then returns the restart
-                        map, or the restart map {conf_file: [services]} itself
-    :type restart_map: Union[Callable[[], Dict[str, List[str]]],
-                             Dict[str, List[str]]]
-    :param stopstart: whether to stop, start or restart a service
-    :type stopstart: boolean
-    :param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    :type restart_functions: Dict[str, Callable[[str], None]]
-    :param can_restart_now_f: A function used to check if the restart is
-                              permitted.
-    :type can_restart_now_f: Callable[[str, List[str]], boolean]
-    :param post_svc_restart_f: A function run after a service has
-                               restarted.
-    :type post_svc_restart_f: Callable[[str], None]
-    :param pre_restarts_wait_f: A function called before any restarts.
-    :type pre_restarts_wait_f: Callable[[], None]
-    :returns: decorator to use a restart_on_change with pausability
-    :rtype: decorator
-    """
-    def wrap(f):
-        __restart_map_cache = None
-
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            nonlocal __restart_map_cache
-            if is_unit_paused_set():
-                return f(*args, **kwargs)
-            if __restart_map_cache is None:
-                __restart_map_cache = restart_map() \
-                    if callable(restart_map) else restart_map
-            # otherwise, normal restart_on_change functionality
-            return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)),
-                __restart_map_cache,
-                stopstart,
-                restart_functions,
-                can_restart_now_f,
-                post_svc_restart_f,
-                pre_restarts_wait_f)
-        return wrapped_f
-    return wrap
-
-
-def ordered(orderme):
-    """Converts the provided dictionary into a collections.OrderedDict.
-
-    The items in the returned OrderedDict will be inserted based on the
-    natural sort order of the keys. Nested dictionaries will also be sorted
-    in order to ensure fully predictable ordering.
-
-    :param orderme: the dict to order
-    :return: collections.OrderedDict
-    :raises: ValueError: if `orderme` isn't a dict instance.
-    """
-    if not isinstance(orderme, dict):
-        raise ValueError('argument must be a dict type')
-
-    result = OrderedDict()
-    for k, v in sorted(orderme.items(), key=lambda x: x[0]):
-        if isinstance(v, dict):
-            result[k] = ordered(v)
-        else:
-            result[k] = v
-
-    return result
-
-
-def config_flags_parser(config_flags):
-    """Parses config flags string into dict.
-
-    This parsing method supports a few different formats for the config
-    flag values to be parsed:
-
-    1. A string in the simple format of key=value pairs, with the possibility
-       of specifying multiple key value pairs within the same string. For
-       example, a string in the format of 'key1=value1, key2=value2' will
-       return a dict of:
-
-           {'key1': 'value1', 'key2': 'value2'}.
-
-    2. A string in the above format, but supporting a comma-delimited list
-       of values for the same key. For example, a string in the format of
-       'key1=value1, key2=value2,value3,value4' will return a dict of:
-
-           {'key1': 'value1', 'key2': 'value2,value3,value4'}
-
-    3. A string containing a colon character (:) prior to an equals
-       character (=) will be treated as yaml and parsed as such. This can be
-       used to specify more complex key value pairs. For example,
-       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
-       return a dict of:
-
-           {'key1': 'subkey1=value1, subkey2=value2'}
-
-    The provided config_flags string may be a list of comma-separated values
-    which themselves may be comma-separated lists of values.
-    """
-    # If we find a colon before an equals sign then treat it as yaml.
-    # Note: limit it to finding the colon first, since this indicates
-    # assignment for inline yaml.
-    colon = config_flags.find(':')
-    equals = config_flags.find('=')
-    if colon > 0:
-        if colon < equals or equals < 0:
-            return ordered(yaml.safe_load(config_flags))
-
-    if config_flags.find('==') >= 0:
-        juju_log("config_flags is not in expected format (key=value)",
-                 level=ERROR)
-        raise OSContextError
-
-    # strip the following from each value.
-    post_strippers = ' ,'
-    # we strip any leading/trailing '=' or ' ' from the string then
-    # split on '='.
-    split = config_flags.strip(' =').split('=')
-    limit = len(split)
-    flags = OrderedDict()
-    for i in range(0, limit - 1):
-        current = split[i]
-        next = split[i + 1]
-        vindex = next.rfind(',')
-        if (i == limit - 2) or (vindex < 0):
-            value = next
-        else:
-            value = next[:vindex]
-
-        if i == 0:
-            key = current
-        else:
-            # if this is not the first entry, expect an embedded key.
-            index = current.rfind(',')
-            if index < 0:
-                juju_log("Invalid config value(s) at index %s" % (i),
-                         level=ERROR)
-                raise OSContextError
-            key = current[index + 1:]
-
-        # Add to collection.
-        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
-    return flags
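A worked example of the parsing rules above (results shown as comments):

from charmhelpers.contrib.openstack.utils import config_flags_parser

config_flags_parser('key1=value1, key2=value2')
# -> {'key1': 'value1', 'key2': 'value2'}

config_flags_parser('key1=value1, key2=value2,value3')
# -> {'key1': 'value1', 'key2': 'value2,value3'}

# A colon before the first '=' switches to YAML parsing:
config_flags_parser('key1: {subkey1: value1}')
# -> {'key1': {'subkey1': 'value1'}} (as nested OrderedDicts)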
-
-
-def os_application_version_set(package):
-    '''Set version of application for Juju 2.0 and later'''
-    application_version = get_upstream_version(package)
-    # NOTE(jamespage): if not able to figure out the package version,
-    # fall back to openstack codename version detection.
-    if not application_version:
-        application_version_set(os_release(package))
-    else:
-        application_version_set(application_version)
-
-
-def os_application_status_set(check_function):
-    """Run the supplied function and set the application status accordingly.
-
-    :param check_function: Function to run to get app states and messages.
-    :type check_function: function
-    """
-    state, message = check_function()
-    status_set(state, message, application=True)
-
-
-def enable_memcache(source=None, release=None, package=None):
-    """Determine if memcache should be enabled on the local unit
-
-    @param source: source string for the charm
-    @param release: release of OpenStack currently deployed
-    @param package: package to derive OpenStack version deployed
-    @returns boolean Whether memcache should be enabled
-    """
-    _release = None
-    if release:
-        _release = release
-    else:
-        _release = os_release(package)
-    if not _release:
-        _release = get_os_codename_install_source(source)
-
-    return CompareOpenStackReleases(_release) >= 'mitaka'
-
-
-def token_cache_pkgs(source=None, release=None):
-    """Determine additional packages needed for token caching
-
-    @param source: source string for the charm
-    @param release: release of OpenStack currently deployed
-    @returns List of packages needed to enable token caching
-    """
-    packages = []
-    if enable_memcache(source=source, release=release):
-        packages.extend(['memcached', 'python-memcache'])
-    return packages
-
-
-def update_json_file(filename, items):
-    """Updates the json `filename` with a given dict.
-
-    :param filename: path to json file (e.g. /etc/glance/policy.json)
-    :param items: dict of items to update
-    """
-    if not items:
-        return
-
-    with open(filename) as fd:
-        policy = json.load(fd)
-
-    # Compare before and after, and if nothing has changed don't write the
-    # file, since that could cause unnecessary service restarts.
-    before = json.dumps(policy, indent=4, sort_keys=True)
-    policy.update(items)
-    after = json.dumps(policy, indent=4, sort_keys=True)
-    if before == after:
-        return
-
-    with open(filename, "w") as fd:
-        fd.write(after)
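A small sketch of update_json_file() in use; the path and policy key are illustrative:

from charmhelpers.contrib.openstack.utils import update_json_file

# Merges the given keys into the file, rewriting it only when the
# resulting (sorted, indented) JSON actually changes.
update_json_file('/etc/glance/policy.json', {'get_image': 'role:admin'})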
- """ - origin = config('openstack-origin') or "" - if not origin.startswith('snap:'): - return False - - _src = origin[5:] - if '/' in _src: - channel = _src.split('/')[1] - else: - # Handle snap:track with no channel - channel = 'stable' - return valid_snap_channel(channel) - - -def get_snaps_install_info_from_origin(snaps, src, mode='classic'): - """Generate a dictionary of snap install information from origin - - @param snaps: List of snaps - @param src: String of openstack-origin or source of the form - snap:track/channel - @param mode: String classic, devmode or jailmode - @returns: Dictionary of snaps with channels and modes - """ - - if not src.startswith('snap:'): - juju_log("Snap source is not a snap origin", 'WARN') - return {} - - _src = src[5:] - channel = '--channel={}'.format(_src) - - return {snap: {'channel': channel, 'mode': mode} - for snap in snaps} - - -def install_os_snaps(snaps, refresh=False): - """Install OpenStack snaps from channel and with mode - - @param snaps: Dictionary of snaps with channels and modes of the form: - {'snap_name': {'channel': 'snap_channel', - 'mode': 'snap_mode'}} - Where channel is a snapstore channel and mode is --classic, --devmode - or --jailmode. - @param post_snap_install: Callback function to run after snaps have been - installed - """ - - def _ensure_flag(flag): - if flag.startswith('--'): - return flag - return '--{}'.format(flag) - - if refresh: - for snap in snaps.keys(): - snap_refresh(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) - else: - for snap in snaps.keys(): - snap_install(snap, - _ensure_flag(snaps[snap]['channel']), - _ensure_flag(snaps[snap]['mode'])) - - -def set_unit_upgrading(): - """Set the unit to a upgrading state in the local kv() store. - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-upgrading', True) - - -def clear_unit_upgrading(): - """Clear the unit from a upgrading state in the local kv() store - """ - with unitdata.HookData()() as t: - kv = t[0] - kv.set('unit-upgrading', False) - - -def is_unit_upgrading_set(): - """Return the state of the kv().get('unit-upgrading'). - - To help with units that don't have HookData() (testing) - if it excepts, return False - """ - try: - with unitdata.HookData()() as t: - kv = t[0] - # transform something truth-y into a Boolean. - return not(not(kv.get('unit-upgrading'))) - except Exception: - return False - - -def series_upgrade_prepare(pause_unit_helper=None, configs=None): - """ Run common series upgrade prepare tasks. - - :param pause_unit_helper: function: Function to pause unit - :param configs: OSConfigRenderer object: Configurations - :returns None: - """ - set_unit_upgrading() - if pause_unit_helper and configs: - if not is_unit_paused_set(): - pause_unit_helper(configs) - - -def series_upgrade_complete(resume_unit_helper=None, configs=None): - """ Run common series upgrade complete tasks. - - :param resume_unit_helper: function: Function to resume unit - :param configs: OSConfigRenderer object: Configurations - :returns None: - """ - clear_unit_paused() - clear_unit_upgrading() - if configs: - configs.write_all() - if resume_unit_helper: - resume_unit_helper(configs) - - -def is_db_initialised(): - """Check leader storage to see if database has been initialised. 
- - :returns: Whether DB has been initialised - :rtype: bool - """ - db_initialised = None - if leader_get('db-initialised') is None: - juju_log( - 'db-initialised key missing, assuming db is not initialised', - 'DEBUG') - db_initialised = False - else: - db_initialised = bool_from_string(leader_get('db-initialised')) - juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') - return db_initialised - - -def set_db_initialised(): - """Add flag to leader storage to indicate database has been initialised. - """ - juju_log('Setting db-initialised to True', 'DEBUG') - leader_set({'db-initialised': True}) - - -def is_db_maintenance_mode(relid=None): - """Check relation data from notifications of db in maintenance mode. - - :returns: Whether db has notified it is in maintenance mode. - :rtype: bool - """ - juju_log('Checking for maintenance notifications', 'DEBUG') - if relid: - r_ids = [relid] - else: - r_ids = relation_ids('shared-db') - rids_units = [(r, u) for r in r_ids for u in related_units(r)] - notifications = [] - for r_id, unit in rids_units: - settings = relation_get(unit=unit, rid=r_id) - for key, value in settings.items(): - if value and key in DB_MAINTENANCE_KEYS: - juju_log( - 'Unit: {}, Key: {}, Value: {}'.format(unit, key, value), - 'DEBUG') - try: - notifications.append(bool_from_string(value)) - except ValueError: - juju_log( - 'Could not discern bool from {}'.format(value), - 'WARN') - pass - return True in notifications - - -@cached -def container_scoped_relations(): - """Get all the container scoped relations - - :returns: List of relation names - :rtype: List - """ - md = metadata() - relations = [] - for relation_type in ('provides', 'requires', 'peers'): - for relation in md.get(relation_type, []): - if md[relation_type][relation].get('scope') == 'container': - relations.append(relation) - return relations - - -def container_scoped_relation_get(attribute=None): - """Get relation data from all container scoped relations. - - :param attribute: Name of attribute to get - :type attribute: Optional[str] - :returns: Iterator with relation data - :rtype: Iterator[Optional[any]] - """ - for endpoint_name in container_scoped_relations(): - for rid in relation_ids(endpoint_name): - for unit in related_units(rid): - yield relation_get( - attribute=attribute, - unit=unit, - rid=rid) - - -def is_db_ready(use_current_context=False, rel_name=None): - """Check remote database is ready to be used. - - Database relations are expected to provide a list of 'allowed' units to - confirm that the database is ready for use by those units. - - If db relation has provided this information and local unit is a member, - returns True otherwise False. - - :param use_current_context: Whether to limit checks to current hook - context. - :type use_current_context: bool - :param rel_name: Name of relation to check - :type rel_name: string - :returns: Whether remote db is ready. - :rtype: bool - :raises: Exception - """ - key = 'allowed_units' - - rel_name = rel_name or 'shared-db' - this_unit = local_unit() - - if use_current_context: - if relation_id() in relation_ids(rel_name): - rids_units = [(None, None)] - else: - raise Exception("use_current_context=True but not in {} " - "rel hook contexts (currently in {})." 
-                            .format(rel_name, relation_id()))
-    else:
-        rids_units = [(r_id, u)
-                      for r_id in relation_ids(rel_name)
-                      for u in related_units(r_id)]
-
-    for rid, unit in rids_units:
-        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
-        if allowed_units and this_unit in allowed_units.split():
-            juju_log("This unit ({}) is in allowed unit list from {}".format(
-                this_unit,
-                unit), 'DEBUG')
-            return True
-
-    juju_log("This unit was not found in any allowed unit list")
-    return False
-
-
-def is_expected_scale(peer_relation_name='cluster'):
-    """Query juju goal-state to determine whether our peer- and dependency-
-    relations are at the expected scale.
-
-    Useful for deferring per unit per relation housekeeping work until we are
-    ready to complete it successfully and without unnecessary repetition.
-
-    Always returns True if the version of juju used does not support
-    goal-state.
-
-    :param peer_relation_name: Name of peer relation
-    :type peer_relation_name: string
-    :returns: True or False
-    :rtype: bool
-    """
-    def _get_relation_id(rel_type):
-        return next((rid for rid in relation_ids(reltype=rel_type)), None)
-
-    Relation = namedtuple('Relation', 'rel_type rel_id')
-    peer_rid = _get_relation_id(peer_relation_name)
-    # Units with no peers should still have a peer relation.
-    if not peer_rid:
-        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
-        return False
-    expected_relations = [
-        Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
-    if expect_ha():
-        expected_relations.append(
-            Relation(
-                rel_type='ha',
-                rel_id=_get_relation_id('ha')))
-    juju_log(
-        'Checking scale of {} relations'.format(
-            ','.join([r.rel_type for r in expected_relations])),
-        'DEBUG')
-    try:
-        if (len(related_units(relid=peer_rid)) <
-                len(list(expected_peer_units()))):
-            return False
-        for rel in expected_relations:
-            if not rel.rel_id:
-                juju_log(
-                    'Expected to find {} relation, but it is missing'.format(
-                        rel.rel_type),
-                    'DEBUG')
-                return False
-            # Goal state returns every unit even for container scoped
-            # relations but the charm only ever has a relation with
-            # the local unit.
-            if rel.rel_type in container_scoped_relations():
-                expected_count = 1
-            else:
-                expected_count = len(
-                    list(expected_related_units(reltype=rel.rel_type)))
-            if len(related_units(relid=rel.rel_id)) < expected_count:
-                juju_log(
-                    ('Not at expected scale, not enough units on {} '
-                     'relation'.format(rel.rel_type)),
-                    'DEBUG')
-                return False
-    except NotImplementedError:
-        return True
-    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
-    return True
-
-
-def get_peer_key(unit_name):
-    """Get the peer key for this unit.
-
-    The peer key is the key a unit uses to publish its status down the peer
-    relation.
-
-    :param unit_name: Name of unit
-    :type unit_name: string
-    :returns: Peer key for given unit
-    :rtype: string
-    """
-    return 'unit-state-{}'.format(unit_name.replace('/', '-'))
-
-
-UNIT_READY = 'READY'
-UNIT_NOTREADY = 'NOTREADY'
-UNIT_UNKNOWN = 'UNKNOWN'
-UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
-
-
-def inform_peers_unit_state(state, relation_name='cluster'):
-    """Inform peers of the state of this unit.
- - :param state: State of unit to publish - :type state: string - :param relation_name: Name of relation to publish state on - :type relation_name: string - """ - if state not in UNIT_STATES: - raise ValueError( - "Setting invalid state {} for unit".format(state)) - this_unit = local_unit() - for r_id in relation_ids(relation_name): - juju_log('Telling peer behind relation {} that {} is {}'.format( - r_id, this_unit, state), 'DEBUG') - relation_set(relation_id=r_id, - relation_settings={ - get_peer_key(this_unit): state}) - - -def get_peers_unit_state(relation_name='cluster'): - """Get the state of all peers. - - :param relation_name: Name of relation to check peers on. - :type relation_name: string - :returns: Unit states keyed on unit name. - :rtype: dict - :raises: ValueError - """ - r_ids = relation_ids(relation_name) - rids_units = [(r, u) for r in r_ids for u in related_units(r)] - unit_states = {} - for r_id, unit in rids_units: - settings = relation_get(unit=unit, rid=r_id) - unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) - if unit_states[unit] not in UNIT_STATES: - raise ValueError( - "Unit in unknown state {}".format(unit_states[unit])) - return unit_states - - -def are_peers_ready(relation_name='cluster'): - """Check if all peers are ready. - - :param relation_name: Name of relation to check peers on. - :type relation_name: string - :returns: Whether all units are ready. - :rtype: bool - """ - unit_states = get_peers_unit_state(relation_name).values() - juju_log('{} peers are in the following states: {}'.format( - relation_name, unit_states), 'DEBUG') - return all(state == UNIT_READY for state in unit_states) - - -def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): - """Inform peers if this unit is ready. - - The check function should return a tuple (state, message). A state - of 'READY' indicates the unit is READY. - - :param check_unit_ready_func: Function to run to check readiness - :type check_unit_ready_func: function - :param relation_name: Name of relation to check peers on. - :type relation_name: string - """ - unit_ready, msg = check_unit_ready_func() - if unit_ready: - state = UNIT_READY - else: - state = UNIT_NOTREADY - juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') - inform_peers_unit_state(state, relation_name) - - -def check_api_unit_ready(check_db_ready=True): - """Check if this unit is ready. - - :param check_db_ready: Include checks of database readiness. - :type check_db_ready: bool - :returns: Whether unit state is ready and status message - :rtype: (bool, str) - """ - unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) - return unit_state == WORKLOAD_STATES.ACTIVE, msg - - -def get_api_unit_status(check_db_ready=True): - """Return a workload status and message for this unit. - - :param check_db_ready: Include checks of database readiness. - :type check_db_ready: bool - :returns: Workload state and message - :rtype: (bool, str) - """ - unit_state = WORKLOAD_STATES.ACTIVE - msg = 'Unit is ready' - if is_db_maintenance_mode(): - unit_state = WORKLOAD_STATES.MAINTENANCE - msg = 'Database in maintenance mode.' - elif is_unit_paused_set(): - unit_state = WORKLOAD_STATES.BLOCKED - msg = 'Unit paused.' 
- elif check_db_ready and not is_db_ready(): - unit_state = WORKLOAD_STATES.WAITING - msg = 'Allowed_units list provided but this unit not present' - elif not is_db_initialised(): - unit_state = WORKLOAD_STATES.WAITING - msg = 'Database not initialised' - elif not is_expected_scale(): - unit_state = WORKLOAD_STATES.WAITING - msg = 'Charm and its dependencies not yet at expected scale' - juju_log(msg, 'DEBUG') - return unit_state, msg - - -def check_api_application_ready(): - """Check if this application is ready. - - :returns: Whether application state is ready and status message - :rtype: (bool, str) - """ - app_state, msg = get_api_application_status() - return app_state == WORKLOAD_STATES.ACTIVE, msg - - -def get_api_application_status(): - """Return a workload status and message for this application. - - :returns: Workload state and message - :rtype: (bool, str) - """ - app_state, msg = get_api_unit_status() - if app_state == WORKLOAD_STATES.ACTIVE: - if are_peers_ready(): - msg = 'Application Ready' - else: - app_state = WORKLOAD_STATES.WAITING - msg = 'Some units are not ready' - juju_log(msg, 'DEBUG') - return app_state, msg - - -def sequence_status_check_functions(*functions): - """Sequence the functions passed so that they all get a chance to run as - the charm status check functions. - - :param *functions: a list of functions that return (state, message) - :type *functions: List[Callable[[OSConfigRender], (str, str)]] - :returns: the Callable that takes configs and returns (state, message) - :rtype: Callable[[OSConfigRender], (str, str)] - """ - def _inner_sequenced_functions(configs): - state, message = 'unknown', '' - for f in functions: - new_state, new_message = f(configs) - state = workload_state_compare(state, new_state) - if message: - message = "{}, {}".format(message, new_message) - else: - message = new_message - return state, message - - return _inner_sequenced_functions - - -SubordinatePackages = namedtuple('SubordinatePackages', ['install', 'purge']) - - -def get_subordinate_release_packages(os_release, package_type='deb'): - """Iterate over subordinate relations and get package information. - - :param os_release: OpenStack release to look for - :type os_release: str - :param package_type: Package type (one of 'deb' or 'snap') - :type package_type: str - :returns: Packages to install and packages to purge or None - :rtype: SubordinatePackages[set,set] - """ - install = set() - purge = set() - - for rdata in container_scoped_relation_get('releases-packages-map'): - rp_map = json.loads(rdata or '{}') - # The map provided by subordinate has OpenStack release name as key. - # Find package information from subordinate matching requested release - # or the most recent release prior to requested release by sorting the - # keys in reverse order. This follows established patterns in our - # charms for templates and reactive charm implementations, i.e. as long - # as nothing has changed the definitions for the prior OpenStack - # release is still valid. - for release in sorted(rp_map.keys(), reverse=True): - if (CompareOpenStackReleases(release) <= os_release and - package_type in rp_map[release]): - for name, container in ( - ('install', install), - ('purge', purge)): - for pkg in rp_map[release][package_type].get(name, []): - container.add(pkg) - break - return SubordinatePackages(install, purge) - - -def get_subordinate_services(): - """Iterate over subordinate relations and get service information. 
-
-    In a similar fashion as with get_subordinate_release_packages(),
-    principal charms can retrieve a list of services advertised by their
-    subordinate charms. This is useful to know about subordinate services when
-    pausing, resuming or upgrading a principal unit.
-
-    :returns: Name of all services advertised by all subordinates
-    :rtype: Set[str]
-    """
-    services = set()
-    for rdata in container_scoped_relation_get('services'):
-        services |= set(json.loads(rdata or '[]'))
-    return services
-
-
-os_restart_on_change = partial(
-    pausable_restart_on_change,
-    can_restart_now_f=deferred_events.check_and_record_restart_request,
-    post_svc_restart_f=deferred_events.process_svc_restart)
-
-
-def restart_services_action_helper(all_services):
-    """Helper to run the restart-services action.
-
-    NOTE: all_services is the full set of services that could be restarted,
-    but depending on the action arguments it may be a subset of these that
-    are actually restarted.
-
-    :param all_services: All services that could be restarted
-    :type all_services: List[str]
-    """
-    deferred_only = action_get("deferred-only")
-    services = action_get("services")
-    if services:
-        services = services.split()
-    else:
-        services = all_services
-    if deferred_only:
-        restart_services_action(deferred_only=True)
-    else:
-        restart_services_action(services=services)
-
-
-def show_deferred_events_action_helper():
-    """Helper to run the show-deferred-restarts action."""
-    restarts = []
-    for event in deferred_events.get_deferred_events():
-        restarts.append('{} {} {}'.format(
-            str(event.timestamp),
-            event.service.ljust(40),
-            event.reason))
-    restarts.sort()
-    output = {
-        'restarts': restarts,
-        'hooks': deferred_events.get_deferred_hooks()}
-    action_set({'output': "{}".format(
-        yaml.dump(output, default_flow_style=False))})
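An illustrative sketch (with a hypothetical service list) of how the two action helpers above are typically exposed from a charm's actions entry point:

from charmhelpers.contrib.openstack.utils import (
    restart_services_action_helper,
    show_deferred_events_action_helper,
)

ACTIONS = {
    # restarts all, a subset, or only deferred services depending on the
    # 'services' and 'deferred-only' action parameters
    'restart-services': lambda: restart_services_action_helper(
        ['neutron-api', 'apache2']),
    'show-deferred-events': show_deferred_events_action_helper,
}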
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
deleted file mode 100644
index e5418c39..00000000
--- a/hooks/charmhelpers/contrib/openstack/vaultlocker.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2018-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import os
-
-import charmhelpers.contrib.openstack.alternatives as alternatives
-import charmhelpers.contrib.openstack.context as context
-
-import charmhelpers.core.hookenv as hookenv
-import charmhelpers.core.host as host
-import charmhelpers.core.templating as templating
-import charmhelpers.core.unitdata as unitdata
-
-VAULTLOCKER_BACKEND = 'charm-vaultlocker'
-
-
-class VaultKVContext(context.OSContextGenerator):
-    """Vault KV context for interaction with vault-kv interfaces"""
-    interfaces = ['secrets-storage']
-
-    def __init__(self, secret_backend=None):
-        super(context.OSContextGenerator, self).__init__()
-        self.secret_backend = (
-            secret_backend or 'charm-{}'.format(hookenv.service_name())
-        )
-
-    def __call__(self):
-        try:
-            import hvac
-        except ImportError:
-            # BUG: #1862085 - if the relation is made to vault, but the
-            # 'encrypt' option is not set, then the charm errors with an
-            # import warning. This catches that, logs a warning, and
-            # returns with an empty context.
-            hookenv.log("VaultKVContext: trying to use the hvac python "
-                        "module but it's not available. Is the "
-                        "secrets-storage relation made, but the encrypt "
-                        "option not set?",
-                        level=hookenv.WARNING)
-            # return an empty context on hvac import error
-            return {}
-        ctxt = {}
-        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
-        db = unitdata.kv()
-        # currently known-good secret-id
-        secret_id = db.get('secret-id')
-
-        for relation_id in hookenv.relation_ids(self.interfaces[0]):
-            for unit in hookenv.related_units(relation_id):
-                data = hookenv.relation_get(unit=unit,
-                                            rid=relation_id)
-                vault_url = data.get('vault_url')
-                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
-                token = data.get('{}_token'.format(hookenv.local_unit()))
-
-                if all([vault_url, role_id, token]):
-                    token = json.loads(token)
-                    vault_url = json.loads(vault_url)
-
-                    # Tokens may change when secret_id's are being
-                    # reissued - if so use token to get new secret_id
-                    token_success = False
-                    try:
-                        secret_id = retrieve_secret_id(
-                            url=vault_url,
-                            token=token
-                        )
-                        token_success = True
-                    except hvac.exceptions.InvalidRequest:
-                        # Try next
-                        pass
-
-                    if token_success:
-                        db.set('secret-id', secret_id)
-                        db.flush()
-
-                        ctxt['vault_url'] = vault_url
-                        ctxt['role_id'] = json.loads(role_id)
-                        ctxt['secret_id'] = secret_id
-                        ctxt['secret_backend'] = self.secret_backend
-                        vault_ca = data.get('vault_ca')
-                        if vault_ca:
-                            ctxt['vault_ca'] = json.loads(vault_ca)
-
-                        self.complete = True
-                        break
-                    else:
-                        if secret_id:
-                            ctxt['vault_url'] = vault_url
-                            ctxt['role_id'] = json.loads(role_id)
-                            ctxt['secret_id'] = secret_id
-                            ctxt['secret_backend'] = self.secret_backend
-                            vault_ca = data.get('vault_ca')
-                            if vault_ca:
-                                ctxt['vault_ca'] = json.loads(vault_ca)
-
-            if self.complete:
-                break
-
-        if ctxt:
-            self.complete = True
-
-        return ctxt
-
-
-def write_vaultlocker_conf(context, priority=100):
-    """Write vaultlocker configuration to disk and install alternative
-
-    :param context: Dict of data from vault-kv relation
-    :ptype context: dict
-    :param priority: Priority of alternative configuration
-    :ptype priority: int"""
-    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
-        hookenv.service_name()
-    )
-    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
-    templating.render(source='vaultlocker.conf.j2',
-                      target=charm_vl_path,
-                      context=context, perms=0o600)
-    alternatives.install_alternative('vaultlocker.conf',
-                                     '/etc/vaultlocker/vaultlocker.conf',
-                                     charm_vl_path, priority)
-
-
-def
vault_relation_complete(backend=None): - """Determine whether vault relation is complete - - :param backend: Name of secrets backend requested - :ptype backend: string - :returns: whether the relation to vault is complete - :rtype: bool""" - try: - import hvac - except ImportError: - return False - try: - vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND) - vault_kv() - return vault_kv.complete - except hvac.exceptions.InvalidRequest: - return False - - -# TODO: contrib a high level unwrap method to hvac that works -def retrieve_secret_id(url, token): - """Retrieve a response-wrapped secret_id from Vault - - :param url: URL to Vault Server - :ptype url: str - :param token: One shot Token to use - :ptype token: str - :returns: secret_id to use for Vault Access - :rtype: str""" - import hvac - try: - # hvac 0.10.1 changed default adapter to JSONAdapter - client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request) - except AttributeError: - # hvac < 0.6.2 doesn't have adapter but uses the same response interface - client = hvac.Client(url=url, token=token) - else: - # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate - if not isinstance(client.adapter, hvac.adapters.Request): - client.adapter = hvac.adapters.Request(base_uri=url, token=token) - response = client._post('/v1/sys/wrapping/unwrap') - if response.status_code == 200: - data = response.json() - return data['data']['secret_id'] diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py deleted file mode 100644 index d7567b86..00000000 --- a/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/contrib/storage/linux/bcache.py b/hooks/charmhelpers/contrib/storage/linux/bcache.py deleted file mode 100644 index 605991e1..00000000 --- a/hooks/charmhelpers/contrib/storage/linux/bcache.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2017 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import json - -from charmhelpers.core.hookenv import log - -stats_intervals = ['stats_day', 'stats_five_minute', - 'stats_hour', 'stats_total'] - -SYSFS = '/sys' - - -class Bcache(object): - """Bcache behaviour - """ - - def __init__(self, cachepath): - self.cachepath = cachepath - - @classmethod - def fromdevice(cls, devname): - return cls('{}/block/{}/bcache'.format(SYSFS, devname)) - - def __str__(self): - return self.cachepath - - def get_stats(self, interval): - """Get cache stats - """ - intervaldir = 'stats_{}'.format(interval) - path = "{}/{}".format(self.cachepath, intervaldir) - out = dict() - for elem in os.listdir(path): - out[elem] = open('{}/{}'.format(path, elem)).read().strip() - return out - - -def get_bcache_fs(): - """Return all cache sets - """ - cachesetroot = "{}/fs/bcache".format(SYSFS) - try: - dirs = os.listdir(cachesetroot) - except OSError: - log("No bcache fs found") - return [] - cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) - return cacheset - - -def get_stats_action(cachespec, interval): - """Action for getting bcache statistics for a given cachespec. - Cachespec can either be a device name, eg. 'sdb', which will retrieve - cache stats for the given device, or 'global', which will retrieve stats - for all cachesets - """ - if cachespec == 'global': - caches = get_bcache_fs() - else: - caches = [Bcache.fromdevice(cachespec)] - res = dict((c.cachepath, c.get_stats(interval)) for c in caches) - return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py deleted file mode 100644 index 1b20b8fe..00000000 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ /dev/null @@ -1,2384 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
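A minimal usage sketch for the bcache helpers removed above; 'sdb' is a hypothetical caching device name:

from charmhelpers.contrib.storage.linux.bcache import get_stats_action

print(get_stats_action('sdb', 'hour'))    # stats for one device
print(get_stats_action('global', 'day'))  # stats for every cache set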
- -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import collections -import errno -import hashlib -import math - -import os -import shutil -import json -import time -import uuid - -from subprocess import ( - check_call, - check_output, - CalledProcessError, -) -from charmhelpers import deprecate -from charmhelpers.core.hookenv import ( - application_name, - config, - service_name, - local_unit, - relation_get, - relation_ids, - relation_set, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core.host import ( - mount, - mounts, - service_start, - service_stop, - service_running, - umount, - cmp_pkgrevno, -) -from charmhelpers.fetch import ( - apt_install, -) -from charmhelpers.core.unitdata import kv - -from charmhelpers.core.kernel import modprobe -from charmhelpers.contrib.openstack.utils import config_flags_parser - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] -auth supported = {auth} -keyring = {keyring} -mon host = {mon_hosts} -log to syslog = {use_syslog} -err to syslog = {use_syslog} -clog to syslog = {use_syslog} -""" - -# The number of placement groups per OSD to target for placement group -# calculations. This number is chosen as 100 due to the ceph PG Calc -# documentation recommending to choose 100 for clusters which are not -# expected to increase in the foreseeable future. Since the majority of the -# calculations are done on deployment, target the case of non-expanding -# clusters as the default. -DEFAULT_PGS_PER_OSD_TARGET = 100 -DEFAULT_POOL_WEIGHT = 10.0 -LEGACY_PG_COUNT = 200 -DEFAULT_MINIMUM_PGS = 2 -AUTOSCALER_DEFAULT_PGS = 32 - - -class OsdPostUpgradeError(Exception): - """Error class for OSD post-upgrade operations.""" - pass - - -class OSDSettingConflict(Exception): - """Error class for conflicting osd setting requests.""" - pass - - -class OSDSettingNotAllowed(Exception): - """Error class for a disallowed setting.""" - pass - - -OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) - -OSD_SETTING_WHITELIST = [ - 'osd heartbeat grace', - 'osd heartbeat interval', -] - - -def _order_dict_by_key(rdict): - """Convert a dictionary into an OrderedDict sorted by key. - - :param rdict: Dictionary to be ordered. - :type rdict: dict - :returns: Ordered Dictionary. - :rtype: collections.OrderedDict - """ - return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) - - -def get_osd_settings(relation_name): - """Consolidate requested osd settings from all clients. - - Consolidate requested osd settings from all clients. Check that the - requested setting is on the whitelist and it does not conflict with - any other requested settings. 
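For illustration, rendering the CEPH_CONF template defined above with sample values (the addresses are made up):

from charmhelpers.contrib.storage.linux.ceph import CEPH_CONF, KEYRING

print(CEPH_CONF.format(auth='cephx',
                       keyring=KEYRING.format('admin'),
                       mon_hosts='10.0.0.1 10.0.0.2 10.0.0.3',
                       use_syslog='false'))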
- - :returns: Dictionary of settings - :rtype: dict - - :raises: OSDSettingNotAllowed - :raises: OSDSettingConflict - """ - rel_ids = relation_ids(relation_name) - osd_settings = {} - for relid in rel_ids: - for unit in related_units(relid): - unit_settings = relation_get('osd-settings', unit, relid) or '{}' - unit_settings = json.loads(unit_settings) - for key, value in unit_settings.items(): - if key not in OSD_SETTING_WHITELIST: - msg = 'Illegal settings "{}"'.format(key) - raise OSDSettingNotAllowed(msg) - if key in osd_settings: - if osd_settings[key] != unit_settings[key]: - msg = 'Conflicting settings for "{}"'.format(key) - raise OSDSettingConflict(msg) - else: - osd_settings[key] = value - return _order_dict_by_key(osd_settings) - - -def send_application_name(relid=None): - """Send the application name down the relation. - - :param relid: Relation id to set application name in. - :type relid: str - """ - relation_set( - relation_id=relid, - relation_settings={'application-name': application_name()}) - - -def send_osd_settings(): - """Pass on requested OSD settings to osd units.""" - try: - settings = get_osd_settings('client') - except OSD_SETTING_EXCEPTIONS as e: - # There is a problem with the settings, not passing them on. Update - # status will notify the user. - log(e, level=ERROR) - return - data = { - 'osd-settings': json.dumps(settings, sort_keys=True)} - for relid in relation_ids('osd'): - relation_set(relation_id=relid, - relation_settings=data) - - -def validator(value, valid_type, valid_range=None): - """Helper function for type validation. - - Used to validate these: - https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values - https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression - - Example input: - validator(value=1, - valid_type=int, - valid_range=[0, 2]) - - This says I'm testing value=1. It must be an int inclusive in [0,2] - - :param value: The value to validate. - :type value: any - :param valid_type: The type that value should be. - :type valid_type: any - :param valid_range: A range of values that value can assume. - :type valid_range: Optional[Union[List,Tuple]] - :raises: AssertionError, ValueError - """ - assert isinstance(value, valid_type), ( - "{} is not a {}".format(value, valid_type)) - if valid_range is not None: - assert isinstance( - valid_range, list) or isinstance(valid_range, tuple), ( - "valid_range must be of type List or Tuple, " - "was given {} of type {}" - .format(valid_range, type(valid_range))) - # If we're dealing with strings - if isinstance(value, str): - assert value in valid_range, ( - "{} is not in the list {}".format(value, valid_range)) - # Integer, float should have a min and max - else: - if len(valid_range) != 2: - raise ValueError( - "Invalid valid_range list of {} for {}. " - "List must be [min,max]".format(valid_range, value)) - assert value >= valid_range[0], ( - "{} is less than minimum allowed value of {}" - .format(value, valid_range[0])) - assert value <= valid_range[1], ( - "{} is greater than maximum allowed value of {}" - .format(value, valid_range[1])) - - -class PoolCreationError(Exception): - """A custom exception to inform the caller that a pool creation failed. - - Provides an error message - """ - - def __init__(self, message): - super(PoolCreationError, self).__init__(message) - - -class BasePool(object): - """An object oriented approach to Ceph pool creation. - - This base class is inherited by ReplicatedPool and ErasurePool. 
Do not call - create() on this base class as it will raise an exception. - - Instantiate a child class and call create(). - """ - # Dictionary that maps pool operation properties to Tuples with valid type - # and valid range - op_validation_map = { - 'compression-algorithm': (str, ('lz4', 'snappy', 'zlib', 'zstd')), - 'compression-mode': (str, ('none', 'passive', 'aggressive', 'force')), - 'compression-required-ratio': (float, None), - 'compression-min-blob-size': (int, None), - 'compression-min-blob-size-hdd': (int, None), - 'compression-min-blob-size-ssd': (int, None), - 'compression-max-blob-size': (int, None), - 'compression-max-blob-size-hdd': (int, None), - 'compression-max-blob-size-ssd': (int, None), - 'rbd-mirroring-mode': (str, ('image', 'pool')) - } - - def __init__(self, service, name=None, percent_data=None, app_name=None, - op=None): - """Initialize BasePool object. - - Pool information is either initialized from individual keyword - arguments or from a individual CephBrokerRq operation Dict. - - :param service: The Ceph user name to run commands under. - :type service: str - :param name: Name of pool to operate on. - :type name: str - :param percent_data: The expected pool size in relation to all - available resources in the Ceph cluster. Will be - used to set the ``target_size_ratio`` pool - property. (default: 10.0) - :type percent_data: Optional[float] - :param app_name: Ceph application name, usually one of: - ('cephfs', 'rbd', 'rgw') (default: 'unknown') - :type app_name: Optional[str] - :param op: Broker request Op to compile pool data from. - :type op: Optional[Dict[str,any]] - :raises: KeyError - """ - # NOTE: Do not perform initialization steps that require live data from - # a running cluster here. The *Pool classes may be used for validation. - self.service = service - self.op = op or {} - - if op: - # When initializing from op the `name` attribute is required and we - # will fail with KeyError if it is not provided. - self.name = op['name'] - self.percent_data = op.get('weight') - self.app_name = op.get('app-name') - else: - self.name = name - self.percent_data = percent_data - self.app_name = app_name - - # Set defaults for these if they are not provided - self.percent_data = self.percent_data or 10.0 - self.app_name = self.app_name or 'unknown' - - def validate(self): - """Check that value of supplied operation parameters are valid. - - :raises: ValueError - """ - for op_key, op_value in self.op.items(): - if op_key in self.op_validation_map and op_value is not None: - valid_type, valid_range = self.op_validation_map[op_key] - try: - validator(op_value, valid_type, valid_range) - except (AssertionError, ValueError) as e: - # Normalize on ValueError, also add information about which - # variable we had an issue with. - raise ValueError("'{}': {}".format(op_key, str(e))) - - def _create(self): - """Perform the pool creation, method MUST be overridden by child class. - """ - raise NotImplementedError - - def _post_create(self): - """Perform common post pool creation tasks. - - Note that pool properties subject to change during the lifetime of a - pool / deployment should go into the ``update`` method. - - Do not add calls for a specific pool type here, those should go into - one of the pool specific classes. 
- """ - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - if nautilus_or_later: - # Ensure we set the expected pool ratio - update_pool( - client=self.service, - pool=self.name, - settings={ - 'target_size_ratio': str( - self.percent_data / 100.0), - }) - try: - set_app_name_for_pool(client=self.service, - pool=self.name, - name=self.app_name) - except CalledProcessError: - log('Could not set app name for pool {}' - .format(self.name), - level=WARNING) - if 'pg_autoscaler' in enabled_manager_modules(): - try: - enable_pg_autoscale(self.service, self.name) - except CalledProcessError as e: - log('Could not configure auto scaling for pool {}: {}' - .format(self.name, e), - level=WARNING) - - def create(self): - """Create pool and perform any post pool creation tasks. - - To allow for sharing of common code among pool specific classes the - processing has been broken out into the private methods ``_create`` - and ``_post_create``. - - Do not add any pool type specific handling here, that should go into - one of the pool specific classes. - """ - if not pool_exists(self.service, self.name): - self.validate() - self._create() - self._post_create() - self.update() - - def set_quota(self): - """Set a quota if requested. - - :raises: CalledProcessError - """ - max_bytes = self.op.get('max-bytes') - max_objects = self.op.get('max-objects') - if max_bytes or max_objects: - set_pool_quota(service=self.service, pool_name=self.name, - max_bytes=max_bytes, max_objects=max_objects) - - def set_compression(self): - """Set compression properties if requested. - - :raises: CalledProcessError - """ - compression_properties = { - key.replace('-', '_'): value - for key, value in self.op.items() - if key in ( - 'compression-algorithm', - 'compression-mode', - 'compression-required-ratio', - 'compression-min-blob-size', - 'compression-min-blob-size-hdd', - 'compression-min-blob-size-ssd', - 'compression-max-blob-size', - 'compression-max-blob-size-hdd', - 'compression-max-blob-size-ssd') and value} - if compression_properties: - update_pool(self.service, self.name, compression_properties) - - def update(self): - """Update properties for an already existing pool. - - Do not add calls for a specific pool type here, those should go into - one of the pool specific classes. - """ - self.validate() - self.set_quota() - self.set_compression() - - def add_cache_tier(self, cache_pool, mode): - """Adds a new cache tier to an existing pool. - - :param cache_pool: The cache tier pool name to add. - :type cache_pool: str - :param mode: The caching mode to use for this pool. - valid range = ["readonly", "writeback"] - :type mode: str - """ - # Check the input types and values - validator(value=cache_pool, valid_type=str) - validator( - value=mode, valid_type=str, - valid_range=["readonly", "writeback"]) - - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'add', self.name, cache_pool, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'cache-mode', cache_pool, mode, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'set-overlay', self.name, cache_pool, - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom', - ]) - - def remove_cache_tier(self, cache_pool): - """Removes a cache tier from Ceph. - - Flushes all dirty objects from writeback pools and waits for that to - complete. - - :param cache_pool: The cache tier pool name to remove. 
- :type cache_pool: str - """ - # read-only is easy, writeback is much harder - mode = get_cache_mode(self.service, cache_pool) - if mode == 'readonly': - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'cache-mode', cache_pool, 'none' - ]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove', self.name, cache_pool, - ]) - - elif mode == 'writeback': - pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', - 'cache-mode', cache_pool, 'forward'] - if cmp_pkgrevno('ceph-common', '10.1') >= 0: - # Jewel added a mandatory flag - pool_forward_cmd.append('--yes-i-really-mean-it') - - check_call(pool_forward_cmd) - # Flush the cache and wait for it to return - check_call([ - 'rados', '--id', self.service, - '-p', cache_pool, 'cache-flush-evict-all']) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove-overlay', self.name]) - check_call([ - 'ceph', '--id', self.service, - 'osd', 'tier', 'remove', self.name, cache_pool]) - - def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, - device_class=None): - """Return the number of placement groups to use when creating the pool. - - Returns the number of placement groups which should be specified when - creating the pool. This is based upon the calculation guidelines - provided by the Ceph Placement Group Calculator (located online at - http://ceph.com/pgcalc/). - - The number of placement groups are calculated using the following: - - (Target PGs per OSD) * (OSD #) * (%Data) - ---------------------------------------- - (Pool size) - - Per the upstream guidelines, the OSD # should really be considered - based on the number of OSDs which are eligible to be selected by the - pool. Since the pool creation doesn't specify any of CRUSH set rules, - the default rule will be dependent upon the type of pool being - created (replicated or erasure). - - This code makes no attempt to determine the number of OSDs which can be - selected for the specific rule, rather it is left to the user to tune - in the form of 'expected-osd-count' config option. - - :param pool_size: pool_size is either the number of replicas for - replicated pools or the K+M sum for erasure coded pools - :type pool_size: int - :param percent_data: the percentage of data that is expected to - be contained in the pool for the specific OSD set. Default value - is to assume 10% of the data is for this pool, which is a - relatively low % of the data but allows for the pg_num to be - increased. NOTE: the default is primarily to handle the scenario - where related charms requiring pools has not been upgraded to - include an update to indicate their relative usage of the pools. - :type percent_data: float - :param device_class: class of storage to use for basis of pgs - calculation; ceph supports nvme, ssd and hdd by default based - on presence of devices of each type in the deployment. - :type device_class: str - :returns: The number of pgs to use. - :rtype: int - """ - - # Note: This calculation follows the approach that is provided - # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. - validator(value=pool_size, valid_type=int) - - # Ensure that percent data is set to something - even with a default - # it can be set to None, which would wreak havoc below. 
- if percent_data is None:
- percent_data = DEFAULT_POOL_WEIGHT
-
- # If the expected-osd-count is specified, then use the max between
- # the expected-osd-count and the actual osd_count
- osd_list = get_osds(self.service, device_class)
- expected = config('expected-osd-count') or 0
-
- if osd_list:
- if device_class:
- osd_count = len(osd_list)
- else:
- osd_count = max(expected, len(osd_list))
-
- # Log a message to provide some insight if the calculations claim
- # to be off because someone is setting the expected count and
- # there are more OSDs in reality. Try to make a proper guess
- # based upon the cluster itself.
- if not device_class and expected and osd_count != expected:
- log("Found more OSDs than provided expected count. "
- "Using the actual count instead", INFO)
- elif expected:
- # Use the expected-osd-count in older ceph versions to allow for
- # a more accurate pg calculation
- osd_count = expected
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- return LEGACY_PG_COUNT
-
- percent_data /= 100.0
- target_pgs_per_osd = config(
- 'pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
- num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
-
- # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
- # reasonable data distribution in minimal OSD configurations
- if num_pg < DEFAULT_MINIMUM_PGS:
- num_pg = DEFAULT_MINIMUM_PGS
-
- # The CRUSH algorithm has a slight optimization for placement groups
- # with powers of 2 so find the nearest power of 2. If the nearest
- # power of 2 is more than 25% below the original value, the next
- # highest value is used. To do this, find the nearest power of 2 such
- # that 2^n <= num_pg, then check to see if it's within the 25% tolerance.
- exponent = math.floor(math.log(num_pg, 2))
- nearest = 2 ** exponent
- if (num_pg - nearest) > (num_pg * 0.25):
- # Choose the next highest power of 2 since the nearest is more
- # than 25% below the original value.
- return int(nearest * 2)
- else:
- return int(nearest)
-
-
-class Pool(BasePool):
- """Compatibility shim for any descendants external to this library."""
-
- @deprecate(
- 'The ``Pool`` baseclass has been replaced by ``BasePool`` class.')
- def __init__(self, service, name):
- super(Pool, self).__init__(service, name=name)
-
- def create(self):
- pass
-
-
-class ReplicatedPool(BasePool):
- def __init__(self, service, name=None, pg_num=None, replicas=None,
- percent_data=None, app_name=None, op=None,
- profile_name='replicated_rule'):
- """Initialize ReplicatedPool object.
-
- Pool information is either initialized from individual keyword
- arguments or from an individual CephBrokerRq operation Dict.
-
- Please refer to the docstring of the ``BasePool`` class for
- documentation of the common parameters.
-
- :param pg_num: Request a specific number of Placement Groups (this value
- is subject to validation against a running cluster prior
- to use to avoid creating a pool with too many PGs)
- :type pg_num: int
- :param replicas: Number of copies there should be of each object added
- to this replicated pool.
- :type replicas: int
- :param profile_name: Crush Profile to use
- :type profile_name: Optional[str]
- :raises: KeyError
- """
- # NOTE: Do not perform initialization steps that require live data from
- # a running cluster here. The *Pool classes may be used for validation.
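The power-of-two rounding in `get_pgs()` above is easiest to see with concrete numbers. A self-contained sketch of the same arithmetic; the 100 PGs-per-OSD target and 10 % default weight come from the docstrings above, and the floor of 2 stands in for what the module calls DEFAULT_MINIMUM_PGS (an assumption here):

```python
import math

def pgs(osd_count, pool_size, percent_data=10.0, pgs_per_osd=100,
        minimum=2):
    """Replicates the get_pgs() arithmetic, for illustration only."""
    num_pg = (pgs_per_osd * osd_count * (percent_data / 100.0)) // pool_size
    num_pg = max(num_pg, minimum)
    # Nearest power of two with 2**n <= num_pg; step up one power if
    # that is more than 25% below the original value.
    nearest = 2 ** math.floor(math.log(num_pg, 2))
    if (num_pg - nearest) > num_pg * 0.25:
        return int(nearest * 2)
    return int(nearest)

# 12 OSDs, 3 replicas, pool expected to hold 40% of the data:
print(pgs(12, 3, percent_data=40.0))  # 128  (480 // 3 = 160 -> 128)
# 3 OSDs, 3 replicas, default 10% weight:
print(pgs(3, 3))                      # 8    (300 * 0.1 // 3 = 10 -> 8)
```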
- - # The common parameters are handled in our parents initializer - super(ReplicatedPool, self).__init__( - service=service, name=name, percent_data=percent_data, - app_name=app_name, op=op) - - if op: - # When initializing from op `replicas` is a required attribute, and - # we will fail with KeyError if it is not provided. - self.replicas = op['replicas'] - self.pg_num = op.get('pg_num') - self.profile_name = op.get('crush-profile') or profile_name - else: - self.replicas = replicas or 2 - self.pg_num = pg_num - self.profile_name = profile_name or 'replicated_rule' - - def _create(self): - # Validate if crush profile exists - if self.profile_name is None: - msg = ("Failed to discover crush profile named " - "{}".format(self.profile_name)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - - # Do extra validation on pg_num with data from live cluster - if self.pg_num: - # Since the number of placement groups were specified, ensure - # that there aren't too many created. - max_pgs = self.get_pgs(self.replicas, 100.0) - self.pg_num = min(self.pg_num, max_pgs) - else: - self.pg_num = self.get_pgs(self.replicas, self.percent_data) - - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 - # Create it - if nautilus_or_later: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - '--pg-num-min={}'.format( - min(AUTOSCALER_DEFAULT_PGS, self.pg_num) - ), - self.name, str(self.pg_num), self.profile_name - ] - else: - cmd = [ - 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num), self.profile_name - ] - check_call(cmd) - - def _post_create(self): - # Set the pool replica size - update_pool(client=self.service, - pool=self.name, - settings={'size': str(self.replicas)}) - # Perform other common post pool creation tasks - super(ReplicatedPool, self)._post_create() - - -class ErasurePool(BasePool): - """Default jerasure erasure coded pool.""" - - def __init__(self, service, name=None, erasure_code_profile=None, - percent_data=None, app_name=None, op=None, - allow_ec_overwrites=False): - """Initialize ErasurePool object. - - Pool information is either initialized from individual keyword - arguments or from a individual CephBrokerRq operation Dict. - - Please refer to the docstring of the ``BasePool`` class for - documentation of the common parameters. - - :param erasure_code_profile: EC Profile to use (default: 'default') - :type erasure_code_profile: Optional[str] - """ - # NOTE: Do not perform initialization steps that require live data from - # a running cluster here. The *Pool classes may be used for validation. - - # The common parameters are handled in our parents initializer - super(ErasurePool, self).__init__( - service=service, name=name, percent_data=percent_data, - app_name=app_name, op=op) - - if op: - # Note that the different default when initializing from op stems - # from different handling of this in the `charms.ceph` library. - self.erasure_code_profile = op.get('erasure-profile', - 'default-canonical') - self.allow_ec_overwrites = op.get('allow-ec-overwrites') - else: - # We keep the class default when initialized from keyword arguments - # to not break the API for any other consumers. - self.erasure_code_profile = erasure_code_profile or 'default' - self.allow_ec_overwrites = allow_ec_overwrites - - def _create(self): - # Try to find the erasure profile information in order to properly - # size the number of placement groups. The size of an erasure - # coded placement group is calculated as k+m. 
- erasure_profile = get_erasure_profile(self.service,
- self.erasure_code_profile)
-
- # Check for errors
- if erasure_profile is None:
- msg = ("Failed to discover erasure profile named "
- "{}".format(self.erasure_code_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
- if 'k' not in erasure_profile or 'm' not in erasure_profile:
- # Error
- msg = ("Unable to find k (data chunks) or m (coding chunks) "
- "in erasure profile {}".format(erasure_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
-
- k = int(erasure_profile['k'])
- m = int(erasure_profile['m'])
- pgs = self.get_pgs(k + m, self.percent_data)
- nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
- # Create it
- if nautilus_or_later:
- cmd = [
- 'ceph', '--id', self.service, 'osd', 'pool', 'create',
- '--pg-num-min={}'.format(
- min(AUTOSCALER_DEFAULT_PGS, pgs)
- ),
- self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile
- ]
- else:
- cmd = [
- 'ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile
- ]
- check_call(cmd)
-
- def _post_create(self):
- super(ErasurePool, self)._post_create()
- if self.allow_ec_overwrites:
- update_pool(self.service, self.name,
- {'allow_ec_overwrites': 'true'})
-
-
-def enabled_manager_modules():
- """Return a list of enabled manager modules.
-
- :rtype: List[str]
- """
- cmd = ['ceph', 'mgr', 'module', 'ls']
- quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
- if quincy_or_later:
- cmd.append('--format=json')
- try:
- modules = check_output(cmd).decode('utf-8')
- except CalledProcessError as e:
- log("Failed to list ceph modules: {}".format(e), WARNING)
- return []
- modules = json.loads(modules)
- return modules['enabled_modules']
-
-
-def enable_pg_autoscale(service, pool_name):
- """Enable Ceph's PG autoscaler for the specified pool.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :param pool_name: The name of the pool to enable autoscaling on
- :type pool_name: str
- :raises: CalledProcessError if the command fails
- """
- check_call([
- 'ceph', '--id', service,
- 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on'])
-
-
-def get_mon_map(service):
- """Return the current monitor map.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :returns: Dictionary with monitor map data
- :rtype: Dict[str,any]
- :raises: ValueError if the monmap fails to parse, CalledProcessError if our
- ceph command fails.
- """
- try:
- octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0
- mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status'
- mon_status = (check_output(['ceph', '--id', service, mon_status_cmd,
- '--format=json'])).decode('utf-8')
- try:
- return json.loads(mon_status)
- except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}"
- .format(mon_status, str(v)))
- raise
- except CalledProcessError as e:
- log("mon_status command failed with message: {}"
- .format(str(e)))
- raise
-
-
-def hash_monitor_names(service):
- """Get a sorted list of monitor hashes in ascending order.
-
- Uses the get_mon_map() function to get information about the monitor
- cluster. Hash the name of each monitor.
-
- :param service: The Ceph user name to run the command under.
- :type service: str
- :returns: a sorted list of monitor hashes in ascending order.
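`enabled_manager_modules()` above has to cope with a CLI change: before Quincy, `ceph mgr module ls` already emitted JSON by default, while from Quincy onward the `--format=json` flag is required. A sketch of the JSON shape it consumes (abbreviated and illustrative, not verbatim Ceph output):

```python
import json

# Abbreviated, illustrative output of `ceph mgr module ls --format=json`.
raw = '''
{
  "always_on_modules": ["balancer", "crash"],
  "enabled_modules": ["iostat", "pg_autoscaler", "restful"],
  "disabled_modules": [{"name": "dashboard"}]
}
'''

modules = json.loads(raw)
# The helper returns only 'enabled_modules'; note that always-on
# modules are reported separately by Ceph.
print('pg_autoscaler' in modules['enabled_modules'])  # True
```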
- :rtype: List[str]
- :raises: CalledProcessError, ValueError
- """
- try:
- hash_list = []
- monitor_list = get_mon_map(service=service)
- if monitor_list['monmap']['mons']:
- for mon in monitor_list['monmap']['mons']:
- hash_list.append(
- hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
- return sorted(hash_list)
- else:
- return None
- except (ValueError, CalledProcessError):
- raise
-
-
-def monitor_key_delete(service, key):
- """Delete a key and value pair from the monitor cluster.
-
- Deletes a key value pair on the monitor cluster.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :param key: The key to delete.
- :type key: str
- :raises: CalledProcessError
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'del', str(key)])
- except CalledProcessError as e:
- log("Monitor config-key del failed with message: {}"
- .format(e.output))
- raise
-
-
-def monitor_key_set(service, key, value):
- """Set a key value pair on the monitor cluster.
-
- :param service: The Ceph user name to run the command under.
- :type service: str
- :param key: The key to set.
- :type key: str
- :param value: The value to set. This will be coerced into a string.
- :type value: str
- :raises: CalledProcessError
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'put', str(key), str(value)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}"
- .format(e.output))
- raise
-
-
-def monitor_key_get(service, key):
- """Get the value of an existing key in the monitor cluster.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :param key: The key to search for.
- :type key: str
- :return: Returns the value of that key or None if not found.
- :rtype: Optional[str]
- """
- try:
- output = check_output(
- ['ceph', '--id', service,
- 'config-key', 'get', str(key)]).decode('UTF-8')
- return output
- except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}"
- .format(e.output))
- return None
-
-
-def monitor_key_exists(service, key):
- """Search for existence of key in the monitor cluster.
-
- :param service: The Ceph user name to run the command under.
- :type service: str
- :param key: The key to search for.
- :type key: str
- :return: Returns True if the key exists, False if not.
- :rtype: bool
- :raises: CalledProcessError if an unknown error occurs.
- """
- try:
- check_call(
- ['ceph', '--id', service,
- 'config-key', 'exists', str(key)])
- # A zero exit status means the key exists; Ceph exits with
- # ENOENT when it does not, which is handled below
- return True
- except CalledProcessError as e:
- if e.returncode == errno.ENOENT:
- return False
- else:
- log("Unknown error from ceph config-key exists: {} {}"
- .format(e.returncode, e.output))
- raise
-
-
-def get_erasure_profile(service, name):
- """Get an existing erasure code profile if it exists.
-
- :param service: The Ceph user name to run the command under.
- :type service: str
- :param name: Name of profile.
- :type name: str
- :returns: Dictionary with profile data.
- :rtype: Optional[Dict[str]]
- """
- try:
- out = check_output(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name, '--format=json']).decode('utf-8')
- return json.loads(out)
- except (CalledProcessError, OSError, ValueError):
- return None
-
-
-def pool_set(service, pool_name, key, value):
- """Sets a value for a RADOS pool in ceph.
-
- :param service: The Ceph user name to run the command under.
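The `monitor_key_*` helpers above wrap Ceph's cluster-wide `config-key` store, which the charms use as a small distributed key/value database. A usage sketch, assuming charmhelpers is importable and a Ceph client key for the hypothetical user 'admin' is in place; the key name is a placeholder:

```python
from charmhelpers.contrib.storage.linux.ceph import (
    monitor_key_set,
    monitor_key_get,
    monitor_key_exists,
    monitor_key_delete,
)

service = 'admin'  # Ceph user the commands run under

monitor_key_set(service, 'upgrade-token', 'osd.0')
if monitor_key_exists(service, 'upgrade-token'):
    # Values come back as the raw string stored via `config-key put`.
    print(monitor_key_get(service, 'upgrade-token'))
monitor_key_delete(service, 'upgrade-token')
```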
- :type service: str - :param pool_name: Name of pool to set property on. - :type pool_name: str - :param key: Property key. - :type key: str - :param value: Value, will be coerced into str and shifted to lowercase. - :type value: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', pool_name, key, str(value).lower()] - check_call(cmd) - - -def snapshot_pool(service, pool_name, snapshot_name): - """Snapshots a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to snapshot. - :type pool_name: str - :param snapshot_name: Name of snapshot to create. - :type snapshot_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'mksnap', pool_name, snapshot_name] - check_call(cmd) - - -def remove_pool_snapshot(service, pool_name, snapshot_name): - """Remove a snapshot from a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to remove snapshot from. - :type pool_name: str - :param snapshot_name: Name of snapshot to remove. - :type snapshot_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] - check_call(cmd) - - -def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): - """Set byte quota on a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under - :type service: str - :param pool_name: Name of pool - :type pool_name: str - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: int - :param max_objects: Maximum objects quota to apply - :type max_objects: int - :raises: subprocess.CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set-quota', pool_name] - if max_bytes: - cmd = cmd + ['max_bytes', str(max_bytes)] - if max_objects: - cmd = cmd + ['max_objects', str(max_objects)] - check_call(cmd) - - -def remove_pool_quota(service, pool_name): - """Remove byte quota on a RADOS pool in Ceph. - - :param service: The Ceph user name to run the command under. - :type service: str - :param pool_name: Name of pool to remove quota from. - :type pool_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] - check_call(cmd) - - -def remove_erasure_profile(service, profile_name): - """Remove erasure code profile. - - :param service: The Ceph user name to run the command under - :type service: str - :param profile_name: Name of profile to remove. - :type profile_name: str - :raises: CalledProcessError - """ - cmd = [ - 'ceph', '--id', service, - 'osd', 'erasure-code-profile', 'rm', profile_name] - check_call(cmd) - - -def create_erasure_profile(service, profile_name, - erasure_plugin_name='jerasure', - failure_domain=None, - data_chunks=2, coding_chunks=1, - locality=None, durability_estimator=None, - helper_chunks=None, - scalar_mds=None, - crush_locality=None, - device_class=None, - erasure_plugin_technique=None): - """Create a new erasure code profile if one does not already exist for it. - - Profiles are considered immutable so will not be updated if the named - profile already exists. - - Please refer to [0] for more details. - - 0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ - - :param service: The Ceph user name to run the command under. 
- :type service: str
- :param profile_name: Name of profile.
- :type profile_name: str
- :param erasure_plugin_name: Erasure code plugin.
- :type erasure_plugin_name: str
- :param failure_domain: Failure domain, one of:
- ('chassis', 'datacenter', 'host', 'osd', 'pdu',
- 'pod', 'rack', 'region', 'room', 'root', 'row').
- :type failure_domain: str
- :param data_chunks: Number of data chunks.
- :type data_chunks: int
- :param coding_chunks: Number of coding chunks.
- :type coding_chunks: int
- :param locality: Locality.
- :type locality: int
- :param durability_estimator: Durability estimator.
- :type durability_estimator: int
- :param helper_chunks: Number of helper chunks (clay plugin).
- :type helper_chunks: int
- :param device_class: Restrict placement to devices of specific class.
- :type device_class: str
- :param scalar_mds: one of ['isa', 'jerasure', 'shec']
- :type scalar_mds: str
- :param crush_locality: LRC locality failure domain, one of:
- ('chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod',
- 'rack', 'region', 'room', 'root', 'row') or unset.
- :type crush_locality: str
- :param erasure_plugin_technique: Coding technique for EC plugin
- :type erasure_plugin_technique: str
- :return: None. Can raise CalledProcessError, ValueError or AssertionError
- """
- if erasure_profile_exists(service, profile_name):
- log('EC profile {} exists, skipping update'.format(profile_name),
- level=WARNING)
- return
-
- plugin_techniques = {
- 'jerasure': [
- 'reed_sol_van',
- 'reed_sol_r6_op',
- 'cauchy_orig',
- 'cauchy_good',
- 'liberation',
- 'blaum_roth',
- 'liber8tion'
- ],
- 'lrc': [],
- 'isa': [
- 'reed_sol_van',
- 'cauchy',
- ],
- 'shec': [
- 'single',
- 'multiple'
- ],
- 'clay': [],
- }
- failure_domains = [
- 'chassis', 'datacenter',
- 'host', 'osd',
- 'pdu', 'pod',
- 'rack', 'region',
- 'room', 'root',
- 'row',
- ]
- device_classes = [
- 'ssd',
- 'hdd',
- 'nvme'
- ]
-
- validator(erasure_plugin_name, str, list(plugin_techniques.keys()))
-
- cmd = [
- 'ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin={}'.format(erasure_plugin_name),
- 'k={}'.format(str(data_chunks)),
- 'm={}'.format(str(coding_chunks)),
- ]
-
- if erasure_plugin_technique:
- validator(erasure_plugin_technique, str,
- plugin_techniques[erasure_plugin_name])
- cmd.append('technique={}'.format(erasure_plugin_technique))
-
- luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
-
- # Set failure domain from options if not provided in args
- if not failure_domain and config('customize-failure-domain'):
- # Defaults to 'host' so just need to deal with
- # setting 'rack' if feature is enabled
- failure_domain = 'rack'
-
- if failure_domain:
- validator(failure_domain, str, failure_domains)
- # failure_domain changed in luminous
- if luminous_or_later:
- cmd.append('crush-failure-domain={}'.format(failure_domain))
- else:
- cmd.append('ruleset-failure-domain={}'.format(failure_domain))
-
- # device class new in luminous
- if luminous_or_later and device_class:
- validator(device_class, str, device_classes)
- cmd.append('crush-device-class={}'.format(device_class))
- else:
- log('Skipping device class configuration (ceph < 12.0.0)',
- level=DEBUG)
-
- # Add plugin specific information
- if erasure_plugin_name == 'lrc':
- # LRC mandatory configuration
- if locality:
- cmd.append('l={}'.format(str(locality)))
- else:
- raise ValueError("locality must be provided for lrc plugin")
- # LRC optional configuration
- if crush_locality:
- validator(crush_locality, str, failure_domains)
- 
cmd.append('crush-locality={}'.format(crush_locality))
-
- if erasure_plugin_name == 'shec':
- # SHEC optional configuration
- if durability_estimator:
- cmd.append('c={}'.format(durability_estimator))
-
- if erasure_plugin_name == 'clay':
- # CLAY optional configuration
- if helper_chunks:
- cmd.append('d={}'.format(str(helper_chunks)))
- if scalar_mds:
- cmd.append('scalar-mds={}'.format(scalar_mds))
-
- check_call(cmd)
-
-
-def rename_pool(service, old_name, new_name):
- """Rename a Ceph pool from old_name to new_name.
-
- :param service: The Ceph user name to run the command under.
- :type service: str
- :param old_name: Name of pool subject to rename.
- :type old_name: str
- :param new_name: Name to rename pool to.
- :type new_name: str
- """
- validator(value=old_name, valid_type=str)
- validator(value=new_name, valid_type=str)
-
- cmd = [
- 'ceph', '--id', service,
- 'osd', 'pool', 'rename', old_name, new_name]
- check_call(cmd)
-
-
-def erasure_profile_exists(service, name):
- """Check to see if an Erasure code profile already exists.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :param name: Name of profile to look for.
- :type name: str
- :returns: True if it exists, False otherwise.
- :rtype: bool
- """
- validator(value=name, valid_type=str)
- try:
- check_call(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name])
- return True
- except CalledProcessError:
- return False
-
-
-def get_cache_mode(service, pool_name):
- """Find the current caching mode of the pool_name given.
-
- :param service: The Ceph user name to run the command under
- :type service: str
- :param pool_name: Name of pool.
- :type pool_name: str
- :returns: Current cache mode.
- :rtype: Optional[str]
- """
- validator(value=service, valid_type=str)
- validator(value=pool_name, valid_type=str)
- out = check_output(['ceph', '--id', service,
- 'osd', 'dump', '--format=json']).decode('utf-8')
- try:
- osd_json = json.loads(out)
- for pool in osd_json['pools']:
- if pool['pool_name'] == pool_name:
- return pool['cache_mode']
- return None
- except ValueError:
- raise
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(
- ['rados', '--id', service, 'lspools']).decode('utf-8')
- except CalledProcessError:
- return False
-
- return name in out.split()
-
-
-def get_osds(service, device_class=None):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster (optionally filtered by storage device class).
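`pool_exists()` above deliberately shells out to `rados lspools`, while `get_cache_mode()` parses the `ceph osd dump` JSON; for an untiered pool the reported cache mode is simply 'none'. A sketch of checking a pool before acting on it, assuming charmhelpers is importable; the 'admin' user and 'glance' pool names are placeholders:

```python
from charmhelpers.contrib.storage.linux.ceph import (
    pool_exists,
    get_cache_mode,
)

service = 'admin'

if pool_exists(service, 'glance'):
    # Returns e.g. 'none', 'writeback' or 'readonly', or None when
    # the pool is missing from `osd dump`.
    mode = get_cache_mode(service, 'glance')
    print('glance cache mode:', mode)
```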
- - :param device_class: Class of storage device for OSD's - :type device_class: str - """ - luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 - if luminous_or_later and device_class: - out = check_output(['ceph', '--id', service, - 'osd', 'crush', 'class', - 'ls-osd', device_class, - '--format=json']).decode('utf-8') - else: - out = check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('utf-8') - return json.loads(out) - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('utf-8') - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def update_pool(client, pool, settings): - """Update pool properties. - - :param client: Client/User-name to authenticate with. - :type client: str - :param pool: Name of pool to operate on - :type pool: str - :param settings: Dictionary with key/value pairs to set. - :type settings: Dict[str, str] - :raises: CalledProcessError - """ - cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in settings.items(): - check_call(cmd + [k, v]) - - -def set_app_name_for_pool(client, pool, name): - """Calls `osd pool application enable` for the specified pool name - - :param client: Name of the ceph client to use - :type client: str - :param pool: Pool to set app name for - :type pool: str - :param name: app name for the specified pool - :type name: str - - :raises: CalledProcessError if ceph call fails - """ - if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: - cmd = ['ceph', '--id', client, 'osd', 'pool', - 'application', 'enable', pool, name] - check_call(cmd) - - -def create_pool(service, name, replicas=3, pg_num=None): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - if not pg_num: - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pg_num = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pg_num = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] - check_call(cmd) - - update_pool(service, name, settings={'size': str(replicas)}) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def add_key(service, key): - """Add a key to a keyring. - - Creates the keyring if it doesn't already exist. - - Logs and returns if the key is already in the keyring. - """ - keyring = _keyring_path(service) - if os.path.exists(keyring): - with open(keyring, 'r') as ring: - if key in ring.read(): - log('Ceph keyring exists at %s and has not changed.' 
% keyring, - level=DEBUG) - return - log('Updating existing keyring %s.' % keyring, level=DEBUG) - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def create_keyring(service, key): - """Deprecated. Please use the more accurately named 'add_key'""" - return add_key(service, key) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' % keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' % keyfile, level=INFO) - - -def get_ceph_nodes(relation='ceph'): - """Query named relation to determine current nodes.""" - hosts = [] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - add_key(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']).decode('utf-8') - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesystem is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, - level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block device %s to appear' % blk_device, - level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. 
- os.chown(data_src_dst, uid, gid) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. - - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. - if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.' - .format(svc), level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None, - relation='ceph', key=None): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - @returns boolean: Flag to indicate whether a key was successfully written - to disk based on either relation data or a supplied key - """ - if not key: - for rid in relation_ids(relation): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - add_key(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - - def __init__(self, api_version=1, request_id=None, raw_request_data=None): - """Initialize CephBrokerRq object. - - Builds a new empty request or rebuilds a request from on-wire JSON - data. - - :param api_version: API version for request (default: 1). 
- :type api_version: Optional[int] - :param request_id: Unique identifier for request. - (default: string representation of generated UUID) - :type request_id: Optional[str] - :param raw_request_data: JSON-encoded string to build request from. - :type raw_request_data: Optional[str] - :raises: KeyError - """ - if raw_request_data: - request_data = json.loads(raw_request_data) - self.api_version = request_data['api-version'] - self.request_id = request_data['request-id'] - self.set_ops(request_data['ops']) - else: - self.api_version = api_version - if request_id: - self.request_id = request_id - else: - self.request_id = str(uuid.uuid1()) - self.ops = [] - - def add_op(self, op): - """Add an op if it is not already in the list. - - :param op: Operation to add. - :type op: dict - """ - if op not in self.ops: - self.ops.append(op) - - def add_op_request_access_to_group(self, name, namespace=None, - permission=None, key_name=None, - object_prefix_permissions=None): - """ - Adds the requested permissions to the current service's Ceph key, - allowing the key to access only the specified pools or - object prefixes. object_prefix_permissions should be a dictionary - keyed on the permission with the corresponding value being a list - of prefixes to apply that permission to. - { - 'rwx': ['prefix1', 'prefix2'], - 'class-read': ['prefix3']} - """ - self.add_op({ - 'op': 'add-permissions-to-key', 'group': name, - 'namespace': namespace, - 'name': key_name or service_name(), - 'group-permission': permission, - 'object-prefix-permissions': object_prefix_permissions}) - - def add_op_create_pool(self, name, replica_count=3, pg_num=None, - weight=None, group=None, namespace=None, - app_name=None, max_bytes=None, max_objects=None): - """DEPRECATED: Use ``add_op_create_replicated_pool()`` or - ``add_op_create_erasure_pool()`` instead. - """ - return self.add_op_create_replicated_pool( - name, replica_count=replica_count, pg_num=pg_num, weight=weight, - group=group, namespace=namespace, app_name=app_name, - max_bytes=max_bytes, max_objects=max_objects) - - # Use function parameters and docstring to define types in a compatible - # manner. - # - # NOTE: Our caller should always use a kwarg Dict when calling us so - # no need to maintain fixed order/position for parameters. Please keep them - # sorted by name when adding new ones. - def _partial_build_common_op_create(self, - app_name=None, - compression_algorithm=None, - compression_mode=None, - compression_required_ratio=None, - compression_min_blob_size=None, - compression_min_blob_size_hdd=None, - compression_min_blob_size_ssd=None, - compression_max_blob_size=None, - compression_max_blob_size_hdd=None, - compression_max_blob_size_ssd=None, - group=None, - max_bytes=None, - max_objects=None, - namespace=None, - rbd_mirroring_mode='pool', - weight=None): - """Build common part of a create pool operation. - - :param app_name: Tag pool with application name. Note that there is - certain protocols emerging upstream with regard to - meaningful application names to use. - Examples are 'rbd' and 'rgw'. 
- :type app_name: Optional[str] - :param compression_algorithm: Compressor to use, one of: - ('lz4', 'snappy', 'zlib', 'zstd') - :type compression_algorithm: Optional[str] - :param compression_mode: When to compress data, one of: - ('none', 'passive', 'aggressive', 'force') - :type compression_mode: Optional[str] - :param compression_required_ratio: Minimum compression ratio for data - chunk, if the requested ratio is not - achieved the compressed version will - be thrown away and the original - stored. - :type compression_required_ratio: Optional[float] - :param compression_min_blob_size: Chunks smaller than this are never - compressed (unit: bytes). - :type compression_min_blob_size: Optional[int] - :param compression_min_blob_size_hdd: Chunks smaller than this are not - compressed when destined to - rotational media (unit: bytes). - :type compression_min_blob_size_hdd: Optional[int] - :param compression_min_blob_size_ssd: Chunks smaller than this are not - compressed when destined to flash - media (unit: bytes). - :type compression_min_blob_size_ssd: Optional[int] - :param compression_max_blob_size: Chunks larger than this are broken - into N * compression_max_blob_size - chunks before being compressed - (unit: bytes). - :type compression_max_blob_size: Optional[int] - :param compression_max_blob_size_hdd: Chunks larger than this are - broken into - N * compression_max_blob_size_hdd - chunks before being compressed - when destined for rotational - media (unit: bytes) - :type compression_max_blob_size_hdd: Optional[int] - :param compression_max_blob_size_ssd: Chunks larger than this are - broken into - N * compression_max_blob_size_ssd - chunks before being compressed - when destined for flash media - (unit: bytes). - :type compression_max_blob_size_ssd: Optional[int] - :param group: Group to add pool to - :type group: Optional[str] - :param max_bytes: Maximum bytes quota to apply - :type max_bytes: Optional[int] - :param max_objects: Maximum objects quota to apply - :type max_objects: Optional[int] - :param namespace: Group namespace - :type namespace: Optional[str] - :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD - mirroring is enabled. - :type rbd_mirroring_mode: Optional[str] - :param weight: The percentage of data that is expected to be contained - in the pool from the total available space on the OSDs. - Used to calculate number of Placement Groups to create - for pool. - :type weight: Optional[float] - :returns: Dictionary with kwarg name as key. - :rtype: Dict[str,any] - :raises: AssertionError - """ - return { - 'app-name': app_name, - 'compression-algorithm': compression_algorithm, - 'compression-mode': compression_mode, - 'compression-required-ratio': compression_required_ratio, - 'compression-min-blob-size': compression_min_blob_size, - 'compression-min-blob-size-hdd': compression_min_blob_size_hdd, - 'compression-min-blob-size-ssd': compression_min_blob_size_ssd, - 'compression-max-blob-size': compression_max_blob_size, - 'compression-max-blob-size-hdd': compression_max_blob_size_hdd, - 'compression-max-blob-size-ssd': compression_max_blob_size_ssd, - 'group': group, - 'max-bytes': max_bytes, - 'max-objects': max_objects, - 'group-namespace': namespace, - 'rbd-mirroring-mode': rbd_mirroring_mode, - 'weight': weight, - } - - def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - crush_profile=None, **kwargs): - """Adds an operation to create a replicated pool. 
-
- Refer to docstring for ``_partial_build_common_op_create`` for
- documentation of keyword arguments.
-
- :param name: Name of pool to create
- :type name: str
- :param replica_count: Number of copies Ceph should keep of your data.
- :type replica_count: int
- :param pg_num: Request specific number of Placement Groups to create
- for pool.
- :type pg_num: int
- :param crush_profile: Name of crush profile to use. If not set the
- ceph-mon unit handling the broker request will
- set its default value.
- :type crush_profile: Optional[str]
- :raises: AssertionError if provided data is of invalid type/range
- """
- if pg_num and kwargs.get('weight'):
- raise ValueError('pg_num and weight are mutually exclusive')
-
- op = {
- 'op': 'create-pool',
- 'name': name,
- 'replicas': replica_count,
- 'pg_num': pg_num,
- 'crush-profile': crush_profile
- }
- op.update(self._partial_build_common_op_create(**kwargs))
-
- # Initialize Pool-object to validate type and range of ops.
- pool = ReplicatedPool('dummy-service', op=op)
- pool.validate()
-
- self.add_op(op)
-
- def add_op_create_erasure_pool(self, name, erasure_profile=None,
- allow_ec_overwrites=False, **kwargs):
- """Adds an operation to create an erasure coded pool.
-
- Refer to docstring for ``_partial_build_common_op_create`` for
- documentation of keyword arguments.
-
- :param name: Name of pool to create
- :type name: str
- :param erasure_profile: Name of erasure code profile to use. If not
- set the ceph-mon unit handling the broker
- request will set its default value.
- :type erasure_profile: str
- :param allow_ec_overwrites: allow EC pools to be overwritten
- :type allow_ec_overwrites: bool
- :raises: AssertionError if provided data is of invalid type/range
- """
- op = {
- 'op': 'create-pool',
- 'name': name,
- 'pool-type': 'erasure',
- 'erasure-profile': erasure_profile,
- 'allow-ec-overwrites': allow_ec_overwrites,
- }
- op.update(self._partial_build_common_op_create(**kwargs))
-
- # Initialize Pool-object to validate type and range of ops.
- pool = ErasurePool('dummy-service', op)
- pool.validate()
-
- self.add_op(op)
-
- def add_op_create_erasure_profile(self, name,
- erasure_type='jerasure',
- erasure_technique=None,
- k=None, m=None,
- failure_domain=None,
- lrc_locality=None,
- shec_durability_estimator=None,
- clay_helper_chunks=None,
- device_class=None,
- clay_scalar_mds=None,
- lrc_crush_locality=None):
- """Adds an operation to create an erasure coding profile.
-
- :param name: Name of profile to create
- :type name: str
- :param erasure_type: Which of the erasure coding plugins should be used
- :type erasure_type: string
- :param erasure_technique: EC plugin technique to use
- :type erasure_technique: string
- :param k: Number of data chunks
- :type k: int
- :param m: Number of coding chunks
- :type m: int
- :param lrc_locality: Group the coding and data chunks into sets of size locality
- (lrc plugin)
- :type lrc_locality: int
- :param shec_durability_estimator: The number of parity chunks each of which includes
- a data chunk in its calculation range (shec plugin)
- :type shec_durability_estimator: int
- :param clay_helper_chunks: The number of helper chunks to use for recovery operations
- (clay plugin)
- :type clay_helper_chunks: int
- :param failure_domain: Type of failure domain from Ceph bucket types
- to be used
- :type failure_domain: string
- :param device_class: Device class to use for profile (ssd, hdd)
- :type device_class: string
- :param clay_scalar_mds: Plugin to use for CLAY layered construction
- (jerasure|isa|shec)
- :type clay_scalar_mds: string
- :param lrc_crush_locality: Type of crush bucket in which set of chunks
- defined by lrc_locality will be stored.
- :type lrc_crush_locality: string
- """
- self.add_op({'op': 'create-erasure-profile',
- 'name': name,
- 'k': k,
- 'm': m,
- 'l': lrc_locality,
- 'c': shec_durability_estimator,
- 'd': clay_helper_chunks,
- 'erasure-type': erasure_type,
- 'erasure-technique': erasure_technique,
- 'failure-domain': failure_domain,
- 'device-class': device_class,
- 'scalar-mds': clay_scalar_mds,
- 'crush-locality': lrc_crush_locality})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- keys_to_compare = [
- 'replicas', 'name', 'op', 'pg_num', 'group-permission',
- 'object-prefix-permissions',
- ]
- keys_to_compare += list(self._partial_build_common_op_create().keys())
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in keys_to_compare:
- if self.ops[req_no].get(key) != other.ops[req_no].get(key):
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identify which CephBrokerRsp is associated
-# with the request.
Ceph will also respond to each client unit individually
-# creating a response key per client unit e.g. glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <congratulate yourself and move on>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- :param rid: Relation id to query for request
- :type rid: str
- :returns: CephBrokerRq object or None if relation data not found.
- :rtype: Optional[CephBrokerRq]
- """
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- return CephBrokerRq(raw_request_data=broker_req)
-
-
-def get_request_states(request, relation='ceph'):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
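The on-wire format sketched in the comment block above is plain JSON, and `CephBrokerRq` can rebuild a request from it; that round trip is what makes the idempotency checks below (`get_request_states()` and friends) possible. A small sketch, assuming charmhelpers is importable; the 'glance' pool name is a placeholder:

```python
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
rq.add_op_create_replicated_pool(name='glance', replica_count=3)

# .request is the JSON string placed in the 'broker_req' relation key.
wire = rq.request

# Rebuilding from wire data yields a functionally equal request:
# __eq__ compares the api-version and the ops, not the request-id.
rebuilt = CephBrokerRq(raw_request_data=wire)
print(rebuilt == rq)  # True
```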
-
- @param request: A CephBrokerRq object
- """
- requests = {}
- for rid in relation_ids(relation):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similar request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similar request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'support unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request, relation='ceph'):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request, relation=relation):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids(relation):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
- relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()})
-
-
-def has_broker_rsp(rid=None, unit=None):
- """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
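The unit-specific response key used throughout this section is derived purely from the unit name, as `get_broker_rsp_key()` shows. A standalone sketch of the mapping:

```python
def broker_rsp_key(unit_name):
    # Mirrors get_broker_rsp_key(): '/' is not usable in relation
    # keys, so the unit name is flattened with '-'.
    return 'broker-rsp-' + unit_name.replace('/', '-')

print(broker_rsp_key('glance/0'))        # broker-rsp-glance-0
print(broker_rsp_key('nova-compute/2'))  # broker-rsp-nova-compute-2
```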
-def has_broker_rsp(rid=None, unit=None):
-    """Return True if the broker_rsp key is 'truthy' (i.e. set to something)
-    in the relation data.
-
-    :param rid: The relation to check (default of None means current relation)
-    :type rid: Union[str, None]
-    :param unit: The remote unit to check (default of None means current unit)
-    :type unit: Union[str, None]
-    :returns: True if broker key exists and is set to something 'truthy'
-    :rtype: bool
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    return True if broker_rsp else False
-
-
-def is_broker_action_done(action, rid=None, unit=None):
-    """Check whether broker action has completed yet.
-
-    @param action: name of action to be performed
-    @returns True if action complete, otherwise False
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    if not broker_rsp:
-        return False
-
-    rsp = CephBrokerRsp(broker_rsp)
-    unit_name = local_unit().partition('/')[2]
-    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
-    kvstore = kv()
-    val = kvstore.get(key=key)
-    if val and val == rsp.request_id:
-        return True
-
-    return False
-
-
-def mark_broker_action_done(action, rid=None, unit=None):
-    """Mark action as having been completed.
-
-    @param action: name of action to be performed
-    @returns None
-    """
-    rdata = relation_get(rid=rid, unit=unit) or {}
-    broker_rsp = rdata.get(get_broker_rsp_key())
-    if not broker_rsp:
-        return
-
-    rsp = CephBrokerRsp(broker_rsp)
-    unit_name = local_unit().partition('/')[2]
-    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
-    kvstore = kv()
-    kvstore.set(key=key, value=rsp.request_id)
-    kvstore.flush()
-
-
-class CephConfContext(object):
-    """Ceph config (ceph.conf) context.
-
-    Supports user-provided Ceph configuration settings. Users can provide a
-    dictionary as the value for the config-flags charm option containing
-    Ceph configuration settings keyed by their section in ceph.conf.
-    """
-    def __init__(self, permitted_sections=None):
-        self.permitted_sections = permitted_sections or []
-
-    def __call__(self):
-        conf = config('config-flags')
-        if not conf:
-            return {}
-
-        conf = config_flags_parser(conf)
-        if not isinstance(conf, dict):
-            log("Provided config-flags is not a dictionary - ignoring",
-                level=WARNING)
-            return {}
-
-        permitted = self.permitted_sections
-        if permitted:
-            diff = set(conf.keys()).difference(set(permitted))
-            if diff:
-                log("Config-flags contains invalid keys '%s' - they will be "
-                    "ignored" % (', '.join(diff)), level=WARNING)
-
-        ceph_conf = {}
-        for key in conf:
-            if permitted and key not in permitted:
-                log("Ignoring key '%s'" % key, level=WARNING)
-                continue
-
-            ceph_conf[key] = conf[key]
-        return ceph_conf
-
-
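A sketch of the section filtering done by CephConfContext.__call__(), assuming the charm's config-flags option parses (via config_flags_parser) to a dict with 'global' and 'osd' sections; only the permitted section survives:

    ctxt = CephConfContext(permitted_sections=['global'])
    # If config('config-flags') parsed to
    #     {'global': {'debug osd': '1/5'}, 'osd': {'osd max backfills': '10'}}
    # then ctxt() would warn about the disallowed 'osd' key and return
    #     {'global': {'debug osd': '1/5'}}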
- """ - - def __init__(self, permitted_sections=None): - super(CephOSDConfContext, self).__init__( - permitted_sections=permitted_sections) - try: - self.settings_from_mons = get_osd_settings('mon') - except OSDSettingConflict: - log( - "OSD settings from mons are inconsistent, ignoring them", - level=WARNING) - self.settings_from_mons = {} - - def filter_osd_from_mon_settings(self): - """Filter settings from client relation against config-flags. - - :returns: A tuple ( - ,config-flag values, - ,client settings which do not conflict with config-flag values, - ,client settings which confilct with config-flag values) - :rtype: (OrderedDict, OrderedDict, OrderedDict) - """ - ceph_conf = super(CephOSDConfContext, self).__call__() - conflicting_entries = {} - clear_entries = {} - for key, value in self.settings_from_mons.items(): - if key in ceph_conf.get('osd', {}): - if ceph_conf['osd'][key] != value: - conflicting_entries[key] = value - else: - clear_entries[key] = value - clear_entries = _order_dict_by_key(clear_entries) - conflicting_entries = _order_dict_by_key(conflicting_entries) - return ceph_conf, clear_entries, conflicting_entries - - def __call__(self): - """Construct OSD config context. - - Standard context with two additional special keys. - osd_from_client_conflict: client settings which confilct with - config-flag values - osd_from_client: settings which do not conflict with config-flag - values - - :returns: OSD config context dict. - :rtype: dict - """ - conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() - conf['osd_from_client_conflict'] = osd_conflict - conf['osd_from_client'] = osd_clear - return conf diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py deleted file mode 100644 index 04daea29..00000000 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import re -from subprocess import ( - check_call, - check_output, -) - - -################################################## -# loopback device helpers. -################################################## -def loopback_devices(): - ''' - Parse through 'losetup -a' output to determine currently mapped - loopback devices. Output is expected to look like: - - /dev/loop0: [0807]:961814 (/tmp/my.img) - - or: - - /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) - - :returns: dict: a dict mapping {loopback_dev: backing_file} - ''' - loopbacks = {} - cmd = ['losetup', '-a'] - output = check_output(cmd).decode('utf-8') - devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] - for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] - return loopbacks - - -def create_loopback(file_path): - ''' - Create a loopback device for a given backing file. 
-def create_loopback(file_path):
-    '''
-    Create a loopback device for a given backing file.
-
-    :returns: str: Full path to new loopback device (eg, /dev/loop0)
-    '''
-    file_path = os.path.abspath(file_path)
-    check_call(['losetup', '--find', file_path])
-    for d, f in loopback_devices().items():
-        if f == file_path:
-            return d
-
-
-def ensure_loopback_device(path, size):
-    '''
-    Ensure a loopback device exists for a given backing file path and size.
-    If a loopback device is not already mapped to the file, a new one will
-    be created.
-
-    TODO: Confirm size of found loopback device.
-
-    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
-    '''
-    for d, f in loopback_devices().items():
-        if f == path:
-            return d
-
-    if not os.path.exists(path):
-        cmd = ['truncate', '--size', size, path]
-        check_call(cmd)
-
-    return create_loopback(path)
-
-
-def is_mapped_loopback_device(device):
-    """
-    Checks if a given device name is an existing/mapped loopback device.
-
-    :param device: str: Full path to the device (eg, /dev/loop1).
-    :returns: str: Path to the backing file if it is a loopback device;
-        empty string otherwise
-    """
-    return loopback_devices().get(device, "")
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index d0a57211..00000000
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-from subprocess import (
-    CalledProcessError,
-    check_call,
-    check_output,
-    Popen,
-    PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
-    '''
-    Deactivate any volume group associated with an LVM physical volume.
-
-    :param block_device: str: Full path to LVM physical volume
-    '''
-    vg = list_lvm_volume_group(block_device)
-    if vg:
-        cmd = ['vgchange', '-an', vg]
-        check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
-    '''
-    Determine whether a block device is initialized as an LVM PV.
-
-    :param block_device: str: Full path of block device to inspect.
-
-    :returns: boolean: True if block device is a PV, False if not.
-    '''
-    try:
-        check_output(['pvdisplay', block_device])
-        return True
-    except CalledProcessError:
-        return False
-
-
-def remove_lvm_physical_volume(block_device):
-    '''
-    Remove LVM PV signatures from a given block device.
-
-    :param block_device: str: Full path of block device to scrub.
-    '''
-    p = Popen(['pvremove', '-ff', block_device],
-              stdin=PIPE)
-    p.communicate(input='y\n')
-
-
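A sketch of the typical teardown order when scrubbing a device that may carry LVM state, using the helpers above (hypothetical device path; destructive and requires root):

    dev = '/dev/sdb'  # hypothetical block device
    if is_lvm_physical_volume(dev):
        # Volume groups must be deactivated before the PV can be removed.
        deactivate_lvm_volume_group(dev)
        remove_lvm_physical_volume(dev)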
-def list_lvm_volume_group(block_device):
-    '''
-    List the LVM volume group associated with a given block device.
-
-    Assumes block device is a valid LVM PV.
-
-    :param block_device: str: Full path of block device to inspect.
-
-    :returns: str: Name of volume group associated with block device or None
-    '''
-    vg = None
-    pvd = check_output(['pvdisplay', block_device]).splitlines()
-    for lvm in pvd:
-        lvm = lvm.decode('UTF-8')
-        if lvm.strip().startswith('VG Name'):
-            vg = ' '.join(lvm.strip().split()[2:])
-    return vg
-
-
-def create_lvm_physical_volume(block_device):
-    '''
-    Initialize a block device as an LVM physical volume.
-
-    :param block_device: str: Full path of block device to initialize.
-    '''
-    check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
-    '''
-    Create an LVM volume group backed by a given block device.
-
-    Assumes block device has already been initialized as an LVM PV.
-
-    :param volume_group: str: Name of volume group to create.
-    :param block_device: str: Full path of PV-initialized block device.
-    '''
-    check_call(['vgcreate', volume_group, block_device])
-
-
-def list_logical_volumes(select_criteria=None, path_mode=False):
-    '''
-    List logical volumes.
-
-    :param select_criteria: str: Limit list to those volumes matching this
-        criteria (see 'lvs -S help' for more details)
-    :param path_mode: bool: return logical volume name in 'vg/lv' format, this
-        format is required for some commands like lvextend
-    :returns: [str]: List of logical volumes
-    '''
-    lv_display_attr = 'lv_name'
-    if path_mode:
-        # Parsing output logic relies on the column order
-        lv_display_attr = 'vg_name,' + lv_display_attr
-    cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
-    if select_criteria:
-        cmd.extend(['--select', select_criteria])
-    lvs = []
-    for lv in check_output(cmd).decode('UTF-8').splitlines():
-        if not lv:
-            continue
-        if path_mode:
-            lvs.append('/'.join(lv.strip().split()))
-        else:
-            lvs.append(lv.strip())
-    return lvs
-
-
-list_thin_logical_volume_pools = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^t')
-
-list_thin_logical_volumes = functools.partial(
-    list_logical_volumes,
-    select_criteria='lv_attr =~ ^V')
-
-
-def extend_logical_volume_by_device(lv_name, block_device):
-    '''
-    Extend the size of logical volume lv_name by the amount of free space on
-    physical volume block_device.
-
-    :param lv_name: str: name of logical volume to be extended (vg/lv format)
-    :param block_device: str: name of block_device to be allocated to lv_name
-    '''
-    cmd = ['lvextend', lv_name, block_device]
-    check_call(cmd)
-
-
-def create_logical_volume(lv_name, volume_group, size=None):
-    '''
-    Create a new logical volume in an existing volume group.
-
-    :param lv_name: str: name of logical volume to be created.
-    :param volume_group: str: Name of volume group to use for the new volume.
-    :param size: str: Size of logical volume to create (100% if not supplied)
-    :raises subprocess.CalledProcessError: in the event that lvcreate fails.
-    '''
-    if size:
-        check_call([
-            'lvcreate',
-            '--yes',
-            '-L',
-            '{}'.format(size),
-            '-n', lv_name, volume_group
-        ])
-    # Create the lv with all the space available; this needs a different
-    # lvcreate invocation (-l 100%FREE instead of -L <size>).
-    else:
-        check_call([
-            'lvcreate',
-            '--yes',
-            '-l',
-            '100%FREE',
-            '-n', lv_name, volume_group
-        ])
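Conversely, a build-up sketch using the creation helpers above (hypothetical names; requires root and an unused block device):

    create_lvm_physical_volume('/dev/sdc')
    create_lvm_volume_group('data-vg', '/dev/sdc')
    create_logical_volume('data-lv', 'data-vg', size='10G')
    assert 'data-lv' in list_logical_volumes()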
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index a3561760..00000000
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
-    CalledProcessError,
-    check_call,
-    check_output,
-    call
-)
-
-
-def _luks_uuid(dev):
-    """
-    Check to see if dev is a LUKS encrypted volume, returning the UUID
-    of the volume if it is.
-
-    :param: dev: path to block device to check.
-    :returns: str. UUID of LUKS device or None if not a LUKS device
-    """
-    try:
-        cmd = ['cryptsetup', 'luksUUID', dev]
-        return check_output(cmd).decode('UTF-8').strip()
-    except CalledProcessError:
-        return None
-
-
-def is_luks_device(dev):
-    """
-    Determine if dev is a LUKS-formatted block device.
-
-    :param: dev: A full path to a block device to check for LUKS header
-        presence
-    :returns: boolean: indicates whether the device has a LUKS header.
-    """
-    return True if _luks_uuid(dev) else False
-
-
-def is_mapped_luks_device(dev):
-    """
-    Determine if dev is a mapped LUKS device.
-
-    :param: dev: A full path to a block device to be checked
-    :returns: boolean: indicates whether the device is mapped
-    """
-    _, dirs, _ = next(os.walk(
-        '/sys/class/block/{}/holders/'
-        .format(os.path.basename(os.path.realpath(dev))))
-    )
-    is_held = len(dirs) > 0
-    return is_held and is_luks_device(dev)
-
-
-def is_block_device(path):
-    '''
-    Confirm device at path is a valid block device node.
-
-    :returns: boolean: True if path is a block device, False if not.
-    '''
-    if not os.path.exists(path):
-        return False
-    return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
-    '''
-    Clear a block device of its partition table. Relies on sgdisk, which is
-    installed as part of the 'gdisk' package in Ubuntu.
-
-    :param block_device: str: Full path of block device to clean.
-    '''
-    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
-    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
-    call(['sgdisk', '--zap-all', '--', block_device])
-    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
-    dev_end = check_output(['blockdev', '--getsz',
-                            block_device]).decode('UTF-8')
-    gpt_end = int(dev_end.split()[0]) - 100
-    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
-                'bs=1M', 'count=1'])
-    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
-                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
-    '''Given a device path, return True if that device is mounted, and False
-    if it isn't.
-
-    :param device: str: Full path of the device to check.
-    :returns: boolean: True if the path represents a mounted device, False if
-        it doesn't.
-    '''
-    try:
-        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
-    except Exception:
-        return False
-    return bool(re.search(r'MOUNTPOINT=".+"', out))
-
-
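A sketch of how a charm would typically guard the destructive zap_disk() behind the checks above (hypothetical device; requires root):

    dev = '/dev/sdd'  # hypothetical block device
    if is_block_device(dev) and not is_device_mounted(dev):
        zap_disk(dev)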
-def mkfs_xfs(device, force=False, inode_size=1024):
-    """Format device with an XFS filesystem.
-
-    By default this should fail if the device already has a filesystem on it.
-
-    :param device: Full path to device to format
-    :ptype device: str
-    :param force: Force operation
-    :ptype force: boolean
-    :param inode_size: XFS inode size in bytes
-    :ptype inode_size: int
-    """
-    cmd = ['mkfs.xfs']
-    if force:
-        cmd.append("-f")
-
-    cmd += ['-i', "size={}".format(inode_size), device]
-    check_call(cmd)
diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b86..00000000
--- a/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index e7e95d17..00000000
--- a/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-#  Edward Hope-Morley
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
-    log,
-    INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
-    """If the decorated function raises exception exc_type, allow num_retries
-    retry attempts before raising the exception.
-    """
-    def _retry_on_exception_inner_1(f):
-        def _retry_on_exception_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            while True:
-                try:
-                    return f(*args, **kwargs)
-                except exc_type:
-                    if not retries:
-                        raise
-
-                delay = base_delay * multiplier
-                multiplier += 1
-                log("Retrying '%s' %d more times (delay=%s)" %
-                    (f.__name__, retries, delay), level=INFO)
-                retries -= 1
-                if delay:
-                    time.sleep(delay)
-
-        return _retry_on_exception_inner_2
-
-    return _retry_on_exception_inner_1
-
-
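For example, retry_on_exception() can wrap a flaky call so it is retried up to three times, with a growing delay (2s, 4s, 6s here), before the final exception propagates. A sketch with a hypothetical resource path:

    @retry_on_exception(3, base_delay=2, exc_type=IOError)
    def flaky_read():
        # Hypothetical resource that may not be available yet.
        with open('/run/hypothetical-resource') as f:
            return f.read()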
-def retry_on_predicate(num_retries, predicate_fun, base_delay=0):
-    """Retry based on return value
-
-    The return value of the decorated function is passed to the given
-    predicate_fun. If the result of the predicate is False, retry the
-    decorated function up to num_retries times.
-
-    An exponential backoff up to base_delay^num_retries seconds can be
-    introduced by setting base_delay to a nonzero value. The default is to
-    run with a zero (i.e. no) delay.
-
-    :param num_retries: Max. number of retries to perform
-    :type num_retries: int
-    :param predicate_fun: Predicate function to determine if a retry is
-        necessary
-    :type predicate_fun: callable
-    :param base_delay: Starting value in seconds for exponential delay,
-        defaults to 0 (no delay)
-    :type base_delay: float
-    """
-    def _retry_on_pred_inner_1(f):
-        def _retry_on_pred_inner_2(*args, **kwargs):
-            retries = num_retries
-            multiplier = 1
-            delay = base_delay
-            while True:
-                result = f(*args, **kwargs)
-                if predicate_fun(result) or retries <= 0:
-                    return result
-                delay *= multiplier
-                multiplier += 1
-                log("Result {}, retrying '{}' {} more times (delay={})".format(
-                    result, f.__name__, retries, delay), level=INFO)
-                retries -= 1
-                if delay:
-                    time.sleep(delay)
-
-        return _retry_on_pred_inner_2
-
-    return _retry_on_pred_inner_1
diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b75..00000000
--- a/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski '
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
-    """
-    Searches for and replaces the given pattern in filename.
-
-    :param filename: relative or absolute file path.
-    :param before: expression to be replaced (see 'man sed')
-    :param after: expression to replace with (see 'man sed')
-    :param flags: sed-compatible regex flags; for example, to make the
-        search and replace case insensitive as well as global, specify
-        ``flags="gi"``. Defaults to ``"g"``; note that overriding this
-        parameter replaces the default rather than extending it.
-    :returns: 0 if the sed command exit code was zero, otherwise raises
-        CalledProcessError.
-    """
-    expression = r's/{0}/{1}/{2}'.format(before,
-                                         after, flags)
-
-    return subprocess.check_call(["sed", "-i", "-r", "-e",
-                                  expression,
-                                  os.path.expanduser(filename)])
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa9152..00000000
--- a/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R.
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py deleted file mode 100644 index 370c3e8f..00000000 --- a/hooks/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1636 +0,0 @@ -# Copyright 2013-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"Interactions with the Juju environment" -# -# Authors: -# Charm Helpers Developers - -import copy -from distutils.version import LooseVersion -from enum import Enum -from functools import wraps -from collections import namedtuple, UserDict -import glob -import os -import json -import yaml -import re -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -from charmhelpers import deprecate - - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -TRACE = "TRACE" -MARKER = object() -SH_MAX_ARG = 131071 - - -RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' - 'This may not be compatible with software you are ' - 'running in your shell.') - - -class WORKLOAD_STATES(Enum): - ACTIVE = 'active' - BLOCKED = 'blocked' - MAINTENANCE = 'maintenance' - WAITING = 'waiting' - - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = json.dumps((func, args, kwargs), sort_keys=True, default=str) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, str): - message = repr(message) - command += [message[:SH_MAX_ARG]] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -def function_log(message): - """Write a function progress message""" - command = ['function-log'] - if not isinstance(message, str): - message = repr(message) - command += [message[:SH_MAX_ARG]] - # Missing function-log should not cause failures in unit tests - # Send function_log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - message = "function-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. 
- self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def departing_unit(): - """The departing unit for the current relation hook. - - Available since juju 2.8. - - :returns: the departing unit, or None if the information isn't available. - :rtype: Optional[str] - """ - return os.environ.get('JUJU_DEPARTING_UNIT', None) - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def application_name(): - """ - The name of the deployed application this unit belongs to. - """ - return local_unit().split('/')[0] - - -def service_name(): - """ - .. deprecated:: 0.19.1 - Alias for :func:`application_name`. - """ - return application_name() - - -def model_name(): - """ - Name of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_NAME'] - - -def model_uuid(): - """ - UUID of the model that this unit is deployed in. - """ - return os.environ['JUJU_MODEL_UUID'] - - -def principal_unit(): - """Returns the principal unit of this unit, otherwise None""" - # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT - principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) - # If it's empty, then this unit is the principal - if principal_unit == '': - return os.environ['JUJU_UNIT_NAME'] - elif principal_unit is not None: - return principal_unit - # For Juju 2.1 and below, let's try work out the principle unit by - # the various charms' metadata.yaml. 
- for reltype in relation_types(): - for rid in relation_ids(reltype): - for unit in related_units(rid): - md = _metadata_unit(unit) - if not md: - continue - subordinate = md.pop('subordinate', None) - if not subordinate: - return unit - return None - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path) and os.stat(self.path).st_size: - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - try: - self._prev_dict = json.load(f) - except ValueError as e: - log('Found but was unable to parse previous config data, ' - 'ignoring which will report all values as changed - {}' - .format(str(e)), level=ERROR) - return - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. 
- - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. - - """ - with open(self.path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -_cache_config = None - - -def config(scope=None): - """ - Get the juju charm configuration (scope==None) or individual key, - (scope=str). The returned value is a Python data structure loaded as - JSON from the Juju config command. - - :param scope: If set, return the value for the specified key. - :type scope: Optional[str] - :returns: Either the whole config as a Config, or a key from it. - :rtype: Any - """ - global _cache_config - config_cmd_line = ['config-get', '--all', '--format=json'] - try: - if _cache_config is None: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - _cache_config = Config(config_data) - if scope is not None: - return _cache_config.get(scope) - return _cache_config - except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: - log('Unable to parse output from config-get: config_cmd_line="{}" ' - 'message="{}"' - .format(config_cmd_line, str(e)), level=ERROR) - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None, app=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if app is not None: - if unit is not None: - raise ValueError("Cannot use both 'unit' and 'app'") - _args.append('--app') - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - # unit or application name - if unit or app: - _args.append(unit or app) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -@cached -def _relation_set_accepts_file(): - """Return True if the juju relation-set command accepts a file. - - Cache the result as it won't change during the execution of a hook, and - thus we can make relation_set() more efficient by only checking for the - first relation_set() call. - - :returns: True if relation_set accepts a file. - :rtype: bool - :raises: subprocess.CalledProcessError if the check fails. - """ - return "--file" in subprocess.check_output( - ["relation-set", "--help"], universal_newlines=True) - - -def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - if app: - relation_cmd_line.append('--app') - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if _relation_set_accepts_file(): - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -def expected_peer_units(): - """Get a generator for units we expect to join peer relation based on - goal-state. - - The local unit is excluded from the result to make it easy to gauge - completion of all peers joining the relation with existing hook tools. - - Example usage: - log('peer {} of {} joined peer relation' - .format(len(related_units()), - len(list(expected_peer_units())))) - - This function will raise NotImplementedError if used with juju versions - without goal-state support. - - :returns: iterator - :rtype: types.GeneratorType - :raises: NotImplementedError - """ - if not has_juju_version("2.4.0"): - # goal-state first appeared in 2.4.0. - raise NotImplementedError("goal-state") - _goal_state = goal_state() - return (key for key in _goal_state['units'] - if '/' in key and key != local_unit()) - - -def expected_related_units(reltype=None): - """Get a generator for units we expect to join relation based on - goal-state. - - Note that you can not use this function for the peer relation, take a look - at expected_peer_units() for that. - - This function will raise KeyError if you request information for a - relation type for which juju goal-state does not have information. It will - raise NotImplementedError if used with juju versions without goal-state - support. - - Example usage: - log('participant {} of {} joined relation {}' - .format(len(related_units()), - len(list(expected_related_units())), - relation_type())) - - :param reltype: Relation type to list data for, default is to list data for - the relation type we are currently executing a hook for. - :type reltype: str - :returns: iterator - :rtype: types.GeneratorType - :raises: KeyError, NotImplementedError - """ - if not has_juju_version("2.4.4"): - # goal-state existed in 2.4.0, but did not list individual units to - # join a relation in 2.4.1 through 2.4.3. 
(LP: #1794739) - raise NotImplementedError("goal-state relation unit count") - reltype = reltype or relation_type() - _goal_state = goal_state() - return (key for key in _goal_state['relations'][reltype] if '/' in key) - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json representation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -def _metadata_unit(unit): - """Given the name of a unit (e.g. apache2/0), get the unit charm's - metadata.yaml. Very similar to metadata() but allows us to inspect - other units. Unit needs to be co-located, such as a subordinate or - principal/primary. - - :returns: metadata.yaml as a python object. - - """ - basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) - unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') - if not os.path.exists(joineddir): - return None - with open(joineddir) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. - """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
- """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def _port_op(op_name, port, protocol="TCP"): - """Open or close a service network port""" - _args = [op_name] - icmp = protocol.upper() == "ICMP" - if icmp: - _args.append(protocol) - else: - _args.append('{}/{}'.format(port, protocol)) - try: - subprocess.check_call(_args) - except subprocess.CalledProcessError: - # Older Juju pre 2.3 doesn't support ICMP - # so treat it as a no-op if it fails. 
- if not icmp: - raise - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _port_op('open-port', port, protocol) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _port_op('close-port', port, protocol) - - -def open_ports(start, end, protocol="TCP"): - """Opens a range of service network ports""" - _args = ['open-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def close_ports(start, end, protocol="TCP"): - """Close a range of service network ports""" - _args = ['close-port'] - _args.append('{}-{}/{}'.format(start, end, protocol)) - subprocess.check_call(_args) - - -def opened_ports(): - """Get the opened ports - - *Note that this will only show ports opened in a previous hook* - - :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` - """ - _args = ['opened-ports', '--format=json'] - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. - - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. 
- if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -class NoNetworkBinding(Exception): - pass - - -def charm_dir(): - """Return the root directory of the current charm""" - d = os.environ.get('JUJU_CHARM_DIR') - if d is not None: - return d - return os.environ.get('CHARM_DIR') - - -def cmd_exists(cmd): - """Return True if the specified cmd exists in the path""" - return any( - os.access(os.path.join(path, cmd), os.X_OK) - for path in os.environ["PATH"].split(os.pathsep) - ) - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs.""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -@cached -@deprecate("moved to action_get()", log=log) -def function_get(key=None): - """ - .. deprecated:: - Gets the value of an action parameter, or all key/value param pairs. - """ - cmd = ['function-get'] - # Fallback for older charms. - if not cmd_exists('function-get'): - cmd = ['action-get'] - - if key is not None: - cmd.append(key) - cmd.append('--format=json') - function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return function_data - - -def action_set(values): - """Sets the values to be returned after the action finishes.""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@deprecate("moved to action_set()", log=log) -def function_set(values): - """ - .. deprecated:: - Sets the values to be returned after the function finishes. - """ - cmd = ['function-set'] - # Fallback for older charms. - if not cmd_exists('function-get'): - cmd = ['action-set'] - - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """ - Sets the action status to failed and sets the error message. - - The results set by action_set are preserved. - """ - subprocess.check_call(['action-fail', message]) - - -@deprecate("moved to action_fail()", log=log) -def function_fail(message): - """ - .. deprecated:: - Sets the function status to failed and sets the error message. - - The results set by function_set are preserved. - """ - cmd = ['function-fail'] - # Fallback for older charms. 
- if not cmd_exists('function-fail'): - cmd = ['action-fail'] - cmd.append(message) - - subprocess.check_call(cmd) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def function_name(): - """Get the name of the currently executing function.""" - return os.environ.get('JUJU_FUNCTION_NAME') or action_name() - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def function_id(): - """Get the ID of the currently executing function.""" - return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def function_tag(): - """Get the tag for the currently executing function.""" - return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() - - -def status_set(workload_state, message, application=False): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message instead. - - workload_state -- valid juju workload state. str or WORKLOAD_STATES - message -- status update message - application -- Whether this is an application state set - """ - bad_state_msg = '{!r} is not a valid workload state' - - if isinstance(workload_state, str): - try: - # Convert string to enum. - workload_state = WORKLOAD_STATES[workload_state.upper()] - except KeyError: - raise ValueError(bad_state_msg.format(workload_state)) - - if workload_state not in WORKLOAD_STATES: - raise ValueError(bad_state_msg.format(workload_state)) - - cmd = ['status-set'] - if application: - cmd.append('--application') - cmd.extend([workload_state.value, message]) - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state.value, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -def application_version_set(version): - """Charm authors may trigger this command from any hook to output what - version of the application is running. This could be a package version, - for instance postgres version 9.5. It could also be a build number or - version control revision identifier, for instance git sha 6fb7ba68. 
""" - - cmd = ['application-version-set'] - cmd.append(version) - try: - subprocess.check_call(cmd) - except OSError: - log("Application Version: {}".format(version)) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -@cached -def goal_state(): - """Juju goal state values""" - cmd = ['goal-state', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. The and provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The and provided must match a payload that has been previously - registered with juju using payload-register. The must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. 
'1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Deprecated since Juju 2.3; use network_get() - - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - try: - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - except CalledProcessError as e: - if 'no network config found for binding' in e.output.decode('UTF-8'): - raise NoNetworkBinding("No network binding for {}" - .format(binding)) - else: - raise - return response - - -def network_get(endpoint, relation_id=None): - """ - Retrieve the network details for a relation endpoint - - :param endpoint: string. The name of a relation endpoint - :param relation_id: int. The ID of the relation for the current context. - :return: dict. The loaded YAML output of the network-get query. - :raise: NotImplementedError if request not supported by the Juju version. 
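-
-    Example (the endpoint name is illustrative)::
-
-        details = network_get('public')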
- """ - if not has_juju_version('2.2'): - raise NotImplementedError(juju_version()) # earlier versions require --primary-address - if relation_id and not has_juju_version('2.3'): - raise NotImplementedError # 2.3 added the -r option - - cmd = ['network-get', endpoint, '--format', 'yaml'] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - response = subprocess.check_output( - cmd, - stderr=subprocess.STDOUT).decode('UTF-8').strip() - return yaml.safe_load(response) - - -def add_metric(*args, **kwargs): - """Add metric values. Values may be expressed with keyword arguments. For - metric names containing dashes, these may be expressed as one or more - 'key=value' positional arguments. May only be called from the collect-metrics - hook.""" - _args = ['add-metric'] - _kvpairs = [] - _kvpairs.extend(args) - _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) - _args.extend(sorted(_kvpairs)) - try: - subprocess.check_call(_args) - return - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) - log(log_message, level='INFO') - - -def meter_status(): - """Get the meter status, if running in the meter-status-changed hook.""" - return os.environ.get('JUJU_METER_STATUS') - - -def meter_info(): - """Get the meter status information, if running in the meter-status-changed - hook.""" - return os.environ.get('JUJU_METER_INFO') - - -def iter_units_for_relation_name(relation_name): - """Iterate through all units in a relation - - Generator that iterates through all the units in a relation and yields - a named tuple with rid and unit field names. - - Usage: - data = [(u.rid, u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param relation_name: string relation name - :yield: Named Tuple with rid and unit field names - """ - RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') - for rid in relation_ids(relation_name): - for unit in related_units(rid): - yield RelatedUnit(rid, unit) - - -def ingress_address(rid=None, unit=None): - """ - Retrieve the ingress-address from a relation when available. - Otherwise, return the private-address. - - When used on the consuming side of the relation (unit is a remote - unit), the ingress-address is the IP address that this unit needs - to use to reach the provided service on the remote unit. - - When used on the providing side of the relation (unit == local_unit()), - the ingress-address is the IP address that is advertised to remote - units on this relation. Remote units need to use this address to - reach the local provided service on this unit. - - Note that charms may document some other method to use in - preference to the ingress_address(), such as an address provided - on a different relation attribute or a service discovery mechanism. - This allows charms to redirect inbound connections to their peers - or different applications such as load balancers. - - Usage: - addresses = [ingress_address(rid=u.rid, unit=u.unit) - for u in iter_units_for_relation_name(relation_name)] - - :param rid: string relation id - :param unit: string unit name - :side effect: calls relation_get - :return: string IP address - """ - settings = relation_get(rid=rid, unit=unit) - return (settings.get('ingress-address') or - settings.get('private-address')) - - -def egress_subnets(rid=None, unit=None): - """ - Retrieve the egress-subnets from a relation. 
-
-    This function is to be used on the providing side of the
-    relation, and provides the ranges of addresses that client
-    connections may come from. The result is uninteresting on
-    the consuming side of a relation (unit == local_unit()).
-
-    Returns a stable list of subnets in CIDR format.
-    eg. ['192.168.1.0/24', '2001::F00F/128']
-
-    If egress-subnets is not available, falls back to using the published
-    ingress-address, or finally private-address.
-
-    :param rid: string relation id
-    :param unit: string unit name
-    :side effect: calls relation_get
-    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
-    """
-    def _to_range(addr):
-        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
-            addr += '/32'
-        elif ':' in addr and '/' not in addr:  # IPv6
-            addr += '/128'
-        return addr
-
-    settings = relation_get(rid=rid, unit=unit)
-    if 'egress-subnets' in settings:
-        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
-    if 'ingress-address' in settings:
-        return [_to_range(settings['ingress-address'])]
-    if 'private-address' in settings:
-        return [_to_range(settings['private-address'])]
-    return []  # Should never happen
-
-
-def unit_doomed(unit=None):
-    """Determines if the unit is being removed from the model
-
-    Requires Juju 2.4.1.
-
-    :param unit: string unit name, defaults to local_unit
-    :side effect: calls goal_state
-    :side effect: calls local_unit
-    :side effect: calls has_juju_version
-    :return: True if the unit is being removed, already gone, or never existed
-    """
-    if not has_juju_version("2.4.1"):
-        # We cannot risk blindly returning False for 'we don't know',
-        # because that could cause data loss; if call sites don't
-        # need an accurate answer, they likely don't need this helper
-        # at all.
-        # goal-state existed in 2.4.0, but did not handle removals
-        # correctly until 2.4.1.
-        raise NotImplementedError("unit_doomed")
-    if unit is None:
-        unit = local_unit()
-    gs = goal_state()
-    units = gs.get('units', {})
-    if unit not in units:
-        return True
-    # I don't think 'dead' units ever show up in the goal-state, but
-    # check anyway in addition to 'dying'.
-    return units[unit]['status'] in ('dying', 'dead')
-
-
-def env_proxy_settings(selected_settings=None):
-    """Get proxy settings from process environment variables.
-
-    Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) and juju-ftp-proxy, in a format suitable for passing to
-    an application that reacts to proxy settings passed as environment
-    variables. Some applications support lowercase or uppercase notation
-    (e.g. curl), some support only lowercase (e.g. wget); there are also
-    subjectively rare cases of only uppercase notation support. no_proxy
-    CIDR and wildcard support also varies between runtimes and applications,
-    as there is no enforced standard.
-
-    Some applications may connect to multiple destinations and expose config
-    options that would affect only proxy settings for a specific destination;
-    these should be handled in charms in an application-specific manner.
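-
-    Example (the proxy URL is illustrative)::
-
-        >>> env_proxy_settings(['https'])
-        {'HTTPS_PROXY': 'http://proxy.example:3128',
-         'https_proxy': 'http://proxy.example:3128'}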
-
-    :param selected_settings: format only a subset of possible settings
-    :type selected_settings: list
-    :rtype: Optional[Dict[str, str]]
-    """
-    SUPPORTED_SETTINGS = {
-        'http': 'HTTP_PROXY',
-        'https': 'HTTPS_PROXY',
-        'no_proxy': 'NO_PROXY',
-        'ftp': 'FTP_PROXY'
-    }
-    if selected_settings is None:
-        selected_settings = SUPPORTED_SETTINGS
-
-    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
-                     if k in selected_settings]
-    proxy_settings = {}
-    for var in selected_vars:
-        var_val = os.getenv(var)
-        if var_val:
-            proxy_settings[var] = var_val
-            proxy_settings[var.lower()] = var_val
-        # Now handle juju-prefixed environment variables. The legacy vs new
-        # environment variable usage is mutually exclusive
-        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
-        if charm_var_val:
-            proxy_settings[var] = charm_var_val
-            proxy_settings[var.lower()] = charm_var_val
-    if 'no_proxy' in proxy_settings:
-        if _contains_range(proxy_settings['no_proxy']):
-            log(RANGE_WARNING, level=WARNING)
-    return proxy_settings if proxy_settings else None
-
-
-def _contains_range(addresses):
-    """Check for cidr or wildcard domain in a string.
-
-    Given a string comprising a comma separated list of ip addresses
-    and domain names, determine whether the string contains IP ranges
-    or wildcard domains.
-
-    :param addresses: comma separated list of domains and ip addresses.
-    :type addresses: str
-    """
-    return (
-        # Test for cidr (e.g. 10.20.20.0/24)
-        "/" in addresses or
-        # Test for wildcard domains (*.foo.com or .foo.com)
-        "*" in addresses or
-        addresses.startswith(".") or
-        ",." in addresses or
-        " ." in addresses)
-
-
-def is_subordinate():
-    """Check whether the charm is a subordinate in the unit metadata.
-
-    :returns: True if the unit is subordinate, False otherwise.
-    :rtype: bool
-    """
-    return metadata().get('subordinate') is True
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
deleted file mode 100644
index ad2cab46..00000000
--- a/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,1304 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-#  Nick Moffitt
-#  Matthew Wedgwood
-
-import errno
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-
-from contextlib import contextmanager
-from collections import OrderedDict, defaultdict
-from .hookenv import log, INFO, DEBUG, local_unit, charm_name
-from .fstab import Fstab
-from charmhelpers.osplatform import get_platform
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
-    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-        get_distrib_codename,
-        arch
-    )  # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
-    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
-        service_available,
-        add_new_group,
-        lsb_release,
-        cmp_pkgrevno,
-        CompareHostReleases,
-    )  # flake8: noqa -- ignore F401 for this import
-
-UPDATEDB_PATH = '/etc/updatedb.conf'
-CA_CERT_DIR = '/usr/local/share/ca-certificates'
-
-
-def service_start(service_name, **kwargs):
-    """Start a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be started. The
-    following example starts the ceph-osd service for instance id=4:
-
-    service_start('ceph-osd', id=4)
-
-    :param service_name: the name of the service to start
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('start', service_name, **kwargs)
-
-
-def service_stop(service_name, **kwargs):
-    """Stop a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be stopped. The
-    following example stops the ceph-osd service for instance id=4:
-
-    service_stop('ceph-osd', id=4)
-
-    :param service_name: the name of the service to stop
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for systemd enabled systems.
-    """
-    return service('stop', service_name, **kwargs)
-
-
-def service_enable(service_name, **kwargs):
-    """Enable a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be enabled. The
-    following example enables the ceph-osd service for instance id=4:
-
-    service_enable('ceph-osd', id=4)
-
-    :param service_name: the name of the service to enable
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    return service('enable', service_name, **kwargs)
-
-
-def service_restart(service_name, **kwargs):
-    """Restart a system service.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be restarted. The
-    following example restarts the ceph-osd service for instance id=4:
-
-    service_restart('ceph-osd', id=4)
-
-    :param service_name: the name of the service to restart
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
-    """
-    # Pass kwargs through so non-systemd init systems receive the
-    # documented key=value parameters.
-    return service('restart', service_name, **kwargs)
-
-
-def service_reload(service_name, restart_on_failure=False, **kwargs):
-    """Reload a system service, optionally falling back to restart if
-    reload fails.
-
-    The specified service name is managed via the system level init system.
-    Some init systems (e.g. upstart) require that additional arguments be
-    provided in order to directly control service instances whereas other init
-    systems allow for addressing instances of a service directly by name (e.g.
-    systemd).
-
-    The kwargs allow for the additional parameters to be passed to underlying
-    init systems for those systems which require/allow for them. For example,
-    the ceph-osd upstart script requires the id parameter to be passed along
-    in order to identify which running daemon should be reloaded. The
-    following example reloads the ceph-osd service for instance id=4:
-
-    service_reload('ceph-osd', id=4)
-
-    :param service_name: the name of the service to reload
-    :param restart_on_failure: boolean indicating whether to fallback to a
-                               restart if the reload fails.
-    :param **kwargs: additional parameters to pass to the init system when
-                     managing services. These will be passed as key=value
-                     parameters to the init system's commandline. kwargs
-                     are ignored for init systems not allowing additional
-                     parameters via the commandline (systemd).
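-    :returns: True if the reload (or the fallback restart) succeeded.
-    :rtype: bool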
- """ - service_result = service('reload', service_name, **kwargs) - if not service_result and restart_on_failure: - service_result = service('restart', service_name, **kwargs) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", - **kwargs): - """Pause a system service. - - Stop it, and prevent it from starting again at boot. - - :param service_name: the name of the service to pause - :param init_dir: path to the upstart init directory - :param initd_dir: path to the sysv init directory - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems which do not support - key=value arguments via the commandline. - """ - stopped = True - if service_running(service_name, **kwargs): - stopped = service_stop(service_name, **kwargs) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(service_name=service_name): - service('disable', service_name) - service('mask', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d", **kwargs): - """Resume a system service. - - Re-enable starting again at boot. Start the service. - - :param service_name: the name of the service to resume - :param init_dir: the path to the init dir - :param initd dir: the path to the initd dir - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for systemd enabled systems. - """ - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(service_name=service_name): - service('unmask', service_name) - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - started = service_running(service_name, **kwargs) - - if not started: - started = service_start(service_name, **kwargs) - return started - - -def service(action, service_name, **kwargs): - """Control a system service. - - :param action: the action to take on the service - :param service_name: the name of the service to perform th action on - :param **kwargs: additional params to be passed to the service command in - the form of key=value. 
- """ - if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - for key, value in kwargs.items(): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - return subprocess.call(cmd) == 0 - - -_UPSTART_CONF = "/etc/init/{}.conf" -_INIT_D_CONF = "/etc/init.d/{}" - - -def service_running(service_name, **kwargs): - """Determine whether a system service is running. - - :param service_name: the name of the service - :param **kwargs: additional args to pass to the service command. This is - used to pass additional key=value arguments to the - service command line for managing specific instance - units (e.g. service ceph-osd status id=2). The kwargs - are ignored in systemd services. - """ - if init_is_systemd(service_name=service_name): - return service('is-active', service_name) - else: - if os.path.exists(_UPSTART_CONF.format(service_name)): - try: - cmd = ['status', service_name] - for key, value in kwargs.items(): - parameter = '%s=%s' % (key, value) - cmd.append(parameter) - output = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running - # 'start/running' - if ("start/running" in output or - "is running" in output or - "up and running" in output): - return True - elif os.path.exists(_INIT_D_CONF.format(service_name)): - # Check System V scripts init script return codes - return service('status', service_name) - return False - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(service_name=None): - """ - Returns whether the host uses systemd for the specified service. - - @param Optional[str] service_name: specific name of service - """ - if str(service_name).startswith("snap."): - return True - if lsb_release()['DISTRIB_CODENAME'] == 'trusty': - return False - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', - system_user=False, primary_group=None, - secondary_groups=None, uid=None, home_dir=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. 
-
-    :param str username: Username to create
-    :param str password: Password for user; if ``None``, create a system user
-    :param str shell: The default shell for the user
-    :param bool system_user: Whether to create a login or system user
-    :param str primary_group: Primary group for user; defaults to username
-    :param list secondary_groups: Optional list of additional groups
-    :param int uid: UID for user being created
-    :param str home_dir: Home directory for user
-
-    :returns: The password database entry struct, as returned by `pwd.getpwnam`
-    """
-    try:
-        user_info = pwd.getpwnam(username)
-        log('user {0} already exists!'.format(username))
-        if uid:
-            user_info = pwd.getpwuid(int(uid))
-            log('user with uid {0} already exists!'.format(uid))
-    except KeyError:
-        log('creating user {0}'.format(username))
-        cmd = ['useradd']
-        if uid:
-            cmd.extend(['--uid', str(uid)])
-        if home_dir:
-            cmd.extend(['--home', str(home_dir)])
-        if system_user or password is None:
-            cmd.append('--system')
-        else:
-            cmd.extend([
-                '--create-home',
-                '--shell', shell,
-                '--password', password,
-            ])
-        if not primary_group:
-            try:
-                grp.getgrnam(username)
-                primary_group = username  # avoid "group exists" error
-            except KeyError:
-                pass
-        if primary_group:
-            cmd.extend(['-g', primary_group])
-        if secondary_groups:
-            cmd.extend(['-G', ','.join(secondary_groups)])
-        cmd.append(username)
-        subprocess.check_call(cmd)
-        user_info = pwd.getpwnam(username)
-    return user_info
-
-
-def user_exists(username):
-    """Check if a user exists"""
-    try:
-        pwd.getpwnam(username)
-        user_exists = True
-    except KeyError:
-        user_exists = False
-    return user_exists
-
-
-def uid_exists(uid):
-    """Check if a uid exists"""
-    try:
-        pwd.getpwuid(uid)
-        uid_exists = True
-    except KeyError:
-        uid_exists = False
-    return uid_exists
-
-
-def group_exists(groupname):
-    """Check if a group exists"""
-    try:
-        grp.getgrnam(groupname)
-        group_exists = True
-    except KeyError:
-        group_exists = False
-    return group_exists
-
-
-def gid_exists(gid):
-    """Check if a gid exists"""
-    try:
-        grp.getgrgid(gid)
-        gid_exists = True
-    except KeyError:
-        gid_exists = False
-    return gid_exists
-
-
-def add_group(group_name, system_group=False, gid=None):
-    """Add a group to the system
-
-    Will log but otherwise succeed if the group already exists.
-
-    :param str group_name: group to create
-    :param bool system_group: Create system group
-    :param int gid: GID for group being created
-
-    :returns: The group database entry struct, as returned by `grp.getgrnam`
-    """
-    try:
-        group_info = grp.getgrnam(group_name)
-        log('group {0} already exists!'.format(group_name))
-        if gid:
-            group_info = grp.getgrgid(gid)
-            log('group with gid {0} already exists!'.format(gid))
-    except KeyError:
-        log('creating group {0}'.format(group_name))
-        add_new_group(group_name, system_group, gid)
-        group_info = grp.getgrnam(group_name)
-    return group_info
-
-
-def add_user_to_group(username, group):
-    """Add a user to a group"""
-    cmd = ['gpasswd', '-a', username, group]
-    log("Adding user {} to group {}".format(username, group))
-    subprocess.check_call(cmd)
-
-
-def chage(username, lastday=None, expiredate=None, inactive=None,
-          mindays=None, maxdays=None, root=None, warndays=None):
-    """Change user password expiry information
-
-    :param str username: User to update
-    :param str lastday: Set when password was changed in YYYY-MM-DD format
-    :param str expiredate: Set when user's account will no longer be
-                           accessible in YYYY-MM-DD format.
-                           -1 will remove an account expiration date.
-    :param str inactive: Set the number of days of inactivity after a password
-                         has expired before the account is locked.
-                         -1 will remove an account's inactivity.
-    :param str mindays: Set the minimum number of days between password
-                        changes to MIN_DAYS.
-                        0 indicates the password can be changed anytime.
-    :param str maxdays: Set the maximum number of days during which a
-                        password is valid.
-                        -1 as MAX_DAYS will remove checking maxdays
-    :param str root: Apply changes in the CHROOT_DIR directory
-    :param str warndays: Set the number of days of warning before a password
-                         change is required
-    :raises subprocess.CalledProcessError: if call to chage fails
-    """
-    cmd = ['chage']
-    if root:
-        cmd.extend(['--root', root])
-    if lastday:
-        cmd.extend(['--lastday', lastday])
-    if expiredate:
-        cmd.extend(['--expiredate', expiredate])
-    if inactive:
-        cmd.extend(['--inactive', inactive])
-    if mindays:
-        cmd.extend(['--mindays', mindays])
-    if maxdays:
-        cmd.extend(['--maxdays', maxdays])
-    if warndays:
-        cmd.extend(['--warndays', warndays])
-    cmd.append(username)
-    subprocess.check_call(cmd)
-
-
-remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
-
-
-def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
-    """Replicate the contents of a path"""
-    options = options or ['--delete', '--executability']
-    cmd = ['/usr/bin/rsync', flags]
-    if timeout:
-        cmd = ['timeout', str(timeout)] + cmd
-    cmd.extend(options)
-    cmd.append(from_path)
-    cmd.append(to_path)
-    log(" ".join(cmd))
-    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
-    """Create a symbolic link"""
-    log("Symlinking {} as {}".format(source, destination))
-    cmd = [
-        'ln',
-        '-sf',
-        source,
-        destination,
-    ]
-    subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
-    """Create a directory"""
-    log("Making dir {} {}:{} {:o}".format(path, owner, group,
-                                          perms))
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    realpath = os.path.abspath(path)
-    path_exists = os.path.exists(realpath)
-    if path_exists and force:
-        if not os.path.isdir(realpath):
-            log("Removing non-directory file {} prior to mkdir()".format(path))
-            os.unlink(realpath)
-            os.makedirs(realpath, perms)
-    elif not path_exists:
-        os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
-    os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
-    """Create or overwrite a file with the contents of a byte string."""
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    # let's see if we can grab the file and compare the content, to avoid
-    # doing a write.
-    existing_content = None
-    existing_uid, existing_gid, existing_perms = None, None, None
-    try:
-        with open(path, 'rb') as target:
-            existing_content = target.read()
-        stat = os.stat(path)
-        existing_uid, existing_gid, existing_perms = (
-            stat.st_uid, stat.st_gid, stat.st_mode
-        )
-    except Exception:
-        pass
-    if content != existing_content:
-        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
-            level=DEBUG)
-        with open(path, 'wb') as target:
-            os.fchown(target.fileno(), uid, gid)
-            os.fchmod(target.fileno(), perms)
-            if isinstance(content, str):
-                content = content.encode('UTF-8')
-            target.write(content)
-        return
-    # the contents were the same, but we might still need to change the
-    # ownership or permissions.
-    if existing_uid != uid:
-        log("Changing uid on already existing content: {} -> {}"
-            .format(existing_uid, uid), level=DEBUG)
-        os.chown(path, uid, -1)
-    if existing_gid != gid:
-        log("Changing gid on already existing content: {} -> {}"
-            .format(existing_gid, gid), level=DEBUG)
-        os.chown(path, -1, gid)
-    if existing_perms != perms:
-        log("Changing permissions on existing content: {} -> {}"
-            .format(existing_perms, perms), level=DEBUG)
-        os.chmod(path, perms)
-
-
-def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab"""
-    return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file"""
-    return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
-    """Mount a filesystem at a particular mountpoint"""
-    cmd_args = ['mount']
-    if options is not None:
-        cmd_args.extend(['-o', options])
-    cmd_args.extend([device, mountpoint])
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_add(device, mountpoint, filesystem, options=options)
-    return True
-
-
-def umount(mountpoint, persist=False):
-    """Unmount a filesystem"""
-    cmd_args = ['umount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
-        return False
-
-    if persist:
-        return fstab_remove(mountpoint)
-    return True
-
-
-def mounts():
-    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
-    with open('/proc/mounts') as f:
-        # [['/mount/point','/dev/path'],[...]]
-        system_mounts = [m[1::-1] for m in [l.strip().split()
-                                            for l in f.readlines()]]
-    return system_mounts
-
-
-def fstab_mount(mountpoint):
-    """Mount filesystem using fstab"""
-    cmd_args = ['mount', mountpoint]
-    try:
-        subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError as e:
-        log('Error mounting {}\n{}'.format(mountpoint, e.output))
-        return False
-    return True
-
-
-def file_hash(path, hash_type='md5'):
-    """Generate a hash checksum of the contents of 'path' or None if not found.
-
-    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
-                          such as md5, sha1, sha256, sha512, etc.
-    """
-    if os.path.exists(path):
-        h = getattr(hashlib, hash_type)()
-        with open(path, 'rb') as source:
-            h.update(source.read())
-        return h.hexdigest()
-    else:
-        return None
-
-
-def path_hash(path):
-    """Generate a hash checksum of all files matching 'path'. Standard
-    wildcards like '*' and '?' are supported, see documentation for the 'glob'
-    module for more information.
-
-    :return: dict: A { filename: hash } dictionary for all matched files.
-                   Empty if none found.
-    """
-    return {
-        filename: file_hash(filename)
-        for filename in glob.iglob(path)
-    }
-
-
-def check_hash(path, checksum, hash_type='md5'):
-    """Validate a file using a cryptographic checksum.
-
-    :param str checksum: Value of the checksum used to validate the file.
-    :param str hash_type: Hash algorithm used to generate `checksum`.
-                          Can be any hash algorithm supported by :mod:`hashlib`,
-                          such as md5, sha1, sha256, sha512, etc.
-    :raises ChecksumError: If the file fails the checksum
-
-    """
-    actual_checksum = file_hash(path, hash_type)
-    if checksum != actual_checksum:
-        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
-    """A class derived from ValueError to indicate the checksum failed."""
-    pass
-
-
-class restart_on_change(object):
-    """Decorator and context manager to handle restarts.
-
-    Usage:
-
-        @restart_on_change(restart_map, ...)
-        def function_that_might_trigger_a_restart(...)
-            ...
-
-    Or:
-
-        with restart_on_change(restart_map, ...):
-            do_stuff_that_might_trigger_a_restart()
-            ...
-    """
-
-    def __init__(self, restart_map, stopstart=False, restart_functions=None,
-                 can_restart_now_f=None, post_svc_restart_f=None,
-                 pre_restarts_wait_f=None):
-        """
-        :param restart_map: {file: [service, ...]}
-        :type restart_map: Dict[str, List[str,]]
-        :param stopstart: whether to stop, start or restart a service
-        :type stopstart: boolean
-        :param restart_functions: nonstandard functions to use to restart
-                                  services {svc: func, ...}
-        :type restart_functions: Dict[str, Callable[[str], None]]
-        :param can_restart_now_f: A function used to check if the restart is
-                                  permitted.
-        :type can_restart_now_f: Callable[[str, List[str]], boolean]
-        :param post_svc_restart_f: A function run after a service has
-                                   restarted.
-        :type post_svc_restart_f: Callable[[str], None]
-        :param pre_restarts_wait_f: A function called before any restarts.
-        :type pre_restarts_wait_f: Callable[None, None]
-        """
-        self.restart_map = restart_map
-        self.stopstart = stopstart
-        self.restart_functions = restart_functions
-        self.can_restart_now_f = can_restart_now_f
-        self.post_svc_restart_f = post_svc_restart_f
-        self.pre_restarts_wait_f = pre_restarts_wait_f
-
-    def __call__(self, f):
-        """Work like a decorator.
-
-        Returns a wrapped function that performs the restart if triggered.
-
-        :param f: The function that is being wrapped.
-        :type f: Callable[[Any], Any]
-        :returns: the wrapped function
-        :rtype: Callable[[Any], Any]
-        """
-        @functools.wraps(f)
-        def wrapped_f(*args, **kwargs):
-            return restart_on_change_helper(
-                (lambda: f(*args, **kwargs)),
-                self.restart_map,
-                stopstart=self.stopstart,
-                restart_functions=self.restart_functions,
-                can_restart_now_f=self.can_restart_now_f,
-                post_svc_restart_f=self.post_svc_restart_f,
-                pre_restarts_wait_f=self.pre_restarts_wait_f)
-        return wrapped_f
-
-    def __enter__(self):
-        """Enter the runtime context related to this object. """
-        self.checksums = _pre_restart_on_change_helper(self.restart_map)
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        """Exit the runtime context related to this object.
-
-        The parameters describe the exception that caused the context to be
-        exited. If the context was exited without an exception, all three
-        arguments will be None.
-        """
-        if exc_type is None:
-            _post_restart_on_change_helper(
-                self.checksums,
-                self.restart_map,
-                stopstart=self.stopstart,
-                restart_functions=self.restart_functions,
-                can_restart_now_f=self.can_restart_now_f,
-                post_svc_restart_f=self.post_svc_restart_f,
-                pre_restarts_wait_f=self.pre_restarts_wait_f)
-        # All is good, so return False; any exceptions will propagate.
-        return False
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
-                             restart_functions=None,
-                             can_restart_now_f=None,
-                             post_svc_restart_f=None,
-                             pre_restarts_wait_f=None):
-    """Helper function to perform the restart_on_change function.
-
-    This is provided for decorators to restart services if files described
-    in the restart_map have changed after an invocation of lambda_f().
-
-    This function allows for a number of helper functions to be passed.
-
-    `restart_functions` is a map with a service as the key and the
-    corresponding value being the function to call to restart the service.
-    For example, if `restart_functions={'some-service': my_restart_func}`
-    then `my_restart_func` should be a function which takes one argument
-    which is the service name to be restarted.
-
-    `can_restart_now_f` is a function which checks that a restart is
-    permitted. It should return a bool which indicates if a restart is
-    allowed and should take a service name (str) and a list of changed files
-    (List[str]) as arguments.
-
-    `post_svc_restart_f` is a function which runs after a service has been
-    restarted. It takes the service name that was restarted as an argument.
-
-    `pre_restarts_wait_f` is a function which is called before any restarts
-    occur. The use case for this is an application which wants to try and
-    stagger restarts between units.
-
-    :param lambda_f: function to call.
-    :type lambda_f: Callable[[], ANY]
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :param stopstart: whether to stop, start or restart a service
-    :type stopstart: boolean
-    :param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    :type restart_functions: Dict[str, Callable[[str], None]]
-    :param can_restart_now_f: A function used to check if the restart is
-                              permitted.
-    :type can_restart_now_f: Callable[[str, List[str]], boolean]
-    :param post_svc_restart_f: A function run after a service has
-                               restarted.
-    :type post_svc_restart_f: Callable[[str], None]
-    :param pre_restarts_wait_f: A function called before any restarts.
-    :type pre_restarts_wait_f: Callable[None, None]
-    :returns: result of lambda_f()
-    :rtype: ANY
-    """
-    checksums = _pre_restart_on_change_helper(restart_map)
-    r = lambda_f()
-    _post_restart_on_change_helper(checksums,
-                                   restart_map,
-                                   stopstart,
-                                   restart_functions,
-                                   can_restart_now_f,
-                                   post_svc_restart_f,
-                                   pre_restarts_wait_f)
-    return r
-
-
-def _pre_restart_on_change_helper(restart_map):
-    """Take a snapshot of file hashes.
-
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :returns: Dictionary of file paths and the files checksum.
-    :rtype: Dict[str, str]
-    """
-    return {path: path_hash(path) for path in restart_map}
-
-
-def _post_restart_on_change_helper(checksums,
-                                   restart_map,
-                                   stopstart=False,
-                                   restart_functions=None,
-                                   can_restart_now_f=None,
-                                   post_svc_restart_f=None,
-                                   pre_restarts_wait_f=None):
-    """Check whether files have changed.
-
-    :param checksums: Dictionary of file paths and the files checksum.
-    :type checksums: Dict[str, str]
-    :param restart_map: {file: [service, ...]}
-    :type restart_map: Dict[str, List[str,]]
-    :param stopstart: whether to stop, start or restart a service
-    :type stopstart: boolean
-    :param restart_functions: nonstandard functions to use to restart services
-                              {svc: func, ...}
-    :type restart_functions: Dict[str, Callable[[str], None]]
-    :param can_restart_now_f: A function used to check if the restart is
-                              permitted.
-    :type can_restart_now_f: Callable[[str, List[str]], boolean]
-    :param post_svc_restart_f: A function run after a service has
-                               restarted.
-    :type post_svc_restart_f: Callable[[str], None]
-    :param pre_restarts_wait_f: A function called before any restarts.
-    :type pre_restarts_wait_f: Callable[None, None]
-    """
-    if restart_functions is None:
-        restart_functions = {}
-    changed_files = defaultdict(list)
-    restarts = []
-    # create a list of lists of the services to restart
-    for path, services in restart_map.items():
-        if path_hash(path) != checksums[path]:
-            restarts.append(services)
-            for svc in services:
-                changed_files[svc].append(path)
-    # create a flat list of ordered services without duplicates from lists
-    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
-    if services_list:
-        if pre_restarts_wait_f:
-            pre_restarts_wait_f()
-        actions = ('stop', 'start') if stopstart else ('restart',)
-        for service_name in services_list:
-            if can_restart_now_f:
-                if not can_restart_now_f(service_name,
-                                         changed_files[service_name]):
-                    continue
-            if service_name in restart_functions:
-                restart_functions[service_name](service_name)
-            else:
-                for action in actions:
-                    service(action, service_name)
-            if post_svc_restart_f:
-                post_svc_restart_f(service_name)
-
-
-def pwgen(length=None):
-    """Generate a random password."""
-    if length is None:
-        # A weak PRNG is acceptable for picking the password length alone
-        length = random.choice(range(35, 45))
-    alphanumeric_chars = [
-        l for l in (string.ascii_letters + string.digits)
-        if l not in 'l0QD1vAEIOUaeiou']
-    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
-    # actual password
-    random_generator = random.SystemRandom()
-    random_chars = [
-        random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return ''.join(random_chars)
-
-
-def is_phy_iface(interface):
-    """Returns True if interface is not virtual, otherwise False."""
-    if interface:
-        sys_net = '/sys/class/net'
-        if os.path.isdir(sys_net):
-            for iface in glob.glob(os.path.join(sys_net, '*')):
-                if '/virtual/' in os.path.realpath(iface):
-                    continue
-
-                if interface == os.path.basename(iface):
-                    return True
-
-    return False
-
-
-def get_bond_master(interface):
-    """Returns bond master if interface is bond slave otherwise None.
-
-    NOTE: the provided interface is expected to be physical
-    """
-    if interface:
-        iface_path = '/sys/class/net/%s' % (interface)
-        if os.path.exists(iface_path):
-            if '/virtual/' in os.path.realpath(iface_path):
-                return None
-
-            master = os.path.join(iface_path, 'master')
-            if os.path.exists(master):
-                master = os.path.realpath(master)
-                # make sure it is a bond master
-                if os.path.exists(os.path.join(master, 'bonding')):
-                    return os.path.basename(master)
-
-    return None
-
-
-def list_nics(nic_type=None):
-    """Return a list of nics of given type(s)"""
-    if isinstance(nic_type, str):
-        int_types = [nic_type]
-    else:
-        int_types = nic_type
-
-    interfaces = []
-    if nic_type:
-        for int_type in int_types:
-            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(
-                cmd).decode('UTF-8', errors='replace')
-            ip_output = ip_output.split('\n')
-            ip_output = (line for line in ip_output if line)
-            for line in ip_output:
-                if line.split()[1].startswith(int_type):
-                    matched = re.search('.*: (' + int_type +
-                                        r'[0-9]+\.[0-9]+)@.*', line)
-                    if matched:
-                        iface = matched.groups()[0]
-                    else:
-                        iface = line.split()[1].replace(":", "")
-
-                    if iface not in interfaces:
-                        interfaces.append(iface)
-    else:
-        cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(
-            cmd).decode('UTF-8', errors='replace').split('\n')
-        ip_output = (line.strip() for line in ip_output if line)
-
-        key = re.compile(r'^[0-9]+:\s+(.+):')
-        for line in ip_output:
-            matched = re.search(key, line)
-            if matched:
-                iface = matched.group(1)
-                iface = iface.partition("@")[0]
-                if iface not in interfaces:
-                    interfaces.append(iface)
-
-    return interfaces
-
-
-def set_nic_mtu(nic, mtu):
-    """Set the Maximum Transmission Unit (MTU) on a network interface."""
-    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
-    subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
-    """Return the Maximum Transmission Unit (MTU) for a network interface."""
-    cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(
-        cmd).decode('UTF-8', errors='replace').split('\n')
-    mtu = ""
-    for line in ip_output:
-        words = line.split()
-        if 'mtu' in words:
-            mtu = words[words.index("mtu") + 1]
-    return mtu
-
-
-def get_nic_hwaddr(nic):
-    """Return the Media Access Control (MAC) for a network interface."""
-    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace')
-    hwaddr = ""
-    words = ip_output.split()
-    if 'link/ether' in words:
-        hwaddr = words[words.index('link/ether') + 1]
-    return hwaddr
-
-
-@contextmanager
-def chdir(directory):
-    """Change the current working directory to a different directory for a
-    code block, and restore the previous directory after the block exits.
-    Useful to run commands from a specified directory.
-
-    :param str directory: The directory path to change to for this context.
-    """
-    cur = os.getcwd()
-    try:
-        yield os.chdir(directory)
-    finally:
-        os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """Recursively change user and group ownership of files and directories
-    in given path. Doesn't chown path itself by default, only its children.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    :param bool follow_links: Also follow and chown links if True
-    :param bool chowntopdir: Also chown path itself if True
-    """
-    uid = pwd.getpwnam(owner).pw_uid
-    gid = grp.getgrnam(group).gr_gid
-    if follow_links:
-        chown = os.chown
-    else:
-        chown = os.lchown
-
-    if chowntopdir:
-        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
-        if not broken_symlink:
-            chown(path, uid, gid)
-    for root, dirs, files in os.walk(path, followlinks=follow_links):
-        for name in dirs + files:
-            full = os.path.join(root, name)
-            try:
-                chown(full, uid, gid)
-            except (IOError, OSError) as e:
-                # Intended to ignore "file not found" only; re-raise
-                # anything else.
-                if e.errno != errno.ENOENT:
-                    raise
-
-
-def lchownr(path, owner, group):
-    """Recursively change user and group ownership of files and directories
-    in a given path, not following symbolic links. See the documentation for
-    'os.lchown' for more information.
-
-    :param str path: The string path to start changing ownership.
-    :param str owner: The owner string to use when looking up the uid.
-    :param str group: The group string to use when looking up the gid.
-    """
-    chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
-    """Returns a tuple containing the username & groupname owning the path.
-
-    :param str path: the string path to retrieve the ownership
-    :return tuple(str, str): A (username, groupname) tuple containing the
-                             name of the user and group owning the path.
-    :raises OSError: if the specified path does not exist
-    """
-    stat = os.stat(path)
-    username = pwd.getpwuid(stat.st_uid)[0]
-    groupname = grp.getgrgid(stat.st_gid)[0]
-    return username, groupname
-
-
-def get_total_ram():
-    """The total amount of system RAM in bytes.
-
-    This is what is reported by the OS, and may be overcommitted when
-    there are multiple containers hosted on the same machine.
-    """
-    with open('/proc/meminfo', 'r') as f:
-        for line in f.readlines():
-            if line:
-                key, value, unit = line.split()
-                if key == 'MemTotal:':
-                    assert unit == 'kB', 'Unknown unit'
-                    return int(value) * 1024  # Classic, not KiB.
-        raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
-    """Determine whether unit is running in a container
-
-    @return: boolean indicating if unit is in a container
-    """
-    if init_is_systemd():
-        # Detect using systemd-detect-virt
-        return subprocess.call(['systemd-detect-virt',
-                                '--container']) == 0
-    else:
-        # Detect using upstart container file marker
-        return os.path.exists(UPSTART_CONTAINER_TYPE)
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
-    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
-
-    This method has no effect if the path specified by updatedb_path does not
-    exist or is not a file.
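-
-    Example (the path is illustrative):
-
-        add_to_updatedb_prunepath('/srv/ceph')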
- - @param path: string the path to add to the updatedb.conf PRUNEPATHS value - @param updatedb_path: the path the updatedb.conf file - """ - if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path): - # If the updatedb.conf file doesn't exist then don't attempt to update - # the file as the package providing mlocate may not be installed on - # the local system - return - - with open(updatedb_path, 'r+') as f_id: - updatedb_text = f_id.read() - output = updatedb(updatedb_text, path) - f_id.seek(0) - f_id.write(output) - f_id.truncate() - - -def updatedb(updatedb_text, new_path): - lines = [line for line in updatedb_text.split("\n")] - for i, line in enumerate(lines): - if line.startswith("PRUNEPATHS="): - paths_line = line.split("=")[1].replace('"', '') - paths = paths_line.split(" ") - if new_path not in paths: - paths.append(new_path) - lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) - output = "\n".join(lines) - return output - - -def modulo_distribution(modulo=3, wait=30, non_zero_wait=False): - """ Modulo distribution - - This helper uses the unit number, a modulo value and a constant wait time - to produce a calculated wait time distribution. This is useful in large - scale deployments to distribute load during an expensive operation such as - service restarts. - - If you have 1000 nodes that need to restart 100 at a time 1 minute at a - time: - - time.wait(modulo_distribution(modulo=100, wait=60)) - restart() - - If you need restarts to happen serially set modulo to the exact number of - nodes and set a high constant wait time: - - time.wait(modulo_distribution(modulo=10, wait=120)) - restart() - - @param modulo: int The modulo number creates the group distribution - @param wait: int The constant time wait value - @param non_zero_wait: boolean Override unit % modulo == 0, - return modulo * wait. Used to avoid collisions with - leader nodes which are often given priority. - @return: int Calculated time to wait for unit operation - """ - unit_number = int(local_unit().split('/')[1]) - calculated_wait_time = (unit_number % modulo) * wait - if non_zero_wait and calculated_wait_time == 0: - return modulo * wait - else: - return calculated_wait_time - - -def ca_cert_absolute_path(basename_without_extension): - """Returns absolute path to CA certificate. - - :param basename_without_extension: Filename without extension - :type basename_without_extension: str - :returns: Absolute full path - :rtype: str - """ - return '{}/{}.crt'.format(CA_CERT_DIR, basename_without_extension) - - -def install_ca_cert(ca_cert, name=None): - """ - Install the given cert as a trusted CA. - - The ``name`` is the stem of the filename where the cert is written, and if - not provided, it will default to ``juju-{charm_name}``. - - If the cert is empty or None, or is unchanged, nothing is done. - """ - if not ca_cert: - return - if not isinstance(ca_cert, bytes): - ca_cert = ca_cert.encode('utf8') - if not name: - name = 'juju-{}'.format(charm_name()) - cert_file = ca_cert_absolute_path(name) - new_hash = hashlib.md5(ca_cert).hexdigest() - if file_hash(cert_file) == new_hash: - return - log("Installing new CA cert at: {}".format(cert_file), level=INFO) - write_file(cert_file, ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) - - -def get_system_env(key, default=None): - """Get data from system environment as represented in ``/etc/environment``. 
- - :param key: Key to look up - :type key: str - :param default: Value to return if key is not found - :type default: any - :returns: Value for key if found or contents of default parameter - :rtype: any - :raises: subprocess.CalledProcessError - """ - env_file = '/etc/environment' - # use the shell and env(1) to parse the global environments file. This is - # done to get the correct result even if the user has shell variable - # substitutions or other shell logic in that file. - output = subprocess.check_output( - ['env', '-i', '/bin/bash', '-c', - 'set -a && source {} && env'.format(env_file)], - universal_newlines=True) - for k, v in (line.split('=', 1) - for line in output.splitlines() if '=' in line): - if k == key: - return v - else: - return default diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py deleted file mode 100644 index 7781a396..00000000 --- a/hooks/charmhelpers/core/host_factory/centos.py +++ /dev/null @@ -1,72 +0,0 @@ -import subprocess -import yum -import os - -from charmhelpers.core.strutils import BasicStringComparator - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Host releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - - def __init__(self, item): - raise NotImplementedError( - "CompareHostReleases() is not implemented for CentOS") - - -def service_available(service_name): - # """Determine whether a system service is available.""" - if os.path.isdir('/run/systemd/system'): - cmd = ['systemctl', 'is-enabled', service_name] - else: - cmd = ['service', service_name, 'is-enabled'] - return subprocess.call(cmd) == 0 - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['groupadd'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('-r') - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/os-release in a dict.""" - d = {} - with open('/etc/os-release', 'r') as lsb: - for l in lsb: - s = l.split('=') - if len(s) != 2: - continue - d[s[0].strip()] = s[1].strip() - return d - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports YumBase function if the pkgcache argument - is None. 
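On both platforms cmp_pkgrevno() returns the classic three-way result, so call sites read as feature gates (illustrative; enable_jewel_features is a hypothetical helper)::

    if cmp_pkgrevno('ceph', '10.2.0') >= 0:
        # Installed ceph is 10.2.0 (Jewel) or newer.
        enable_jewel_features()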
- """ - if not pkgcache: - y = yum.YumBase() - packages = y.doPackageLists() - pkgcache = {i.Name: i.version for i in packages['installed']} - pkg = pkgcache[package] - if pkg > revno: - return 1 - if pkg < revno: - return -1 - return 0 diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py deleted file mode 100644 index 0906c5c0..00000000 --- a/hooks/charmhelpers/core/host_factory/ubuntu.py +++ /dev/null @@ -1,122 +0,0 @@ -import subprocess - -from charmhelpers.core.hookenv import cached -from charmhelpers.core.strutils import BasicStringComparator - - -UBUNTU_RELEASES = ( - 'lucid', - 'maverick', - 'natty', - 'oneiric', - 'precise', - 'quantal', - 'raring', - 'saucy', - 'trusty', - 'utopic', - 'vivid', - 'wily', - 'xenial', - 'yakkety', - 'zesty', - 'artful', - 'bionic', - 'cosmic', - 'disco', - 'eoan', - 'focal', - 'groovy', - 'hirsute', - 'impish', - 'jammy', -) - - -class CompareHostReleases(BasicStringComparator): - """Provide comparisons of Ubuntu releases. - - Use in the form of - - if CompareHostReleases(release) > 'trusty': - # do something with mitaka - """ - _list = UBUNTU_RELEASES - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def add_new_group(group_name, system_group=False, gid=None): - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def get_distrib_codename(): - """Return the codename of the distribution - :returns: The codename - :rtype: str - """ - return lsb_release()['DISTRIB_CODENAME'].lower() - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package. - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - from charmhelpers.fetch import apt_pkg, get_installed_version - if not pkgcache: - current_ver = get_installed_version(package) - else: - pkg = pkgcache[package] - current_ver = pkg.current_ver - - return apt_pkg.version_compare(current_ver.ver_str, revno) - - -@cached -def arch(): - """Return the package architecture as a string. - - :returns: the architecture - :rtype: str - :raises: subprocess.CalledProcessError if dpkg command fails - """ - return subprocess.check_output( - ['dpkg', '--print-architecture'] - ).rstrip().decode('UTF-8') diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py deleted file mode 100644 index 54b5b5e2..00000000 --- a/hooks/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. - - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py deleted file mode 100644 index e01f4f8b..00000000 --- a/hooks/charmhelpers/core/kernel.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
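With the defaults shown for hugepage_support() above (nr_hugepages=256, 2MB pages, set_shmmax=False), the net effect amounts to roughly the following files (illustrative; the gid is whatever add_group() returns for the hugetlb group, and max_map_count is only raised when below 2 * nr_hugepages)::

    # /etc/sysctl.d/10-hugepage.conf, written via sysctl.create()
    vm.nr_hugepages=256
    vm.max_map_count=65536
    vm.hugetlb_shm_group=<gid>

    # fstab entry added (and mounted when mount=True)
    nodev /run/hugepages/kvm hugetlbfs mode=1770,gid=<gid>,pagesize=2MB 0 0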
- -import re -import subprocess - -from charmhelpers.osplatform import get_platform -from charmhelpers.core.hookenv import ( - log, - INFO -) - -__platform__ = get_platform() -if __platform__ == "ubuntu": - from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401 - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import -elif __platform__ == "centos": - from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401 - persistent_modprobe, - update_initramfs, - ) # flake8: noqa -- ignore F401 for this import - -__author__ = "Jorge Niedbalski " - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - subprocess.check_call(cmd) - if persist: - persistent_modprobe(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return subprocess.check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return subprocess.check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py deleted file mode 100644 index 1c402c11..00000000 --- a/hooks/charmhelpers/core/kernel_factory/centos.py +++ /dev/null @@ -1,17 +0,0 @@ -import subprocess -import os - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - if not os.path.exists('/etc/rc.modules'): - open('/etc/rc.modules', 'a') - os.chmod('/etc/rc.modules', 111) - with open('/etc/rc.modules', 'r+') as modules: - if module not in modules.read(): - modules.write('modprobe %s\n' % module) - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["dracut", "-f", version]) diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py deleted file mode 100644 index 3de372fd..00000000 --- a/hooks/charmhelpers/core/kernel_factory/ubuntu.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess - - -def persistent_modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module + "\n") - - -def update_initramfs(version='all'): - """Updates an initramfs image.""" - return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py deleted file mode 100644 index 61fd074e..00000000 --- a/hooks/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
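A standalone sketch of the is_module_loaded() check from kernel.py above, with the same line-anchored match (lsmod prints one module per line, name in the first column; re.escape is an addition here for safety)::

    import re
    import subprocess

    def is_module_loaded(module):
        lsmod_out = subprocess.check_output(['lsmod'], universal_newlines=True)
        return bool(re.search('^%s[ ]+' % re.escape(module), lsmod_out, re.M))

    print(is_module_loaded('ip_tables'))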
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py deleted file mode 100644 index 7c37c65c..00000000 --- a/hooks/charmhelpers/core/services/base.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import json -import inspect -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": , - "required_data": , - "provided_data": , - "data_ready": , - "data_lost": , - "start": , - "stop": , - "ports": , - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. 
- - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. 
- """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = inspect.getfullargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. 
- """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - # turn this generator into a list, - # as we'll be going over it multiple times - new_ports = list(service.get('ports', [])) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port) and not self.ports_contains(old_port, new_ports): - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - # A port is either a number or 'ICMP' - protocol = 'TCP' - if str(port).upper() == 'ICMP': - protocol = 'ICMP' - if event_name == 'start': - hookenv.open_port(port, protocol) - elif event_name == 'stop': - hookenv.close_port(port, protocol) - - def ports_contains(self, port, ports): - if not bool(port): - return False - if str(port).upper() != 'ICMP': - port = int(port) - return port in ports - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py deleted file mode 100644 index 5bf62dd5..00000000 --- a/hooks/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
-           'render_template', 'template']
-
-
-class RelationContext(dict):
-    """
-    Base class for a context generator that gets relation data from juju.
-
-    Subclasses must provide the attributes `name`, which is the name of the
-    interface of interest, `interface`, which is the type of the interface of
-    interest, and `required_keys`, which is the set of keys required for the
-    relation to be considered complete. The data for all interfaces matching
-    the `name` attribute that are complete will be used to populate the
-    dictionary values (see `get_data`, below).
-
-    The generated context will be namespaced under the relation :attr:`name`,
-    to prevent potential naming conflicts.
-
-    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
-    :param list additional_required_keys: Extend the list of :attr:`required_keys`
-    """
-    name = None
-    interface = None
-
-    def __init__(self, name=None, additional_required_keys=None):
-        if not hasattr(self, 'required_keys'):
-            self.required_keys = []
-
-        if name is not None:
-            self.name = name
-        if additional_required_keys:
-            self.required_keys.extend(additional_required_keys)
-        self.get_data()
-
-    def __bool__(self):
-        """
-        Returns True if all of the required_keys are available.
-        """
-        return self.is_ready()
-
-    __nonzero__ = __bool__
-
-    def __repr__(self):
-        return super(RelationContext, self).__repr__()
-
-    def is_ready(self):
-        """
-        Returns True if all of the `required_keys` are available from any units.
-        """
-        ready = len(self.get(self.name, [])) > 0
-        if not ready:
-            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
-        return ready
-
-    def _is_ready(self, unit_data):
-        """
-        Helper method that tests a set of relation data and returns True if
-        all of the `required_keys` are present.
-        """
-        return set(unit_data.keys()).issuperset(set(self.required_keys))
-
-    def get_data(self):
-        """
-        Retrieve the relation data for each unit involved in a relation and,
-        if complete, store it in a list under `self[self.name]`. This
-        is automatically called when the RelationContext is instantiated.
-
-        The units are sorted lexicographically first by the service ID, then by
-        the unit ID. Thus, if an interface has two other services, 'db:1'
-        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
-        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
-        set of data, the relation data for the units will be stored in the
-        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
-        If you only care about a single unit on the relation, you can just
-        access it as `{{ interface[0]['key'] }}`. However, if you can at all
-        support multiple units on a relation, you should iterate over the list,
-        like::
-
-            {% for unit in interface -%}
-                {{ unit['key'] }}{% if not loop.last %},{% endif %}
-            {%- endfor %}
-
-        Note that since all sets of relation data from all related services and
-        units are in a single list, if you need to know which service or unit a
-        set of data came from, you'll need to extend this class to preserve
-        that information.
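Concretely, for the MysqlRelation subclass defined just below, a complete 'db' relation with two ready units would populate the context along these lines (hypothetical values; only works in a hook context, where relation data is available)::

    ctx = MysqlRelation()
    # ctx == {'db': [
    #     {'host': '10.0.0.1', 'user': 'ceph', 'password': 's3cret',
    #      'database': 'ceph'},
    #     {'host': '10.0.0.2', 'user': 'ceph', 'password': 's3cret',
    #      'database': 'ceph'},
    # ]}
    if ctx:                            # __bool__ delegates to is_ready()
        first_host = ctx['db'][0]['host']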
- """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.safe_load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. 
- """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.safe_load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py deleted file mode 100644 index 31366871..00000000 --- a/hooks/charmhelpers/core/strutils.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import re - -TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} -FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'} - - -def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. - """ - if isinstance(value, str): - value = str(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in truthy_strings: - return True - elif value in falsey_strings or assume_false: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. - - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, str): - value = str(value) - else: - msg = "Unable to interpret non-string value '%s' as bytes" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if matches: - size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) - else: - # Assume that value passed in is bytes - try: - size = int(value) - except ValueError: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return size - - -class BasicStringComparator(object): - """Provides a class that will compare strings from an iterator type object. - Used to provide > and < comparisons on strings that may not necessarily be - alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the - z-wrap. - """ - - _list = None - - def __init__(self, item): - if self._list is None: - raise Exception("Must define the _list in the class definition!") - try: - self.index = self._list.index(item) - except Exception: - raise KeyError("Item '{}' is not in list '{}'" - .format(item, self._list)) - - def __eq__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index == self._list.index(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __lt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index < self._list.index(other) - - def __ge__(self, other): - return not self.__lt__(other) - - def __gt__(self, other): - assert isinstance(other, str) or isinstance(other, self.__class__) - return self.index > self._list.index(other) - - def __le__(self, other): - return not self.__gt__(other) - - def __str__(self): - """Always give back the item at the index so it can be used in - comparisons like: - - s_mitaka = CompareOpenStack('mitaka') - s_newton = CompareOpenstack('newton') - - assert s_newton > s_mitaka - - @returns: - """ - return self._list[self.index] diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py deleted file mode 100644 index 386428d6..00000000 --- a/hooks/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
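The unit handling in bytes_from_string() is power-of-1024 based, and bare numbers fall through to int(); a few spot checks against the code above::

    assert bytes_from_string('1K') == 1024
    assert bytes_from_string('2MB') == 2 * 1024 ** 2
    assert bytes_from_string('3G') == 3 * 1024 ** 3
    assert bytes_from_string('512') == 512      # no unit: taken as bytes

    assert bool_from_string('Yes') is True      # case/whitespace tolerant
    assert bool_from_string(' off ') is False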
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import yaml - -from subprocess import check_call, CalledProcessError - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, - WARNING, -) - -from charmhelpers.core.host import is_container - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file, ignore=False): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a dict or YAML-formatted string of sysctl - options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :param ignore: If True, ignore "unknown variable" errors. - :type ignore: bool - :returns: None - """ - if type(sysctl_dict) is not dict: - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - else: - sysctl_dict_parsed = sysctl_dict - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: {} values: {}".format(sysctl_file, - sysctl_dict_parsed), - level=DEBUG) - - call = ["sysctl", "-p", sysctl_file] - if ignore: - call.append("-e") - - try: - check_call(call) - except CalledProcessError as e: - if is_container(): - log("Error setting some sysctl keys in this container: {}".format(e.output), - level=WARNING) - else: - raise e diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py deleted file mode 100644 index cb0213dc..00000000 --- a/hooks/charmhelpers/core/templating.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', - template_loader=None, config_template=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - config_template may be provided to render from a provided template instead - of loading from a file. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. 
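Typical call-site usage of create() above from charm code (illustrative key and file name; writing to /etc/sysctl.d requires root)::

    import yaml
    from charmhelpers.core.sysctl import create

    create(yaml.dump({'kernel.pid_max': 4194303}),
           '/etc/sysctl.d/50-ceph-charm.conf')
    # Writes 'kernel.pid_max=4194303' and applies it with:
    #     sysctl -p /etc/sysctl.d/50-ceph-charm.conf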
- - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python3-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. - """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - apt_install('python3-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - - # load from a string if provided explicitly - if config_template is not None: - template = template_env.from_string(config_template) - else: - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py deleted file mode 100644 index d9b8d0b0..00000000 --- a/hooks/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,525 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. 
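And render() above in its usual form, writing a charm template to disk (template name and context values are examples only)::

    from charmhelpers.core.templating import render

    render(source='ceph.conf',          # looked up in the charm's templates/
           target='/etc/ceph/ceph.conf',
           context={'mon_hosts': '10.0.0.1 10.0.0.2'},
           owner='ceph', group='ceph', perms=0o644)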
-
-Here's a fully worked integration example using hookenv.Hooks::
-
-    from charmhelpers.core import hookenv, unitdata
-
-    hook_data = unitdata.HookData()
-    db = unitdata.kv()
-    hooks = hookenv.Hooks()
-
-    @hooks.hook
-    def config_changed():
-        # Print all changes to configuration from previously seen
-        # values.
-        for changed, (prev, cur) in hook_data.conf.items():
-            print('config changed', changed,
-                  'previous value', prev,
-                  'current value', cur)
-
-        # Get some unit specific bookkeeping
-        if not db.get('pkg_key'):
-            key = urllib.urlopen('https://example.com/pkg_key').read()
-            db.set('pkg_key', key)
-
-        # Directly access all charm config as a mapping.
-        conf = db.getrange('config', True)
-
-        # Directly access all relation data as a mapping
-        rels = db.getrange('rels', True)
-
-    if __name__ == '__main__':
-        with hook_data():
-            hooks.execute(sys.argv)
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records the hook name and timestamp)::
-
-    >>> from unitdata import kv
-    >>> db = kv()
-    >>> with db.hook_scope('install'):
-    ...    # do work, in transactional scope.
-    ...    db.set('x', 1)
-    >>> db.get('x')
-    1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
-    >>> kv.set('y', True)
-    >>> kv.get('y')
-    True
-
-    # We can set complex values (dicts, lists) as a single key.
-    >>> kv.set('config', {'a': 1, 'b': True})
-
-    # Also supports returning dictionaries as a record which
-    # provides attribute access.
-    >>> config = kv.get('config', record=True)
-    >>> config.b
-    True
-
-
-Groups of keys can be manipulated with update/getrange::
-
-    >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
-    >>> kv.getrange('gui.', strip=True)
-    {'z': 1, 'y': 2}
-
-When updating values, it's very helpful to understand which values
-have actually changed and how they have changed. The storage
-provides a delta method to provide for this::
-
-    >>> data = {'debug': True, 'option': 2}
-    >>> delta = kv.delta(data, 'config.')
-    >>> delta.debug.previous
-    None
-    >>> delta.debug.current
-    True
-    >>> delta
-    {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
-    >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated with the hook name.
-
-    >>> with db.hook_scope('config-changed'):
-    ...    db.set('x', 42)
-    >>> db.gethistory('x')
-    [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
-     (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu '
-
-
-class Storage(object):
-    """Simple key value database for local unit state within charms.
-
-    Modifications are not persisted unless :meth:`flush` is called.
-
-    To support dicts, lists, integers, floats, and booleans, values
-    are automatically json encoded/decoded.
-
-    Note: to facilitate unit testing, ':memory:' can be passed as the
-    path parameter which causes sqlite3 to only build the db in memory.
-    This should only be used for testing purposes.
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - if self.db_path != ':memory:': - with open(self.db_path, 'a') as f: - os.fchmod(f.fileno(), 0o600) - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. - """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. 
- - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except Exception: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. 
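One behaviour of delta() worth pinning down, since it computes but never persists changes::

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')
    db.update({'debug': False}, prefix='config.')
    d = db.delta({'debug': True, 'level': 2}, 'config.')
    assert d['debug'] == (False, True)   # (previous, current)
    assert d['level'] == (None, 2)       # newly added key
    db.update({'debug': True, 'level': 2}, 'config.')   # persist explicitly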
-
-    Records all unit information, and stores deltas for processing
-    by the hook.
-
-    Sample::
-
-       from charmhelpers.core import hookenv, unitdata
-
-       changes = unitdata.HookData()
-       db = unitdata.kv()
-       hooks = hookenv.Hooks()
-
-       @hooks.hook
-       def config_changed():
-           # View all changes to configuration
-           for changed, (prev, cur) in changes.conf.items():
-               print('config changed', changed,
-                     'previous value', prev,
-                     'current value', cur)
-
-           # Get some unit specific bookkeeping
-           if not db.get('pkg_key'):
-               key = urllib.urlopen('https://example.com/pkg_key').read()
-               db.set('pkg_key', key)
-
-       if __name__ == '__main__':
-           with changes():
-               hooks.execute(sys.argv)
-
-    """
-    def __init__(self):
-        self.kv = kv()
-        self.conf = None
-        self.rels = None
-
-    @contextlib.contextmanager
-    def __call__(self):
-        from charmhelpers.core import hookenv
-        hook_name = hookenv.hook_name()
-
-        with self.kv.hook_scope(hook_name):
-            self._record_charm_version(hookenv.charm_dir())
-            delta_config, delta_relation = self._record_hook(hookenv)
-            yield self.kv, delta_config, delta_relation
-
-    def _record_charm_version(self, charm_dir):
-        # Record revisions... charm revisions are meaningless to charm
-        # authors as they don't control the revision, so logic dependent
-        # on revision is not particularly useful; however, it is useful
-        # for debugging analysis.
-        charm_rev = open(
-            os.path.join(charm_dir, 'revision')).read().strip()
-        charm_rev = charm_rev or '0'
-        revs = self.kv.get('charm_revisions', [])
-        if charm_rev not in revs:
-            revs.append(charm_rev.strip() or '0')
-            self.kv.set('charm_revisions', revs)
-
-    def _record_hook(self, hookenv):
-        data = hookenv.execution_environment()
-        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
-        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
-        self.kv.set('env', dict(data['env']))
-        self.kv.set('unit', data['unit'])
-        self.kv.set('relid', data.get('relid'))
-        return conf_delta, rels_delta
-
-
-class Record(dict):
-
-    __slots__ = ()
-
-    def __getattr__(self, k):
-        if k in self:
-            return self[k]
-        raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
-    __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
-    global _KV
-    if _KV is None:
-        _KV = Storage()
-    return _KV
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1283f25b..00000000
--- a/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright 2014-2021 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import importlib
-from charmhelpers.osplatform import get_platform
-from yaml import safe_load
-from charmhelpers.core.hookenv import (
-    config,
-    log,
-)
-
-from urllib.parse import urlparse, urlunparse
-
-
-# The order of this list is very important. Handlers should be listed from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', -) - - -class SourceConfigError(Exception): - pass - - -class UnhandledSource(Exception): - pass - - -class AptLockError(Exception): - pass - - -class GPGKeyError(Exception): - """Exception occurs when a GPG key cannot be fetched or used. The message - indicates what the problem is. - """ - pass - - -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - -__platform__ = get_platform() -module = "charmhelpers.fetch.%s" % __platform__ -fetch = importlib.import_module(module) - -filter_installed_packages = fetch.filter_installed_packages -filter_missing_packages = fetch.filter_missing_packages -install = fetch.apt_install -upgrade = fetch.apt_upgrade -update = _fetch_update = fetch.apt_update -purge = fetch.apt_purge -add_source = fetch.add_source - -if __platform__ == "ubuntu": - apt_cache = fetch.apt_cache - apt_install = fetch.apt_install - apt_update = fetch.apt_update - apt_upgrade = fetch.apt_upgrade - apt_purge = fetch.apt_purge - apt_autoremove = fetch.apt_autoremove - apt_mark = fetch.apt_mark - apt_hold = fetch.apt_hold - apt_unhold = fetch.apt_unhold - import_key = fetch.import_key - get_upstream_version = fetch.get_upstream_version - apt_pkg = fetch.ubuntu_apt_pkg - get_apt_dpkg_env = fetch.get_apt_dpkg_env - get_installed_version = fetch.get_installed_version - OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES - UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE -elif __platform__ == "centos": - yum_search = fetch.yum_search - - -def configure_sources(update=False, - sources_var='install_sources', - keys_var='install_keys'): - """Configure multiple sources from charm configuration. - - The lists are encoded as yaml fragments in the configuration. - The fragment needs to be included as a string. Sources and their - corresponding keys are of the types supported by add_source(). - - Example config: - install_sources: | - - "ppa:foo" - - "http://example.com/repo precise main" - install_keys: | - - null - - "a1b2c3d4" - - Note that 'null' (a.k.a. None) should not be quoted. - """ - sources = safe_load((config(sources_var) or '').strip()) or [] - keys = safe_load((config(keys_var) or '').strip()) or None - - if isinstance(sources, str): - sources = [sources] - - if keys is None: - for source in sources: - add_source(source, None) - else: - if isinstance(keys, str): - keys = [keys] - - if len(sources) != len(keys): - raise SourceConfigError( - 'Install sources and keys lists are different lengths') - for source, key in zip(sources, keys): - add_source(source, key) - if update: - _fetch_update(fatal=True) - - -def install_remote(source, *args, **kwargs): - """Install a file tree from a remote source. 
- - The specified source should be a url of the form: - scheme://[host]/path[#[option=value][&...]] - - Schemes supported are based on this modules submodules. - Options supported are submodule-specific. - Additional arguments are passed through to the submodule. - - For example:: - - dest = install_remote('http://example.com/archive.tgz', - checksum='deadbeef', - hash_type='sha1') - - This will download `archive.tgz`, validate it using SHA1 and, if - the file is ok, extract it and return the directory in which it - was extracted. If the checksum fails, it will raise - :class:`charmhelpers.core.host.ChecksumError`. - """ - # We ONLY check for True here because can_handle may return a string - # explaining why it can't handle a given source. - handlers = [h for h in plugins() if h.can_handle(source) is True] - for handler in handlers: - try: - return handler.install(source, *args, **kwargs) - except UnhandledSource as e: - log('Install source attempt unsuccessful: {}'.format(e), - level='WARNING') - raise UnhandledSource("No handler found for source {}".format(source)) - - -def install_from_config(config_var_name): - """Install a file from config.""" - charm_config = config() - source = charm_config[config_var_name] - return install_remote(source) - - -def plugins(fetch_handlers=None): - if not fetch_handlers: - fetch_handlers = FETCH_HANDLERS - plugin_list = [] - for handler_name in fetch_handlers: - package, classname = handler_name.rsplit('.', 1) - try: - handler_class = getattr( - importlib.import_module(package), - classname) - plugin_list.append(handler_class()) - except NotImplementedError: - # Skip missing plugins so that they can be omitted from - # installation if desired - log("FetchHandler {} not found, skipping plugin".format( - handler_name)) - return plugin_list diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py deleted file mode 100644 index 2cb2e88b..00000000 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
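Putting the pieces of this module together, a classic charm's install hook would typically do something along these lines (a sketch; the config keys are the configure_sources() defaults, and the URL and checksum are placeholders)::

    # Configure apt sources from charm config, refresh the package
    # index, then fetch and verify a remote archive.
    from charmhelpers.fetch import configure_sources, install_remote

    configure_sources(update=True)   # reads install_sources/install_keys
    dest = install_remote('http://example.com/archive.tgz',
                          checksum='deadbeef', hash_type='sha1')
    # dest is the directory the archive was extracted into; a checksum
    # mismatch raises charmhelpers.core.host.ChecksumError.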
- -import os -import hashlib -import re - -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - -from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, -) -from urllib.parse import urlparse, urlunparse, parse_qs -from urllib.error import URLError - - -def splituser(host): - _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: - return match.group(1, 2) - return None, host - - -def splitpasswd(user): - _passwdprog = re.compile('^([^:]*):(.*)$', re.S) - match = _passwdprog.match(user) - if match: - return match.group(1, 2) - return user, None - - -class ArchiveUrlFetchHandler(BaseFetchHandler): - """ - Handler to download archive files from arbitrary URLs. - - Can fetch from http, https, ftp, and file URLs. - - Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. - - Installs the contents of the archive in $CHARM_DIR/fetched/. - """ - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): - # XXX: Why is this returning a boolean and a string? It's - # doomed to fail since "bool(can_handle('foo://'))" will be True. - return "Wrong source type" - if get_archive_handler(self.base_url(source)): - return True - return False - - def download(self, source, dest): - """ - Download an archive file. - - :param str source: URL pointing to an archive file. - :param str dest: Local path location to download archive file to. - """ - # propagate all exceptions - # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse(source) - if proto in ('http', 'https'): - auth, barehost = splituser(netloc) - if auth is not None: - source = urlunparse((proto, barehost, path, params, query, fragment)) - username, password = splitpasswd(auth) - passman = HTTPPasswordMgrWithDefaultRealm() - # Realm is set to None in add_password to force the username and password - # to be used whatever the realm - passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) - try: - with open(dest, 'wb') as dest_file: - dest_file.write(response.read()) - except Exception as e: - if os.path.isfile(dest): - os.unlink(dest) - raise e - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - tempfile, headers = urlretrieve(url) - check_hash(tempfile, hashsum, validate) - return tempfile - - def install(self, source, dest=None, checksum=None, hash_type='sha1'): - """ - Download and install an archive file, with optional checksum validation. - - The checksum can also be given on the `source` URL's fragment. - For example:: - - handler.install('http://example.com/file.tgz#sha1=deadbeef') - - :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. - :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
- - """ - url_parts = self.parse_url(source) - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) - try: - self.download(source, dld_file) - except URLError as e: - raise UnhandledSource(e.reason) - except OSError as e: - raise UnhandledSource(e.strerror) - options = parse_qs(url_parts.fragment) - for key, value in options.items(): - algorithms = hashlib.algorithms_available - if key in algorithms: - if len(value) != 1: - raise TypeError( - "Expected 1 hash value, not %d" % len(value)) - expected = value[0] - check_hash(dld_file, expected, key) - if checksum: - check_hash(dld_file, checksum, hash_type) - return extract(dld_file, dest) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py deleted file mode 100644 index c4ab3ff1..00000000 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from subprocess import STDOUT, check_output -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) -from charmhelpers.core.host import mkdir - - -if filter_installed_packages(['bzr']) != []: - install(['bzr']) - if filter_installed_packages(['bzr']) != []: - raise NotImplementedError('Unable to install bzr') - - -class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.bzr')) - else: - return True - - def branch(self, source, dest, revno=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - cmd_opts = [] - if revno: - cmd_opts += ['-r', str(revno)] - if os.path.exists(dest): - cmd = ['bzr', 'pull'] - cmd += cmd_opts - cmd += ['--overwrite', '-d', dest, source] - else: - cmd = ['bzr', 'branch'] - cmd += cmd_opts - cmd += [source, dest] - check_output(cmd, stderr=STDOUT) - - def install(self, source, dest=None, revno=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - - if dest and not os.path.exists(dest): - mkdir(dest, perms=0o755) - - try: - self.branch(source, dest_dir, revno) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py deleted file mode 100644 index f8492018..00000000 --- a/hooks/charmhelpers/fetch/centos.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import subprocess -import os -import time -import yum - -from tempfile import NamedTemporaryFile -from charmhelpers.core.hookenv import log - -YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. -YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - - -def filter_installed_packages(packages): - """Return a list of packages that require installation.""" - yb = yum.YumBase() - package_list = yb.doPackageLists() - temp_cache = {p.base_package_name: 1 for p in package_list['installed']} - - _pkgs = [p for p in packages if not temp_cache.get(p, False)] - return _pkgs - - -def install(packages, options=None, fatal=False): - """Install one or more packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('install') - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_yum_command(cmd, fatal) - - -def upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" - cmd = ['yum', '--assumeyes'] - if options is not None: - cmd.extend(options) - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_yum_command(cmd, fatal) - - -def update(fatal=False): - """Update local yum cache.""" - cmd = ['yum', '--assumeyes', 'update'] - log("Update with fatal: {}".format(fatal)) - _run_yum_command(cmd, fatal) - - -def purge(packages, fatal=False): - """Purge one or more packages.""" - cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_yum_command(cmd, fatal) - - -def yum_search(packages): - """Search for a package.""" - output = {} - cmd = ['yum', 'search'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Searching for {}".format(packages)) - result = subprocess.check_output(cmd) - for package in list(packages): - output[package] = package in result - return output - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL with a rpm package - - @param key: A key to be added to the system's keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. - """ - if source is None: - log('Source is not present. 
Skipping') - return - - if source.startswith('http'): - directory = '/etc/yum.repos.d/' - for filename in os.listdir(directory): - with open(directory + filename, 'r') as rpm_file: - if source in rpm_file.read(): - break - else: - log("Add source: {!r}".format(source)) - # write in the charms.repo - with open(directory + 'Charms.repo', 'a') as rpm_file: - rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) - rpm_file.write('name=%s\n' % source[7:]) - rpm_file.write('baseurl=%s\n\n' % source) - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file.name]) - else: - subprocess.check_call(['rpm', '--import', key]) - - -def _run_yum_command(cmd, fatal=False): - """Run an YUM command. - - Checks the output and retry if the fatal flag is set to True. - - :param: cmd: str: The yum command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the yum - # lock was not acquired. - - while result is None or result == YUM_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > YUM_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire YUM lock. Will retry in {} seconds." - "".format(YUM_NO_LOCK_RETRY_DELAY)) - time.sleep(YUM_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py deleted file mode 100644 index 070ca9bb..00000000 --- a/hooks/charmhelpers/fetch/giturl.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
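A brief usage sketch for the CentOS helpers above (the package name is illustrative)::

    # Install a package only if it is missing; fatal=True makes
    # _run_yum_command() retry while the yum lock is held and raise
    # CalledProcessError once the retry budget is exhausted.
    from charmhelpers.fetch.centos import filter_installed_packages, install

    missing = filter_installed_packages(['ceph'])
    if missing:
        install(missing, fatal=True)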
- -import os -from subprocess import check_output, CalledProcessError, STDOUT -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - install, -) - -if filter_installed_packages(['git']) != []: - install(['git']) - if filter_installed_packages(['git']) != []: - raise NotImplementedError('Unable to install git') - - -class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs.""" - - def can_handle(self, source): - url_parts = self.parse_url(source) - # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.git')) - else: - return True - - def clone(self, source, dest, branch="master", depth=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - - if os.path.exists(dest): - cmd = ['git', '-C', dest, 'pull', source, branch] - else: - cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) - check_output(cmd, stderr=STDOUT) - - def install(self, source, branch="master", dest=None, depth=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - try: - self.clone(source, dest_dir, branch, depth) - except CalledProcessError as e: - raise UnhandledSource(e) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers/fetch/python/__init__.py b/hooks/charmhelpers/fetch/python/__init__.py deleted file mode 100644 index bff99dc9..00000000 --- a/hooks/charmhelpers/fetch/python/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2014-2019 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hooks/charmhelpers/fetch/python/debug.py b/hooks/charmhelpers/fetch/python/debug.py deleted file mode 100644 index dd5cca80..00000000 --- a/hooks/charmhelpers/fetch/python/debug.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
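The git handler above can also be driven directly rather than through install_remote(); a sketch (URL and branch illustrative; CHARM_DIR must be set in the environment when dest is omitted)::

    # Clone a branch into $CHARM_DIR/fetched/<branch_name>. Note that
    # depth is appended to the command line as-is, so pass it as a
    # string, not an int.
    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    if handler.can_handle('https://github.com/juju/charm-helpers') is True:
        dest_dir = handler.install('https://github.com/juju/charm-helpers',
                                   branch='master', depth='1')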
- -import atexit -import sys - -from charmhelpers.fetch.python.rpdb import Rpdb -from charmhelpers.core.hookenv import ( - open_port, - close_port, - ERROR, - log -) - -__author__ = "Jorge Niedbalski " - -DEFAULT_ADDR = "0.0.0.0" -DEFAULT_PORT = 4444 - - -def _error(message): - log(message, level=ERROR) - - -def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): - """ - Set a trace point using the remote debugger - """ - atexit.register(close_port, port) - try: - log("Starting a remote python debugger session on %s:%s" % (addr, - port)) - open_port(port) - debugger = Rpdb(addr=addr, port=port) - debugger.set_trace(sys._getframe().f_back) - except Exception: - _error("Cannot start a remote debug session on %s:%s" % (addr, - port)) diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py deleted file mode 100644 index 93f1fa3f..00000000 --- a/hooks/charmhelpers/fetch/python/packages.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import subprocess -import sys - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import charm_dir, log - -__author__ = "Jorge Niedbalski " - - -def pip_execute(*args, **kwargs): - """Overridden pip_execute() to stop sys.path being changed. - - The act of importing main from the pip module seems to cause add wheels - from the /usr/share/python-wheels which are installed by various tools. - This function ensures that sys.path remains the same after the call is - executed. - """ - try: - _path = sys.path - try: - from pip import main as _pip_execute - except ImportError: - apt_update() - apt_install('python3-pip') - from pip import main as _pip_execute - _pip_execute(*args, **kwargs) - finally: - sys.path = _path - - -def parse_options(given, available): - """Given a set of options, check if available""" - for key, value in sorted(given.items()): - if not value: - continue - if key in available: - yield "--{0}={1}".format(key, value) - - -def pip_install_requirements(requirements, constraints=None, **options): - """Install a requirements file. - - :param constraints: Path to pip constraints file. 
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files - """ - command = ["install"] - - available_options = ('proxy', 'src', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - command.append("-r {0}".format(requirements)) - if constraints: - command.append("-c {0}".format(constraints)) - log("Installing from file: {} with constraints {} " - "and options: {}".format(requirements, constraints, command)) - else: - log("Installing from file: {} with options: {}".format(requirements, - command)) - pip_execute(command) - - -def pip_install(package, fatal=False, upgrade=False, venv=None, - constraints=None, **options): - """Install a python package""" - if venv: - venv_python = os.path.join(venv, 'bin/pip') - command = [venv_python, "install"] - else: - command = ["install"] - - available_options = ('proxy', 'src', 'log', 'index-url', ) - for option in parse_options(options, available_options): - command.append(option) - - if upgrade: - command.append('--upgrade') - - if constraints: - command.extend(['-c', constraints]) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Installing {} package with options: {}".format(package, - command)) - if venv: - subprocess.check_call(command) - else: - pip_execute(command) - - -def pip_uninstall(package, **options): - """Uninstall a python package""" - command = ["uninstall", "-q", "-y"] - - available_options = ('proxy', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Uninstalling {} package with options: {}".format(package, - command)) - pip_execute(command) - - -def pip_list(): - """Returns the list of current python installed packages - """ - return pip_execute(["list"]) - - -def pip_create_virtualenv(path=None): - """Create an isolated Python environment.""" - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] - - if path: - venv_path = path - else: - venv_path = os.path.join(charm_dir(), 'venv') - - if not os.path.exists(venv_path): - subprocess.check_call(['virtualenv', venv_path] + extra_flags) diff --git a/hooks/charmhelpers/fetch/python/rpdb.py b/hooks/charmhelpers/fetch/python/rpdb.py deleted file mode 100644 index 9b31610c..00000000 --- a/hooks/charmhelpers/fetch/python/rpdb.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
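A usage sketch for the pip helpers above (the venv path and package name are illustrative)::

    # Create an isolated environment, then install into it. With venv
    # set, pip_install() shells out to <venv>/bin/pip rather than
    # calling the in-process pip module via pip_execute().
    from charmhelpers.fetch.python.packages import (
        pip_create_virtualenv,
        pip_install,
    )

    pip_create_virtualenv('/srv/app/venv')
    pip_install('requests', venv='/srv/app/venv', upgrade=True)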
- -"""Remote Python Debugger (pdb wrapper).""" - -import pdb -import socket -import sys - -__author__ = "Bertrand Janin " -__version__ = "0.1.3" - - -class Rpdb(pdb.Pdb): - - def __init__(self, addr="127.0.0.1", port=4444): - """Initialize the socket and initialize pdb.""" - - # Backup stdin and stdout before replacing them by the socket handle - self.old_stdout = sys.stdout - self.old_stdin = sys.stdin - - # Open a 'reusable' socket to let the webapp reload on the same port - self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) - self.skt.bind((addr, port)) - self.skt.listen(1) - (clientsocket, address) = self.skt.accept() - handle = clientsocket.makefile('rw') - pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) - sys.stdout = sys.stdin = handle - - def shutdown(self): - """Revert stdin and stdout, close the socket.""" - sys.stdout = self.old_stdout - sys.stdin = self.old_stdin - self.skt.close() - self.set_continue() - - def do_continue(self, arg): - """Stop all operation on ``continue``.""" - self.shutdown() - return 1 - - do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/hooks/charmhelpers/fetch/python/version.py b/hooks/charmhelpers/fetch/python/version.py deleted file mode 100644 index 3eb42103..00000000 --- a/hooks/charmhelpers/fetch/python/version.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -__author__ = "Jorge Niedbalski " - - -def current_version(): - """Current system python version""" - return sys.version_info - - -def current_version_string(): - """Current system python version as string major.minor.micro""" - return "{0}.{1}.{2}".format(sys.version_info.major, - sys.version_info.minor, - sys.version_info.micro) diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py deleted file mode 100644 index 36d6bce9..00000000 --- a/hooks/charmhelpers/fetch/snap.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Charm helpers snap for classic charms. 
- -If writing reactive charms, use the snap layer: -https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html -""" -import subprocess -import os -from time import sleep -from charmhelpers.core.hookenv import log - -__author__ = 'Joseph Borg ' - -# The return code for "couldn't acquire lock" in Snap -# (hopefully this will be improved). -SNAP_NO_LOCK = 1 -SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. -SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. -SNAP_CHANNELS = [ - 'edge', - 'beta', - 'candidate', - 'stable', -] - - -class CouldNotAcquireLockException(Exception): - pass - - -class InvalidSnapChannel(Exception): - pass - - -def _snap_exec(commands): - """ - Execute snap commands. - - :param commands: List commands - :return: Integer exit code - """ - assert type(commands) == list - - retry_count = 0 - return_code = None - - while return_code is None or return_code == SNAP_NO_LOCK: - try: - return_code = subprocess.check_call(['snap'] + commands, - env=os.environ) - except subprocess.CalledProcessError as e: - retry_count += + 1 - if retry_count > SNAP_NO_LOCK_RETRY_COUNT: - raise CouldNotAcquireLockException( - 'Could not acquire lock after {} attempts' - .format(SNAP_NO_LOCK_RETRY_COUNT)) - return_code = e.returncode - log('Snap failed to acquire lock, trying again in {} seconds.' - .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') - sleep(SNAP_NO_LOCK_RETRY_DELAY) - - return return_code - - -def snap_install(packages, *flags): - """ - Install a snap package. - - :param packages: String or List String package name - :param flags: List String flags to pass to install command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Installing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with option(s) "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['install'] + flags + packages) - - -def snap_remove(packages, *flags): - """ - Remove a snap package. - - :param packages: String or List String package name - :param flags: List String flags to pass to remove command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Removing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with options "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['remove'] + flags + packages) - - -def snap_refresh(packages, *flags): - """ - Refresh / Update snap package. 
- - :param packages: String or List String package name - :param flags: List String flags to pass to refresh command - :return: Integer return code from snap - """ - if type(packages) is not list: - packages = [packages] - - flags = list(flags) - - message = 'Refreshing snap(s) "%s"' % ', '.join(packages) - if flags: - message += ' with options "%s"' % ', '.join(flags) - - log(message, level='INFO') - return _snap_exec(['refresh'] + flags + packages) - - -def valid_snap_channel(channel): - """ Validate snap channel exists - - :raises InvalidSnapChannel: When channel does not exist - :return: Boolean - """ - if channel.lower() in SNAP_CHANNELS: - return True - else: - raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py deleted file mode 100644 index e6f8a0ad..00000000 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ /dev/null @@ -1,1003 +0,0 @@ -# Copyright 2014-2021 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict -import platform -import re -import subprocess -import sys -import time - -from charmhelpers import deprecate -from charmhelpers.core.host import get_distrib_codename, get_system_env - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - WARNING, - env_proxy_settings, -) -from charmhelpers.fetch import SourceConfigError, GPGKeyError -from charmhelpers.fetch import ubuntu_apt_pkg - -PROPOSED_POCKET = ( - "# Proposed\n" - "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " - "multiverse restricted\n") -PROPOSED_PORTS_POCKET = ( - "# Proposed\n" - "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " - "multiverse restricted\n") -# Only supports 64bit and ppc64 at the moment. 
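Before the apt machinery below, a usage sketch for the snap helpers above (the snap name, channel and flags are illustrative)::

    # valid_snap_channel() raises InvalidSnapChannel for an unknown
    # channel; extra positional flags are passed through to `snap`.
    from charmhelpers.fetch.snap import snap_install, valid_snap_channel

    if valid_snap_channel('stable'):
        snap_install('core', '--channel=stable', '--classic')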
-ARCH_TO_PROPOSED_POCKET = { - 'x86_64': PROPOSED_POCKET, - 'ppc64le': PROPOSED_PORTS_POCKET, - 'aarch64': PROPOSED_PORTS_POCKET, - 's390x': PROPOSED_PORTS_POCKET, -} -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'liberty/updates': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'mitaka/updates': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 'newton/updates': 'xenial-updates/newton', - 'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', - # Ocata - 'ocata': 'xenial-updates/ocata', - 'ocata/updates': 'xenial-updates/ocata', - 'xenial-ocata': 'xenial-updates/ocata', - 'xenial-ocata/updates': 'xenial-updates/ocata', - 'xenial-updates/ocata': 'xenial-updates/ocata', - 'ocata/proposed': 'xenial-proposed/ocata', - 'xenial-ocata/proposed': 'xenial-proposed/ocata', - 'xenial-proposed/ocata': 'xenial-proposed/ocata', - # Pike - 'pike': 'xenial-updates/pike', - 'xenial-pike': 'xenial-updates/pike', - 'xenial-pike/updates': 'xenial-updates/pike', - 'xenial-updates/pike': 'xenial-updates/pike', - 'pike/proposed': 'xenial-proposed/pike', - 'xenial-pike/proposed': 'xenial-proposed/pike', - 'xenial-proposed/pike': 'xenial-proposed/pike', - # Queens - 'queens': 'xenial-updates/queens', - 'xenial-queens': 'xenial-updates/queens', - 'xenial-queens/updates': 'xenial-updates/queens', - 'xenial-updates/queens': 'xenial-updates/queens', - 'queens/proposed': 'xenial-proposed/queens', - 'xenial-queens/proposed': 'xenial-proposed/queens', - 'xenial-proposed/queens': 'xenial-proposed/queens', - # Rocky - 'rocky': 'bionic-updates/rocky', - 'bionic-rocky': 'bionic-updates/rocky', - 'bionic-rocky/updates': 'bionic-updates/rocky', - 'bionic-updates/rocky': 'bionic-updates/rocky', - 'rocky/proposed': 'bionic-proposed/rocky', - 'bionic-rocky/proposed': 'bionic-proposed/rocky', - 'bionic-proposed/rocky': 'bionic-proposed/rocky', - # Stein - 'stein': 'bionic-updates/stein', - 'bionic-stein': 'bionic-updates/stein', - 'bionic-stein/updates': 'bionic-updates/stein', - 'bionic-updates/stein': 'bionic-updates/stein', - 'stein/proposed': 'bionic-proposed/stein', - 'bionic-stein/proposed': 'bionic-proposed/stein', - 'bionic-proposed/stein': 'bionic-proposed/stein', - # Train - 'train': 'bionic-updates/train', - 'bionic-train': 'bionic-updates/train', - 'bionic-train/updates': 'bionic-updates/train', - 'bionic-updates/train': 'bionic-updates/train', - 'train/proposed': 'bionic-proposed/train', - 'bionic-train/proposed': 'bionic-proposed/train', - 'bionic-proposed/train': 'bionic-proposed/train', - # Ussuri - 'ussuri': 'bionic-updates/ussuri', - 'bionic-ussuri': 'bionic-updates/ussuri', - 'bionic-ussuri/updates': 'bionic-updates/ussuri', - 'bionic-updates/ussuri': 'bionic-updates/ussuri', - 'ussuri/proposed': 'bionic-proposed/ussuri', - 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', - 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', - # Victoria - 'victoria': 'focal-updates/victoria', - 'focal-victoria': 'focal-updates/victoria', - 'focal-victoria/updates': 'focal-updates/victoria', - 'focal-updates/victoria': 'focal-updates/victoria', - 'victoria/proposed': 'focal-proposed/victoria', - 'focal-victoria/proposed': 'focal-proposed/victoria', - 'focal-proposed/victoria': 'focal-proposed/victoria', - # Wallaby - 'wallaby': 'focal-updates/wallaby', - 'focal-wallaby': 'focal-updates/wallaby', - 'focal-wallaby/updates': 'focal-updates/wallaby', - 'focal-updates/wallaby': 'focal-updates/wallaby', - 'wallaby/proposed': 'focal-proposed/wallaby', - 'focal-wallaby/proposed': 
'focal-proposed/wallaby', - 'focal-proposed/wallaby': 'focal-proposed/wallaby', - # Xena - 'xena': 'focal-updates/xena', - 'focal-xena': 'focal-updates/xena', - 'focal-xena/updates': 'focal-updates/xena', - 'focal-updates/xena': 'focal-updates/xena', - 'xena/proposed': 'focal-proposed/xena', - 'focal-xena/proposed': 'focal-proposed/xena', - 'focal-proposed/xena': 'focal-proposed/xena', - # Yoga - 'yoga': 'focal-updates/yoga', - 'focal-yoga': 'focal-updates/yoga', - 'focal-yoga/updates': 'focal-updates/yoga', - 'focal-updates/yoga': 'focal-updates/yoga', - 'yoga/proposed': 'focal-proposed/yoga', - 'focal-yoga/proposed': 'focal-proposed/yoga', - 'focal-proposed/yoga': 'focal-proposed/yoga', -} - - -OPENSTACK_RELEASES = ( - 'diablo', - 'essex', - 'folsom', - 'grizzly', - 'havana', - 'icehouse', - 'juno', - 'kilo', - 'liberty', - 'mitaka', - 'newton', - 'ocata', - 'pike', - 'queens', - 'rocky', - 'stein', - 'train', - 'ussuri', - 'victoria', - 'wallaby', - 'xena', - 'yoga', -) - - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ('wily', 'liberty'), - ('xenial', 'mitaka'), - ('yakkety', 'newton'), - ('zesty', 'ocata'), - ('artful', 'pike'), - ('bionic', 'queens'), - ('cosmic', 'rocky'), - ('disco', 'stein'), - ('eoan', 'train'), - ('focal', 'ussuri'), - ('groovy', 'victoria'), - ('hirsute', 'wallaby'), - ('impish', 'xena'), - ('jammy', 'yoga'), -]) - - -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times. - - -def filter_installed_packages(packages): - """Return a list of packages that require installation.""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def filter_missing_packages(packages): - """Return a list of packages that are installed. - - :param packages: list of packages to evaluate. - :returns list: Packages that are installed. - """ - return list( - set(packages) - - set(filter_installed_packages(packages)) - ) - - -def apt_cache(*_, **__): - """Shim returning an object simulating the apt_pkg Cache. - - :param _: Accept arguments for compatibility, not used. - :type _: any - :param __: Accept keyword arguments for compatibility, not used. - :type __: any - :returns:Object used to interrogate the system apt and dpkg databases. - :rtype:ubuntu_apt_pkg.Cache - """ - if 'apt_pkg' in sys.modules: - # NOTE(fnordahl): When our consumer use the upstream ``apt_pkg`` module - # in conjunction with the apt_cache helper function, they may expect us - # to call ``apt_pkg.init()`` for them. - # - # Detect this situation, log a warning and make the call to - # ``apt_pkg.init()`` to avoid the consumer Python interpreter from - # crashing with a segmentation fault. 
- @deprecate( - 'Support for use of upstream ``apt_pkg`` module in conjunction' - 'with charm-helpers is deprecated since 2019-06-25', - date=None, log=lambda x: log(x, level=WARNING)) - def one_shot_log(): - pass - - one_shot_log() - sys.modules['apt_pkg'].init() - return ubuntu_apt_pkg.Cache() - - -def apt_install(packages, options=None, fatal=False, quiet=False): - """Install one or more packages. - - :param packages: Package(s) to install - :type packages: Option[str, List[str]] - :param options: Options to pass on to apt-get - :type options: Option[None, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param quiet: if True (default), suppress log message to stdout/stderr - :type quiet: bool - :raises: subprocess.CalledProcessError - """ - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - if not quiet: - log("Installing {} with options: {}" - .format(packages, options)) - _run_apt_command(cmd, fatal, quiet=quiet) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages. - - :param options: Options to pass on to apt-get - :type options: Option[None, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` - :type dist: bool - :raises: subprocess.CalledProcessError - """ - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache.""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages. - - :param packages: Package(s) to install - :type packages: Option[str, List[str]] - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :raises: subprocess.CalledProcessError - """ - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_autoremove(purge=True, fatal=False): - """Purge one or more packages. - :param purge: Whether the ``--purge`` option should be passed on or not. - :type purge: bool - :param fatal: Whether the command's output should be checked and - retried. 
- :type fatal: bool - :raises: subprocess.CalledProcessError - """ - cmd = ['apt-get', '--assume-yes', 'autoremove'] - if purge: - cmd.append('--purge') - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark.""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, str): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - - -def import_key(key): - """Import an ASCII Armor key. - - A Radix64 format keyid is also supported for backwards - compatibility. In this case Ubuntu keyserver will be - queried for a key via HTTPS by its keyid. This method - is less preferable because https proxy servers may - require traffic decryption which is equivalent to a - man-in-the-middle attack (a proxy server impersonates - keyserver TLS certificates and has to be explicitly - trusted by the system). - - :param key: A GPG key in ASCII armor format, - including BEGIN and END markers or a keyid. - :type key: (bytes, str) - :raises: GPGKeyError if the key could not be imported - """ - key = key.strip() - if '-' in key or '\n' in key: - # Send everything not obviously a keyid to GPG to import, as - # we trust its validation better than our own. eg. handling - # comments before the key. - log("PGP key found (looks like ASCII Armor format)", level=DEBUG) - if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and - '-----END PGP PUBLIC KEY BLOCK-----' in key): - log("Writing provided PGP key in the binary format", level=DEBUG) - key_bytes = key.encode('utf-8') - key_name = _get_keyid_by_gpg_key(key_bytes) - key_gpg = _dearmor_gpg_key(key_bytes) - _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) - else: - raise GPGKeyError("ASCII armor markers missing from GPG key") - else: - log("PGP key found (looks like Radix64 format)", level=WARNING) - log("SECURELY importing PGP key from keyserver; " - "full key not provided.", level=WARNING) - # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL - # to retrieve GPG keys. `apt-key adv` command is deprecated as is - # apt-key in general as noted in its manpage. See lp:1433761 for more - # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop - # gpg - key_asc = _get_key_by_keyid(key) - # write the key in GPG format so that apt-key list shows it - key_gpg = _dearmor_gpg_key(key_asc) - _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) - - -def _get_keyid_by_gpg_key(key_material): - """Get a GPG key fingerprint by GPG key material. - Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded - or binary GPG key material. Can be used, for example, to generate file - names for keys passed via charm options. 
- - :param key_material: ASCII armor-encoded or binary GPG key material - :type key_material: bytes - :raises: GPGKeyError if invalid key material has been provided - :returns: A GPG key fingerprint - :rtype: str - """ - # Use the same gpg command for both Xenial and Bionic - cmd = 'gpg --with-colons --with-fingerprint' - ps = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE) - out, err = ps.communicate(input=key_material) - out = out.decode('utf-8') - err = err.decode('utf-8') - if 'gpg: no valid OpenPGP data found.' in err: - raise GPGKeyError('Invalid GPG key material provided') - # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) - return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) - - -def _get_key_by_keyid(keyid): - """Get a key via HTTPS from the Ubuntu keyserver. - Different key ID formats are supported by SKS keyservers (the longer ones - are more secure, see "dead beef attack" and https://evil32.com/). Since - HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will - impersonate keyserver.ubuntu.com and generate a certificate with - keyserver.ubuntu.com in the CN field or in SubjAltName fields of a - certificate. If such proxy behavior is expected it is necessary to add the - CA certificate chain containing the intermediate CA of the SSLBump proxy to - every machine that this code runs on via ca-certs cloud-init directive (via - cloudinit-userdata model-config) or via other means (such as through a - custom charm option). Also note that DNS resolution for the hostname in a - URL is done at a proxy server - not at the client side. - - 8-digit (32 bit) key ID - https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 - 16-digit (64 bit) key ID - https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 - 40-digit key ID: - https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 - - :param keyid: An 8, 16 or 40 hex digit keyid to find a key for - :type keyid: (bytes, str) - :returns: A key material for the specified GPG key id - :rtype: (str, bytes) - :raises: subprocess.CalledProcessError - """ - # options=mr - machine-readable output (disables html wrappers) - keyserver_url = ('https://keyserver.ubuntu.com' - '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') - curl_cmd = ['curl', keyserver_url.format(keyid)] - # use proxy server settings in order to retrieve the key - return subprocess.check_output(curl_cmd, - env=env_proxy_settings(['https'])) - - -def _dearmor_gpg_key(key_asc): - """Converts a GPG key in the ASCII armor format to the binary format. - - :param key_asc: A GPG key in ASCII armor format. - :type key_asc: (str, bytes) - :returns: A GPG key in binary format - :rtype: (str, bytes) - :raises: GPGKeyError - """ - ps = subprocess.Popen(['gpg', '--dearmor'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE) - out, err = ps.communicate(input=key_asc) - # no need to decode output as it is binary (invalid utf-8), only error - err = err.decode('utf-8') - if 'gpg: no valid OpenPGP data found.' in err: - raise GPGKeyError('Invalid GPG key material. Check your network setup' - ' (MTU, routing, DNS) and/or proxy server settings' - ' as well as destination keyserver status.') - else: - return out - - -def _write_apt_gpg_keyfile(key_name, key_material): - """Writes GPG key material into a file at a provided path. 
- - :param key_name: A key name to use for a key file (could be a fingerprint) - :type key_name: str - :param key_material: A GPG key material (binary) - :type key_material: (str, bytes) - """ - with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), - 'wb') as keyf: - keyf.write(key_material) - - -def add_source(source, key=None, fail_invalid=False): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - Full list of source specifications supported by the function are: - - 'distro': A NOP; i.e. it has no effect. - 'proposed': the proposed deb spec [2] is wrtten to - /etc/apt/sources.list/proposed - 'distro-proposed': adds -proposed to the debs [2] - 'ppa:': add-apt-repository --yes - 'deb ': add-apt-repository --yes deb - 'http://....': add-apt-repository --yes http://... - 'cloud-archive:': add-apt-repository -yes cloud-archive: - 'cloud:[-staging]': specify a Cloud Archive pocket with - optional staging version. If staging is used then the staging PPA [2] - with be used. If staging is NOT used then the cloud archive [3] will be - added, and the 'ubuntu-cloud-keyring' package will be added for the - current distro. - '': translate to cloud: based on the current - distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or - 'distro'. - '/proposed': as above, but for proposed. - - Otherwise the source is not recognised and this is logged to the juju log. - However, no error is raised, unless sys_error_on_exit is True. - - [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main - where {} is replaced with the derived pocket name. - [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ - main universe multiverse restricted - where {} is replaced with the lsb_release codename (e.g. xenial) - [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu - to /etc/apt/sources.list.d/cloud-archive-list - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automatically, so should not be provided. - - @param fail_invalid: (boolean) if True, then the function raises a - SourceConfigError is there is no matching installation source. - - @raises SourceConfigError() if for cloud:, the is not a - valid pocket in CLOUD_ARCHIVE_POCKETS - """ - # extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use - # the list in contrib.openstack.utils as it might not be included in - # classic charms and would break everything. Having OpenStack specific - # code in this file is a bit of an antipattern, anyway. 
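For concreteness, the specs listed above translate into calls like the following sketch (all values are illustrative, and the cloud archive example assumes the matching Ubuntu series on the unit)::

    # Representative add_source() invocations; values are examples only.
    from charmhelpers.fetch.ubuntu import add_source

    add_source('distro')                    # explicit no-op
    add_source('proposed')                  # write the -proposed pocket
    add_source('ppa:charmers/example')      # via add-apt-repository
    add_source('cloud:focal-yoga')          # Ubuntu Cloud Archive pocket
    add_source('deb https://private.example.com/ubuntu focal main',
               key='a1b2c3d4')              # keyid fetched over HTTPS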
- os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES)) - - _mapping = OrderedDict([ - (r"^distro$", lambda: None), # This is a NOP - (r"^(?:proposed|distro-proposed)$", _add_proposed), - (r"^cloud-archive:(.*)$", _add_apt_repository), - (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), - (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), - (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), - (r"^cloud:(.*)$", _add_cloud_pocket), - (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), - (r"^{}\/proposed$".format(os_versions_regex), - _add_bare_openstack_proposed), - (r"^{}$".format(os_versions_regex), _add_bare_openstack), - ]) - if source is None: - source = '' - for r, fn in _mapping.items(): - m = re.match(r, source) - if m: - if key: - # Import key before adding the source which depends on it, - # as refreshing packages could fail otherwise. - try: - import_key(key) - except GPGKeyError as e: - raise SourceConfigError(str(e)) - # call the associated function with the captured groups - # raises SourceConfigError on error. - fn(*m.groups()) - break - else: - # nothing matched. log an error and maybe sys.exit - err = "Unknown source: {!r}".format(source) - log(err) - if fail_invalid: - raise SourceConfigError(err) - - -def _add_proposed(): - """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list - - Uses get_distrib_codename to determine the correct stanza for - the deb line. - - For Intel architectures PROPOSED_POCKET is used for the release, but for - other architectures PROPOSED_PORTS_POCKET is used for the release. - """ - release = get_distrib_codename() - arch = platform.machine() - if arch not in ARCH_TO_PROPOSED_POCKET.keys(): - raise SourceConfigError("Arch {} not supported for (distro-)proposed" - .format(arch)) - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) - - -def _add_apt_repository(spec): - """Add the spec using add_apt_repository - - :param spec: the parameter to pass to add_apt_repository - :type spec: str - """ - if '{series}' in spec: - series = get_distrib_codename() - spec = spec.replace('{series}', series) - _run_with_retries(['add-apt-repository', '--yes', spec], - cmd_env=env_proxy_settings(['https', 'http', 'no_proxy']) - ) - - -def _add_cloud_pocket(pocket): - """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list - - Note that this overwrites the existing file if there is one. - - This function also converts the simple pocket in to the actual pocket using - the CLOUD_ARCHIVE_POCKETS mapping. - - :param pocket: string representing the pocket to add a deb spec for. - :raises: SourceConfigError if the cloud pocket doesn't exist or the - requested release doesn't match the current distro version. - """ - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - - -def _add_cloud_staging(cloud_archive_release, openstack_release): - """Add the cloud staging repository which is in - ppa:ubuntu-cloud-archive/-staging - - This function checks that the cloud_archive_release matches the current - codename for the distro that charm is being installed on. - - :param cloud_archive_release: string, codename for the release. 
-    :param openstack_release: String, codename for the openstack release.
-    :raises: SourceConfigError if the cloud_archive_release doesn't match the
-        current version of the os.
-    """
-    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
-    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
-    cmd = 'add-apt-repository -y {}'.format(ppa)
-    _run_with_retries(cmd.split(' '))
-
-
-def _add_cloud_distro_check(cloud_archive_release, openstack_release):
-    """Add the cloud pocket, but also check the cloud_archive_release against
-    the current distro, and use the openstack_release as the full lookup.
-
-    This just calls _add_cloud_pocket() with the openstack_release as pocket
-    to get the correct cloud-archive.list for dpkg to work with.
-
-    :param cloud_archive_release: String, codename for the distro release.
-    :param openstack_release: String, spec for the release to look up in the
-        CLOUD_ARCHIVE_POCKETS
-    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
-        doesn't exist.
-    """
-    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
-    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
-
-
-def _verify_is_ubuntu_rel(release, os_release):
-    """Verify that the release is the same as the current Ubuntu release.
-
-    :param release: String, lowercase codename for the release.
-    :param os_release: String, the os_release being asked for
-    :raises: SourceConfigError if the release is not the same as the ubuntu
-        release.
-    """
-    ubuntu_rel = get_distrib_codename()
-    if release != ubuntu_rel:
-        raise SourceConfigError(
-            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
-            'version ({})'.format(release, os_release, ubuntu_rel))
-
-
-def _add_bare_openstack(openstack_release):
-    """Add cloud or distro based on the release given.
-
-    The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
-    or 'distro' depending on whether the ubuntu release is bionic or focal.
-
-    :param openstack_release: the OpenStack codename to determine the release
-        for.
-    :type openstack_release: str
-    :raises: SourceConfigError
-    """
-    # TODO(ajkavanagh) - surely this means we should be removing cloud archives
-    # if they exist?
-    __add_bare_helper(openstack_release, "{}-{}", lambda: None)
-
-
-def _add_bare_openstack_proposed(openstack_release):
-    """Add cloud or distro, but with proposed.
-
-    The spec given is, say, 'ussuri', but this could apply
-    cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
-    ubuntu release is bionic or focal.
-
-    :param openstack_release: the OpenStack codename to determine the release
-        for.
-    :type openstack_release: str
-    :raises: SourceConfigError
-    """
-    __add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed)
-
-
-def __add_bare_helper(openstack_release, pocket_format, final_function):
-    """Helper for _add_bare_openstack[_proposed]
-
-    The bulk of the work between the two functions is exactly the same except
-    for the pocket format and the function that is run if it's the distro
-    version.
-
-    :param openstack_release: the OpenStack codename. e.g. ussuri
-    :type openstack_release: str
-    :param pocket_format: the pocket formatter string to construct a pocket
-        string from the openstack_release and the current ubuntu version.
-    :type pocket_format: str
-    :param final_function: the function to call if it is the distro version.
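`_add_cloud_distro_check` splits a `cloud:<series>-<release>` spec, insists the series matches the running distro, and only then looks the joined pocket up. A rough sketch of that two-step check, with a toy table standing in for CLOUD_ARCHIVE_POCKETS and a hard-coded series standing in for get_distrib_codename():

```python
import re

# Toy stand-in for CLOUD_ARCHIVE_POCKETS; the real mapping is much larger.
POCKETS = {"bionic-ussuri": "bionic-updates/ussuri"}
CURRENT_SERIES = "bionic"  # what get_distrib_codename() would return


def add_cloud_source(spec):
    m = re.match(r"^cloud:(.*)-(.*)$", spec)
    if not m:
        raise ValueError("not a cloud: spec: {!r}".format(spec))
    series, release = m.groups()
    if series != CURRENT_SERIES:
        raise ValueError(
            "Invalid Cloud Archive release specified: {}-{} on this "
            "Ubuntu version ({})".format(series, release, CURRENT_SERIES))
    pocket = "{}-{}".format(series, release)
    if pocket not in POCKETS:
        raise ValueError("Unsupported cloud: source option {}".format(pocket))
    return POCKETS[pocket]


print(add_cloud_source("cloud:bionic-ussuri"))  # bionic-updates/ussuri
```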
- :type final_function: Callable - :raises SourceConfigError on error - """ - ubuntu_version = get_distrib_codename() - possible_pocket = pocket_format.format(ubuntu_version, openstack_release) - if possible_pocket in CLOUD_ARCHIVE_POCKETS: - _add_cloud_pocket(possible_pocket) - return - # Otherwise it's almost certainly the distro version; verify that it - # exists. - try: - assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release - except KeyError: - raise SourceConfigError( - "Invalid ubuntu version {} isn't known to this library" - .format(ubuntu_version)) - except AssertionError: - raise SourceConfigError( - 'Invalid OpenStack release specified: {} for Ubuntu version {}' - .format(openstack_release, ubuntu_version)) - final_function() - - -def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), - retry_message="", cmd_env=None, quiet=False): - """Run a command and retry until success or max_retries is reached. - - :param cmd: The apt command to run. - :type cmd: str - :param max_retries: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :type max_retries: int - :param retry_exitcodes: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :type retry_exitcodes: tuple - :param retry_message: Optional log prefix emitted during retries. - :type retry_message: str - :param: cmd_env: Environment variables to add to the command run. - :type cmd_env: Option[None, Dict[str, str]] - :param quiet: if True, silence the output of the command from stdout and - stderr - :type quiet: bool - """ - env = get_apt_dpkg_env() - if cmd_env: - env.update(cmd_env) - - kwargs = {} - if quiet: - kwargs['stdout'] = subprocess.DEVNULL - kwargs['stderr'] = subprocess.DEVNULL - - if not retry_message: - retry_message = "Failed executing '{}'".format(" ".join(cmd)) - retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY) - - retry_count = 0 - result = None - - retry_results = (None,) + retry_exitcodes - while result in retry_results: - try: - result = subprocess.check_call(cmd, env=env, **kwargs) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > max_retries: - raise - result = e.returncode - log(retry_message) - time.sleep(CMD_RETRY_DELAY) - - -def _run_apt_command(cmd, fatal=False, quiet=False): - """Run an apt command with optional retries. - - :param cmd: The apt command to run. - :type cmd: str - :param fatal: Whether the command's output should be checked and - retried. - :type fatal: bool - :param quiet: if True, silence the output of the command from stdout and - stderr - :type quiet: bool - """ - if fatal: - _run_with_retries( - cmd, retry_exitcodes=(1, APT_NO_LOCK,), - retry_message="Couldn't acquire DPKG lock", - quiet=quiet) - else: - kwargs = {} - if quiet: - kwargs['stdout'] = subprocess.DEVNULL - kwargs['stderr'] = subprocess.DEVNULL - subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) - - -def get_upstream_version(package): - """Determine upstream version based on installed package - - @returns None (if not installed) or the upstream version - """ - cache = apt_cache() - try: - pkg = cache[package] - except Exception: - # the package is unknown to the current apt cache. - return None - - if not pkg.current_ver: - # package is known, but no version is currently installed. 
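`_run_with_retries` treats `None` (not yet run) plus the listed exit codes as retryable states and re-raises only once the retry budget is exhausted. A compact, generic version of that loop; the count and delay constants are illustrative stand-ins for CMD_RETRY_COUNT and CMD_RETRY_DELAY:

```python
import subprocess
import time

RETRY_COUNT = 3   # illustrative stand-in for CMD_RETRY_COUNT
RETRY_DELAY = 10  # seconds between attempts (CMD_RETRY_DELAY)


def run_with_retries(cmd, retry_exitcodes=(1,)):
    """Run cmd, retrying while it exits with one of retry_exitcodes."""
    retries = 0
    # `None` means "not run yet", so the loop always makes a first attempt.
    result = None
    while result in (None,) + tuple(retry_exitcodes):
        try:
            result = subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            retries += 1
            if retries > RETRY_COUNT:
                raise
            result = e.returncode
            print("'{}' failed ({}); retrying in {}s".format(
                " ".join(cmd), result, RETRY_DELAY))
            time.sleep(RETRY_DELAY)
    return result


# Example: `false` always exits 1, so this retries and finally raises:
# run_with_retries(["false"])
```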
- return None - - return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) - - -def get_installed_version(package): - """Determine installed version of a package - - @returns None (if not installed) or the installed version as - Version object - """ - cache = apt_cache() - dpkg_result = cache.dpkg_list([package]).get(package, {}) - current_ver = None - installed_version = dpkg_result.get('version') - - if installed_version: - current_ver = ubuntu_apt_pkg.Version({'ver_str': installed_version}) - return current_ver - - -def get_apt_dpkg_env(): - """Get environment suitable for execution of APT and DPKG tools. - - We keep this in a helper function instead of in a global constant to - avoid execution on import of the library. - :returns: Environment suitable for execution of APT and DPKG tools. - :rtype: Dict[str, str] - """ - # The fallback is used in the event of ``/etc/environment`` not containing - # avalid PATH variable. - return {'DEBIAN_FRONTEND': 'noninteractive', - 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py deleted file mode 100644 index 6da355fd..00000000 --- a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2019-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Provide a subset of the ``python-apt`` module API. - -Data collection is done through subprocess calls to ``apt-cache`` and -``dpkg-query`` commands. - -The main purpose for this module is to avoid dependency on the -``python-apt`` python module. - -The indicated python module is a wrapper around the ``apt`` C++ library -which is tightly connected to the version of the distribution it was -shipped on. It is not developed in a backward/forward compatible manner. - -This in turn makes it incredibly hard to distribute as a wheel for a piece -of python software that supports a span of distro releases [0][1]. - -Upstream feedback like [2] does not give confidence in this ever changing, -so with this we get rid of the dependency. 
- -0: https://github.com/juju-solutions/layer-basic/pull/135 -1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 -2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 -""" - -import locale -import os -import subprocess -import sys - -from charmhelpers import deprecate -from charmhelpers.core.hookenv import log - - -class _container(dict): - """Simple container for attributes.""" - __getattr__ = dict.__getitem__ - __setattr__ = dict.__setitem__ - - -class Package(_container): - """Simple container for package attributes.""" - - -class Version(_container): - """Simple container for version attributes.""" - - -class Cache(object): - """Simulation of ``apt_pkg`` Cache object.""" - def __init__(self, progress=None): - pass - - def __contains__(self, package): - try: - pkg = self.__getitem__(package) - return pkg is not None - except KeyError: - return False - - def __getitem__(self, package): - """Get information about a package from apt and dpkg databases. - - :param package: Name of package - :type package: str - :returns: Package object - :rtype: object - :raises: KeyError, subprocess.CalledProcessError - """ - apt_result = self._apt_cache_show([package])[package] - apt_result['name'] = apt_result.pop('package') - pkg = Package(apt_result) - dpkg_result = self.dpkg_list([package]).get(package, {}) - current_ver = None - installed_version = dpkg_result.get('version') - if installed_version: - current_ver = Version({'ver_str': installed_version}) - pkg.current_ver = current_ver - pkg.architecture = dpkg_result.get('architecture') - return pkg - - @deprecate("use dpkg_list() instead.", "2022-05", log=log) - def _dpkg_list(self, packages): - return self.dpkg_list(packages) - - def dpkg_list(self, packages): - """Get data from system dpkg database for package. - - Note that this method is also useful for querying package names - containing wildcards, for example - - apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) - - may return - - { - 'nvidia-vgpu-ubuntu-470': { - 'name': 'nvidia-vgpu-ubuntu-470', - 'version': '470.68', - 'architecture': 'amd64', - 'description': 'NVIDIA vGPU driver - version 470.68' - } - } - - :param packages: Packages to get data from - :type packages: List[str] - :returns: Structured data about installed packages, keys like - ``dpkg-query --list`` - :rtype: dict - :raises: subprocess.CalledProcessError - """ - pkgs = {} - cmd = ['dpkg-query', '--list'] - cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. 
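The `_container` base class above aliases attribute access to the dict item protocol, so `pkg.name` and `pkg['name']` hit the same storage. A short demonstration:

```python
class Container(dict):
    """Dict whose keys are also readable/writable as attributes."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__


pkg = Container({"name": "ceph-mon"})
pkg.version = "15.2.17"            # stored as pkg['version']
assert pkg["version"] == "15.2.17"
assert pkg.name == "ceph-mon"
```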
- locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') - try: - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - except subprocess.CalledProcessError as cp: - # ``dpkg-query`` may return error and at the same time have - # produced useful output, for example when asked for multiple - # packages where some are not installed - if cp.returncode != 1: - raise - output = cp.output - headings = [] - for line in output.splitlines(): - if line.startswith('||/'): - headings = line.split() - headings.pop(0) - continue - elif (line.startswith('|') or line.startswith('+') or - line.startswith('dpkg-query:')): - continue - else: - data = line.split(None, 4) - status = data.pop(0) - if status not in ('ii', 'hi'): - continue - pkg = {} - pkg.update({k.lower(): v for k, v in zip(headings, data)}) - if 'name' in pkg: - pkgs.update({pkg['name']: pkg}) - return pkgs - - def _apt_cache_show(self, packages): - """Get data from system apt cache for package. - - :param packages: Packages to get data from - :type packages: List[str] - :returns: Structured data about package, keys like - ``apt-cache show`` - :rtype: dict - :raises: subprocess.CalledProcessError - """ - pkgs = {} - cmd = ['apt-cache', 'show', '--no-all-versions'] - cmd.extend(packages) - if locale.getlocale() == (None, None): - # subprocess calls out to locale.getpreferredencoding(False) to - # determine encoding. Workaround for Trusty where the - # environment appears to not be set up correctly. - locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') - try: - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - previous = None - pkg = {} - for line in output.splitlines(): - if not line: - if 'package' in pkg: - pkgs.update({pkg['package']: pkg}) - pkg = {} - continue - if line.startswith(' '): - if previous and previous in pkg: - pkg[previous] += os.linesep + line.lstrip() - continue - if ':' in line: - kv = line.split(':', 1) - key = kv[0].lower() - if key == 'n': - continue - previous = key - pkg.update({key: kv[1].lstrip()}) - except subprocess.CalledProcessError as cp: - # ``apt-cache`` returns 100 if none of the packages asked for - # exist in the apt cache. - if cp.returncode != 100: - raise - return pkgs - - -class Config(_container): - def __init__(self): - super(Config, self).__init__(self._populate()) - - def _populate(self): - cfgs = {} - cmd = ['apt-config', 'dump'] - output = subprocess.check_output(cmd, - stderr=subprocess.STDOUT, - universal_newlines=True) - for line in output.splitlines(): - if not line.startswith("CommandLine"): - k, v = line.split(" ", 1) - cfgs[k] = v.strip(";").strip("\"") - - return cfgs - - -# Backwards compatibility with old apt_pkg module -sys.modules[__name__].config = Config() - - -def init(): - """Compatibility shim that does nothing.""" - pass - - -def upstream_version(version): - """Extracts upstream version from a version string. - - Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ - apt-pkg/deb/debversion.cc#L259 - - :param version: Version string - :type version: str - :returns: Upstream version - :rtype: str - """ - if version: - version = version.split(':')[-1] - version = version.split('-')[0] - return version - - -def version_compare(a, b): - """Compare the given versions. - - Call out to ``dpkg`` to make sure the code doing the comparison is - compatible with what the ``apt`` library would do. Mimic the return - values. 
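`dpkg_list` keys its parser on the `||/` legend line for column headings and keeps only rows whose status is `ii` or `hi`. The same approach run over a canned `dpkg-query --list` snippet (the sample output is illustrative):

```python
SAMPLE = """\
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name      Version        Architecture Description
+++-=========-==============-============-=====================
ii  ceph-mon  15.2.17-0ubuntu0.20.04.6 amd64 monitor server for ceph
un  ceph-foo  <none>         <none>       (no description available)
"""

pkgs = {}
headings = []
for line in SAMPLE.splitlines():
    if line.startswith('||/'):
        headings = line.split()
        headings.pop(0)                 # drop the '||/' legend token
        continue
    if line.startswith(('|', '+')) or not line.strip():
        continue
    data = line.split(None, 4)
    status = data.pop(0)
    if status not in ('ii', 'hi'):      # keep installed/hold-installed only
        continue
    pkgs[data[0]] = dict(zip((h.lower() for h in headings), data))

print(pkgs["ceph-mon"]["version"])      # 15.2.17-0ubuntu0.20.04.6
```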
- - Upstream reference: - https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html - ?highlight=version_compare#apt_pkg.version_compare - - :param a: version string - :type a: str - :param b: version string - :type b: str - :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, - <0 if ``a`` is smaller than ``b`` - :rtype: int - :raises: subprocess.CalledProcessError, RuntimeError - """ - for op in ('gt', 1), ('eq', 0), ('lt', -1): - try: - subprocess.check_call(['dpkg', '--compare-versions', - a, op[0], b], - stderr=subprocess.STDOUT, - universal_newlines=True) - return op[1] - except subprocess.CalledProcessError as cp: - if cp.returncode == 1: - continue - raise - else: - raise RuntimeError('Unable to compare "{}" and "{}", according to ' - 'our logic they are neither greater, equal nor ' - 'less than each other.'.format(a, b)) - - -class PkgVersion(): - """Allow package versions to be compared. - - For example:: - - >>> import charmhelpers.fetch as fetch - >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') < - ... fetch.apt_pkg.PkgVersion('2:20.5.0')) - True - >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'), - ... fetch.apt_pkg.PkgVersion('2:21.4.0'), - ... fetch.apt_pkg.PkgVersion('2:17.4.0')] - >>> pkgs.sort() - >>> pkgs - [2:17.4.0, 2:20.4.0, 2:21.4.0] - """ - - def __init__(self, version): - self.version = version - - def __lt__(self, other): - return version_compare(self.version, other.version) == -1 - - def __le__(self, other): - return self.__lt__(other) or self.__eq__(other) - - def __gt__(self, other): - return version_compare(self.version, other.version) == 1 - - def __ge__(self, other): - return self.__gt__(other) or self.__eq__(other) - - def __eq__(self, other): - return version_compare(self.version, other.version) == 0 - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return self.version - - def __hash__(self): - return hash(repr(self)) diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py deleted file mode 100644 index 1ace468f..00000000 --- a/hooks/charmhelpers/osplatform.py +++ /dev/null @@ -1,49 +0,0 @@ -import platform -import os - - -def get_platform(): - """Return the current OS platform. - - For example: if current os platform is Ubuntu then a string "ubuntu" - will be returned (which is the name of the module). - This string is used to decide which platform module should be imported. - """ - # linux_distribution is deprecated and will be removed in Python 3.7 - # Warnings *not* disabled, as we certainly need to fix this. - if hasattr(platform, 'linux_distribution'): - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] - else: - current_platform = _get_platform_from_fs() - - if "Ubuntu" in current_platform: - return "ubuntu" - elif "CentOS" in current_platform: - return "centos" - elif "debian" in current_platform: - # Stock Python does not detect Ubuntu and instead returns debian. - # Or at least it does in some build environments like Travis CI - return "ubuntu" - elif "elementary" in current_platform: - # ElementaryOS fails to run tests locally without this. - return "ubuntu" - elif "Pop!_OS" in current_platform: - # Pop!_OS also fails to run tests locally without this. - return "ubuntu" - else: - raise RuntimeError("This module is not supported on {}." 
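`upstream_version` above is pure string slicing: drop everything up to the last `:` (the epoch), then everything after the first `-` (the Debian revision). A quick check of that behaviour, no dpkg required:

```python
def upstream_version(version):
    """Return the upstream portion of a Debian version string."""
    if version:
        version = version.split(':')[-1]  # drop the epoch, if any
        version = version.split('-')[0]   # drop the Debian revision
    return version


assert upstream_version("2:20.4.0-0ubuntu1") == "20.4.0"
assert upstream_version("15.2.17-0ubuntu0.20.04.6") == "15.2.17"
assert upstream_version("1.0") == "1.0"
print("ok")
```

`PkgVersion` itself defers the ordering to `dpkg --compare-versions`, so plain string sorting pitfalls (epochs, `~` pre-releases) never arise in this module's own code.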
- .format(current_platform)) - - -def _get_platform_from_fs(): - """Get Platform from /etc/os-release.""" - with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: - content = dict( - line.split('=', 1) - for line in fin.read().splitlines() - if '=' in line - ) - for k, v in content.items(): - content[k] = v.strip('"') - return content["NAME"] diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py deleted file mode 100644 index ee55cb3d..00000000 --- a/hooks/charmhelpers/payload/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"Tools for working with files injected into a charm just before deployment." diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py deleted file mode 100644 index 1502aa0b..00000000 --- a/hooks/charmhelpers/payload/execd.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014-2015 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import subprocess -from charmhelpers.core import hookenv - - -def default_execd_dir(): - return os.path.join(os.environ['CHARM_DIR'], 'exec.d') - - -def execd_module_paths(execd_dir=None): - """Generate a list of full paths to modules within execd_dir.""" - if not execd_dir: - execd_dir = default_execd_dir() - - if not os.path.exists(execd_dir): - return - - for subpath in os.listdir(execd_dir): - module = os.path.join(execd_dir, subpath) - if os.path.isdir(module): - yield module - - -def execd_submodule_paths(command, execd_dir=None): - """Generate a list of full paths to the specified command within exec_dir. - """ - for module_path in execd_module_paths(execd_dir): - path = os.path.join(module_path, command) - if os.access(path, os.X_OK) and os.path.isfile(path): - yield path - - -def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): - """Run command for each module within execd_dir which defines it.""" - for submodule_path in execd_submodule_paths(command, execd_dir): - try: - subprocess.check_output(submodule_path, stderr=stderr, - universal_newlines=True) - except subprocess.CalledProcessError as e: - hookenv.log("Error ({}) running {}. 
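`_get_platform_from_fs` reduces `/etc/os-release` to a dict by splitting each `KEY=value` line once and stripping surrounding quotes. The same parse against an inline sample (contents illustrative):

```python
SAMPLE_OS_RELEASE = '''\
NAME="Ubuntu"
VERSION="20.04.6 LTS (Focal Fossa)"
ID=ubuntu
PRETTY_NAME="Ubuntu 20.04.6 LTS"
'''

content = dict(
    line.split('=', 1)
    for line in SAMPLE_OS_RELEASE.splitlines()
    if '=' in line
)
content = {k: v.strip('"') for k, v in content.items()}

print(content["NAME"])  # Ubuntu
```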
Output: {}".format( - e.returncode, e.cmd, e.output)) - if die_on_error: - sys.exit(e.returncode) - - -def execd_preinstall(execd_dir=None): - """Run charm-pre-install for each module within execd_dir.""" - execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/client-relation-changed b/hooks/client-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/client-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/client-relation-joined b/hooks/client-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/client-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/config-changed b/hooks/config-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/config-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/dashboard-relation-joined b/hooks/dashboard-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/dashboard-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install deleted file mode 100755 index e8ad54b4..00000000 --- a/hooks/install +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -e -# ensure that the python3 bits are installed, whichever version of ubunut -# is being installed. - -declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') - -check_and_install() { - pkg="${1}-${2}" - if ! dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python3" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done - -./hooks/install_deps -exec ./hooks/install.real diff --git a/hooks/install.real b/hooks/install.real deleted file mode 120000 index 52d96630..00000000 --- a/hooks/install.real +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/install_deps b/hooks/install_deps deleted file mode 100755 index c480f29e..00000000 --- a/hooks/install_deps +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -e -# Wrapper to ensure that python dependencies are installed before we get into -# the python part of the hook execution - -declare -a DEPS=('dnspython' 'pyudev') - -check_and_install() { - pkg="${1}-${2}" - if ! 
dpkg -s ${pkg} 2>&1 > /dev/null; then - apt-get -y install ${pkg} - fi -} - -PYTHON="python3" - -for dep in ${DEPS[@]}; do - check_and_install ${PYTHON} ${dep} -done diff --git a/hooks/leader-settings-changed b/hooks/leader-settings-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/leader-settings-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/mds-relation-changed b/hooks/mds-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/mds-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/mds-relation-joined b/hooks/mds-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/mds-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/mon-relation-changed b/hooks/mon-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/mon-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/mon-relation-departed b/hooks/mon-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/mon-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/mon-relation-joined b/hooks/mon-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/mon-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/nrpe-external-master-relation-changed b/hooks/nrpe-external-master-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/nrpe-external-master-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/nrpe-external-master-relation-joined b/hooks/nrpe-external-master-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/nrpe-external-master-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/osd-relation-changed b/hooks/osd-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/osd-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/osd-relation-joined b/hooks/osd-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/osd-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/post-series-upgrade b/hooks/post-series-upgrade deleted file mode 120000 index 52d96630..00000000 --- a/hooks/post-series-upgrade +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/pre-series-upgrade b/hooks/pre-series-upgrade deleted file mode 120000 index 52d96630..00000000 --- a/hooks/pre-series-upgrade +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/prometheus-relation-changed b/hooks/prometheus-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/prometheus-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/prometheus-relation-departed b/hooks/prometheus-relation-departed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/prometheus-relation-departed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/prometheus-relation-joined b/hooks/prometheus-relation-joined deleted file mode 120000 index 
52d96630..00000000 --- a/hooks/prometheus-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/radosgw-relation-changed b/hooks/radosgw-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/radosgw-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/radosgw-relation-joined b/hooks/radosgw-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/radosgw-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/rbd-mirror-relation-changed b/hooks/rbd-mirror-relation-changed deleted file mode 120000 index 52d96630..00000000 --- a/hooks/rbd-mirror-relation-changed +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/rbd-mirror-relation-joined b/hooks/rbd-mirror-relation-joined deleted file mode 120000 index 52d96630..00000000 --- a/hooks/rbd-mirror-relation-joined +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/start b/hooks/start deleted file mode 120000 index 52d96630..00000000 --- a/hooks/start +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop deleted file mode 120000 index 52d96630..00000000 --- a/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/update-status b/hooks/update-status deleted file mode 120000 index 52d96630..00000000 --- a/hooks/update-status +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm deleted file mode 100755 index a454f76f..00000000 --- a/hooks/upgrade-charm +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -e -# Wrapper to ensure that old python bytecode isn't hanging around -# after we upgrade the charm with newer libraries -rm -rf **/*.pyc - -./hooks/install_deps -exec ./hooks/upgrade-charm.real diff --git a/hooks/upgrade-charm.real b/hooks/upgrade-charm.real deleted file mode 120000 index 52d96630..00000000 --- a/hooks/upgrade-charm.real +++ /dev/null @@ -1 +0,0 @@ -ceph_hooks.py \ No newline at end of file diff --git a/lib/charms_ceph/__init__.py b/lib/charms_ceph/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/charms_ceph/broker.py b/lib/charms_ceph/broker.py deleted file mode 100644 index 90b536fb..00000000 --- a/lib/charms_ceph/broker.py +++ /dev/null @@ -1,913 +0,0 @@ -# Copyright 2016 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import collections -import json -import os - -from subprocess import check_call, check_output, CalledProcessError -from tempfile import NamedTemporaryFile - -from charms_ceph.utils import ( - get_cephfs, - get_osd_weight -) -from charms_ceph.crush_utils import Crushmap - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - INFO, - ERROR, -) -from charmhelpers.contrib.storage.linux.ceph import ( - create_erasure_profile, - delete_pool, - erasure_profile_exists, - get_osds, - monitor_key_get, - monitor_key_set, - pool_exists, - pool_set, - remove_pool_snapshot, - rename_pool, - snapshot_pool, - validator, - ErasurePool, - BasePool, - ReplicatedPool, -) - -# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/ -# This should do a decent job of preventing people from passing in bad values. -# It will give a useful error message - -POOL_KEYS = { - # "Ceph Key Name": [Python type, [Valid Range]] - "size": [int], - "min_size": [int], - "crash_replay_interval": [int], - "pgp_num": [int], # = or < pg_num - "crush_ruleset": [int], - "hashpspool": [bool], - "nodelete": [bool], - "nopgchange": [bool], - "nosizechange": [bool], - "write_fadvise_dontneed": [bool], - "noscrub": [bool], - "nodeep-scrub": [bool], - "hit_set_type": [str, ["bloom", "explicit_hash", - "explicit_object"]], - "hit_set_count": [int, [1, 1]], - "hit_set_period": [int], - "hit_set_fpp": [float, [0.0, 1.0]], - "cache_target_dirty_ratio": [float], - "cache_target_dirty_high_ratio": [float], - "cache_target_full_ratio": [float], - "target_max_bytes": [int], - "target_max_objects": [int], - "cache_min_flush_age": [int], - "cache_min_evict_age": [int], - "fast_read": [bool], - "allow_ec_overwrites": [bool], - "compression_mode": [str, ["none", "passive", "aggressive", "force"]], - "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]], - "compression_required_ratio": [float, [0.0, 1.0]], - "crush_rule": [str], -} - -CEPH_BUCKET_TYPES = [ - 'osd', - 'host', - 'chassis', - 'rack', - 'row', - 'pdu', - 'pod', - 'room', - 'datacenter', - 'region', - 'root' -] - - -def decode_req_encode_rsp(f): - """Decorator to decode incoming requests and encode responses.""" - - def decode_inner(req): - return json.dumps(f(json.loads(req))) - - return decode_inner - - -@decode_req_encode_rsp -def process_requests(reqs): - """Process Ceph broker request(s). - - This is a versioned api. API version must be supplied by the client making - the request. - - :param reqs: dict of request parameters. - :returns: dict. exit-code and reason if not 0 - """ - request_id = reqs.get('request-id') - try: - version = reqs.get('api-version') - if version == 1: - log('Processing request {}'.format(request_id), level=DEBUG) - resp = process_requests_v1(reqs['ops']) - if request_id: - resp['request-id'] = request_id - - return resp - - except Exception as exc: - log(str(exc), level=ERROR) - msg = ("Unexpected error occurred while processing requests: %s" % - reqs) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - msg = ("Missing or invalid api version ({})".format(version)) - resp = {'exit-code': 1, 'stderr': msg} - if request_id: - resp['request-id'] = request_id - - return resp - - -def handle_create_erasure_profile(request, service): - """Create an erasure profile. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. 
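`decode_req_encode_rsp` lets every broker handler work on plain dicts while the relation payload travels as JSON. A minimal reproduction of the decorator plus the api-version gate:

```python
import json


def decode_req_encode_rsp(f):
    """JSON-decode the incoming request, JSON-encode the response."""
    def decode_inner(req):
        return json.dumps(f(json.loads(req)))
    return decode_inner


@decode_req_encode_rsp
def process_requests(reqs):
    if reqs.get('api-version') != 1:
        return {'exit-code': 1, 'stderr': 'unsupported api version'}
    # The real code walks reqs['ops']; just echo the count here.
    return {'exit-code': 0, 'ops-processed': len(reqs.get('ops', []))}


raw = json.dumps({'api-version': 1, 'ops': [{'op': 'create-pool'}]})
print(process_requests(raw))  # {"exit-code": 0, "ops-processed": 1}
```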
exit-code and reason if not 0 - """ - # "isa" | "lrc" | "shec" | "clay" or it defaults to "jerasure" - erasure_type = request.get('erasure-type') - # dependent on erasure coding type - erasure_technique = request.get('erasure-technique') - # "host" | "rack" | ... - failure_domain = request.get('failure-domain') - name = request.get('name') - # Binary Distribution Matrix (BDM) parameters - bdm_k = request.get('k') - bdm_m = request.get('m') - # LRC parameters - bdm_l = request.get('l') - crush_locality = request.get('crush-locality') - # SHEC parameters - bdm_c = request.get('c') - # CLAY parameters - bdm_d = request.get('d') - scalar_mds = request.get('scalar-mds') - # Device Class - device_class = request.get('device-class') - - if failure_domain and failure_domain not in CEPH_BUCKET_TYPES: - msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - create_erasure_profile(service=service, - erasure_plugin_name=erasure_type, - profile_name=name, - failure_domain=failure_domain, - data_chunks=bdm_k, - coding_chunks=bdm_m, - locality=bdm_l, - durability_estimator=bdm_d, - helper_chunks=bdm_c, - scalar_mds=scalar_mds, - crush_locality=crush_locality, - device_class=device_class, - erasure_plugin_technique=erasure_technique) - - return {'exit-code': 0} - - -def handle_add_permissions_to_key(request, service): - """Groups are defined by the key cephx.groups.(namespace-)?-(name). This - key will contain a dict serialized to JSON with data about the group, - including pools and members. - - A group can optionally have a namespace defined that will be used to - further restrict pool access. - """ - resp = {'exit-code': 0} - - service_name = request.get('name') - group_name = request.get('group') - group_namespace = request.get('group-namespace') - if group_namespace: - group_name = "{}-{}".format(group_namespace, group_name) - group = get_group(group_name=group_name) - service_obj = get_service_groups(service=service_name, - namespace=group_namespace) - if request.get('object-prefix-permissions'): - service_obj['object_prefix_perms'] = request.get( - 'object-prefix-permissions') - format("Service object: {}".format(service_obj)) - permission = request.get('group-permission') or "rwx" - if service_name not in group['services']: - group['services'].append(service_name) - save_group(group=group, group_name=group_name) - if permission not in service_obj['group_names']: - service_obj['group_names'][permission] = [] - if group_name not in service_obj['group_names'][permission]: - service_obj['group_names'][permission].append(group_name) - save_service(service=service_obj, service_name=service_name) - service_obj['groups'] = _build_service_groups(service_obj, - group_namespace) - update_service_permissions(service_name, service_obj, group_namespace) - - return resp - - -def handle_set_key_permissions(request, service): - """Ensure the key has the requested permissions.""" - permissions = request.get('permissions') - client = request.get('client') - call = ['ceph', '--id', service, 'auth', 'caps', - 'client.{}'.format(client)] + permissions - try: - check_call(call) - except CalledProcessError as e: - log("Error updating key capabilities: {}".format(e), level=ERROR) - - -def update_service_permissions(service, service_obj=None, namespace=None): - """Update the key permissions for the named client in Ceph""" - if not service_obj: - service_obj = get_service_groups(service=service, namespace=namespace) - permissions = 
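For orientation on the k/m parameters collected above: an erasure-coded object is cut into k data chunks plus m coding chunks, so it survives the loss of any m chunks and k/(k+m) of the raw capacity remains usable. A one-function illustration of that arithmetic:

```python
def erasure_usable_fraction(k, m):
    """Usable fraction of raw space for a k+m erasure-coded pool."""
    return k / (k + m)


# k=4, m=2: tolerates 2 lost chunks; 4/6 of raw space holds data.
print("{:.0%}".format(erasure_usable_fraction(4, 2)))  # 67%
```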
pool_permission_list_for_service(service_obj) - call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions - try: - check_call(call) - except CalledProcessError as e: - log("Error updating key capabilities: {}".format(e)) - - -def add_pool_to_group(pool, group, namespace=None): - """Add a named pool to a named group""" - group_name = group - if namespace: - group_name = "{}-{}".format(namespace, group_name) - group = get_group(group_name=group_name) - if pool not in group['pools']: - group["pools"].append(pool) - save_group(group, group_name=group_name) - for service in group['services']: - update_service_permissions(service, namespace=namespace) - - -def pool_permission_list_for_service(service): - """Build the permission string for Ceph for a given service""" - permissions = [] - permission_types = collections.OrderedDict() - for permission, group in sorted(service["group_names"].items()): - if permission not in permission_types: - permission_types[permission] = [] - for item in group: - permission_types[permission].append(item) - for permission, groups in permission_types.items(): - permission = "allow {}".format(permission) - for group in groups: - for pool in service['groups'][group].get('pools', []): - permissions.append("{} pool={}".format(permission, pool)) - for permission, prefixes in sorted( - service.get("object_prefix_perms", {}).items()): - for prefix in prefixes: - permissions.append("allow {} object_prefix {}".format(permission, - prefix)) - return ['mon', ('allow r, allow command "osd blacklist"' - ', allow command "osd blocklist"'), - 'osd', ', '.join(permissions)] - - -def get_service_groups(service, namespace=None): - """Services are objects stored with some metadata, they look like (for a - service named "nova"): - { - group_names: {'rwx': ['images']}, - groups: {} - } - After populating the group, it looks like: - { - group_names: {'rwx': ['images']}, - groups: { - 'images': { - pools: ['glance'], - services: ['nova'] - } - } - } - """ - service_json = monitor_key_get(service='admin', - key="cephx.services.{}".format(service)) - try: - service = json.loads(service_json) - except (TypeError, ValueError): - service = None - if service: - service['groups'] = _build_service_groups(service, namespace) - else: - service = {'group_names': {}, 'groups': {}} - return service - - -def _build_service_groups(service, namespace=None): - """Rebuild the 'groups' dict for a service group - - :returns: dict: dictionary keyed by group name of the following - format: - - { - 'images': { - pools: ['glance'], - services: ['nova', 'glance] - }, - 'vms':{ - pools: ['nova'], - services: ['nova'] - } - } - """ - all_groups = {} - for groups in service['group_names'].values(): - for group in groups: - name = group - if namespace: - name = "{}-{}".format(namespace, name) - all_groups[group] = get_group(group_name=name) - return all_groups - - -def get_group(group_name): - """A group is a structure to hold data about a named group, structured as: - { - pools: ['glance'], - services: ['nova'] - } - """ - group_key = get_group_key(group_name=group_name) - group_json = monitor_key_get(service='admin', key=group_key) - try: - group = json.loads(group_json) - except (TypeError, ValueError): - group = None - if not group: - group = { - 'pools': [], - 'services': [] - } - return group - - -def save_service(service_name, service): - """Persist a service in the monitor cluster""" - service['groups'] = {} - return monitor_key_set(service='admin', - 
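`pool_permission_list_for_service` flattens a service's `group_names` (permission to groups) through the populated `groups` (group to pools) into the caps list handed to `ceph auth caps`. A reduced walk-through with a hand-built service object (names are illustrative):

```python
import collections

service = {
    "group_names": {"rwx": ["images"]},
    "groups": {"images": {"pools": ["glance"], "services": ["nova"]}},
}

permissions = []
permission_types = collections.OrderedDict()
for permission, groups in sorted(service["group_names"].items()):
    permission_types.setdefault(permission, []).extend(groups)
for permission, groups in permission_types.items():
    for group in groups:
        for pool in service["groups"][group].get("pools", []):
            permissions.append("allow {} pool={}".format(permission, pool))

caps = ["mon",
        'allow r, allow command "osd blacklist", '
        'allow command "osd blocklist"',
        "osd", ", ".join(permissions)]
print(caps)  # [..., 'osd', 'allow rwx pool=glance']
```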
key="cephx.services.{}".format(service_name), - value=json.dumps(service, sort_keys=True)) - - -def save_group(group, group_name): - """Persist a group in the monitor cluster""" - group_key = get_group_key(group_name=group_name) - return monitor_key_set(service='admin', - key=group_key, - value=json.dumps(group, sort_keys=True)) - - -def get_group_key(group_name): - """Build group key""" - return 'cephx.groups.{}'.format(group_name) - - -def handle_erasure_pool(request, service): - """Create a new erasure coded pool. - - :param request: dict of request operations and params. - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0. - """ - pool_name = request.get('name') - erasure_profile = request.get('erasure-profile') - group_name = request.get('group') - - if erasure_profile is None: - erasure_profile = "default-canonical" - - if group_name: - group_namespace = request.get('group-namespace') - # Add the pool to the group named "group_name" - add_pool_to_group(pool=pool_name, - group=group_name, - namespace=group_namespace) - - # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds - if not erasure_profile_exists(service=service, name=erasure_profile): - # TODO: Fail and tell them to create the profile or default - msg = ("erasure-profile {} does not exist. Please create it with: " - "create-erasure-profile".format(erasure_profile)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - try: - pool = ErasurePool(service=service, - op=request) - except KeyError: - msg = "Missing parameter." - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Ok make the erasure pool - if not pool_exists(service=service, name=pool_name): - log("Creating pool '{}' (erasure_profile={})" - .format(pool.name, erasure_profile), level=INFO) - pool.create() - - # Set/update properties that are allowed to change after pool creation. - pool.update() - - -def handle_replicated_pool(request, service): - """Create a new replicated pool. - - :param request: dict of request operations and params. - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0. - """ - pool_name = request.get('name') - group_name = request.get('group') - - # Optional params - # NOTE: Check this against the handling in the Pool classes, reconcile and - # remove. - pg_num = request.get('pg_num') - replicas = request.get('replicas') - if pg_num: - # Cap pg_num to max allowed just in case. - osds = get_osds(service) - if osds: - pg_num = min(pg_num, (len(osds) * 100 // replicas)) - request.update({'pg_num': pg_num}) - - if group_name: - group_namespace = request.get('group-namespace') - # Add the pool to the group named "group_name" - add_pool_to_group(pool=pool_name, - group=group_name, - namespace=group_namespace) - - try: - pool = ReplicatedPool(service=service, - op=request) - except KeyError: - msg = "Missing parameter." - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if not pool_exists(service=service, name=pool_name): - log("Creating pool '{}' (replicas={})".format(pool.name, replicas), - level=INFO) - pool.create() - else: - log("Pool '{}' already exists - skipping create".format(pool.name), - level=DEBUG) - - # Set/update properties that are allowed to change after pool creation. - pool.update() - - -def handle_create_cache_tier(request, service): - """Create a cache tier on a cold pool. Modes supported are - "writeback" and "readonly". 
- - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - # mode = "writeback" | "readonly" - storage_pool = request.get('cold-pool') - cache_pool = request.get('hot-pool') - cache_mode = request.get('mode') - - if cache_mode is None: - cache_mode = "writeback" - - # cache and storage pool must exist first - if not pool_exists(service=service, name=storage_pool) or not pool_exists( - service=service, name=cache_pool): - msg = ("cold-pool: {} and hot-pool: {} must exist. Please create " - "them first".format(storage_pool, cache_pool)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - p = BasePool(service=service, name=storage_pool) - p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode) - - -def handle_remove_cache_tier(request, service): - """Remove a cache tier from the cold pool. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - storage_pool = request.get('cold-pool') - cache_pool = request.get('hot-pool') - # cache and storage pool must exist first - if not pool_exists(service=service, name=storage_pool) or not pool_exists( - service=service, name=cache_pool): - msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not " - "deleting cache tier".format(storage_pool, cache_pool)) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - pool = BasePool(name=storage_pool, service=service) - pool.remove_cache_tier(cache_pool=cache_pool) - - -def handle_set_pool_value(request, service, coerce=False): - """Sets an arbitrary pool value. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :param coerce: Try to parse/coerce the value into the correct type. - Used by the action code that only gets Str from Juju - :returns: dict. exit-code and reason if not 0 - """ - # Set arbitrary pool values - params = {'pool': request.get('name'), - 'key': request.get('key'), - 'value': request.get('value')} - if params['key'] not in POOL_KEYS: - msg = "Invalid key '{}'".format(params['key']) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Get the validation method - validator_params = POOL_KEYS[params['key']] - # BUG: #1838650 - the function needs to try to coerce the value param to - # the type required for the validator to pass. Note, if this blows, then - # the param isn't parsable to the correct type. - if coerce: - try: - params['value'] = validator_params[0](params['value']) - except ValueError: - raise RuntimeError("Value {} isn't of type {}" - .format(params['value'], validator_params[0])) - # end of BUG: #1838650 - if len(validator_params) == 1: - # Validate that what the user passed is actually legal per Ceph's rules - validator(params['value'], validator_params[0]) - else: - # Validate that what the user passed is actually legal per Ceph's rules - validator(params['value'], validator_params[0], validator_params[1]) - - # Set the value - pool_set(service=service, pool_name=params['pool'], key=params['key'], - value=params['value']) - - -def handle_rgw_regionmap_update(request, service): - """Change the radosgw region map. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. 
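`handle_set_pool_value` looks the key up in POOL_KEYS, coerces the string Juju delivers to the expected type, and checks it against the table's constraints where present. A simplified version of that flow (the real code defers the final check to charmhelpers' validator()):

```python
POOL_KEYS = {
    # key: [expected type] or [expected type, allowed values / range]
    "size": [int],
    "hit_set_fpp": [float, [0.0, 1.0]],
    "compression_mode": [str, ["none", "passive", "aggressive", "force"]],
}


def coerce_and_validate(key, value):
    """Coerce a string value to the expected type, then constrain it."""
    if key not in POOL_KEYS:
        raise ValueError("Invalid key '{}'".format(key))
    spec = POOL_KEYS[key]
    value = spec[0](value)          # e.g. "0.05" -> 0.05; raises ValueError
    if len(spec) > 1:
        if spec[0] is float:        # numeric specs carry a [min, max] range
            lo, hi = spec[1]
            if not lo <= value <= hi:
                raise ValueError("{} must be in [{}, {}]".format(key, lo, hi))
        elif value not in spec[1]:  # string specs carry an allow-list
            raise ValueError("{} must be one of {}".format(key, spec[1]))
    return value


print(coerce_and_validate("hit_set_fpp", "0.05"))       # 0.05
print(coerce_and_validate("compression_mode", "none"))  # none
```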
exit-code and reason if not 0 - """ - name = request.get('client-name') - if not name: - msg = "Missing rgw-region or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - check_output(['radosgw-admin', - '--id', service, - 'regionmap', 'update', '--name', name]) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_regionmap_default(request, service): - """Create a radosgw region map. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - region = request.get('rgw-region') - name = request.get('client-name') - if not region or not name: - msg = "Missing rgw-region or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'regionmap', - 'default', - '--rgw-region', region, - '--name', name]) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_zone_set(request, service): - """Create a radosgw zone. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - json_file = request.get('zone-json') - name = request.get('client-name') - region_name = request.get('region-name') - zone_name = request.get('zone-name') - if not json_file or not name or not region_name or not zone_name: - msg = "Missing json-file or client-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - infile = NamedTemporaryFile(delete=False) - with open(infile.name, 'w') as infile_handle: - infile_handle.write(json_file) - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'zone', - 'set', - '--rgw-zone', zone_name, - '--infile', infile.name, - '--name', name, - ] - ) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - os.unlink(infile.name) - - -def handle_put_osd_in_bucket(request, service): - """Move an osd into a specified crush bucket. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - osd_id = request.get('osd') - target_bucket = request.get('bucket') - if not osd_id or not target_bucket: - msg = "Missing OSD ID or Bucket" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - crushmap = Crushmap() - try: - crushmap.ensure_bucket_is_present(target_bucket) - check_output( - [ - 'ceph', - '--id', service, - 'osd', - 'crush', - 'set', - str(osd_id), - str(get_osd_weight(osd_id)), - "root={}".format(target_bucket) - ] - ) - - except Exception as exc: - msg = "Failed to move OSD " \ - "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - -def handle_rgw_create_user(request, service): - """Create a new rados gateway user. - - :param request: dict of request operations and params - :param service: The ceph client to run the command under. - :returns: dict. 
exit-code and reason if not 0 - """ - user_id = request.get('rgw-uid') - display_name = request.get('display-name') - name = request.get('client-name') - if not name or not display_name or not user_id: - msg = "Missing client-name, display-name or rgw-uid" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - try: - create_output = check_output( - [ - 'radosgw-admin', - '--id', service, - 'user', - 'create', - '--uid', user_id, - '--display-name', display_name, - '--name', name, - '--system' - ] - ) - try: - user_json = json.loads(str(create_output.decode('UTF-8'))) - return {'exit-code': 0, 'user': user_json} - except ValueError as err: - log(err, level=ERROR) - return {'exit-code': 1, 'stderr': err} - - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_create_cephfs(request, service): - """Create a new cephfs. - - :param request: The broker request - :param service: The ceph client to run the command under. - :returns: dict. exit-code and reason if not 0 - """ - cephfs_name = request.get('mds_name') - data_pool = request.get('data_pool') - extra_pools = request.get('extra_pools', None) or [] - metadata_pool = request.get('metadata_pool') - # Check if the user params were provided - if not cephfs_name or not data_pool or not metadata_pool: - msg = "Missing mds_name, data_pool or metadata_pool params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - # Sanity check that the required pools exist - for pool_name in [data_pool, metadata_pool] + extra_pools: - if not pool_exists(service=service, name=pool_name): - msg = "CephFS pool {} does not exist. Cannot create CephFS".format( - pool_name) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if get_cephfs(service=service): - # CephFS new has already been called - log("CephFS already created") - return - - # Finally create CephFS - try: - check_output(["ceph", - '--id', service, - "fs", "new", cephfs_name, - metadata_pool, - data_pool]) - except CalledProcessError as err: - if err.returncode == 22: - log("CephFS already created") - return - else: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - for pool_name in extra_pools: - cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name, - pool_name] - try: - check_output(cmd) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - - -def handle_rgw_region_set(request, service): - # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 - """Set the rados gateway region. - - :param request: dict. The broker request. - :param service: The ceph client to run the command under. - :returns: dict. 
exit-code and reason if not 0 - """ - json_file = request.get('region-json') - name = request.get('client-name') - region_name = request.get('region-name') - zone_name = request.get('zone-name') - if not json_file or not name or not region_name or not zone_name: - msg = "Missing region-json, client-name, region-name or zone-name params" - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - infile = NamedTemporaryFile(delete=False) - with open(infile.name, 'w') as infile_handle: - infile_handle.write(json_file) - try: - check_output( - [ - 'radosgw-admin', - '--id', service, - 'region', - 'set', - '--rgw-zone', zone_name, - '--infile', infile.name, - '--name', name, - ] - ) - except CalledProcessError as err: - log(err.output, level=ERROR) - return {'exit-code': 1, 'stderr': err.output} - finally: - # Remove the temporary file whether or not the command succeeded - os.unlink(infile.name) - - -def process_requests_v1(reqs): - """Process v1 requests. - - Takes a list of requests (dicts) and processes each one. If an error is - found, processing stops and the client is notified in the response. - - Returns a response dict containing the exit code (non-zero if any - operation failed) along with an explanation. - """ - ret = None - log("Processing {} ceph broker requests".format(len(reqs)), level=INFO) - for req in reqs: - op = req.get('op') - log("Processing op='{}'".format(op), level=DEBUG) - # Use admin client since we do not have other client key locations - # setup to use them for these operations. - svc = 'admin' - if op == "create-pool": - pool_type = req.get('pool-type') # "replicated" | "erasure" - - # Default to replicated if pool_type isn't given - if pool_type == 'erasure': - ret = handle_erasure_pool(request=req, service=svc) - else: - ret = handle_replicated_pool(request=req, service=svc) - elif op == "create-cephfs": - ret = handle_create_cephfs(request=req, service=svc) - elif op == "create-cache-tier": - ret = handle_create_cache_tier(request=req, service=svc) - elif op == "remove-cache-tier": - ret = handle_remove_cache_tier(request=req, service=svc) - elif op == "create-erasure-profile": - ret = handle_create_erasure_profile(request=req, service=svc) - elif op == "delete-pool": - pool = req.get('name') - ret = delete_pool(service=svc, name=pool) - elif op == "rename-pool": - old_name = req.get('name') - new_name = req.get('new-name') - ret = rename_pool(service=svc, old_name=old_name, - new_name=new_name) - elif op == "snapshot-pool": - pool = req.get('name') - snapshot_name = req.get('snapshot-name') - ret = snapshot_pool(service=svc, pool_name=pool, - snapshot_name=snapshot_name) - elif op == "remove-pool-snapshot": - pool = req.get('name') - snapshot_name = req.get('snapshot-name') - ret = remove_pool_snapshot(service=svc, pool_name=pool, - snapshot_name=snapshot_name) - elif op == "set-pool-value": - ret = handle_set_pool_value(request=req, service=svc) - elif op == "rgw-region-set": - ret = handle_rgw_region_set(request=req, service=svc) - elif op == "rgw-zone-set": - ret = handle_rgw_zone_set(request=req, service=svc) - elif op == "rgw-regionmap-update": - ret = handle_rgw_regionmap_update(request=req, service=svc) - elif op == "rgw-regionmap-default": - ret = handle_rgw_regionmap_default(request=req, service=svc) - elif op == "rgw-create-user": - ret = handle_rgw_create_user(request=req, service=svc) - elif op == "move-osd-to-bucket": - ret = handle_put_osd_in_bucket(request=req, service=svc) - elif op == "add-permissions-to-key": - ret = handle_add_permissions_to_key(request=req, service=svc) - elif op == 'set-key-permissions': - ret = 
handle_set_key_permissions(request=req, service=svc) - else: - msg = "Unknown operation '{}'".format(op) - log(msg, level=ERROR) - return {'exit-code': 1, 'stderr': msg} - - if isinstance(ret, dict) and 'exit-code' in ret: - return ret - - return {'exit-code': 0} diff --git a/lib/charms_ceph/crush_utils.py b/lib/charms_ceph/crush_utils.py deleted file mode 100644 index 37084bf1..00000000 --- a/lib/charms_ceph/crush_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2014 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from subprocess import check_output, CalledProcessError - -from charmhelpers.core.hookenv import ( - log, - ERROR, -) - -CRUSH_BUCKET = """root {name} {{ - id {id} # do not change unnecessarily - # weight 0.000 - alg straw2 - hash 0 # rjenkins1 -}} - -rule {name} {{ - ruleset 0 - type replicated - min_size 1 - max_size 10 - step take {name} - step chooseleaf firstn 0 type host - step emit -}}""" - -# This regular expression looks for a string like: -# root NAME { -# id NUMBER -# so that we can extract NAME and ID from the crushmap -CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)") - -# This regular expression looks for ID strings in the crushmap like: -# id NUMBER -# so that we can extract the IDs from a crushmap -CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)") - - -class Crushmap(object): - """An object oriented approach to Ceph crushmap management.""" - - def __init__(self): - self._crushmap = self.load_crushmap() - roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap) - buckets = [] - ids = list(map( - lambda x: int(x), - re.findall(CRUSHMAP_ID_RE, self._crushmap))) - ids = sorted(ids) - if roots != []: - for root in roots: - buckets.append(CRUSHBucket(root[0], root[1], True)) - - self._buckets = buckets - if ids != []: - self._ids = ids - else: - self._ids = [0] - - def load_crushmap(self): - try: - # getcrushmap emits a compiled (binary) map; keep it as bytes - # and feed it to crushtool via input= rather than stdin= - crush = check_output(['ceph', 'osd', 'getcrushmap']) - return str(check_output(['crushtool', '-d', '-'], - input=crush) - .decode('UTF-8')) - except CalledProcessError as e: - log("Error occurred while loading and decompiling CRUSH map:" - "{}".format(e), ERROR) - raise - - def ensure_bucket_is_present(self, bucket_name): - if bucket_name not in [bucket.name for bucket in self.buckets()]: - self.add_bucket(bucket_name) - self.save() - - def buckets(self): - """Return a list of buckets that are in the Crushmap.""" - return self._buckets - - def add_bucket(self, bucket_name): - """Add a named bucket to Ceph""" - new_id = min(self._ids) - 1 - self._ids.append(new_id) - self._buckets.append(CRUSHBucket(bucket_name, new_id)) - - def save(self): - """Persist Crushmap to Ceph""" - try: - crushmap = self.build_crushmap() - compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o', - '/dev/stdout'], - input=crushmap.encode('UTF-8')) - ceph_output = str(check_output(['ceph', 'osd', 'setcrushmap', '-i', - '/dev/stdin'], input=compiled) - .decode('UTF-8')) - return ceph_output - except CalledProcessError as e: - log("save error: {}".format(e), level=ERROR) - raise - - def build_crushmap(self): - """Modifies the current CRUSH map to include the new buckets""" - tmp_crushmap = self._crushmap - for bucket in self._buckets: - if not bucket.default: - tmp_crushmap = "{}\n\n{}".format( - tmp_crushmap, - Crushmap.bucket_string(bucket.name, bucket.id)) - - return tmp_crushmap - - @staticmethod - def bucket_string(name, id): - return CRUSH_BUCKET.format(name=name, id=id) - - -class CRUSHBucket(object): - """CRUSH bucket description object.""" - - def __init__(self, name, id, default=False): - self.name = name - self.id = int(id) - self.default = default - - def __repr__(self): - return "Bucket {{Name: {name}, ID: {id}}}".format( - name=self.name, id=self.id) - - def __eq__(self, other): - """Override the default Equals behavior""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return NotImplemented - - def __ne__(self, other): - """Define a non-equality test""" - if isinstance(other, self.__class__): - return not self.__eq__(other) - return NotImplemented diff --git a/lib/charms_ceph/utils.py b/lib/charms_ceph/utils.py deleted file mode 100644 index e6adcb82..00000000 --- a/lib/charms_ceph/utils.py +++ /dev/null @@ -1,3583 +0,0 @@ -# Copyright 2017-2021 Canonical Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
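Aside: the id-allocation convention in the deleted Crushmap class above is easy to miss, so here is a minimal, self-contained sketch of it (illustrative only, not charm code; the trimmed template and the 'ssd-root' bucket name are assumptions):

    # Sketch: mirrors Crushmap.add_bucket's "min(existing ids) - 1" rule,
    # which guarantees the new bucket id cannot collide with any id
    # already present in the decompiled map.
    CRUSH_BUCKET = """root {name} {{
        id {id}  # do not change unnecessarily
        alg straw2
        hash 0  # rjenkins1
    }}"""

    ids = sorted([-1, -2, -4])     # ids scraped with CRUSHMAP_ID_RE
    new_id = min(ids) - 1          # -> -5
    print(CRUSH_BUCKET.format(name='ssd-root', id=new_id))

The rendered text is appended to the decompiled map by build_crushmap() and recompiled on save().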
- -import collections -import glob -import itertools -import json -import os -import pyudev -import random -import re -import socket -import subprocess -import sys -import time -import uuid -import functools - -from contextlib import contextmanager -from datetime import datetime - -from charmhelpers.core import hookenv -from charmhelpers.core import templating -from charmhelpers.core.host import ( - chownr, - cmp_pkgrevno, - lsb_release, - mkdir, - owner, - service_restart, - service_start, - service_stop, - CompareHostReleases, - write_file, - is_container, -) -from charmhelpers.core.hookenv import ( - cached, - config, - log, - status_set, - DEBUG, - ERROR, - WARNING, - storage_get, - storage_list, -) -from charmhelpers.fetch import ( - add_source, - apt_install, - apt_purge, - apt_update, - filter_missing_packages, - get_installed_version -) -from charmhelpers.contrib.storage.linux.ceph import ( - get_mon_map, - monitor_key_set, - monitor_key_exists, - monitor_key_get, -) -from charmhelpers.contrib.storage.linux.utils import ( - is_block_device, - is_device_mounted, -) -from charmhelpers.contrib.openstack.utils import ( - get_os_codename_install_source, -) -from charmhelpers.contrib.storage.linux import lvm -from charmhelpers.core.unitdata import kv - -CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph') -OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd') -HDPARM_FILE = os.path.join(os.sep, 'etc', 'hdparm.conf') - -LEADER = 'leader' -PEON = 'peon' -QUORUM = [LEADER, PEON] - -PACKAGES = ['ceph', 'gdisk', - 'radosgw', 'xfsprogs', - 'lvm2', 'parted', 'smartmontools'] - -REMOVE_PACKAGES = [] -CHRONY_PACKAGE = 'chrony' - -CEPH_KEY_MANAGER = 'ceph' -VAULT_KEY_MANAGER = 'vault' -KEY_MANAGERS = [ - CEPH_KEY_MANAGER, - VAULT_KEY_MANAGER, -] - -LinkSpeed = { - "BASE_10": 10, - "BASE_100": 100, - "BASE_1000": 1000, - "GBASE_10": 10000, - "GBASE_40": 40000, - "GBASE_100": 100000, - "UNKNOWN": None -} - -# Mapping of adapter speed to sysctl settings -NETWORK_ADAPTER_SYSCTLS = { - # 10Gb - LinkSpeed["GBASE_10"]: { - 'net.core.rmem_default': 524287, - 'net.core.wmem_default': 524287, - 'net.core.rmem_max': 524287, - 'net.core.wmem_max': 524287, - 'net.core.optmem_max': 524287, - 'net.core.netdev_max_backlog': 300000, - 'net.ipv4.tcp_rmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_wmem': '10000000 10000000 10000000', - 'net.ipv4.tcp_mem': '10000000 10000000 10000000' - }, - # Mellanox 10/40Gb - LinkSpeed["GBASE_40"]: { - 'net.ipv4.tcp_timestamps': 0, - 'net.ipv4.tcp_sack': 1, - 'net.core.netdev_max_backlog': 250000, - 'net.core.rmem_max': 4194304, - 'net.core.wmem_max': 4194304, - 'net.core.rmem_default': 4194304, - 'net.core.wmem_default': 4194304, - 'net.core.optmem_max': 4194304, - 'net.ipv4.tcp_rmem': '4096 87380 4194304', - 'net.ipv4.tcp_wmem': '4096 65536 4194304', - 'net.ipv4.tcp_low_latency': 1, - 'net.ipv4.tcp_adv_win_scale': 1 - } -} - - -class Partition(object): - def __init__(self, name, number, size, start, end, sectors, uuid): - """A block device partition. 
- - :param name: Name of block device - :param number: Partition number - :param size: Capacity of the device - :param start: Starting block - :param end: Ending block - :param sectors: Number of blocks - :param uuid: UUID of the partition - """ - self.name = name - self.number = number - self.size = size - self.start = start - self.end = end - self.sectors = sectors - self.uuid = uuid - - def __str__(self): - return "number: {} start: {} end: {} sectors: {} size: {} " \ - "name: {} uuid: {}".format(self.number, self.start, - self.end, - self.sectors, self.size, - self.name, self.uuid) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -def unmounted_disks(): - """List of unmounted block devices on the current host.""" - disks = [] - context = pyudev.Context() - for device in context.list_devices(DEVTYPE='disk'): - if device['SUBSYSTEM'] == 'block': - if device.device_node is None: - continue - - matched = False - for block_type in [u'dm-', u'loop', u'ram', u'nbd']: - if block_type in device.device_node: - matched = True - if matched: - continue - - disks.append(device.device_node) - log("Found disks: {}".format(disks)) - return [disk for disk in disks if not is_device_mounted(disk)] - - -def save_sysctls(sysctl_dict, save_location): - """Persist the sysctls to the hard drive. - - :param sysctl_dict: dict - :param save_location: path to save the settings to - :raises: IOError if anything goes wrong with writing. - """ - try: - # Persist the settings for reboots - with open(save_location, "w") as fd: - for key, value in sysctl_dict.items(): - fd.write("{}={}\n".format(key, value)) - - except IOError as e: - log("Unable to persist sysctl settings to {}. Error {}".format( - save_location, e), level=ERROR) - raise - - -def tune_nic(network_interface): - """This will set optimal sysctls for the particular network adapter. - - :param network_interface: string The network adapter name. - """ - speed = get_link_speed(network_interface) - if speed in NETWORK_ADAPTER_SYSCTLS: - status_set('maintenance', 'Tuning device {}'.format( - network_interface)) - sysctl_file = os.path.join( - os.sep, - 'etc', - 'sysctl.d', - '51-ceph-osd-charm-{}.conf'.format(network_interface)) - try: - log("Saving sysctl_file: {} values: {}".format( - sysctl_file, NETWORK_ADAPTER_SYSCTLS[speed]), - level=DEBUG) - save_sysctls(sysctl_dict=NETWORK_ADAPTER_SYSCTLS[speed], - save_location=sysctl_file) - except IOError as e: - log("Write to /etc/sysctl.d/51-ceph-osd-charm-{} " - "failed. {}".format(network_interface, e), - level=ERROR) - - try: - # Apply the settings - log("Applying sysctl settings", level=DEBUG) - subprocess.check_output(["sysctl", "-p", sysctl_file]) - except subprocess.CalledProcessError as err: - log('sysctl -p {} failed with error {}'.format(sysctl_file, - err.output), - level=ERROR) - else: - log("No settings found for network adapter: {}".format( - network_interface), level=DEBUG) - - -def get_link_speed(network_interface): - """This will find the link speed for a given network device. Returns None - if an error occurs. - :param network_interface: string The network adapter interface. 
- :returns: LinkSpeed - """ - speed_path = os.path.join(os.sep, 'sys', 'class', 'net', - network_interface, 'speed') - # I'm not sure where else we'd check if this doesn't exist - if not os.path.exists(speed_path): - return LinkSpeed["UNKNOWN"] - - try: - with open(speed_path, 'r') as sysfs: - nic_speed = sysfs.readlines() - - # Did we actually read anything? - if not nic_speed: - return LinkSpeed["UNKNOWN"] - - # Try to find a sysctl match for this particular speed - for name, speed in LinkSpeed.items(): - if speed == int(nic_speed[0].strip()): - return speed - # Default to UNKNOWN if we can't find a match - return LinkSpeed["UNKNOWN"] - except IOError as e: - log("Unable to open {path} because of error: {error}".format( - path=speed_path, - error=e), level=ERROR) - return LinkSpeed["UNKNOWN"] - - -def persist_settings(settings_dict): - # Write all settings to /etc/hdparm.conf - """This will persist the hard drive settings to the /etc/hdparm.conf file - - The settings_dict should be in the form of {"uuid": {"key":"value"}} - - :param settings_dict: dict of settings to save - """ - if not settings_dict: - return - - try: - templating.render(source='hdparm.conf', target=HDPARM_FILE, - context=settings_dict) - except IOError as err: - log("Unable to open {path} because of error: {error}".format( - path=HDPARM_FILE, error=err), level=ERROR) - except Exception as e: - # The templating.render can raise a jinja2 exception if the - # template is not found. Rather than polluting the import - # space of this charm, simply catch Exception - log('Unable to render {path} due to error: {error}'.format( - path=HDPARM_FILE, error=e), level=ERROR) - - -def set_max_sectors_kb(dev_name, max_sectors_size): - """This function sets the max_sectors_kb size of a given block device. - - :param dev_name: Name of the block device to query - :param max_sectors_size: int of the max_sectors_size to save - """ - max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, - 'queue', 'max_sectors_kb') - try: - with open(max_sectors_kb_path, 'w') as f: - f.write(str(max_sectors_size)) - except IOError as e: - log('Failed to write max_sectors_kb to {}. Error: {}'.format( - max_sectors_kb_path, e), level=ERROR) - - -def get_max_sectors_kb(dev_name): - """This function gets the max_sectors_kb size of a given block device. - - :param dev_name: Name of the block device to query - :returns: int which is either the max_sectors_kb or 0 on error. - """ - max_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, - 'queue', 'max_sectors_kb') - - # Read in what Linux has set by default - if os.path.exists(max_sectors_kb_path): - try: - with open(max_sectors_kb_path, 'r') as f: - max_sectors_kb = f.read().strip() - return int(max_sectors_kb) - except IOError as e: - log('Failed to read max_sectors_kb from {}. Error: {}'.format( - max_sectors_kb_path, e), level=ERROR) - # Bail. - return 0 - return 0 - - -def get_max_hw_sectors_kb(dev_name): - """This function gets the max_hw_sectors_kb for a given block device. - - :param dev_name: Name of the block device to query - :returns: int which is either the max_hw_sectors_kb or 0 on error. - """ - max_hw_sectors_kb_path = os.path.join(os.sep, 'sys', 'block', dev_name, - 'queue', 'max_hw_sectors_kb') - # Read in what the hardware supports - if os.path.exists(max_hw_sectors_kb_path): - try: - with open(max_hw_sectors_kb_path, 'r') as f: - max_hw_sectors_kb = f.read().strip() - return int(max_hw_sectors_kb) - except IOError as e: - log('Failed to read max_hw_sectors_kb from {}. 
Error: {}'.format( - max_hw_sectors_kb_path, e), level=ERROR) - return 0 - return 0 - - -def set_hdd_read_ahead(dev_name, read_ahead_sectors=256): - """This function sets the hard drive read ahead. - - :param dev_name: Name of the block device to set read ahead on. - :param read_ahead_sectors: int How many sectors to read ahead. - """ - try: - # Set the read ahead sectors to 256 - log('Setting read ahead to {} for device {}'.format( - read_ahead_sectors, - dev_name)) - subprocess.check_output(['hdparm', - '-a{}'.format(read_ahead_sectors), - dev_name]) - except subprocess.CalledProcessError as e: - log('hdparm failed with error: {}'.format(e.output), - level=ERROR) - - -def get_block_uuid(block_dev): - """This queries blkid to get the uuid for a block device. - - :param block_dev: Name of the block device to query. - :returns: The UUID of the device or None on Error. - """ - try: - block_info = str(subprocess - .check_output(['blkid', '-o', 'export', block_dev]) - .decode('UTF-8')) - for tag in block_info.split('\n'): - parts = tag.split('=') - if parts[0] == 'UUID': - return parts[1] - return None - except subprocess.CalledProcessError as err: - log('get_block_uuid failed with error: {}'.format(err.output), - level=ERROR) - return None - - -def check_max_sectors(save_settings_dict, - block_dev, - uuid): - """Tune the max_hw_sectors if needed. - - make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb or at - least 1MB for spinning disks - If the box has a RAID card with cache this could go much bigger. - - :param save_settings_dict: The dict used to persist settings - :param block_dev: A block device name: Example: /dev/sda - :param uuid: The uuid of the block device - """ - dev_name = None - path_parts = os.path.split(block_dev) - if len(path_parts) == 2: - dev_name = path_parts[1] - else: - log('Unable to determine the block device name from path: {}'.format( - block_dev)) - # Play it safe and bail - return - max_sectors_kb = get_max_sectors_kb(dev_name=dev_name) - max_hw_sectors_kb = get_max_hw_sectors_kb(dev_name=dev_name) - - if max_sectors_kb < max_hw_sectors_kb: - # OK we have a situation where the hardware supports more than Linux is - # currently requesting - config_max_sectors_kb = hookenv.config('max-sectors-kb') - if config_max_sectors_kb < max_hw_sectors_kb: - # Set the max_sectors_kb to the config.yaml value if it is less - # than the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, config_max_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid][ - "read_ahead_sect"] = config_max_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=config_max_sectors_kb) - else: - # Set to the max_hw_sectors_kb - log('Setting max_sectors_kb for device {} to {}'.format( - dev_name, max_hw_sectors_kb)) - save_settings_dict[ - "drive_settings"][uuid]['read_ahead_sect'] = max_hw_sectors_kb - set_max_sectors_kb(dev_name=dev_name, - max_sectors_size=max_hw_sectors_kb) - else: - log('max_sectors_kb match max_hw_sectors_kb. No change needed for ' - 'device: {}'.format(block_dev)) - - -def tune_dev(block_dev): - """Try to make some intelligent decisions with HDD tuning. Future work will - include optimizing SSDs. - - This function will change the read ahead sectors and the max write - sectors for each block device. - - :param block_dev: A block device name: Example: /dev/sda - """ - uuid = get_block_uuid(block_dev) - if uuid is None: - log('block device {} uuid is None. 
Unable to save to ' - 'hdparm.conf'.format(block_dev), level=DEBUG) - return - save_settings_dict = {} - log('Tuning device {}'.format(block_dev)) - status_set('maintenance', 'Tuning device {}'.format(block_dev)) - set_hdd_read_ahead(block_dev) - save_settings_dict["drive_settings"] = {} - save_settings_dict["drive_settings"][uuid] = {} - save_settings_dict["drive_settings"][uuid]['read_ahead_sect'] = 256 - - check_max_sectors(block_dev=block_dev, - save_settings_dict=save_settings_dict, - uuid=uuid) - - persist_settings(settings_dict=save_settings_dict) - status_set('maintenance', 'Finished tuning device {}'.format(block_dev)) - - -def ceph_user(): - return 'ceph' - - -class CrushLocation(object): - def __init__(self, identifier, name, osd="", host="", chassis="", - rack="", row="", pdu="", pod="", room="", - datacenter="", zone="", region="", root=""): - self.identifier = identifier - self.name = name - self.osd = osd - self.host = host - self.chassis = chassis - self.rack = rack - self.row = row - self.pdu = pdu - self.pod = pod - self.room = room - self.datacenter = datacenter - self.zone = zone - self.region = region - self.root = root - - def __str__(self): - return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \ - "row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \ - "region: {} root: {}".format(self.name, self.identifier, - self.osd, self.host, self.chassis, - self.rack, self.row, self.pdu, - self.pod, self.room, - self.datacenter, self.zone, - self.region, self.root) - - def __eq__(self, other): - return not self.name < other.name and not other.name < self.name - - def __ne__(self, other): - return self.name < other.name or other.name < self.name - - def __gt__(self, other): - return self.name > other.name - - def __ge__(self, other): - return not self.name < other.name - - def __le__(self, other): - return self.name < other.name - - -def get_osd_weight(osd_id): - """Returns the weight of the specified OSD. - - :returns: Float - :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our Ceph command fails. - """ - try: - tree = str(subprocess - .check_output(['ceph', 'osd', 'tree', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - # Make sure children are present in the JSON - if not json_tree['nodes']: - return None - for device in json_tree['nodes']: - if device['type'] == 'osd' and device['name'] == osd_id: - return device['crush_weight'] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format( - e)) - raise - - -def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type): - """Get all nodes of the desired type, with all their attributes. - - These attributes can be direct or inherited from ancestors. 
- """ - attribute_dict = {node['type']: node['name']} - if node['type'] == lookup_type: - attribute_dict['name'] = node['name'] - attribute_dict['identifier'] = node['id'] - return [attribute_dict] - elif not node.get('children'): - return [attribute_dict] - else: - descendant_attribute_dicts = [ - _filter_nodes_and_set_attributes(node_lookup_map[node_id], - node_lookup_map, lookup_type) - for node_id in node.get('children', []) - ] - return [dict(attribute_dict, **descendant_attribute_dict) - for descendant_attribute_dict - in itertools.chain.from_iterable(descendant_attribute_dicts)] - - -def _flatten_roots(nodes, lookup_type='host'): - """Get a flattened list of nodes of the desired type. - - :param nodes: list of nodes defined as a dictionary of attributes and - children - :type nodes: List[Dict[int, Any]] - :param lookup_type: type of searched node - :type lookup_type: str - :returns: flattened list of nodes - :rtype: List[Dict[str, Any]] - """ - lookup_map = {node['id']: node for node in nodes} - root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map, - lookup_type) - for node in nodes if node['type'] == 'root'] - # get a flattened list of roots. - return list(itertools.chain.from_iterable(root_attributes_dicts)) - - -def get_osd_tree(service): - """Returns the current OSD map in JSON. - - :returns: List. - :rtype: List[CrushLocation] - :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our Ceph command fails - """ - try: - tree = str(subprocess - .check_output(['ceph', '--id', service, - 'osd', 'tree', '--format=json']) - .decode('UTF-8')) - try: - json_tree = json.loads(tree) - roots = _flatten_roots(json_tree["nodes"]) - return [CrushLocation(**host) for host in roots] - except ValueError as v: - log("Unable to parse ceph tree json: {}. Error: {}".format( - tree, v)) - raise - except subprocess.CalledProcessError as e: - log("ceph osd tree command failed with message: {}".format(e)) - raise - - -def _get_child_dirs(path): - """Returns a list of directory names in the specified path. - - :param path: a full path listing of the parent directory to return child - directory names - :returns: list. A list of child directories under the parent directory - :raises: ValueError if the specified path does not exist or is not a - directory, - OSError if an error occurs reading the directory listing - """ - if not os.path.exists(path): - raise ValueError('Specified path "%s" does not exist' % path) - if not os.path.isdir(path): - raise ValueError('Specified path "%s" is not a directory' % path) - - files_in_dir = [os.path.join(path, f) for f in os.listdir(path)] - return list(filter(os.path.isdir, files_in_dir)) - - -def _get_osd_num_from_dirname(dirname): - """Parses the dirname and returns the OSD id. - - Parses a string in the form of 'ceph-{osd#}' and returns the OSD number - from the directory name. - - :param dirname: the directory name to return the OSD number from - :return int: the OSD number the directory name corresponds to - :raises ValueError: if the OSD number cannot be parsed from the provided - directory name. - """ - match = re.search(r'ceph-(?P\d+)', dirname) - if not match: - raise ValueError("dirname not in correct format: {}".format(dirname)) - - return match.group('osd_id') - - -def get_local_osd_ids(): - """This will list the /var/lib/ceph/osd/* directories and try - to split the ID off of the directory name and return it in - a list. - - :returns: list. 
A list of OSD identifiers - :raises: OSError if something goes wrong with listing the directory. - """ - osd_ids = [] - osd_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'osd') - if os.path.exists(osd_path): - try: - dirs = os.listdir(osd_path) - for osd_dir in dirs: - osd_id = osd_dir.split('-')[1] - if (_is_int(osd_id) and - filesystem_mounted(os.path.join( - os.sep, osd_path, osd_dir))): - osd_ids.append(osd_id) - except OSError: - raise - return osd_ids - - -def get_local_mon_ids(): - """This will list the /var/lib/ceph/mon/* directories and try - to split the ID off of the directory name and return it in - a list. - - :returns: list. A list of monitor identifiers - :raises: OSError if something goes wrong with listing the directory. - """ - mon_ids = [] - mon_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mon') - if os.path.exists(mon_path): - try: - dirs = os.listdir(mon_path) - for mon_dir in dirs: - # Basically this takes everything after ceph- as the monitor ID - match = re.search(r'ceph-(?P<mon_id>.*)', mon_dir) - if match: - mon_ids.append(match.group('mon_id')) - except OSError: - raise - return mon_ids - - -def _is_int(v): - """Return True if the object v can be turned into an integer.""" - try: - int(v) - return True - except ValueError: - return False - - -def get_version(): - """Derive Ceph release from an installed package.""" - import apt_pkg as apt - - package = "ceph" - - current_ver = get_installed_version(package) - if not current_ver: - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(current_ver.ver_str) - - # x.y match only for 20XX.X - # and ignore patch level for other packages - match = re.match(r'^(\d+)\.(\d+)', vers) - - if match: - vers = match.group(0) - return float(vers) - - -def error_out(msg): - log("FATAL ERROR: {}".format(msg), - level=ERROR) - sys.exit(1) - - -def is_quorum(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(str(subprocess - .check_output(cmd) - .decode('UTF-8'))) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] in QUORUM: - return True - else: - return False - else: - return False - - -def is_leader(): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "mon_status" - ] - if os.path.exists(asok): - try: - result = json.loads(str(subprocess - .check_output(cmd) - .decode('UTF-8'))) - except subprocess.CalledProcessError: - return False - except ValueError: - # Non JSON response from mon_status - return False - if result['state'] == LEADER: - return True - else: - return False - else: - return False - - -def manager_available(): - # If the manager daemon isn't in this release, just say it is fine - if cmp_pkgrevno('ceph', '11.0.0') < 0: - return True - cmd = ["sudo", "-u", "ceph", "ceph", "mgr", "dump", "-f", "json"] - try: - result = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return result['available'] - except subprocess.CalledProcessError as e: - log("'{}' failed: {}".format(" ".join(cmd), str(e))) - return False - except Exception: - return False - - -def wait_for_quorum(): - while not is_quorum(): - log("Waiting for 
quorum to be reached") - time.sleep(3) - - -def wait_for_manager(): - while not manager_available(): - log("Waiting for manager to be available") - time.sleep(5) - - -def add_bootstrap_hint(peer): - asok = "/var/run/ceph/ceph-mon.{}.asok".format(socket.gethostname()) - cmd = [ - "sudo", - "-u", - ceph_user(), - "ceph", - "--admin-daemon", - asok, - "add_bootstrap_peer_hint", - peer - ] - if os.path.exists(asok): - # Ignore any errors for this call - subprocess.call(cmd) - - -DISK_FORMATS = [ - 'xfs', - 'ext4', - 'btrfs' -] - -CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation -] - - -def get_partition_list(dev): - """Lists the partitions of a block device. - - :param dev: Path to a block device. ex: /dev/sda - :returns: Returns a list of Partition objects. - :raises: CalledProcessException if lsblk fails - """ - partitions_list = [] - try: - partitions = get_partitions(dev) - # For each line of output - for partition in partitions: - parts = partition.split() - try: - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name=parts[5], - uuid=parts[6]) - ) - except IndexError: - partitions_list.append( - Partition(number=parts[0], - start=parts[1], - end=parts[2], - sectors=parts[3], - size=parts[4], - name="", - uuid=parts[5]) - ) - - return partitions_list - except subprocess.CalledProcessError: - raise - - -def is_pristine_disk(dev): - """ - Read first 2048 bytes (LBA 0 - 3) of block device to determine whether it - is actually all zeros and safe for us to use. - - Existing partitioning tools does not discern between a failure to read from - block device, failure to understand a partition table and the fact that a - block device has no partition table. Since we need to be positive about - which is which we need to read the device directly and confirm ourselves. - - :param dev: Path to block device - :type dev: str - :returns: True all 2048 bytes == 0x0, False if not - :rtype: bool - """ - want_bytes = 2048 - - try: - f = open(dev, 'rb') - except OSError as e: - log(e) - return False - - data = f.read(want_bytes) - read_bytes = len(data) - if read_bytes != want_bytes: - log('{}: short read, got {} bytes expected {}.' - .format(dev, read_bytes, want_bytes), level=WARNING) - return False - - return all(byte == 0x0 for byte in data) - - -def is_osd_disk(dev): - db = kv() - osd_devices = db.get('osd-devices', []) - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return True - - partitions = get_partition_list(dev) - for partition in partitions: - try: - info = str(subprocess - .check_output(['sgdisk', '-i', partition.number, dev]) - .decode('UTF-8')) - info = info.split("\n") # IGNORE:E1103 - for line in info: - for ptype in CEPH_PARTITIONS: - sig = 'Partition GUID code: {}'.format(ptype) - if line.startswith(sig): - return True - except subprocess.CalledProcessError as e: - log("sgdisk inspection of partition {} on {} failed with " - "error: {}. 
Skipping".format(partition.minor, dev, e), - level=ERROR) - return False - - -def start_osds(devices): - # Scan for Ceph block devices - rescan_osd_devices() - if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and - cmp_pkgrevno('ceph', '14.2.0') < 0): - # Use ceph-disk activate for directory based OSD's - for dev_or_path in devices: - if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path): - subprocess.check_call( - ['ceph-disk', 'activate', dev_or_path]) - - -def udevadm_settle(): - cmd = ['udevadm', 'settle'] - subprocess.call(cmd) - - -def rescan_osd_devices(): - cmd = [ - 'udevadm', 'trigger', - '--subsystem-match=block', '--action=add' - ] - - subprocess.call(cmd) - - udevadm_settle() - - -_client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring' - - -def is_bootstrapped(): - return os.path.exists( - '/var/lib/ceph/mon/ceph-{}/done'.format(socket.gethostname())) - - -def wait_for_bootstrap(): - while not is_bootstrapped(): - time.sleep(3) - - -def generate_monitor_secret(): - cmd = [ - 'ceph-authtool', - '/dev/stdout', - '--name=mon.', - '--gen-key' - ] - res = str(subprocess.check_output(cmd).decode('UTF-8')) - - return "{}==".format(res.split('=')[1].strip()) - - -# OSD caps taken from ceph-create-keys -_osd_bootstrap_caps = { - 'mon': [ - 'allow command osd create ...', - 'allow command osd crush set ...', - r'allow command auth add * osd allow\ * mon allow\ rwx', - 'allow command mon getmap' - ] -} - -_osd_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-osd' - ] -} - - -def parse_key(raw_key): - # get-or-create appears to have different output depending - # on whether its 'get' or 'create' - # 'create' just returns the key, 'get' is more verbose and - # needs parsing - key = None - if len(raw_key.splitlines()) == 1: - key = raw_key - else: - for element in raw_key.splitlines(): - if 'key' in element: - return element.split(' = ')[1].strip() # IGNORE:E1103 - return key - - -def get_osd_bootstrap_key(): - try: - # Attempt to get/create a key using the OSD bootstrap profile first - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps_profile) - except Exception: - # If that fails try with the older style permissions - key = get_named_key('bootstrap-osd', - _osd_bootstrap_caps) - return key - - -_radosgw_keyring = "/etc/ceph/keyring.rados.gateway" - - -def import_radosgw_key(key): - if not os.path.exists(_radosgw_keyring): - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph-authtool', - _radosgw_keyring, - '--create-keyring', - '--name=client.radosgw.gateway', - '--add-key={}'.format(key) - ] - subprocess.check_call(cmd) - - -# OSD caps taken from ceph-create-keys -_radosgw_caps = { - 'mon': ['allow rw'], - 'osd': ['allow rwx'] -} -_upgrade_caps = { - 'mon': ['allow rwx'] -} - - -def get_radosgw_key(pool_list=None, name=None): - return get_named_key(name=name or 'radosgw.gateway', - caps=_radosgw_caps, - pool_list=pool_list) - - -def get_mds_key(name): - return create_named_keyring(entity='mds', - name=name, - caps=mds_caps) - - -_mds_bootstrap_caps_profile = { - 'mon': [ - 'allow profile bootstrap-mds' - ] -} - - -def get_mds_bootstrap_key(): - return get_named_key('bootstrap-mds', - _mds_bootstrap_caps_profile) - - -_default_caps = collections.OrderedDict([ - ('mon', ['allow r', - 'allow command "osd blacklist"', - 'allow command "osd blocklist"']), - ('osd', ['allow rwx']), -]) - -admin_caps = collections.OrderedDict([ - ('mds', ['allow *']), - ('mgr', ['allow *']), - ('mon', ['allow *']), - ('osd', ['allow *']) -]) - -mds_caps = collections.OrderedDict([ - 
('osd', ['allow *']), - ('mds', ['allow']), - ('mon', ['allow rwx']), -]) - -osd_upgrade_caps = collections.OrderedDict([ - ('mon', ['allow command "config-key"', - 'allow command "osd tree"', - 'allow command "config-key list"', - 'allow command "config-key put"', - 'allow command "config-key get"', - 'allow command "config-key exists"', - 'allow command "osd out"', - 'allow command "osd in"', - 'allow command "osd rm"', - 'allow command "auth del"', - ]) -]) - -rbd_mirror_caps = collections.OrderedDict([ - ('mon', ['allow profile rbd-mirror-peer', - 'allow command "service dump"', - 'allow command "service status"' - ]), - ('osd', ['profile rbd']), - ('mgr', ['allow r']), -]) - - -def get_rbd_mirror_key(name): - return get_named_key(name=name, caps=rbd_mirror_caps) - - -def create_named_keyring(entity, name, caps=None): - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', '{entity}.{name}'.format(entity=entity, - name=name), - ] - for subsystem, subcaps in caps.items(): - cmd.extend([subsystem, '; '.join(subcaps)]) - log("Calling check_output: {}".format(cmd), level=DEBUG) - return (parse_key(str(subprocess - .check_output(cmd) - .decode('UTF-8')) - .strip())) # IGNORE:E1103 - - -def get_upgrade_key(): - return get_named_key('upgrade-osd', _upgrade_caps) - - -def get_named_key(name, caps=None, pool_list=None): - """Retrieve a specific named cephx key. - - :param name: String Name of key to get. - :param pool_list: The list of pools to give access to - :param caps: dict of cephx capabilities - :returns: Returns a cephx key - """ - key_name = 'client.{}'.format(name) - try: - # Does the key already exist? - output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', - 'get', - key_name, - ]).decode('UTF-8')).strip() - return parse_key(output) - except subprocess.CalledProcessError: - # Couldn't get the key, time to create it! 
- log("Creating new key for {}".format(name), level=DEBUG) - caps = caps or _default_caps - cmd = [ - "sudo", - "-u", - ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get-or-create', key_name, - ] - # Add capabilities - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - - log("Calling check_output: {}".format(cmd), level=DEBUG) - return parse_key(str(subprocess - .check_output(cmd) - .decode('UTF-8')) - .strip()) # IGNORE:E1103 - - -def upgrade_key_caps(key, caps, pool_list=None): - """Upgrade key to have capabilities caps""" - if not is_leader(): - # Not the MON leader OR not clustered - return - cmd = [ - "sudo", "-u", ceph_user(), 'ceph', 'auth', 'caps', key - ] - for subsystem, subcaps in caps.items(): - if subsystem == 'osd': - if pool_list: - # This will output a string similar to: - # "pool=rgw pool=rbd pool=something" - pools = " ".join(['pool={0}'.format(i) for i in pool_list]) - subcaps[0] = subcaps[0] + " " + pools - cmd.extend([subsystem, '; '.join(subcaps)]) - subprocess.check_call(cmd) - - -@cached -def systemd(): - return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid' - - -def use_bluestore(): - """Determine whether bluestore should be used for OSD's - - :returns: whether bluestore disk format should be used - :rtype: bool""" - if cmp_pkgrevno('ceph', '12.2.0') < 0: - return False - return config('bluestore') - - -def bootstrap_monitor_cluster(secret): - """Bootstrap local Ceph mon into the Ceph cluster - - :param secret: cephx secret to use for monitor authentication - :type secret: str - :raises: Exception if Ceph mon cannot be bootstrapped - """ - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - done = '{}/done'.format(path) - if systemd(): - init_marker = '{}/systemd'.format(path) - else: - init_marker = '{}/upstart'.format(path) - - keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname) - - if os.path.exists(done): - log('bootstrap_monitor_cluster: mon already initialized.') - else: - # Ceph >= 0.61.3 needs this for ceph-mon fs creation - mkdir('/var/run/ceph', owner=ceph_user(), - group=ceph_user(), perms=0o755) - mkdir(path, owner=ceph_user(), group=ceph_user(), - perms=0o755) - # end changes for Ceph >= 0.61.3 - try: - _create_monitor(keyring, - secret, - hostname, - path, - done, - init_marker) - except Exception: - raise - finally: - os.unlink(keyring) - - -def _create_monitor(keyring, secret, hostname, path, done, init_marker): - """Create monitor filesystem and enable and start ceph-mon process - - :param keyring: path to temporary keyring on disk - :type keyring: str - :param secret: cephx secret to use for monitor authentication - :type: secret: str - :param hostname: hostname of the local unit - :type hostname: str - :param path: full path to Ceph mon directory - :type path: str - :param done: full path to 'done' marker for Ceph mon - :type done: str - :param init_marker: full path to 'init' marker for Ceph mon - :type init_marker: str - """ - subprocess.check_call(['ceph-authtool', keyring, - '--create-keyring', '--name=mon.', - '--add-key={}'.format(secret), - '--cap', 'mon', 'allow *']) - subprocess.check_call(['ceph-mon', '--mkfs', - '-i', 
hostname, - '--keyring', keyring]) - chownr('/var/log/ceph', ceph_user(), ceph_user()) - chownr(path, ceph_user(), ceph_user()) - with open(done, 'w'): - pass - with open(init_marker, 'w'): - pass - - if systemd(): - if cmp_pkgrevno('ceph', '14.0.0') >= 0: - systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) - else: - systemd_unit = 'ceph-mon' - subprocess.check_call(['systemctl', 'enable', systemd_unit]) - service_restart(systemd_unit) - else: - service_restart('ceph-mon-all') - - -def create_keyrings(): - """Create keyrings for operation of ceph-mon units - - NOTE: The quorum should be done before to execute this function. - - :raises: Exception if keyrings cannot be created - """ - if cmp_pkgrevno('ceph', '14.0.0') >= 0: - # NOTE(jamespage): At Nautilus, keys are created by the - # monitors automatically and just need - # exporting. - output = str(subprocess.check_output( - [ - 'sudo', - '-u', ceph_user(), - 'ceph', - '--name', 'mon.', - '--keyring', - '/var/lib/ceph/mon/ceph-{}/keyring'.format( - socket.gethostname() - ), - 'auth', 'get', 'client.admin', - ]).decode('UTF-8')).strip() - if not output: - # NOTE: key not yet created, raise exception and retry - raise Exception - # NOTE: octopus wants newline at end of file LP: #1864706 - output += '\n' - write_file(_client_admin_keyring, output, - owner=ceph_user(), group=ceph_user(), - perms=0o400) - else: - # NOTE(jamespage): Later Ceph releases require explicit - # call to ceph-create-keys to setup the - # admin keys for the cluster; this command - # will wait for quorum in the cluster before - # returning. - # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # Ceph releases too. This improves bootstrap - # resilience as the charm will wait for - # presence of peer units before attempting - # to bootstrap. Note that charms deploying - # ceph-mon service should disable running of - # `ceph-create-keys` service in init system. - cmd = ['ceph-create-keys', '--id', socket.gethostname()] - if cmp_pkgrevno('ceph', '12.0.0') >= 0: - # NOTE(fnordahl): The default timeout in ceph-create-keys of 600 - # seconds is not adequate. Increase timeout when - # timeout parameter available. For older releases - # we rely on retry_on_exception decorator. - # LP#1719436 - cmd.extend(['--timeout', '1800']) - subprocess.check_call(cmd) - osstat = os.stat(_client_admin_keyring) - if not osstat.st_size: - # NOTE(fnordahl): Retry will fail as long as this file exists. - # LP#1719436 - os.remove(_client_admin_keyring) - raise Exception - - -def update_monfs(): - hostname = socket.gethostname() - monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - if systemd(): - init_marker = '{}/systemd'.format(monfs) - else: - init_marker = '{}/upstart'.format(monfs) - if os.path.exists(monfs) and not os.path.exists(init_marker): - # Mark mon as managed by upstart so that - # it gets start correctly on reboots - with open(init_marker, 'w'): - pass - - -def get_partitions(dev): - cmd = ['partx', '--raw', '--noheadings', dev] - try: - out = str(subprocess.check_output(cmd).decode('UTF-8')).splitlines() - log("get partitions: {}".format(out), level=DEBUG) - return out - except subprocess.CalledProcessError as e: - log("Can't get info for {0}: {1}".format(dev, e.output)) - return [] - - -def get_lvs(dev): - """ - List logical volumes for the provided block device - - :param: dev: Full path to block device. - :raises subprocess.CalledProcessError: in the event that any supporting - operation failed. 
- :returns: list: List of logical volumes provided by the block device - """ - if not lvm.is_lvm_physical_volume(dev): - return [] - vg_name = lvm.list_lvm_volume_group(dev) - return lvm.list_logical_volumes('vg_name={}'.format(vg_name)) - - -def find_least_used_utility_device(utility_devices, lvs=False): - """ - Find a utility device which has the smallest number of partitions - among other devices in the supplied list. - - :utility_devices: A list of devices to be used for filestore journal - or bluestore wal or db. - :lvs: flag to indicate whether inspection should be based on LVM LV's - :return: string device name - """ - if lvs: - usages = map(lambda a: (len(get_lvs(a)), a), utility_devices) - else: - usages = map(lambda a: (len(get_partitions(a)), a), utility_devices) - least = min(usages, key=lambda t: t[0]) - return least[1] - - -def get_devices(name): - """Merge config and Juju storage based devices - - :name: The name of the device type, e.g.: wal, osd, journal - :returns: Set(device names), which are strings - """ - if config(name): - devices = [dev.strip() for dev in config(name).split(' ')] - else: - devices = [] - storage_ids = storage_list(name) - devices.extend((storage_get('location', sid) for sid in storage_ids)) - devices = filter(os.path.exists, devices) - - return set(devices) - - -def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): - if dev.startswith('/dev'): - osdize_dev(dev, osd_format, osd_journal, - ignore_errors, encrypt, - bluestore, key_manager, osd_id) - else: - if cmp_pkgrevno('ceph', '14.0.0') >= 0: - log("Directory backed OSDs can not be created on Nautilus", - level=WARNING) - return - osdize_dir(dev, encrypt, bluestore) - - -def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, - osd_id=None): - """ - Prepare a block device for use as a Ceph OSD - - A block device will only be prepared once during the lifetime - of the calling charm unit; future executions will be skipped. 
- - :param: dev: Full path to block device to use - :param: osd_format: Format for OSD filesystem - :param: osd_journal: List of block devices to use for OSD journals - :param: ignore_errors: Don't fail in the event of any errors during - processing - :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native Ceph block device format - :param: key_manager: Key management approach for encryption keys - :raises subprocess.CalledProcessError: in the event that any supporting - subprocess operation failed - :raises ValueError: if an invalid key_manager is provided - """ - if key_manager not in KEY_MANAGERS: - raise ValueError('Unsupported key manager: {}'.format(key_manager)) - - db = kv() - osd_devices = db.get('osd-devices', []) - try: - if dev in osd_devices: - log('Device {} already processed by charm,' - ' skipping'.format(dev)) - return - - if not os.path.exists(dev): - log('Path {} does not exist - bailing'.format(dev)) - return - - if not is_block_device(dev): - log('Path {} is not a block device - bailing'.format(dev)) - return - - if is_osd_disk(dev): - log('Looks like {} is already an' - ' OSD data or journal, skipping.'.format(dev)) - if is_device_mounted(dev): - osd_devices.append(dev) - return - - if is_device_mounted(dev): - log('Looks like {} is in use, skipping.'.format(dev)) - return - - if is_active_bluestore_device(dev): - log('{} is in use as an active bluestore block device,' - ' skipping.'.format(dev)) - osd_devices.append(dev) - return - - if is_mapped_luks_device(dev): - log('{} is a mapped LUKS device,' - ' skipping.'.format(dev)) - return - - if cmp_pkgrevno('ceph', '12.2.4') >= 0: - cmd = _ceph_volume(dev, - osd_journal, - encrypt, - bluestore, - key_manager, - osd_id) - else: - cmd = _ceph_disk(dev, - osd_format, - osd_journal, - encrypt, - bluestore) - - try: - status_set('maintenance', 'Initializing device {}'.format(dev)) - log("osdize cmd: {}".format(cmd)) - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - try: - lsblk_output = subprocess.check_output( - ['lsblk', '-P']).decode('UTF-8') - except subprocess.CalledProcessError as e: - log("Couldn't get lsblk output: {}".format(e), ERROR) - if ignore_errors: - log('Unable to initialize device: {}'.format(dev), WARNING) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), DEBUG) - else: - log('Unable to initialize device: {}'.format(dev), ERROR) - if lsblk_output: - log('lsblk output: {}'.format(lsblk_output), WARNING) - raise - - # NOTE: Record processing of device only on success to ensure that - # the charm only tries to initialize a device of OSD usage - # once during its lifetime. - osd_devices.append(dev) - finally: - db.set('osd-devices', osd_devices) - db.flush() - - -def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): - """ - Prepare a device for usage as a Ceph OSD using ceph-disk - - :param: dev: Full path to use for OSD block device setup, - The function looks up realpath of the device - :param: osd_journal: List of block devices to use for OSD journals - :param: encrypt: Use block device encryption (unsupported) - :param: bluestore: Use bluestore storage for OSD - :returns: list. 
'ceph-disk' command and required parameters for - execution by check_call - """ - cmd = ['ceph-disk', 'prepare'] - - if encrypt: - cmd.append('--dmcrypt') - - if osd_format and not bluestore: - cmd.append('--fs-type') - cmd.append(osd_format) - - # NOTE(jamespage): enable experimental bluestore support - if use_bluestore(): - cmd.append('--bluestore') - wal = get_devices('bluestore-wal') - if wal: - cmd.append('--block.wal') - least_used_wal = find_least_used_utility_device(wal) - cmd.append(least_used_wal) - db = get_devices('bluestore-db') - if db: - cmd.append('--block.db') - least_used_db = find_least_used_utility_device(db) - cmd.append(least_used_db) - elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore: - cmd.append('--filestore') - - cmd.append(os.path.realpath(dev)) - - if osd_journal: - least_used = find_least_used_utility_device(osd_journal) - cmd.append(least_used) - - return cmd - - -def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER, osd_id=None): - """ - Prepare and activate a device for usage as a Ceph OSD using ceph-volume. - - This also includes creation of all PV's, VG's and LV's required to - support the initialization of the OSD. - - :param: dev: Full path to use for OSD block device setup - :param: osd_journal: List of block devices to use for OSD journals - :param: encrypt: Use block device encryption - :param: bluestore: Use bluestore storage for OSD - :param: key_manager: dm-crypt Key Manager to use - :param: osd_id: The OSD-id to recycle, or None to create a new one - :raises subprocess.CalledProcessError: in the event that any supporting - LVM operation failed. - :returns: list. 'ceph-volume' command and required parameters for - execution by check_call - """ - cmd = ['ceph-volume', 'lvm', 'create'] - - osd_fsid = str(uuid.uuid4()) - cmd.append('--osd-fsid') - cmd.append(osd_fsid) - - if bluestore: - cmd.append('--bluestore') - main_device_type = 'block' - else: - cmd.append('--filestore') - main_device_type = 'data' - - if encrypt and key_manager == CEPH_KEY_MANAGER: - cmd.append('--dmcrypt') - - if osd_id is not None: - cmd.extend(['--osd-id', str(osd_id)]) - - # On-disk journal volume creation - if not osd_journal and not bluestore: - journal_lv_type = 'journal' - cmd.append('--journal') - cmd.append(_allocate_logical_volume( - dev=dev, - lv_type=journal_lv_type, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - encrypt=encrypt, - key_manager=key_manager) - ) - - cmd.append('--data') - cmd.append(_allocate_logical_volume(dev=dev, - lv_type=main_device_type, - osd_fsid=osd_fsid, - encrypt=encrypt, - key_manager=key_manager)) - - if bluestore: - for extra_volume in ('wal', 'db'): - devices = get_devices('bluestore-{}'.format(extra_volume)) - if devices: - cmd.append('--block.{}'.format(extra_volume)) - least_used = find_least_used_utility_device(devices, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type=extra_volume, - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size(extra_volume)), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - elif osd_journal: - cmd.append('--journal') - least_used = find_least_used_utility_device(osd_journal, - lvs=True) - cmd.append(_allocate_logical_volume( - dev=least_used, - lv_type='journal', - osd_fsid=osd_fsid, - size='{}M'.format(calculate_volume_size('journal')), - shared=True, - encrypt=encrypt, - key_manager=key_manager) - ) - - return cmd - - -def _partition_name(dev): - """ - Derive the 
first partition name for a block device - - :param: dev: Full path to block device. - :returns: str: Full path to first partition on block device. - """ - if dev[-1].isdigit(): - return '{}p1'.format(dev) - else: - return '{}1'.format(dev) - - -def is_active_bluestore_device(dev): - """ - Determine whether provided device is part of an active - bluestore based OSD (as its block component). - - :param: dev: Full path to block device to check for Bluestore usage. - :returns: boolean: indicating whether device is in active use. - """ - if not lvm.is_lvm_physical_volume(dev): - return False - - vg_name = lvm.list_lvm_volume_group(dev) - try: - lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0] - except IndexError: - return False - - block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block') - for block_candidate in block_symlinks: - if os.path.islink(block_candidate): - target = os.readlink(block_candidate) - if target.endswith(lv_name): - return True - - return False - - -def is_luks_device(dev): - """ - Determine if dev is a LUKS-formatted block device. - - :param: dev: A full path to a block device to check for LUKS header - presence - :returns: boolean: indicates whether a device is used based on LUKS header. - """ - return True if _luks_uuid(dev) else False - - -def is_mapped_luks_device(dev): - """ - Determine if dev is a mapped LUKS device - :param: dev: A full path to a block device to be checked - :returns: boolean: indicates whether a device is mapped - """ - _, dirs, _ = next(os.walk( - '/sys/class/block/{}/holders/' - .format(os.path.basename(os.path.realpath(dev)))) - ) - is_held = len(dirs) > 0 - return is_held and is_luks_device(dev) - - -def get_conf(variable): - """ - Get the value of the given configuration variable from the - cluster. - - :param variable: Ceph configuration variable - :returns: str. configured value for provided variable - - """ - return subprocess.check_output([ - 'ceph-osd', - '--show-config-value={}'.format(variable), - '--no-mon-config', - ]).strip() - - -def calculate_volume_size(lv_type): - """ - Determine the configured size for Bluestore DB/WAL or - Filestore Journal devices - - :param lv_type: volume type (db, wal or journal) - :raises KeyError: if invalid lv_type is supplied - :returns: int. Configured size in megabytes for volume type - """ - # lv_type -> Ceph configuration option - _config_map = { - 'db': 'bluestore_block_db_size', - 'wal': 'bluestore_block_wal_size', - 'journal': 'osd_journal_size', - } - - # default sizes in MB - _default_size = { - 'db': 1024, - 'wal': 576, - 'journal': 1024, - } - - # conversion of Ceph config units to MB - _units = { - 'db': 1048576, # Bytes -> MB - 'wal': 1048576, # Bytes -> MB - 'journal': 1, # Already in MB - } - - configured_size = get_conf(_config_map[lv_type]) - - if configured_size is None or int(configured_size) == 0: - return _default_size[lv_type] - else: - return int(configured_size) / _units[lv_type] - - -def _luks_uuid(dev): - """ - Check to see if dev is a LUKS encrypted volume, returning the UUID - of volume if it is. - - :param: dev: path to block device to check. - :returns: str. UUID of LUKS device or None if not a LUKS device - """ - try: - cmd = ['cryptsetup', 'luksUUID', dev] - return subprocess.check_output(cmd).decode('UTF-8').strip() - except subprocess.CalledProcessError: - return None - - -def _initialize_disk(dev, dev_uuid, encrypt=False, - key_manager=CEPH_KEY_MANAGER): - """ - Initialize a raw block device consuming 100% of the available - disk space. 
- - Function assumes that block device has already been wiped. - - :param: dev: path to block device to initialize - :param: dev_uuid: UUID to use for any dm-crypt operations - :param: encrypt: Encrypt OSD devices using dm-crypt - :param: key_manager: Key management approach for dm-crypt keys - :raises: subprocess.CalledProcessError: if any parted calls fail - :returns: str: Full path to new partition. - """ - use_vaultlocker = encrypt and key_manager == VAULT_KEY_MANAGER - - if use_vaultlocker: - # NOTE(jamespage): Check to see if already initialized as a LUKS - # volume, which indicates this is a shared block - # device for journal, db or wal volumes. - luks_uuid = _luks_uuid(dev) - if luks_uuid: - return '/dev/mapper/crypt-{}'.format(luks_uuid) - - dm_crypt = '/dev/mapper/crypt-{}'.format(dev_uuid) - - if use_vaultlocker and not os.path.exists(dm_crypt): - subprocess.check_call([ - 'vaultlocker', - 'encrypt', - '--uuid', dev_uuid, - dev, - ]) - subprocess.check_call([ - 'dd', - 'if=/dev/zero', - 'of={}'.format(dm_crypt), - 'bs=512', - 'count=1', - ]) - - if use_vaultlocker: - return dm_crypt - else: - return dev - - -def _allocate_logical_volume(dev, lv_type, osd_fsid, - size=None, shared=False, - encrypt=False, - key_manager=CEPH_KEY_MANAGER): - """ - Allocate a logical volume from a block device, ensuring any - required initialization and setup of PV's and VG's to support - the LV. - - :param: dev: path to block device to allocate from. - :param: lv_type: logical volume type to create - (data, block, journal, wal, db) - :param: osd_fsid: UUID of the OSD associate with the LV - :param: size: Size in LVM format for the device; - if unset 100% of VG - :param: shared: Shared volume group (journal, wal, db) - :param: encrypt: Encrypt OSD devices using dm-crypt - :param: key_manager: dm-crypt Key Manager to use - :raises subprocess.CalledProcessError: in the event that any supporting - LVM or parted operation fails. - :returns: str: String in the format 'vg_name/lv_name'. - """ - lv_name = "osd-{}-{}".format(lv_type, osd_fsid) - current_volumes = lvm.list_logical_volumes() - if shared: - dev_uuid = str(uuid.uuid4()) - else: - dev_uuid = osd_fsid - pv_dev = _initialize_disk(dev, dev_uuid, encrypt, key_manager) - - vg_name = None - if not lvm.is_lvm_physical_volume(pv_dev): - lvm.create_lvm_physical_volume(pv_dev) - if not os.path.exists(pv_dev): - # NOTE: trigger rescan to work around bug 1878752 - rescan_osd_devices() - if shared: - vg_name = 'ceph-{}-{}'.format(lv_type, - str(uuid.uuid4())) - else: - vg_name = 'ceph-{}'.format(osd_fsid) - lvm.create_lvm_volume_group(vg_name, pv_dev) - else: - vg_name = lvm.list_lvm_volume_group(pv_dev) - - if lv_name not in current_volumes: - lvm.create_logical_volume(lv_name, vg_name, size) - - return "{}/{}".format(vg_name, lv_name) - - -def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an OSD. - - :param path: str. The directory to osdize - :param encrypt: bool. 
Should the OSD directory be encrypted at rest
-    :param bluestore: bool. Use the bluestore storage backend for the OSD
-    :returns: None
-    """
-
-    db = kv()
-    osd_devices = db.get('osd-devices', [])
-    if path in osd_devices:
-        log('Device {} already processed by charm,'
-            ' skipping'.format(path))
-        return
-
-    for t in ['upstart', 'systemd']:
-        if os.path.exists(os.path.join(path, t)):
-            log('Path {} is already used as an OSD dir - bailing'.format(path))
-            return
-
-    if cmp_pkgrevno('ceph', "0.56.6") < 0:
-        log('Unable to use directories for OSDs with ceph < 0.56.6',
-            level=ERROR)
-        return
-
-    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
-    chownr('/var/lib/ceph', ceph_user(), ceph_user())
-    cmd = [
-        'sudo', '-u', ceph_user(),
-        'ceph-disk',
-        'prepare',
-        '--data-dir',
-        path
-    ]
-    if cmp_pkgrevno('ceph', '0.60') >= 0:
-        if encrypt:
-            cmd.append('--dmcrypt')
-
-    # NOTE(icey): enable experimental bluestore support
-    if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
-        cmd.append('--bluestore')
-    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
-        cmd.append('--filestore')
-    log("osdize dir cmd: {}".format(cmd))
-    subprocess.check_call(cmd)
-
-    # NOTE: Record processing of device only on success to ensure that
-    #       the charm only tries to initialize a device for OSD usage
-    #       once during its lifetime.
-    osd_devices.append(path)
-    db.set('osd-devices', osd_devices)
-    db.flush()
-
-
-def filesystem_mounted(fs):
-    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
-
-
-def get_running_osds():
-    """Returns a list of the pids of the current running OSD daemons"""
-    cmd = ['pgrep', 'ceph-osd']
-    try:
-        result = str(subprocess.check_output(cmd).decode('UTF-8'))
-        return result.split()
-    except subprocess.CalledProcessError:
-        return []
-
-
-def get_cephfs(service):
-    """List the Ceph Filesystems that exist.
-
-    :param service: The service name to run the Ceph command under
-    :returns: list. Returns a list of the Ceph filesystems
-    """
-    if get_version() < 0.86:
-        # This command wasn't introduced until 0.86 Ceph
-        return []
-    try:
-        output = str(subprocess
-                     .check_output(["ceph", '--id', service, "fs", "ls"])
-                     .decode('UTF-8'))
-        if not output:
-            return []
-        """
-        Example subprocess output:
-        'name: ip-172-31-23-165, metadata pool: ip-172-31-23-165_metadata,
-        data pools: [ip-172-31-23-165_data ]\n'
-        output: filesystems: ['ip-172-31-23-165']
-        """
-        filesystems = []
-        for line in output.splitlines():
-            parts = line.split(',')
-            for part in parts:
-                if "name" in part:
-                    filesystems.append(part.split(' ')[1])
-        return filesystems
-    except subprocess.CalledProcessError:
-        return []
-
-
-def wait_for_all_monitors_to_upgrade(new_version, upgrade_key):
-    """Wait for all monitors in the cluster to finish upgrading, raising
-    an error if the ten minute timeout expires first.
-
-    :param new_version: str of the version to watch
-    :param upgrade_key: the cephx key name to use
-    """
-    done = False
-    start_time = time.time()
-    monitor_list = []
-
-    mon_map = get_mon_map('admin')
-    if mon_map['monmap']['mons']:
-        for mon in mon_map['monmap']['mons']:
-            monitor_list.append(mon['name'])
-    while not done:
-        try:
-            done = all(monitor_key_exists(upgrade_key, "{}_{}_{}_done".format(
-                "mon", mon, new_version
-            )) for mon in monitor_list)
-            if not done:
-                if time.time() > (start_time + 10 * 60):
-                    raise RuntimeError(
-                        'Timed out waiting for all monitors to upgrade '
-                        'to {}'.format(new_version))
-                # Wait 30 seconds and test again if all monitors are upgraded
-                time.sleep(30)
-        except subprocess.CalledProcessError:
-            raise
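
The parsing in get_cephfs above is easier to check in isolation. Below is a
minimal, self-contained sketch of the same approach; the sample line mirrors
the example in the comment above, and the function name is illustrative
rather than part of the charm.

def parse_fs_ls(output):
    # Each line of 'ceph fs ls' output looks like:
    #   name: myfs, metadata pool: myfs_metadata, data pools: [myfs_data ]
    # The filesystem name is the second token of the 'name:' field.
    filesystems = []
    for line in output.splitlines():
        for part in line.split(','):
            if 'name' in part:
                filesystems.append(part.split(' ')[1])
    return filesystems

assert parse_fs_ls('name: ip-172-31-23-165, '
                   'metadata pool: ip-172-31-23-165_metadata, '
                   'data pools: [ip-172-31-23-165_data ]\n'
                   ) == ['ip-172-31-23-165']

-
-
-# Edge cases:
-# 1.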
Previous node dies on upgrade, can we retry? -def roll_monitor_cluster(new_version, upgrade_key): - """This is tricky to get right so here's what we're going to do. - - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous monitor is upgraded yet. - - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - """ - log('roll_monitor_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - monitor_list = [] - mon_map = get_mon_map('admin') - if mon_map['monmap']['mons']: - for mon in mon_map['monmap']['mons']: - monitor_list.append(mon['name']) - else: - status_set('blocked', 'Unable to get monitor cluster information') - sys.exit(1) - log('monitor_list: {}'.format(monitor_list)) - - # A sorted list of OSD unit names - mon_sorted_list = sorted(monitor_list) - - # Install packages immediately but defer restarts to when it's our time. - upgrade_monitor(new_version, restart_daemons=False) - try: - position = mon_sorted_list.index(my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('waiting', - 'Waiting on {} to finish upgrading'.format( - mon_sorted_list[position - 1])) - wait_on_previous_node(upgrade_key=upgrade_key, - service='mon', - previous_node=mon_sorted_list[position - 1], - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='mon', - my_name=my_name, - version=new_version) - # NOTE(jamespage): - # Wait until all monitors have upgraded before bootstrapping - # the ceph-mgr daemons due to use of new mgr keyring profiles - if new_version == 'luminous': - wait_for_all_monitors_to_upgrade(new_version=new_version, - upgrade_key=upgrade_key) - bootstrap_manager() - - # NOTE(jmcvaughn): - # Nautilus and later binaries use msgr2 by default, but existing - # clusters that have been upgraded from pre-Nautilus will not - # automatically have msgr2 enabled. Without this, Ceph will show - # a warning only (with no impact to operations), but newly added units - # will not be able to join the cluster. Therefore, we ensure it is - # enabled on upgrade for all versions including and after Nautilus - # (to cater for previous charm versions that will not have done this). - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 - if nautilus_or_later: - wait_for_all_monitors_to_upgrade(new_version=new_version, - upgrade_key=upgrade_key) - enable_msgr2() - except ValueError: - log("Failed to find {} in list {}.".format( - my_name, mon_sorted_list)) - status_set('blocked', 'failed to upgrade monitor') - - -# For E731 we can't assign a lambda, therefore, instead pass this. -def noop(): - pass - - -def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current Ceph monitor to the new version - - :param new_version: String version to upgrade to. 
- """ - if kick_function is None: - kick_function = noop - current_version = get_version() - status_set("maintenance", "Upgrading monitor") - log("Current Ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - # Needed to determine if whether to stop/start ceph-mgr - luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - # Needed to differentiate between systemd unit names - nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 - kick_function() - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the Ceph source failed with message: {}".format( - err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - kick_function() - - try: - apt_install(packages=determine_packages(), fatal=True) - rm_packages = determine_packages_to_remove() - if rm_packages: - apt_purge(packages=rm_packages, fatal=True) - except subprocess.CalledProcessError as err: - log("Upgrading packages failed " - "with message: {}".format(err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - if not restart_daemons: - log("Packages upgraded but not restarting daemons yet.") - return - - try: - if systemd(): - if nautilus_or_later: - systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) - else: - systemd_unit = 'ceph-mon' - service_stop(systemd_unit) - log("restarting ceph-mgr.target maybe: {}" - .format(luminous_or_later)) - if luminous_or_later: - service_stop('ceph-mgr.target') - else: - service_stop('ceph-mon-all') - - kick_function() - - owner = ceph_user() - - # Ensure the files and directories under /var/lib/ceph is chowned - # properly as part of the move to the Jewel release, which moved the - # ceph daemons to running as ceph:ceph instead of root:root. - if new_version == 'jewel': - # Ensure the ownership of Ceph's directories is correct - chownr(path=os.path.join(os.sep, "var", "lib", "ceph"), - owner=owner, - group=owner, - follow_links=True) - - kick_function() - - # Ensure that mon directory is user writable - hostname = socket.gethostname() - path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) - mkdir(path, owner=ceph_user(), group=ceph_user(), - perms=0o755) - - if systemd(): - if nautilus_or_later: - systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) - else: - systemd_unit = 'ceph-mon' - service_restart(systemd_unit) - log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) - if luminous_or_later: - # due to BUG: #1849874 we have to force a restart to get it to - # drop the previous version of ceph-manager and start the new - # one. - service_restart('ceph-mgr.target') - else: - service_start('ceph-mon-all') - except subprocess.CalledProcessError as err: - log("Stopping ceph and upgrading packages failed " - "with message: {}".format(err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the Ceph monitor cluster and upgrade. - - :param upgrade_key: str. The cephx key to use - :param service: str. The cephx id to use - :param my_name: str. The current hostname - :param version: str. 
The version we are upgrading to - """ - start_timestamp = time.time() - - log('monitor_key_set {}_{}_{}_start {}'.format( - service, - my_name, - version, - start_timestamp)) - monitor_key_set(upgrade_key, "{}_{}_{}_start".format( - service, my_name, version), start_timestamp) - - # alive indication: - alive_function = ( - lambda: monitor_key_set( - upgrade_key, "{}_{}_{}_alive" - .format(service, my_name, version), time.time())) - dog = WatchDog(kick_interval=3 * 60, - kick_function=alive_function) - - log("Rolling") - - # This should be quick - if service == 'osd': - upgrade_osd(version, kick_function=dog.kick_the_dog) - elif service == 'mon': - upgrade_monitor(version, kick_function=dog.kick_the_dog) - else: - log("Unknown service {}. Unable to upgrade".format(service), - level=ERROR) - log("Done") - - stop_timestamp = time.time() - # Set a key to inform others I am finished - log('monitor_key_set {}_{}_{}_done {}'.format(service, - my_name, - version, - stop_timestamp)) - status_set('maintenance', 'Finishing upgrade') - monitor_key_set(upgrade_key, "{}_{}_{}_done".format(service, - my_name, - version), - stop_timestamp) - - -def wait_on_previous_node(upgrade_key, service, previous_node, version): - """A lock that sleeps the current thread while waiting for the previous - node to finish upgrading. - - :param upgrade_key: - :param service: str. the cephx id to use - :param previous_node: str. The name of the previous node to wait on - :param version: str. The version we are upgrading to - :returns: None - """ - log("Previous node is: {}".format(previous_node)) - - previous_node_started_f = ( - lambda: monitor_key_exists( - upgrade_key, - "{}_{}_{}_start".format(service, previous_node, version))) - previous_node_finished_f = ( - lambda: monitor_key_exists( - upgrade_key, - "{}_{}_{}_done".format(service, previous_node, version))) - previous_node_alive_time_f = ( - lambda: monitor_key_get( - upgrade_key, - "{}_{}_{}_alive".format(service, previous_node, version))) - - # wait for 30 minutes until the previous node starts. We don't proceed - # unless we get a start condition. - try: - WatchDog.wait_until(previous_node_started_f, timeout=30 * 60) - except WatchDog.WatchDogTimeoutException: - log("Waited for previous node to start for 30 minutes. " - "It didn't start, so may have a serious issue. Continuing with " - "upgrade of this node.", - level=WARNING) - return - - # keep the time it started from this nodes' perspective. - previous_node_started_at = time.time() - log("Detected that previous node {} has started. Time now: {}" - .format(previous_node, previous_node_started_at)) - - # Now wait for the node to complete. The node may optionally be kicking - # with the *_alive key, which allows this node to wait longer as it 'knows' - # the other node is proceeding. - try: - WatchDog.timed_wait(kicked_at_function=previous_node_alive_time_f, - complete_function=previous_node_finished_f, - wait_time=30 * 60, - compatibility_wait_time=10 * 60, - max_kick_interval=5 * 60) - except WatchDog.WatchDogDeadException: - # previous node was kicking, but timed out; log this condition and move - # on. - now = time.time() - waited = int((now - previous_node_started_at) / 60) - log("Previous node started, but has now not ticked for 5 minutes. " - "Waited total of {} mins on node {}. current time: {} > " - "previous node start time: {}. " - "Continuing with upgrade of this node." 
-            .format(waited, previous_node, now, previous_node_started_at),
-            level=WARNING)
-    except WatchDog.WatchDogTimeoutException:
-        # previous node never kicked, or simply took too long; log this
-        # condition and move on.
-        now = time.time()
-        waited = int((now - previous_node_started_at) / 60)
-        log("Previous node is taking too long; assuming it has died. "
-            "Waited {} mins on node {}. current time: {} > "
-            "previous node start time: {}. "
-            "Continuing with upgrade of this node."
-            .format(waited, previous_node, now, previous_node_started_at),
-            level=WARNING)
-
-
-class WatchDog(object):
-    """Watch a dog; basically a kickable timer with a timeout between two
-    async units.
-
-    The idea is that you have an overall timeout and then can kick that
-    timeout with intermediary hits, with a max time between those kicks
-    allowed.
-
-    Note that this watchdog doesn't rely on the clock of the other side;
-    only on roughly when it detected that the other side started. All
-    timings are based on the local clock.
-
-    The kicker will not 'kick' more often than a set interval, regardless of
-    how often the kick_the_dog() function is called. The kicker provides a
-    function (lambda: -> None) that is called when the kick interval is
-    reached.
-
-    The waiter calls the static method with a check function
-    (lambda: -> Boolean) that indicates when the wait should be over and the
-    maximum interval to wait. e.g. 30 minutes with a 5 minute kick interval.
-
-    So the waiter calls wait(f, 30, 3) and the kicker sets up a 3 minute kick
-    interval, or however long it is expected for the key to propagate and to
-    allow for other delays.
-
-    There is a compatibility mode where if the other side never kicks, then
-    it simply waits for the compatibility timer.
-    """
-
-    class WatchDogDeadException(Exception):
-        pass
-
-    class WatchDogTimeoutException(Exception):
-        pass
-
-    def __init__(self, kick_interval=3 * 60, kick_function=None):
-        """Initialise a new WatchDog
-
-        :param kick_interval: the interval, in seconds, at which this side
-            kicks the other.
-        :type kick_interval: int
-        :param kick_function: The function to call that does the kick.
-        :type kick_function: Callable[[], None]
-        """
-        self.start_time = time.time()
-        self.last_run_func = None
-        self.last_kick_at = None
-        self.kick_interval = kick_interval
-        self.kick_f = kick_function
-
-    def kick_the_dog(self):
-        """Might call the kick_function if it's time.
-
-        This function can be called as frequently as needed, but will only
-        run the kick function once kick_interval seconds have passed.
-        """
-        now = time.time()
-        if (self.last_run_func is None or
-                (now - self.last_run_func > self.kick_interval)):
-            if self.kick_f is not None:
-                self.kick_f()
-            self.last_run_func = now
-        self.last_kick_at = now
-
-    @staticmethod
-    def wait_until(wait_f, timeout=10 * 60):
-        """Wait for up to timeout seconds until the passed function
-        returns True.
-
-        :param wait_f: The function to call that will end the wait.
-        :type wait_f: Callable[[], Boolean]
-        :param timeout: The time to wait in seconds.
-        :type timeout: int
-        """
-        start_time = time.time()
-        while not wait_f():
-            now = time.time()
-            if now > start_time + timeout:
-                raise WatchDog.WatchDogTimeoutException()
-            wait_time = random.randrange(5, 30)
-            log('wait_until: waiting for {} seconds'.format(wait_time))
-            time.sleep(wait_time)
-
-    @staticmethod
-    def timed_wait(kicked_at_function,
-                   complete_function,
-                   wait_time=30 * 60,
-                   compatibility_wait_time=10 * 60,
-                   max_kick_interval=5 * 60):
-        """Wait a maximum time with an intermediate 'kick' time.
-
-        This function will wait for max_kick_interval seconds unless the
-        kicked_at_function() call returns a time that is not older than
-        max_kick_interval (in seconds). i.e. the other side can signal that it
-        is still doing things during the max_kick_interval as long as it kicks
-        at least every max_kick_interval seconds.
-
-        The maximum wait is "wait_time", but the other side must keep kicking
-        during this period.
-
-        The "compatibility_wait_time" is used if the other side never kicks
-        (i.e. the kicked_at_function() always returns None). In this case the
-        function waits up to "compatibility_wait_time".
-
-        Note that the type of the return from the kicked_at_function is an
-        Optional[str], not a float. The function will coerce this to a float
-        for the comparison. This represents the return value of
-        time.time() at the "other side". It's a string to simplify the
-        function obtaining the time value from the other side.
-
-        The function raises WatchDogTimeoutException if either the
-        compatibility_wait_time or the wait_time are exceeded.
-
-        The function raises WatchDogDeadException if the max_kick_interval is
-        exceeded.
-
-        Note that it is possible that the first kick interval is extended to
-        compatibility_wait_time if the "other side" doesn't kick immediately.
-        The best solution is for the other side to kick early and often.
-
-        :param kicked_at_function: The function to call to retrieve the time
-            that the other side 'kicked' at.  None if the other side hasn't
-            kicked.
-        :type kicked_at_function: Callable[[], Optional[str]]
-        :param complete_function: The callable that returns True when done.
-        :type complete_function: Callable[[], Boolean]
-        :param wait_time: the maximum time to wait, even with kicks, in
-            seconds.
-        :type wait_time: int
-        :param compatibility_wait_time: The time to wait if no kicks are
-            received, in seconds.
-        :type compatibility_wait_time: int
-        :param max_kick_interval: The maximum time allowed between kicks
-            before the wait is over, in seconds.
-        :type max_kick_interval: int
-        :raises: WatchDog.WatchDogTimeoutException,
-            WatchDog.WatchDogDeadException
-        """
-        start_time = time.time()
-        while True:
-            if complete_function():
-                break
-            # the time when the unit we are waiting on last kicked.
-            kicked_at = kicked_at_function()
-            now = time.time()
-            if kicked_at is None:
-                # assume other end doesn't do alive kicks
-                if (now - start_time > compatibility_wait_time):
-                    raise WatchDog.WatchDogTimeoutException()
-            else:
-                # other side is participating in kicks; must kick at least
-                # every 'max_kick_interval' to stay alive.
-                if (now - float(kicked_at) > max_kick_interval):
-                    raise WatchDog.WatchDogDeadException()
-                if (now - start_time > wait_time):
-                    raise WatchDog.WatchDogTimeoutException()
-            delay_time = random.randrange(5, 30)
-            log('waiting for {} seconds'.format(delay_time))
-            time.sleep(delay_time)
-
-
-def get_upgrade_position(osd_sorted_list, match_name):
-    """Return the upgrade position for the given OSD.
-
-    :param osd_sorted_list: OSDs sorted (objects with a 'name' attribute)
-    :type osd_sorted_list: list
-    :param match_name: The OSD name to match
-    :type match_name: str
-    :returns: The position of the name
-    :rtype: int
-    :raises: ValueError if name is not found
-    """
-    for index, item in enumerate(osd_sorted_list):
-        if item.name == match_name:
-            return index
-    raise ValueError("OSD name '{}' not found in get_upgrade_position list"
-                     .format(match_name))
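
The kicker/waiter split above is easier to follow with both halves side by
side. A condensed usage sketch under the same defaults; the dict stands in
for the monitor cluster's key/value store, and in the charm the waiting side
only returns once the other unit publishes its 'done' key.

import time

keys = {}  # stands in for monitor_key_set / monitor_key_get

# Upgrading side: kick at most every 3 minutes while doing real work.
dog = WatchDog(kick_interval=3 * 60,
               kick_function=lambda: keys.update(alive=str(time.time())))
# ... called freely inside long-running upgrade steps:
dog.kick_the_dog()

# Waiting side: block until the other unit is done, tolerating up to
# 5 minutes of silence between kicks and 30 minutes overall.
WatchDog.timed_wait(kicked_at_function=lambda: keys.get('alive'),
                    complete_function=lambda: 'done' in keys,
                    wait_time=30 * 60,
                    max_kick_interval=5 * 60)

-
-
-# Edge cases:
-# 1. Previous node dies on upgrade, can we retry?
-# 2. This assumes that the OSD failure domain is not set to OSD.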
-# It rolls an entire server at a time. -def roll_osd_cluster(new_version, upgrade_key): - """This is tricky to get right so here's what we're going to do. - - There's 2 possible cases: Either I'm first in line or not. - If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous OSD is upgraded yet. - - TODO: If you're not in the same failure domain it's safe to upgrade - 1. Examine all pools and adopt the most strict failure domain policy - Example: Pool 1: Failure domain = rack - Pool 2: Failure domain = host - Pool 3: Failure domain = row - - outcome: Failure domain = host - - :param new_version: str of the version to upgrade to - :param upgrade_key: the cephx key name to use when upgrading - """ - log('roll_osd_cluster called with {}'.format(new_version)) - my_name = socket.gethostname() - osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of OSD unit names - osd_sorted_list = sorted(osd_tree) - log("osd_sorted_list: {}".format(osd_sorted_list)) - - try: - position = get_upgrade_position(osd_sorted_list, my_name) - log("upgrade position: {}".format(position)) - if position == 0: - # I'm first! Roll - # First set a key to inform others I'm about to roll - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - else: - # Check if the previous node has finished - status_set('waiting', - 'Waiting on {} to finish upgrading'.format( - osd_sorted_list[position - 1].name)) - wait_on_previous_node( - upgrade_key=upgrade_key, - service='osd', - previous_node=osd_sorted_list[position - 1].name, - version=new_version) - lock_and_roll(upgrade_key=upgrade_key, - service='osd', - my_name=my_name, - version=new_version) - except ValueError: - log("Failed to find name {} in list {}".format( - my_name, osd_sorted_list)) - status_set('blocked', 'failed to upgrade osd') - - -def upgrade_osd(new_version, kick_function=None): - """Upgrades the current OSD - - :param new_version: str. The new version to upgrade to - """ - if kick_function is None: - kick_function = noop - - current_version = get_version() - status_set("maintenance", "Upgrading OSD") - log("Current Ceph version is {}".format(current_version)) - log("Upgrading to: {}".format(new_version)) - - try: - add_source(config('source'), config('key')) - apt_update(fatal=True) - except subprocess.CalledProcessError as err: - log("Adding the Ceph sources failed with message: {}".format( - err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - kick_function() - - try: - # Upgrade the packages before restarting the daemons. - status_set('maintenance', 'Upgrading packages to %s' % new_version) - apt_install(packages=determine_packages(), fatal=True) - kick_function() - - # If the upgrade does not need an ownership update of any of the - # directories in the OSD service directory, then simply restart - # all of the OSDs at the same time as this will be the fastest - # way to update the code on the node. - if not dirs_need_ownership_update('osd'): - log('Restarting all OSDs to load new binaries', DEBUG) - with maintain_all_osd_states(): - if systemd(): - service_restart('ceph-osd.target') - else: - service_restart('ceph-osd-all') - return - - # Need to change the ownership of all directories which are not OSD - # directories as well. - # TODO - this should probably be moved to the general upgrade function - # and done before mon/OSD. 
- update_owner(CEPH_BASE_DIR, recurse_dirs=False) - non_osd_dirs = filter(lambda x: not x == 'osd', - os.listdir(CEPH_BASE_DIR)) - non_osd_dirs = map(lambda x: os.path.join(CEPH_BASE_DIR, x), - non_osd_dirs) - for i, path in enumerate(non_osd_dirs): - if i % 100 == 0: - kick_function() - update_owner(path) - - # Fast service restart wasn't an option because each of the OSD - # directories need the ownership updated for all the files on - # the OSD. Walk through the OSDs one-by-one upgrading the OSD. - for osd_dir in _get_child_dirs(OSD_BASE_DIR): - kick_function() - try: - osd_num = _get_osd_num_from_dirname(osd_dir) - _upgrade_single_osd(osd_num, osd_dir) - except ValueError as ex: - # Directory could not be parsed - junk directory? - log('Could not parse OSD directory %s: %s' % (osd_dir, ex), - WARNING) - continue - - except (subprocess.CalledProcessError, IOError) as err: - log("Stopping Ceph and upgrading packages failed " - "with message: {}".format(err)) - status_set("blocked", "Upgrade to {} failed".format(new_version)) - sys.exit(1) - - -def _upgrade_single_osd(osd_num, osd_dir): - """Upgrades the single OSD directory. - - :param osd_num: the num of the OSD - :param osd_dir: the directory of the OSD to upgrade - :raises CalledProcessError: if an error occurs in a command issued as part - of the upgrade process - :raises IOError: if an error occurs reading/writing to a file as part - of the upgrade process - """ - with maintain_osd_state(osd_num): - stop_osd(osd_num) - disable_osd(osd_num) - update_owner(osd_dir) - enable_osd(osd_num) - start_osd(osd_num) - - -def stop_osd(osd_num): - """Stops the specified OSD number. - - :param osd_num: the OSD number to stop - """ - if systemd(): - service_stop('ceph-osd@{}'.format(osd_num)) - else: - service_stop('ceph-osd', id=osd_num) - - -def start_osd(osd_num): - """Starts the specified OSD number. - - :param osd_num: the OSD number to start. - """ - if systemd(): - service_start('ceph-osd@{}'.format(osd_num)) - else: - service_start('ceph-osd', id=osd_num) - - -def disable_osd(osd_num): - """Disables the specified OSD number. - - Ensures that the specified OSD will not be automatically started at the - next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified OSD cannot be - started manually. - - :param osd_num: the OSD id which should be disabled. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - to disable the OSD - :raises IOError, OSError: if the attempt to read/remove the ready file in - an upstart enabled system fails - """ - if systemd(): - # When running under systemd, the individual ceph-osd daemons run as - # templated units and can be directly addressed by referring to the - # templated service name ceph-osd@. Additionally, systemd - # allows one to disable a specific templated unit by running the - # 'systemctl disable ceph-osd@' command. When disabled, the - # OSD should remain disabled until re-enabled via systemd. - # Note: disabling an already disabled service in systemd returns 0, so - # no need to check whether it is enabled or not. - cmd = ['systemctl', 'disable', 'ceph-osd@{}'.format(osd_num)] - subprocess.check_call(cmd) - else: - # Neither upstart nor the ceph-osd upstart script provides for - # disabling the starting of an OSD automatically. 
The specific OSD
-        # cannot be prevented from running manually; however, it can be
-        # prevented from running automatically on reboot by removing the
-        # 'ready' file in the OSD's root directory. This is due to the
-        # ceph-osd-all upstart script checking for the presence of this file
-        # before starting the OSD.
-        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
-                                  'ready')
-        if os.path.exists(ready_file):
-            os.unlink(ready_file)
-
-
-def enable_osd(osd_num):
-    """Enables the specified OSD number.
-
-    Ensures that the specified osd_num will be enabled and ready to start
-    automatically in the event of a reboot.
-
-    :param osd_num: the osd id which should be enabled.
-    :raises CalledProcessError: if the call to the systemd command issued
-        fails when enabling the service
-    :raises IOError: if the attempt to write the ready file in an upstart
-        enabled system fails
-    """
-    if systemd():
-        cmd = ['systemctl', 'enable', 'ceph-osd@{}'.format(osd_num)]
-        subprocess.check_call(cmd)
-    else:
-        # When running on upstart, the OSDs are started via the ceph-osd-all
-        # upstart script which will only start the OSD if it has a 'ready'
-        # file. Make sure that file exists.
-        ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
-                                  'ready')
-        with open(ready_file, 'w') as f:
-            f.write('ready')
-
-        # Make sure the correct user owns the file. It shouldn't be necessary
-        # as the upstart script should run with root privileges, but it's
-        # better to have all the files matching ownership.
-        update_owner(ready_file)
-
-
-def update_owner(path, recurse_dirs=True):
-    """Changes the ownership of the specified path.
-
-    Changes the ownership of the specified path to the new ceph daemon user
-    using the system's native chown functionality. This may take a while,
-    so this method will issue a set_status for any changes of ownership which
-    recurse into directory structures.
-
-    :param path: the path to recursively change ownership for
-    :param recurse_dirs: boolean indicating whether to recursively change the
-        ownership of all the files in a path's subtree or to
-        simply change the ownership of the path.
-    :raises CalledProcessError: if an error occurs issuing the chown system
-        command
-    """
-    user = ceph_user()
-    user_group = '{ceph_user}:{ceph_user}'.format(ceph_user=user)
-    cmd = ['chown', user_group, path]
-    if os.path.isdir(path) and recurse_dirs:
-        status_set('maintenance', ('Updating ownership of %s to %s' %
-                                   (path, user)))
-        cmd.insert(1, '-R')
-
-    log('Changing ownership of {path} to {user}'.format(
-        path=path, user=user_group), DEBUG)
-    start = datetime.now()
-    subprocess.check_call(cmd)
-    elapsed_time = (datetime.now() - start)
-
-    log('Took {secs} seconds to change the ownership of path: {path}'.format(
-        secs=elapsed_time.total_seconds(), path=path), DEBUG)
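
On a systemd host, both of the branches above reduce to the same primitive:
enabling or disabling the templated unit for one OSD. A minimal sketch of
that call (the helper name is illustrative):

import subprocess

def set_osd_autostart(osd_num, enabled):
    # Each OSD is addressed as ceph-osd@<id>. Disabling only affects
    # automatic start at boot; the unit can still be started manually.
    action = 'enable' if enabled else 'disable'
    subprocess.check_call(['systemctl', action,
                           'ceph-osd@{}'.format(osd_num)])

-
-
-def get_osd_state(osd_num, osd_goal_state=None):
-    """Get OSD state or loop until OSD state matches OSD goal state.
-
-    If osd_goal_state is None, just return the current OSD state.
-    If osd_goal_state is not None, loop until the current OSD state matches
-    the OSD goal state.
-
-    :param osd_num: the OSD id to get state for
-    :param osd_goal_state: (Optional) string indicating state to wait for
-        Defaults to None
-    :returns: Returns a str, the OSD state.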
-    :rtype: str
-    """
-    while True:
-        asok = "/var/run/ceph/ceph-osd.{}.asok".format(osd_num)
-        cmd = [
-            'ceph',
-            'daemon',
-            asok,
-            'status'
-        ]
-        try:
-            result = json.loads(str(subprocess
-                                    .check_output(cmd)
-                                    .decode('UTF-8')))
-        except (subprocess.CalledProcessError, ValueError) as e:
-            log("{}".format(e), level=DEBUG)
-            # Sleep before retrying so that an unavailable admin socket
-            # does not turn this loop into a busy-wait.
-            time.sleep(3)
-            continue
-        osd_state = result['state']
-        log("OSD {} state: {}, goal state: {}".format(
-            osd_num, osd_state, osd_goal_state), level=DEBUG)
-        if not osd_goal_state:
-            return osd_state
-        if osd_state == osd_goal_state:
-            return osd_state
-        time.sleep(3)
-
-
-def get_all_osd_states(osd_goal_states=None):
-    """Get all OSD states or loop until all OSD states match OSD goal states.
-
-    If osd_goal_states is None, just return a dictionary of current OSD
-    states.
-    If osd_goal_states is not None, loop until the current OSD states match
-    the OSD goal states.
-
-    :param osd_goal_states: (Optional) dict indicating states to wait for
-        Defaults to None
-    :returns: Returns a dictionary of current OSD states.
-    :rtype: dict
-    """
-    osd_states = {}
-    for osd_num in get_local_osd_ids():
-        if not osd_goal_states:
-            osd_states[osd_num] = get_osd_state(osd_num)
-        else:
-            osd_states[osd_num] = get_osd_state(
-                osd_num,
-                osd_goal_state=osd_goal_states[osd_num])
-    return osd_states
-
-
-@contextmanager
-def maintain_osd_state(osd_num):
-    """Ensure the state of an OSD is maintained.
-
-    Ensures the state of an OSD is the same at the end of a block nested
-    in a with statement as it was at the beginning of the block.
-
-    :param osd_num: the OSD id to maintain state for
-    """
-    osd_state = get_osd_state(osd_num)
-    try:
-        yield
-    finally:
-        get_osd_state(osd_num, osd_goal_state=osd_state)
-
-
-@contextmanager
-def maintain_all_osd_states():
-    """Ensure all local OSD states are maintained.
-
-    Ensures the states of all local OSDs are the same at the end of a
-    block nested in a with statement as they were at the beginning of
-    the block.
-    """
-    osd_states = get_all_osd_states()
-    try:
-        yield
-    finally:
-        get_all_osd_states(osd_goal_states=osd_states)
-
-
-def list_pools(client='admin'):
-    """List the pools that currently exist in the Ceph cluster.
-
-    :param client: (Optional) client id for Ceph key to use
-        Defaults to ``admin``
-    :type client: str
-    :returns: Returns a list of available pools.
-    :rtype: list
-    :raises: subprocess.CalledProcessError if the subprocess fails to run.
-    """
-    try:
-        pool_list = []
-        pools = subprocess.check_output(['rados', '--id', client, 'lspools'],
-                                        universal_newlines=True,
-                                        stderr=subprocess.STDOUT)
-        for pool in pools.splitlines():
-            pool_list.append(pool)
-        return pool_list
-    except subprocess.CalledProcessError as err:
-        log("rados lspools failed with error: {}".format(err.output))
-        raise
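
The two context managers above implement a capture-then-restore-wait pattern
that generalizes beyond OSDs. A stripped-down sketch of the idea, where
get_state and wait_for_state are placeholders for the admin-socket polling
shown above:

from contextlib import contextmanager

@contextmanager
def maintain_state(get_state, wait_for_state):
    # Record the state before the block runs, then block until the
    # service has returned to that state, however the block exited.
    before = get_state()
    try:
        yield
    finally:
        wait_for_state(before)

-
-
-def get_pool_param(pool, param, client='admin'):
-    """Get parameter from pool.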
- - :param pool: Name of pool to get variable from - :type pool: str - :param param: Name of variable to get - :type param: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Value of variable on pool or None - :rtype: str or None - :raises: subprocess.CalledProcessError - """ - try: - output = subprocess.check_output( - ['ceph', '--id', client, 'osd', 'pool', 'get', pool, param], - universal_newlines=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as cp: - if cp.returncode == 2 and 'ENOENT: option' in cp.output: - return None - raise - if ':' in output: - return output.split(':')[1].lstrip().rstrip() - - -def get_pool_erasure_profile(pool, client='admin'): - """Get erasure code profile for pool. - - :param pool: Name of pool to get variable from - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Erasure code profile of pool or None - :rtype: str or None - :raises: subprocess.CalledProcessError - """ - try: - return get_pool_param(pool, 'erasure_code_profile', client=client) - except subprocess.CalledProcessError as cp: - if cp.returncode == 13 and 'EACCES: pool' in cp.output: - # Not a Erasure coded pool - return None - raise - - -def get_pool_quota(pool, client='admin'): - """Get pool quota. - - :param pool: Name of pool to get variable from - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Dictionary with quota variables - :rtype: dict - :raises: subprocess.CalledProcessError - """ - output = subprocess.check_output( - ['ceph', '--id', client, 'osd', 'pool', 'get-quota', pool], - universal_newlines=True, stderr=subprocess.STDOUT) - rc = re.compile(r'\s+max\s+(\S+)\s*:\s+(\d+)') - result = {} - for line in output.splitlines(): - m = rc.match(line) - if m: - result.update({'max_{}'.format(m.group(1)): m.group(2)}) - return result - - -def get_pool_applications(pool='', client='admin'): - """Get pool applications. - - :param pool: (Optional) Name of pool to get applications for - Defaults to get for all pools - :type pool: str - :param client: (Optional) client id for Ceph key to use - Defaults to ``admin`` - :type client: str - :returns: Dictionary with pool name as key - :rtype: dict - :raises: subprocess.CalledProcessError - """ - - cmd = ['ceph', '--id', client, 'osd', 'pool', 'application', 'get'] - if pool: - cmd.append(pool) - try: - output = subprocess.check_output(cmd, - universal_newlines=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as cp: - if cp.returncode == 2 and 'ENOENT' in cp.output: - return {} - raise - return json.loads(output) - - -def list_pools_detail(): - """Get detailed information about pools. - - Structure: - {'pool_name_1': {'applications': {'application': {}}, - 'parameters': {'pg_num': '42', 'size': '42'}, - 'quota': {'max_bytes': '1000', - 'max_objects': '10'}, - }, - 'pool_name_2': ... - } - - :returns: Dictionary with detailed pool information. 
- :rtype: dict - :raises: subproces.CalledProcessError - """ - get_params = ['pg_num', 'size'] - result = {} - applications = get_pool_applications() - for pool in list_pools(): - result[pool] = { - 'applications': applications.get(pool, {}), - 'parameters': {}, - 'quota': get_pool_quota(pool), - } - for param in get_params: - result[pool]['parameters'].update({ - param: get_pool_param(pool, param)}) - erasure_profile = get_pool_erasure_profile(pool) - if erasure_profile: - result[pool]['parameters'].update({ - 'erasure_code_profile': erasure_profile}) - return result - - -def dirs_need_ownership_update(service): - """Determines if directories still need change of ownership. - - Examines the set of directories under the /var/lib/ceph/{service} directory - and determines if they have the correct ownership or not. This is - necessary due to the upgrade from Hammer to Jewel where the daemon user - changes from root: to ceph:. - - :param service: the name of the service folder to check (e.g. OSD, mon) - :returns: boolean. True if the directories need a change of ownership, - False otherwise. - :raises IOError: if an error occurs reading the file stats from one of - the child directories. - :raises OSError: if the specified path does not exist or some other error - """ - expected_owner = expected_group = ceph_user() - path = os.path.join(CEPH_BASE_DIR, service) - for child in _get_child_dirs(path): - curr_owner, curr_group = owner(child) - - if (curr_owner == expected_owner) and (curr_group == expected_group): - continue - - # NOTE(lathiat): when config_changed runs on reboot, the OSD might not - # yet be mounted or started, and the underlying directory the OSD is - # mounted to is expected to be owned by root. So skip the check. This - # may also happen for OSD directories for OSDs that were removed. - if (service == 'osd' and - not os.path.exists(os.path.join(child, 'magic'))): - continue - - log('Directory "%s" needs its ownership updated' % child, DEBUG) - return True - - # All child directories had the expected ownership - return False - - -# A dict of valid Ceph upgrade paths. Mapping is old -> new -UPGRADE_PATHS = collections.OrderedDict([ - ('firefly', 'hammer'), - ('hammer', 'jewel'), - ('jewel', 'luminous'), - ('luminous', 'mimic'), - ('mimic', 'nautilus'), - ('nautilus', 'octopus'), - ('octopus', 'pacific'), - ('pacific', 'quincy'), -]) - -# Map UCA codenames to Ceph codenames -UCA_CODENAME_MAP = { - 'icehouse': 'firefly', - 'juno': 'firefly', - 'kilo': 'hammer', - 'liberty': 'hammer', - 'mitaka': 'jewel', - 'newton': 'jewel', - 'ocata': 'jewel', - 'pike': 'luminous', - 'queens': 'luminous', - 'rocky': 'mimic', - 'stein': 'mimic', - 'train': 'nautilus', - 'ussuri': 'octopus', - 'victoria': 'octopus', - 'wallaby': 'pacific', - 'xena': 'pacific', - 'yoga': 'quincy', -} - - -def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for Ceph""" - return ["{} -> {}".format(key, value) - for key, value in UPGRADE_PATHS.items()] - - -def resolve_ceph_version(source): - """Resolves a version of Ceph based on source configuration - based on Ubuntu Cloud Archive pockets. - - @param: source: source configuration option of charm - :returns: Ceph release codename or None if not resolvable - """ - os_release = get_os_codename_install_source(source) - return UCA_CODENAME_MAP.get(os_release) - - -def get_ceph_pg_stat(): - """Returns the result of 'ceph pg stat'. 
-
-    :returns: dict
-    """
-    try:
-        tree = str(subprocess
-                   .check_output(['ceph', 'pg', 'stat', '--format=json'])
-                   .decode('UTF-8'))
-        try:
-            json_tree = json.loads(tree)
-            if not json_tree['num_pg_by_state']:
-                return None
-            return json_tree
-        except ValueError as v:
-            log("Unable to parse ceph pg stat json: {}. Error: {}".format(
-                tree, v))
-            raise
-    except subprocess.CalledProcessError as e:
-        log("ceph pg stat command failed with message: {}".format(e))
-        raise
-
-
-def get_ceph_health():
-    """Returns the health of the cluster from a 'ceph status'
-
-    To inspect the overall status, use get_ceph_health()['overall_status'].
-
-    :returns: dict tree of ceph status
-    :raises: CalledProcessError if the ceph status command fails.
-    """
-    try:
-        tree = str(subprocess
-                   .check_output(['ceph', 'status', '--format=json'])
-                   .decode('UTF-8'))
-        try:
-            json_tree = json.loads(tree)
-            # Make sure children are present in the JSON
-            if not json_tree['overall_status']:
-                return None
-
-            return json_tree
-        except ValueError as v:
-            log("Unable to parse ceph tree json: {}. Error: {}".format(
-                tree, v))
-            raise
-    except subprocess.CalledProcessError as e:
-        log("ceph status command failed with message: {}".format(e))
-        raise
-
-
-def reweight_osd(osd_num, new_weight):
-    """Changes the crush weight of an OSD to the value specified.
-
-    :param osd_num: the OSD id which should be changed
-    :param new_weight: the new weight for the OSD
-    :returns: bool. True if output looks right, else False.
-    :raises CalledProcessError: if an error occurs invoking the ceph command
-    """
-    try:
-        cmd_result = str(subprocess
-                         .check_output(['ceph', 'osd', 'crush',
-                                        'reweight', "osd.{}".format(osd_num),
-                                        new_weight],
-                                       stderr=subprocess.STDOUT)
-                         .decode('UTF-8'))
-        expected_result = "reweighted item id {ID} name \'osd.{ID}\'".format(
-            ID=osd_num) + " to {}".format(new_weight)
-        log(cmd_result)
-        if expected_result in cmd_result:
-            return True
-        return False
-    except subprocess.CalledProcessError as e:
-        log("ceph osd crush reweight command failed"
-            " with message: {}".format(e))
-        raise
-
-
-def determine_packages():
-    """Determines packages for installation.
-
-    :returns: list of Ceph packages
-    """
-    packages = PACKAGES.copy()
-    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan':
-        btrfs_package = 'btrfs-progs'
-    else:
-        btrfs_package = 'btrfs-tools'
-    packages.append(btrfs_package)
-    return packages
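
Note that reweight_osd validates success by matching the human-readable
response rather than the exit code. That expected string is easy to
sanity-check in isolation (the values below are illustrative):

osd_num, new_weight = 4, '1.2'
expected = ("reweighted item id {ID} name 'osd.{ID}'".format(ID=osd_num) +
            " to {}".format(new_weight))
assert expected == "reweighted item id 4 name 'osd.4' to 1.2"

-
-
-def determine_packages_to_remove():
-    """Determines packages for removal
-
-    Note: if in a container, then the CHRONY_PACKAGE is removed.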
- - :returns: list of packages to be removed - :rtype: List[str] - """ - rm_packages = REMOVE_PACKAGES.copy() - if is_container(): - rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE])) - return rm_packages - - -def bootstrap_manager(): - hostname = socket.gethostname() - path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname) - keyring = os.path.join(path, 'keyring') - - if os.path.exists(keyring): - log('bootstrap_manager: mgr already initialized.') - else: - mkdir(path, owner=ceph_user(), group=ceph_user()) - subprocess.check_call(['ceph', 'auth', 'get-or-create', - 'mgr.{}'.format(hostname), 'mon', - 'allow profile mgr', 'osd', 'allow *', - 'mds', 'allow *', '--out-file', - keyring]) - chownr(path, ceph_user(), ceph_user()) - - unit = 'ceph-mgr@{}'.format(hostname) - subprocess.check_call(['systemctl', 'enable', unit]) - service_restart(unit) - - -def enable_msgr2(): - """ - Enables msgr2 - - :raises: subprocess.CalledProcessError if the command fails - """ - cmd = ['ceph', 'mon', 'enable-msgr2'] - subprocess.check_call(cmd) - - -def osd_noout(enable): - """Sets or unsets 'noout' - - :param enable: bool. True to set noout, False to unset. - :returns: bool. True if output looks right. - :raises CalledProcessError: if an error occurs invoking the systemd cmd - """ - operation = { - True: 'set', - False: 'unset', - } - try: - subprocess.check_call(['ceph', '--id', 'admin', - 'osd', operation[enable], - 'noout']) - log('running ceph osd {} noout'.format(operation[enable])) - return True - except subprocess.CalledProcessError as e: - log(e) - raise - - -class OSDConfigSetError(Exception): - """Error occurred applying OSD settings.""" - pass - - -def apply_osd_settings(settings): - """Applies the provided OSD settings - - Apply the provided settings to all local OSD unless settings are already - present. Settings stop being applied on encountering an error. - - :param settings: dict. Dictionary of settings to apply. - :returns: bool. True if commands ran successfully. - :raises: OSDConfigSetError - """ - current_settings = {} - base_cmd = 'ceph daemon osd.{osd_id} config --format=json' - get_cmd = base_cmd + ' get {key}' - set_cmd = base_cmd + ' set {key} {value}' - - def _get_cli_key(key): - return(key.replace(' ', '_')) - # Retrieve the current values to check keys are correct and to make this a - # noop if setting are already applied. - for osd_id in get_local_osd_ids(): - for key, value in sorted(settings.items()): - cli_key = _get_cli_key(key) - cmd = get_cmd.format(osd_id=osd_id, key=cli_key) - out = json.loads( - subprocess.check_output(cmd.split()).decode('UTF-8')) - if 'error' in out: - log("Error retrieving OSD setting: {}".format(out['error']), - level=ERROR) - return False - current_settings[key] = out[cli_key] - settings_diff = { - k: v - for k, v in settings.items() - if str(v) != str(current_settings[k])} - for key, value in sorted(settings_diff.items()): - log("Setting {} to {}".format(key, value), level=DEBUG) - cmd = set_cmd.format( - osd_id=osd_id, - key=_get_cli_key(key), - value=value) - out = json.loads( - subprocess.check_output(cmd.split()).decode('UTF-8')) - if 'error' in out: - log("Error applying OSD setting: {}".format(out['error']), - level=ERROR) - raise OSDConfigSetError - return True - - -def enabled_manager_modules(): - """Return a list of enabled manager modules. 
-
-    :rtype: List[str]
-    """
-    cmd = ['ceph', 'mgr', 'module', 'ls']
-    quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
-    if quincy_or_later:
-        cmd.append('--format=json')
-    try:
-        modules = subprocess.check_output(cmd).decode('UTF-8')
-    except subprocess.CalledProcessError as e:
-        log("Failed to list ceph modules: {}".format(e), WARNING)
-        return []
-    modules = json.loads(modules)
-    return modules['enabled_modules']
-
-
-def is_mgr_module_enabled(module):
-    """Is a given manager module enabled.
-
-    :param module: The name of the module to check
-    :type module: str
-    :returns: Whether the named module is enabled
-    :rtype: bool
-    """
-    return module in enabled_manager_modules()
-
-
-is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
-
-
-def mgr_enable_module(module):
-    """Enable a Ceph Manager Module.
-
-    :param module: The module name to enable
-    :type module: str
-    :returns: True if the module was enabled by this call, False if it
-        was already enabled
-    :rtype: bool
-
-    :raises: subprocess.CalledProcessError
-    """
-    if not is_mgr_module_enabled(module):
-        subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
-        return True
-    return False
-
-
-mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
-
-
-def mgr_disable_module(module):
-    """Disable a Ceph Manager Module.
-
-    :param module: The module name to disable
-    :type module: str
-    :returns: True if the module was disabled by this call, False if it
-        was already disabled
-    :rtype: bool
-
-    :raises: subprocess.CalledProcessError
-    """
-    if is_mgr_module_enabled(module):
-        subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
-        return True
-    return False
-
-
-mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
-
-
-def ceph_config_set(name, value, who):
-    """Set a Ceph config option
-
-    :param name: key to set
-    :type name: str
-    :param value: value corresponding to key
-    :type value: str
-    :param who: Config area the key is associated with (e.g. 'dashboard')
-    :type who: str
-
-    :raises: subprocess.CalledProcessError
-    """
-    subprocess.check_call(['ceph', 'config', 'set', who, name, value])
-
-
-mgr_config_set = functools.partial(ceph_config_set, who='mgr')
-
-
-def ceph_config_get(name, who):
-    """Retrieve the value of a Ceph config option
-
-    :param name: key to lookup
-    :type name: str
-    :param who: Config area the key is associated with (e.g. 'dashboard')
-    :type who: str
-    :returns: Value associated with key
-    :rtype: str
-    :raises: subprocess.CalledProcessError
-    """
-    return subprocess.check_output(
-        ['ceph', 'config', 'get', who, name]).decode('UTF-8')
-
-
-mgr_config_get = functools.partial(ceph_config_get, who='mgr')
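
The functools.partial wrappers above give each manager and dashboard
operation a readable name without duplicating the underlying function. The
same pattern in miniature (all names here are illustrative):

import functools

def set_option(name, value, who):
    print('config set', who, name, value)

# Freeze the 'who' argument to get a mgr-specific helper, mirroring
# mgr_config_set above.
mgr_set_option = functools.partial(set_option, who='mgr')
mgr_set_option('ssl', 'true')  # -> config set mgr ssl true

-
-
-def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
-    """Set SSL dashboard config option.
-
-    :param path: Path to file
-    :type path: str
-    :param artifact_name: Option name for setting the artifact
-    :type artifact_name: str
-    :param hostname: If hostname is set artifact will only be associated with
-        the dashboard on that host.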
- :type hostname: str - :raises: subprocess.CalledProcessError - """ - cmd = ['ceph', 'dashboard', artifact_name] - if hostname: - cmd.append(hostname) - cmd.extend(['-i', path]) - log(cmd, level=DEBUG) - subprocess.check_call(cmd) - - -dashboard_set_ssl_certificate = functools.partial( - _dashboard_set_ssl_artifact, - artifact_name='set-ssl-certificate') - - -dashboard_set_ssl_certificate_key = functools.partial( - _dashboard_set_ssl_artifact, - artifact_name='set-ssl-certificate-key') diff --git a/osci.yaml b/osci.yaml index f2ffe001..6970c873 100644 --- a/osci.yaml +++ b/osci.yaml @@ -8,3 +8,15 @@ needs_charm_build: true charm_build_name: ceph-mon build_type: charmcraft + check: + jobs: + - new-install-focal-yoga +- job: + name: new-install-focal-yoga + parent: func-target + dependencies: + - osci-lint + - charm-build + - tox-py38 + vars: + tox_extra_args: install:local-focal-yoga diff --git a/requirements.txt b/requirements.txt index ead6e89a..d9dd8416 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,24 +1,14 @@ -# This file is managed centrally by release-tools and should not be modified -# within individual charm repos. See the 'global' dir contents for available -# choices of *requirements.txt files for OpenStack Charms: -# https://github.com/openstack-charmers/release-tools -# -# TODO: Distill the func test requirements from the lint/unit test -# requirements. They are intertwined. Also, Zaza itself should specify -# all of its own requirements and if it doesn't, fix it there. -# -pbr==5.6.0 -simplejson>=2.2.0 -netifaces>=0.10.4 - -# Strange import error with newer netaddr: -netaddr>0.7.16,<0.8.0 - -Jinja2>=2.6 # BSD License (3 clause) -six>=1.9.0 - -# dnspython 2.0.0 dropped py3.5 support -dnspython<2.0.0; python_version < '3.6' -dnspython; python_version >= '3.6' - -psutil>=1.1.1,<2.0.0 +importlib-resources +ops >= 1.2.0 +tenacity +pyudev +dnspython +netaddr +netifaces +pyyaml +git+https://github.com/openstack/charms.ceph#egg=charms_ceph +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack +git+https://opendev.org/openstack/charm-ops-interface-tls-certificates#egg=interface_tls_certificates +git+https://github.com/openstack-charmers/ops-interface-ceph-iscsi-admin-access#egg=interface_ceph_iscsi_admin_access +git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer +git+https://github.com/juju/charm-helpers#egg=charm-helpers diff --git a/hooks/ceph_hooks.py b/src/ceph_hooks.py similarity index 100% rename from hooks/ceph_hooks.py rename to src/ceph_hooks.py diff --git a/src/charm.py b/src/charm.py new file mode 100755 index 00000000..9003aa52 --- /dev/null +++ b/src/charm.py @@ -0,0 +1,134 @@ +#! /usr/bin/python3 + +from ops.main import main + +import ops_openstack.core + +import ceph_hooks as hooks + + +class CephMonCharm(ops_openstack.core.OSBaseCharm): + + # General charm control callbacks. + def on_install(self, event): + hooks.install() + + def on_config(self, event): + hooks.config_changed() + + def on_pre_series_upgrade(self, event): + hooks.pre_series_upgrade() + + def on_upgrade(self, event): + hooks.upgrade_charm() + + def on_post_series_upgrade(self, event): + hooks.post_series_upgrade() + + # Relations. 
+ def on_mon_relation_joined(self, event): + hooks.mon_relation_joined() + + def on_bootstrap_source_relation_changed(self, event): + hooks.bootstrap_source_relation_changed() + + def on_prometheus_relation_joined_or_changed(self, event): + hooks.prometheus_relation() + + def on_prometheus_relation_departed(self, event): + hooks.prometheus_left() + + def on_mon_relation(self, event): + hooks.mon_relation() + + def on_osd_relation(self, event): + hooks.osd_relation() + + def on_dashboard_relation_joined(self, event): + hooks.dashboard_relation() + + def on_radosgw_relation(self, event): + hooks.radosgw_relation() + + def on_rbd_mirror_relation(self, event): + hooks.rbd_mirror_relation() + + def on_mds_relation(self, event): + hooks.mds_relation_joined() + + def on_admin_relation(self, event): + hooks.admin_relation_joined() + + def on_client_relation(self, event): + hooks.client_relation() + + def on_nrpe_relation(self, event): + hooks.upgrade_nrpe_config() + + def __init__(self, *args): + super().__init__(*args) + self._stored.is_started = True + fw = self.framework + + fw.observe(self.on.install, self.on_install) + fw.observe(self.on.config_changed, self.on_config) + fw.observe(self.on.pre_series_upgrade, self.on_pre_series_upgrade) + fw.observe(self.on.upgrade_charm, self.on_upgrade) + fw.observe(self.on.post_series_upgrade, self.on_post_series_upgrade) + + fw.observe(self.on.mon_relation_joined, self.on_mon_relation_joined) + fw.observe(self.on.bootstrap_source_relation_changed, + self.on_bootstrap_source_relation_changed) + fw.observe(self.on.prometheus_relation_joined, + self.on_prometheus_relation_joined_or_changed) + fw.observe(self.on.prometheus_relation_changed, + self.on_prometheus_relation_joined_or_changed) + fw.observe(self.on.prometheus_relation_departed, + self.on_prometheus_relation_departed) + + for key in ('mon_relation_departed', 'mon_relation_changed', + 'leader_settings_changed', + 'bootstrap_source_relation_departed'): + fw.observe(getattr(self.on, key), self.on_mon_relation) + + fw.observe(self.on.osd_relation_joined, + self.on_osd_relation) + fw.observe(self.on.osd_relation_changed, + self.on_osd_relation) + + fw.observe(self.on.dashboard_relation_joined, + self.on_dashboard_relation_joined) + + fw.observe(self.on.radosgw_relation_changed, + self.on_radosgw_relation) + fw.observe(self.on.radosgw_relation_joined, + self.on_radosgw_relation) + + fw.observe(self.on.rbd_mirror_relation_changed, + self.on_rbd_mirror_relation) + fw.observe(self.on.rbd_mirror_relation_joined, + self.on_rbd_mirror_relation) + + fw.observe(self.on.mds_relation_changed, + self.on_mds_relation) + fw.observe(self.on.mds_relation_joined, + self.on_mds_relation) + + fw.observe(self.on.admin_relation_changed, + self.on_admin_relation) + fw.observe(self.on.admin_relation_joined, + self.on_admin_relation) + + fw.observe(self.on.client_relation_changed, + self.on_client_relation) + fw.observe(self.on.client_relation_joined, + self.on_client_relation) + + fw.observe(self.on.nrpe_external_master_relation_joined, + self.on_nrpe_relation) + fw.observe(self.on.nrpe_external_master_relation_changed, + self.on_nrpe_relation) + + +if __name__ == '__main__': + main(CephMonCharm) diff --git a/hooks/utils.py b/src/utils.py similarity index 98% rename from hooks/utils.py rename to src/utils.py index 2781eefd..759ae9d7 100644 --- a/hooks/utils.py +++ b/src/utils.py @@ -32,10 +32,7 @@ from charmhelpers.core.hookenv import ( status_set, unit_get, ) -from charmhelpers.fetch import ( - apt_install, - 
filter_installed_packages -) + from charmhelpers.core.host import ( lsb_release, CompareHostReleases, @@ -47,12 +44,7 @@ from charmhelpers.contrib.network.ip import ( ) from charmhelpers.contrib.storage.linux import ceph -try: - import dns.resolver -except ImportError: - apt_install(filter_installed_packages(['python-dnspython']), - fatal=True) - import dns.resolver +import dns.resolver class OsdPostUpgradeError(Exception): diff --git a/test-requirements.txt b/test-requirements.txt index d515cae9..eee61b69 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -45,7 +45,7 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open # Needed for charm-glance: git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8' -tempest<31.0.0;python_version<'3.8' +tempest<30.0.0;python_version<'3.8' and python_version >= '3.6' tempest<24.0.0;python_version<'3.6' croniter # needed for charm-rabbitmq-server unit tests diff --git a/tests/tests.yaml b/tests/tests.yaml index 06b51cc4..7b0f7309 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -9,9 +9,17 @@ smoke_bundles: - focal-yoga configure: - - zaza.openstack.charm_tests.glance.setup.add_lts_image + - install: + - zaza.openstack.charm_tests.glance.setup.add_lts_image tests: + - install: + - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes + - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest + - zaza.openstack.charm_tests.ceph.tests.CephRelationTest + - zaza.openstack.charm_tests.ceph.tests.CephTest + - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest + - zaza.openstack.charm_tests.ceph.tests.CephPrometheusTest - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest - zaza.openstack.charm_tests.ceph.tests.CephRelationTest diff --git a/tox.ini b/tox.ini index f4e8a47c..5fd9c98d 100644 --- a/tox.ini +++ b/tox.ini @@ -85,7 +85,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python3 deps = flake8==3.9.2 charm-tools==2.8.3 -commands = flake8 {posargs} hooks unit_tests tests actions lib files +commands = flake8 {posargs} unit_tests tests actions files src charm-proof [testenv:cover] diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index 70342765..f439d3f1 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -17,3 +17,4 @@ sys.path.append('hooks') sys.path.append('lib') sys.path.append('unit_tests') sys.path.append('actions') +sys.path.append('src') diff --git a/unit_tests/test_ceph_utils.py b/unit_tests/test_ceph_utils.py index 9a82ff1d..5da722fd 100644 --- a/unit_tests/test_ceph_utils.py +++ b/unit_tests/test_ceph_utils.py @@ -16,7 +16,7 @@ import unittest.mock as mock import test_utils -from hooks import utils +import utils class CephUtilsTestCase(test_utils.CharmTestCase):
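
A note on the manager-module helpers removed in the first hunk above: `ceph mgr module ls` only needs an explicit `--format=json` on Quincy (ceph-common >= 17.1.0), because earlier releases already emit JSON by default. A minimal standalone sketch of that version gate, using cmp_pkgrevno from charm-helpers (everything else is stdlib):

import json
import subprocess

from charmhelpers.core.hookenv import log, WARNING
from charmhelpers.core.host import cmp_pkgrevno


def enabled_manager_modules():
    """Return the list of enabled ceph-mgr modules, or [] on error."""
    cmd = ['ceph', 'mgr', 'module', 'ls']
    if cmp_pkgrevno('ceph-common', '17.1.0') >= 0:
        # Quincy and later default to plain-text output, so ask for JSON.
        cmd.append('--format=json')
    try:
        out = subprocess.check_output(cmd).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        log("Failed to list ceph modules: {}".format(e), WARNING)
        return []
    return json.loads(out)['enabled_modules']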
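
The functools.partial chain in that same removed block gives each dashboard operation a dedicated entry point over the generic `ceph config` / `ceph dashboard` plumbing. A hedged usage sketch (function names as in the removed code; the file paths and the mgr/dashboard/ssl key are illustrative, and in practice the certificate material would come from the tls-certificates relation):

# Illustrative paths, not charm-managed locations.
dashboard_set_ssl_certificate('/etc/ceph/dashboard.crt')      # ceph dashboard set-ssl-certificate -i <path>
dashboard_set_ssl_certificate_key('/etc/ceph/dashboard.key')  # ceph dashboard set-ssl-certificate-key -i <path>
mgr_config_set('mgr/dashboard/ssl', 'true')                   # ceph config set mgr mgr/dashboard/ssl true
mgr_enable_dashboard()                                        # ceph mgr module enable dashboard

Passing a hostname to either set-ssl helper scopes the artifact to the dashboard instance on that host only; omitting it applies the artifact cluster-wide.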
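
Because the new src/charm.py is a thin ops shim over the legacy hook functions, it can be smoke-tested by stubbing the ceph_hooks module and driving events through ops.testing.Harness. A sketch under stated assumptions: it runs from the repo root (so Harness can find metadata.yaml and src/ is importable), and it is not one of the charm's own tests:

import sys
import unittest.mock as mock

sys.path.append('src')

from ops.testing import Harness


def test_install_delegates_to_legacy_hook():
    import charm
    harness = Harness(charm.CephMonCharm)
    # charm.py calls the hook as hooks.install(), so patching the
    # ceph_hooks module attribute is sufficient. Depending on the
    # ops_openstack base class, package installation triggered by the
    # install event may also need stubbing.
    with mock.patch('ceph_hooks.install') as install:
        harness.begin()
        harness.charm.on.install.emit()
    # Assert on the call args rather than the count: the base class may
    # observe the install event as well as this charm's own observer.
    install.assert_called_with()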
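
The src/utils.py hunk can drop the import-time apt_install fallback because the charmcraft build now packs the requirements.txt dependencies (dnspython included) into the charm artifact, so dns.resolver is always importable at runtime. For reference, a small resolution helper in the dnspython 2.x style (the function name is illustrative, not taken from the charm):

import dns.resolver


def first_a_record(hostname):
    """Return the first A record for hostname, or None if it does not resolve."""
    try:
        answers = dns.resolver.resolve(hostname, 'A')
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        return None
    return str(answers[0]) if answers else None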