diff --git a/Makefile b/Makefile index d7a13bf8..6f53deac 100644 --- a/Makefile +++ b/Makefile @@ -2,9 +2,17 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers hooks tests @charm proof || true +test: + @echo Starting Amulet tests... + # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse + bin/charm_helpers_sync.py: @mkdir -p bin @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ diff --git a/tests/00-setup b/tests/00-setup new file mode 100755 index 00000000..1243ec43 --- /dev/null +++ b/tests/00-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet \ + python-keystoneclient \ + python-glanceclient \ + python-novaclient diff --git a/tests/14-basic-precise-icehouse b/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..13aee613 --- /dev/null +++ b/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on precise-icehouse.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = CephOsdBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/tests/15-basic-trusty-icehouse b/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..9079f5e3 --- /dev/null +++ b/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic ceph-osd deployment on trusty-icehouse.""" + +from basic_deployment import CephOsdBasicDeployment + +if __name__ == '__main__': + deployment = 
CephOsdBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/tests/README b/tests/README new file mode 100644 index 00000000..643eb8dd --- /dev/null +++ b/tests/README @@ -0,0 +1,53 @@ +This directory provides Amulet tests that focus on verification of ceph-osd +deployments. + +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. + + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. 
+ +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py new file mode 100644 index 00000000..2f0542b2 --- /dev/null +++ b/tests/basic_deployment.py @@ -0,0 +1,281 @@ +#!/usr/bin/python + +import amulet +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( # noqa + OpenStackAmuletUtils, + DEBUG, + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class CephOsdBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic ceph-osd deployment.""" + + def __init__(self, series=None, openstack=None, source=None, + stable=False): + """Deploy the entire test environment.""" + super(CephOsdBasicDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where ceph-osd is local, + and the rest of the services are from lp branches that are + compatible with the local charm (e.g. stable or next). 
+ """ + this_service = {'name': 'ceph-osd'} + other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'}, + {'name': 'keystone'}, {'name': 'rabbitmq-server'}, + {'name': 'nova-compute'}, {'name': 'glance'}, + {'name': 'cinder'}] + super(CephOsdBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:shared-db': 'mysql:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:ceph': 'ceph:client', + 'keystone:shared-db': 'mysql:shared-db', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'glance:ceph': 'ceph:client', + 'cinder:shared-db': 'mysql:shared-db', + 'cinder:identity-service': 'keystone:identity-service', + 'cinder:amqp': 'rabbitmq-server:amqp', + 'cinder:image-service': 'glance:image-service', + 'cinder:ceph': 'ceph:client', + 'ceph-osd:mon': 'ceph:osd' + } + super(CephOsdBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + mysql_config = {'dataset-size': '50%'} + cinder_config = {'block-device': 'None', 'glance-api-version': '2'} + ceph_config = { + 'monitor-count': '3', + 'auth-supported': 'none', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' + } + ceph_osd_config = { + 'osd-reformat': 'yes', + 'ephemeral-unmount': '/mnt', + 'osd-devices': '/dev/vdb /srv/ceph' + } + + configs = {'keystone': keystone_config, + 'mysql': mysql_config, + 'cinder': cinder_config, + 'ceph': ceph_config, + 'ceph-osd': ceph_osd_config} + super(CephOsdBasicDeployment, 
self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.cinder_sentry = self.d.sentry.unit['cinder/0'] + self.ceph0_sentry = self.d.sentry.unit['ceph/0'] + self.ceph1_sentry = self.d.sentry.unit['ceph/1'] + self.ceph2_sentry = self.d.sentry.unit['ceph/2'] + self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = u.authenticate_keystone_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + # Authenticate demo user with nova-api + self.nova_demo = u.authenticate_nova_user(self.keystone, + self.demo_user, + 'password', + self.demo_tenant) + + def _ceph_osd_id(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o 
'[0-9]*'`".format(index + 1) # noqa + + def test_services(self): + """Verify the expected services are running on the service units.""" + commands = { + self.mysql_sentry: ['status mysql'], + self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], + self.nova_compute_sentry: ['status nova-compute'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', + 'status glance-api'], + self.cinder_sentry: ['status cinder-api', + 'status cinder-scheduler', + 'status cinder-volume'] + } + ceph_services = ['status ceph-mon-all', + 'status ceph-mon id=`hostname`'] + ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0)) + ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1)) + ceph_osd_services = [ceph_osd0, ceph_osd1, 'status ceph-osd-all'] + ceph_services.extend(ceph_osd_services) + commands[self.ceph0_sentry] = ceph_services + commands[self.ceph1_sentry] = ceph_services + commands[self.ceph2_sentry] = ceph_services + commands[self.ceph_osd_sentry] = ceph_osd_services + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_ceph_osd_ceph_relation(self): + """Verify the ceph-osd to ceph relation data.""" + unit = self.ceph_osd_sentry + relation = ['mon', 'ceph:osd'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph-osd to ceph', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph0_to_ceph_osd_relation(self): + """Verify the ceph0 to ceph-osd relation data.""" + unit = self.ceph0_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph0 to ceph-osd', ret) + 
amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph1_to_ceph_osd_relation(self): + """Verify the ceph1 to ceph-osd relation data.""" + unit = self.ceph1_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph1 to ceph-osd', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph2_to_ceph_osd_relation(self): + """Verify the ceph2 to ceph-osd relation data.""" + unit = self.ceph2_sentry + relation = ['osd', 'ceph-osd:mon'] + expected = { + 'osd_bootstrap_key': u.not_null, + 'private-address': u.valid_ip, + 'auth': u'none', + 'ceph-public-address': u.valid_ip, + 'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('ceph2 to ceph-osd', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ceph_config(self): + """Verify the data in the ceph config file.""" + unit = self.ceph_osd_sentry + conf = '/etc/ceph/ceph.conf' + expected = { + 'global': { + 'auth cluster required': 'none', + 'auth service required': 'none', + 'auth client required': 'none', + 'keyring': '/etc/ceph/$cluster.$name.keyring', + 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', + 'log to syslog': 'false', + 'err to syslog': 'false', + 'clog to syslog': 'false' + }, + 'mon': { + 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring' + }, + 'mds': { + 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring' + }, + 'osd': { + 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring', + 'osd journal size': '1024', + 'filestore xattr use omap': 'true' + }, + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ceph config error: 
{}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify the specified services are restarted on config change.""" + # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs + # aren't restarted by charm after config change. Should + # they be restarted? + if self._get_openstack_release() >= self.precise_essex: + u.log.error("Test not implemented") + return