[coreycb,r=james-page] Add amulet tests.
commit 78991105d6

Makefile (13 changed lines):

@@ -2,16 +2,25 @@
 PYTHON := /usr/bin/env python

 lint:
-	@flake8 --exclude hooks/charmhelpers hooks
+	@flake8 --exclude hooks/charmhelpers hooks tests
 	@charm proof || true

+test:
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	# https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY --timeout 900 \
+		00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse
+
 bin/charm_helpers_sync.py:
 	@mkdir -p bin
 	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
 		> bin/charm_helpers_sync.py

 sync: bin/charm_helpers_sync.py
-	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+	$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml

 publish: lint
 	bzr push lp:charms/ceph-osd
charm-helpers-tests.yaml (new file, 5 lines):

branch: lp:charm-helpers
destination: tests/charmhelpers
include:
    - contrib.amulet
    - contrib.openstack.amulet
charm-helpers networking helper (the file path header was lost in this capture):

@@ -57,6 +57,8 @@ def get_address_in_network(network, fallback=None, fatal=False):
     else:
         if fatal:
             not_found_error_out()
+        else:
+            return None

     _validate_cidr(network)
     network = netaddr.IPNetwork(network)
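The two added lines make the non-fatal miss explicit: with no matching
network, no fallback, and fatal=False, the helper now returns None instead
of falling through to _validate_cidr() with a None network. A standalone
sketch of the resulting control flow (a hypothetical stand-in; the real
function lives in charm-helpers and scans interfaces with netaddr):

    def get_address_in_network(network, fallback=None, fatal=False):
        if network is None:
            if fallback is not None:
                return fallback
            if fatal:
                raise SystemExit('no network configured')  # not_found_error_out()
            else:
                return None  # the branch this commit adds
        # real implementation continues here: _validate_cidr(network),
        # then pick an interface address inside the CIDR
        return None

    assert get_address_in_network(None, fatal=False) is None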
ceph.conf template (the file path header was lost in this capture; both
columns of the side-by-side view read identically after extraction, so a
single copy of each hunk is kept):

@@ -1,24 +1,24 @@
[global]
{% if old_auth %}
auth supported = {{ auth_supported }}
{% else %}
auth cluster required = {{ auth_supported }}
auth service required = {{ auth_supported }}
auth client required = {{ auth_supported }}
{% endif %}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
fsid = {{ fsid }}

log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}

{%- if ceph_public_network is string %}
public network = {{ ceph_public_network }}
{%- endif %}
{%- if ceph_cluster_network is string %}
cluster network = {{ ceph_cluster_network }}
{%- endif %}

{% if public_addr %}

@@ -30,13 +30,13 @@ cluster addr = {{ cluster_addr }}

[mon]
keyring = /var/lib/ceph/mon/$cluster-$id/keyring

[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring

[osd]
keyring = /var/lib/ceph/osd/$cluster-$id/keyring
osd journal size = {{ osd_journal_size }}
filestore xattr use omap = true
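The 'is string' guards above emit the network stanzas only when a value is
actually provided. A standalone Jinja2 render shows the effect (the charm
renders this through its own templating layer; plain jinja2 is used here
purely for illustration):

    from jinja2 import Template

    fragment = ("{%- if ceph_public_network is string %}\n"
                "public network = {{ ceph_public_network }}\n"
                "{%- endif %}")

    # With a value set, the stanza is emitted...
    print(Template(fragment).render(ceph_public_network='10.0.0.0/24'))
    # ...and with the variable undefined, the render is empty, because an
    # undefined variable fails the 'is string' test.
    print(repr(Template(fragment).render()))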
tests/00-setup (new executable file, 10 lines):

#!/bin/bash

set -ex

sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
                           python-keystoneclient \
                           python-glanceclient \
                           python-novaclient
tests/14-basic-precise-icehouse (new executable file, 11 lines):

#!/usr/bin/python

"""Amulet tests on a basic ceph-osd deployment on precise-icehouse."""

from basic_deployment import CephOsdBasicDeployment

if __name__ == '__main__':
    deployment = CephOsdBasicDeployment(series='precise',
                                        openstack='cloud:precise-icehouse',
                                        source='cloud:precise-updates/icehouse')
    deployment.run_tests()
tests/15-basic-trusty-icehouse (new executable file, 9 lines):

#!/usr/bin/python

"""Amulet tests on a basic ceph-osd deployment on trusty-icehouse."""

from basic_deployment import CephOsdBasicDeployment

if __name__ == '__main__':
    deployment = CephOsdBasicDeployment(series='trusty')
    deployment.run_tests()
tests/README (new file, 53 lines):

This directory provides Amulet tests that focus on verification of ceph-osd
deployments.

In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
    sudo add-apt-repository ppa:juju/stable
    sudo apt-get update
    sudo apt-get install charm-tools

If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.

The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.

  * To run all tests (starting with 00-setup):

      make test

  * To run a specific test module (or modules):

      juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse

  * To run a specific test module (or modules), and keep the environment
    deployed after a failure:

      juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse

  * To re-run a test module against an already deployed environment (one
    that was deployed by a previous call to 'juju test --set-e'):

      ./tests/15-basic-trusty-icehouse

For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without
changing the results beyond the initial run. This enables editing and
re-running of a test module against an already deployed environment, as
described above.

Manual debugging tips:

  * Set the following env vars before using the OpenStack CLI as admin:
      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
      export OS_TENANT_NAME=admin
      export OS_USERNAME=admin
      export OS_PASSWORD=openstack
      export OS_REGION_NAME=RegionOne

  * Set the following env vars before using the OpenStack CLI as demoUser:
      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
      export OS_TENANT_NAME=demoTenant
      export OS_USERNAME=demoUser
      export OS_PASSWORD=password
      export OS_REGION_NAME=RegionOne
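The same admin credentials work from Python through the keystoneclient API
that the synced test helpers already use; a minimal sketch (the address is a
placeholder for whatever the keystone unit actually reports):

    import keystoneclient.v2_0 as keystone_client

    # Placeholder address; substitute the keystone unit's IP, e.g. the
    # value the README derives with juju-deployer above.
    auth_url = 'http://10.0.3.1:5000/v2.0'
    keystone = keystone_client.Client(username='admin', password='openstack',
                                      tenant_name='admin', auth_url=auth_url)
    print([t.name for t in keystone.tenants.list()])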
tests/basic_deployment.py (new file, 281 lines):

#!/usr/bin/python

import amulet
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (  # noqa
    OpenStackAmuletUtils,
    DEBUG,
    ERROR
)

# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(ERROR)


class CephOsdBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic ceph-osd deployment."""

    def __init__(self, series=None, openstack=None, source=None,
                 stable=False):
        """Deploy the entire test environment."""
        super(CephOsdBasicDeployment, self).__init__(series, openstack,
                                                     source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        self._initialize_tests()

    def _add_services(self):
        """Add services.

        Add the services that we're testing, where ceph-osd is local,
        and the rest of the services are from lp branches that are
        compatible with the local charm (e.g. stable or next).
        """
        this_service = {'name': 'ceph-osd'}
        other_services = [{'name': 'ceph', 'units': 3}, {'name': 'mysql'},
                          {'name': 'keystone'}, {'name': 'rabbitmq-server'},
                          {'name': 'nova-compute'}, {'name': 'glance'},
                          {'name': 'cinder'}]
        super(CephOsdBasicDeployment, self)._add_services(this_service,
                                                          other_services)

    def _add_relations(self):
        """Add all of the relations for the services."""
        relations = {
            'nova-compute:shared-db': 'mysql:shared-db',
            'nova-compute:amqp': 'rabbitmq-server:amqp',
            'nova-compute:image-service': 'glance:image-service',
            'nova-compute:ceph': 'ceph:client',
            'keystone:shared-db': 'mysql:shared-db',
            'glance:shared-db': 'mysql:shared-db',
            'glance:identity-service': 'keystone:identity-service',
            'glance:amqp': 'rabbitmq-server:amqp',
            'glance:ceph': 'ceph:client',
            'cinder:shared-db': 'mysql:shared-db',
            'cinder:identity-service': 'keystone:identity-service',
            'cinder:amqp': 'rabbitmq-server:amqp',
            'cinder:image-service': 'glance:image-service',
            'cinder:ceph': 'ceph:client',
            'ceph-osd:mon': 'ceph:osd'
        }
        super(CephOsdBasicDeployment, self)._add_relations(relations)

    def _configure_services(self):
        """Configure all of the services."""
        keystone_config = {'admin-password': 'openstack',
                           'admin-token': 'ubuntutesting'}
        mysql_config = {'dataset-size': '50%'}
        cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
        ceph_config = {
            'monitor-count': '3',
            'auth-supported': 'none',
            'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
            'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
            'osd-reformat': 'yes',
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb /srv/ceph'
        }
        ceph_osd_config = {
            'osd-reformat': 'yes',
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb /srv/ceph'
        }

        configs = {'keystone': keystone_config,
                   'mysql': mysql_config,
                   'cinder': cinder_config,
                   'ceph': ceph_config,
                   'ceph-osd': ceph_osd_config}
        super(CephOsdBasicDeployment, self)._configure_services(configs)

    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.mysql_sentry = self.d.sentry.unit['mysql/0']
        self.keystone_sentry = self.d.sentry.unit['keystone/0']
        self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
        self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
        self.glance_sentry = self.d.sentry.unit['glance/0']
        self.cinder_sentry = self.d.sentry.unit['cinder/0']
        self.ceph0_sentry = self.d.sentry.unit['ceph/0']
        self.ceph1_sentry = self.d.sentry.unit['ceph/1']
        self.ceph2_sentry = self.d.sentry.unit['ceph/2']
        self.ceph_osd_sentry = self.d.sentry.unit['ceph-osd/0']

        # Authenticate admin with keystone
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

        # Create a demo tenant/role/user
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

        # Authenticate demo user with keystone
        self.keystone_demo = u.authenticate_keystone_user(self.keystone,
                                                          self.demo_user,
                                                          'password',
                                                          self.demo_tenant)

        # Authenticate demo user with nova-api
        self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                  self.demo_user,
                                                  'password',
                                                  self.demo_tenant)

    def _ceph_osd_id(self, index):
        """Produce a shell command that will return a ceph-osd id."""
        return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1)  # noqa

    def test_services(self):
        """Verify the expected services are running on the service units."""
        commands = {
            self.mysql_sentry: ['status mysql'],
            self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
            self.nova_compute_sentry: ['status nova-compute'],
            self.keystone_sentry: ['status keystone'],
            self.glance_sentry: ['status glance-registry',
                                 'status glance-api'],
            self.cinder_sentry: ['status cinder-api',
                                 'status cinder-scheduler',
                                 'status cinder-volume']
        }
        ceph_services = ['status ceph-mon-all',
                         'status ceph-mon id=`hostname`']
        ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
        ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
        ceph_osd_services = [ceph_osd0, ceph_osd1, 'status ceph-osd-all']
        ceph_services.extend(ceph_osd_services)
        commands[self.ceph0_sentry] = ceph_services
        commands[self.ceph1_sentry] = ceph_services
        commands[self.ceph2_sentry] = ceph_services
        commands[self.ceph_osd_sentry] = ceph_osd_services

        ret = u.validate_services(commands)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

    def test_ceph_osd_ceph_relation(self):
        """Verify the ceph-osd to ceph relation data."""
        unit = self.ceph_osd_sentry
        relation = ['mon', 'ceph:osd']
        expected = {
            'private-address': u.valid_ip
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph-osd to ceph', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_ceph0_to_ceph_osd_relation(self):
        """Verify the ceph0 to ceph-osd relation data."""
        unit = self.ceph0_sentry
        relation = ['osd', 'ceph-osd:mon']
        expected = {
            'osd_bootstrap_key': u.not_null,
            'private-address': u.valid_ip,
            'auth': u'none',
            'ceph-public-address': u.valid_ip,
            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph0 to ceph-osd', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_ceph1_to_ceph_osd_relation(self):
        """Verify the ceph1 to ceph-osd relation data."""
        unit = self.ceph1_sentry
        relation = ['osd', 'ceph-osd:mon']
        expected = {
            'osd_bootstrap_key': u.not_null,
            'private-address': u.valid_ip,
            'auth': u'none',
            'ceph-public-address': u.valid_ip,
            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph1 to ceph-osd', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_ceph2_to_ceph_osd_relation(self):
        """Verify the ceph2 to ceph-osd relation data."""
        unit = self.ceph2_sentry
        relation = ['osd', 'ceph-osd:mon']
        expected = {
            'osd_bootstrap_key': u.not_null,
            'private-address': u.valid_ip,
            'auth': u'none',
            'ceph-public-address': u.valid_ip,
            'fsid': u'6547bd3e-1397-11e2-82e5-53567c8d32dc'
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceph2 to ceph-osd', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

    def test_ceph_config(self):
        """Verify the data in the ceph config file."""
        unit = self.ceph_osd_sentry
        conf = '/etc/ceph/ceph.conf'
        expected = {
            'global': {
                'auth cluster required': 'none',
                'auth service required': 'none',
                'auth client required': 'none',
                'keyring': '/etc/ceph/$cluster.$name.keyring',
                'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
                'log to syslog': 'false',
                'err to syslog': 'false',
                'clog to syslog': 'false'
            },
            'mon': {
                'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
            },
            'mds': {
                'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
            },
            'osd': {
                'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring',
                'osd journal size': '1024',
                'filestore xattr use omap': 'true'
            },
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "ceph config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)

    def test_restart_on_config_change(self):
        """Verify the specified services are restarted on config change."""
        # NOTE(coreycb): Test not implemented but should it be? ceph-osd svcs
        #                aren't restarted by charm after config change. Should
        #                they be restarted?
        if self._get_openstack_release() >= self.precise_essex:
            u.log.error("Test not implemented")
            return
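_ceph_osd_id() deliberately returns a backquoted shell fragment rather than
a number, so the OSD id is resolved on the unit itself at the moment the
status command runs. For index 0, the string formatted into
'status ceph-osd id={}' is the following (the doubled braces in the source
escape literal braces for str.format):

    # What self._ceph_osd_id(0) evaluates to:
    print("`initctl list | grep 'ceph-osd ' "
          "| awk 'NR==1 { print $2 }' | grep -o '[0-9]*'`")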
tests/charmhelpers/__init__.py (new empty file)
tests/charmhelpers/contrib/__init__.py (new empty file)
tests/charmhelpers/contrib/amulet/__init__.py (new empty file)
tests/charmhelpers/contrib/amulet/deployment.py (new file, 77 lines):

import amulet

import os


class AmuletDeployment(object):
    """Amulet deployment.

    This class provides generic Amulet deployment and test runner
    methods.
    """

    def __init__(self, series=None):
        """Initialize the deployment environment."""
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services.

        Add services to the deployment where this_service is the local charm
        that we're testing and other_services are the other services that
        are being used in the local amulet tests.
        """
        if this_service['name'] != os.path.basename(os.getcwd()):
            s = this_service['name']
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        if 'units' not in this_service:
            this_service['units'] = 1

        self.d.add(this_service['name'], units=this_service['units'])

        for svc in other_services:
            if 'location' in svc:
                branch_location = svc['location']
            elif self.series:
                # The capture had a stray trailing comma here, which would
                # have bound branch_location to a 1-tuple; dropped.
                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
            else:
                branch_location = None

            if 'units' not in svc:
                svc['units'] = 1

            self.d.add(svc['name'], charm=branch_location, units=svc['units'])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup(timeout=900)
            self.d.sentry.wait(timeout=900)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        except Exception:
            raise

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
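run_tests() discovers anything named test_* via dir(), so a deployment
subclass only needs to define test methods on top of the setup calls. A
minimal sketch with a hypothetical charm named 'mycharm' (nothing is
deployed until _deploy() runs, and _add_services() insists the working
directory be named after the charm):

    from charmhelpers.contrib.amulet.deployment import AmuletDeployment


    class MyCharmDeployment(AmuletDeployment):
        """Hypothetical deployment for a charm named 'mycharm'."""

        def __init__(self, series=None):
            super(MyCharmDeployment, self).__init__(series)
            self._add_services({'name': 'mycharm'}, [{'name': 'mysql'}])
            self._add_relations({'mycharm:shared-db': 'mysql:shared-db'})
            self._deploy()

        def test_100_services(self):
            # Any method whose name starts with test_ is run by run_tests().
            pass

    MyCharmDeployment(series='trusty').run_tests()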
tests/charmhelpers/contrib/amulet/utils.py (new file, 176 lines):

import ConfigParser
import io
import logging
import re
import sys
import time


class AmuletUtils(object):
    """Amulet utilities.

    This class provides common utility functions that are used by Amulet
    tests.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
            return True
        else:
            return False

    def valid_url(self, url):
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        if p.match(url):
            return True
        else:
            return False

    def validate_services(self, commands):
        """Validate services.

        Verify the specified services are running on the corresponding
        service units.
        """
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section,
                             expected):
        """Validate config file data.

        Verify that the specified section of the config file contains
        the expected option key:value pairs.
        """
        config = self._get_config(sentry_unit, config_file)

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Validate dictionary data.

        Compare expected dictionary data vs actual dictionary data.
        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluates a variable and returns a
        bool.
        """
        for k, v in expected.iteritems():
            if k in actual:
                if (isinstance(v, basestring) or
                        isinstance(v, bool) or
                        isinstance(v, (int, long))):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        if string is not None:
            return True
        else:
            return False

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Get process' start time.

        Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line.
        """
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Check if service was restarted.

        Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.
        """
        time.sleep(sleep_time)
        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename)):
            return True
        else:
            return False

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
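_validate_dict_data() treats callable expected values as predicates, which
is why the ceph-osd tests can mix literal strings with checks like
u.valid_ip and u.not_null. A small standalone illustration (made-up actual
data; Python 2, like the helpers themselves):

    u = AmuletUtils()

    expected = {
        'auth': 'none',                   # literal, compared with ==
        'private-address': u.valid_ip,    # callable, must return True
        'osd_bootstrap_key': u.not_null,  # callable, must return True
    }
    actual = {'auth': 'none',
              'private-address': '10.0.3.15',
              'osd_bootstrap_key': 'AQCXrnZQ=='}

    # None means everything matched; a string pinpoints the first mismatch.
    print(u._validate_dict_data(expected, actual))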
tests/charmhelpers/contrib/openstack/__init__.py (new empty file)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (new file, 91 lines):

from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)


class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None, stable=True):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services."""
        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']

        if self.stable:
            for svc in other_services:
                temp = 'lp:charms/{}'
                svc['location'] = temp.format(svc['name'])
        else:
            for svc in other_services:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}'
                    svc['location'] = temp.format(svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
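Because _get_openstack_release() assigns the release names as ordered
attributes via range(6), tests can gate behavior with plain comparisons, as
test_restart_on_config_change does. A sketch of the pattern, inside an
OpenStackAmuletDeployment subclass method:

    # precise_essex .. trusty_icehouse are set as attributes by
    # _get_openstack_release() itself before the lookup.
    if self._get_openstack_release() >= self.precise_icehouse:
        pass  # icehouse-or-newer behavior goes here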
tests/charmhelpers/contrib/openstack/amulet/utils.py (new file, 276 lines):

import logging
import os
import time
import urllib

import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)

DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavors.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
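Putting the helpers together, a resource test can round-trip a glance
image. A minimal sketch, assuming an already-deployed environment whose
keystone sentry is at hand as in basic_deployment.py (keystone_sentry below
is that assumed unit):

    u = OpenStackAmuletUtils(DEBUG)

    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)

    # create_cirros_image() returns None if the image never goes active.
    image = u.create_cirros_image(glance, 'cirros-test-image')
    if image:
        u.delete_image(glance, image)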