scenario001: deploy Cinder and Nova with RBD backend

This patch enables complete testing of a compute scenario where RBD is
used as the backend for both Cinder and Nova.

* Allow cinder.pp and nova.pp to configure an RBD backend.
* Switch scenario001 to run Cinder and Nova with the RBD backend.
* Update the README with more documentation about Cinder & Nova backends.
* Set the volume_clear option to 'none' so volumes are deleted quickly.
* Set rbd_default_features to 15 to improve Ceph performance.
* Increase the Tempest compute build_interval to 60s, which helps the
  boot-from-volume tests where instance deletion can take time with the
  RBD backend.

Depends-On: I69a7d40e7d1847be06a843986ace4f0602272fe1
Depends-On: I7302b89da5a995e779ec349ab0c0f519c69a3a98
Depends-On: Ic410cb66e7620b6ca6acbea38360d8dd890000c9

Change-Id: I5c8d9cf2ff8fc361553b3eed73b697ad87170434
Author: Emilien Macchi
Date: 2016-02-17 23:26:02 -05:00
Commit: 478f8208ad (parent 7c2ff7daf3)
6 changed files with 115 additions and 52 deletions

@@ -35,9 +35,9 @@ scenario](#All-In-One).
 |:----------:|:-----------:|:-----------:|:-----------:|
 | keystone   | X           | X           | X           |
 | glance     | rbd         | file        | file        |
-| nova       | X           | X           | X           |
+| nova       | rbd         | X           | X           |
 | neutron    | X           | X           | X           |
-| cinder     | X           |             | X           |
+| cinder     | rbd         | iscsi       | iscsi       |
 | ceilometer | X           |             |             |
 | aodh       | X           |             |             |
 | gnocchi    | X           |             |             |

@@ -22,8 +22,12 @@ class { '::openstack_integration::glance':
   backend => 'rbd',
 }
 include ::openstack_integration::neutron
-include ::openstack_integration::nova
-include ::openstack_integration::cinder
+class { '::openstack_integration::nova':
+  libvirt_rbd => true,
+}
+class { '::openstack_integration::cinder':
+  backend => 'rbd',
+}
 include ::openstack_integration::ceilometer
 include ::openstack_integration::aodh
 include ::openstack_integration::gnocchi
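
Note that both new parameters default to the previous behaviour, so scenarios that do not opt into RBD keep working with plain includes. A minimal sketch (not part of this patch) of what such a scenario effectively gets:

# Sketch only: without RBD the new defaults apply, since libvirt_rbd
# defaults to false and backend defaults to 'iscsi'.
include ::openstack_integration::nova
include ::openstack_integration::cinder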

@@ -39,4 +39,11 @@ class openstack_integration::ceph {
   class { '::ceph::profile::mon': }
   class { '::ceph::profile::osd': }
+  # Extra Ceph configuration to increase performances
+  $ceph_extra_config = {
+    'client/rbd_default_features' => { value => '15' },
+  }
+  class { '::ceph::conf':
+    args => $ceph_extra_config,
+  }
 }
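
The value 15 is the RBD image feature bitmask: layering (1) + striping (2) + exclusive-lock (4) + object-map (8). As an illustration only, since the patch goes through ::ceph::conf, the same option could be written with puppet-ceph's ceph_config ini provider, assuming it is available on the node:

# Sketch, assuming puppet-ceph's ceph_config provider is available.
# 15 = layering (1) + striping (2) + exclusive-lock (4) + object-map (8)
ceph_config { 'client/rbd_default_features':
  value => '15',
}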

@@ -1,4 +1,13 @@
-class openstack_integration::cinder {
+# Configure the Cinder service
+#
+# [*backend*]
+#   (optional) Cinder backend to use.
+#   Can be 'iscsi' or 'rbd'.
+#   Defaults to 'iscsi'.
+#
+class openstack_integration::cinder (
+  $backend = 'iscsi',
+) {
   rabbitmq_user { 'cinder':
     admin    => true,
@@ -37,16 +46,35 @@ class openstack_integration::cinder {
   class { '::cinder::quota': }
   class { '::cinder::scheduler': }
   class { '::cinder::scheduler::filter': }
-  class { '::cinder::volume': }
+  class { '::cinder::volume':
+    volume_clear => 'none',
+  }
   class { '::cinder::cron::db_purge': }
   class { '::cinder::glance':
     glance_api_servers => 'localhost:9292',
   }
-  class { '::cinder::setup_test_volume':
-    size => '15G',
-  }
-  cinder::backend::iscsi { 'BACKEND_1':
-    iscsi_ip_address => '127.0.0.1',
+  case $backend {
+    'iscsi': {
+      class { '::cinder::setup_test_volume':
+        size => '15G',
+      }
+      cinder::backend::iscsi { 'BACKEND_1':
+        iscsi_ip_address => '127.0.0.1',
+      }
+    }
+    'rbd': {
+      cinder::backend::rbd { 'BACKEND_1':
+        rbd_user        => 'openstack',
+        rbd_pool        => 'cinder',
+        rbd_secret_uuid => '7200aea0-2ddd-4a32-aa2a-d49f66ab554c',
+      }
+      # make sure ceph pool exists before running Cinder API & Volume
+      Exec['create-cinder'] -> Service['cinder-api']
+      Exec['create-cinder'] -> Service['cinder-volume']
+    }
+    default: {
+      fail("Unsupported backend (${backend})")
+    }
   }
   class { '::cinder::backends':
     enabled_backends => ['BACKEND_1'],
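
For reference, cinder::backend::rbd renders an RBD volume-driver section in cinder.conf. A rough sketch of the main BACKEND_1 options it manages, expressed with puppet-cinder's cinder_config provider (illustration only; the class itself sets the authoritative list):

# Illustration only: approximate cinder.conf options behind the
# cinder::backend::rbd resource declared above.
cinder_config {
  'BACKEND_1/volume_driver':   value => 'cinder.volume.drivers.rbd.RBDDriver';
  'BACKEND_1/rbd_pool':        value => 'cinder';
  'BACKEND_1/rbd_user':        value => 'openstack';
  'BACKEND_1/rbd_secret_uuid': value => '7200aea0-2ddd-4a32-aa2a-d49f66ab554c';
}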

@@ -1,4 +1,13 @@
-class openstack_integration::nova {
+# Configure the Nova service
+#
+# [*libvirt_rbd*]
+#   (optional) Boolean to configure Nova
+#   to use the Libvirt RBD backend.
+#   Defaults to false.
+#
+class openstack_integration::nova (
+  $libvirt_rbd = false,
+) {
   rabbitmq_user { 'nova':
     admin    => true,
@@ -60,6 +69,19 @@ class openstack_integration::nova {
     migration_support => true,
     vncserver_listen  => '0.0.0.0',
   }
+  if $libvirt_rbd {
+    class { '::nova::compute::rbd':
+      libvirt_rbd_user        => 'openstack',
+      libvirt_rbd_secret_uuid => '7200aea0-2ddd-4a32-aa2a-d49f66ab554c',
+      libvirt_rbd_secret_key  => 'AQD7kyJQQGoOBhAAqrPAqSopSwPrrfMMomzVdw==',
+      libvirt_images_rbd_pool => 'nova',
+      rbd_keyring             => 'client.openstack',
+      # ceph packaging is already managed by puppet-ceph
+      manage_ceph_client      => false,
+    }
+    # make sure ceph pool exists before running nova-compute
+    Exec['create-nova'] -> Service['nova-compute']
+  }
   class { '::nova::scheduler': }
   class { '::nova::vncproxy': }
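
For context, ::nova::compute::rbd points libvirt at the 'nova' pool and registers the libvirt secret for the Ceph client key. A rough sketch of the main [libvirt] settings it ends up managing in nova.conf, using puppet-nova's nova_config provider (illustration only; the class also defines the virsh secret):

# Illustration only: approximate nova.conf [libvirt] options managed by
# ::nova::compute::rbd when libvirt_rbd is enabled.
nova_config {
  'libvirt/images_type':          value => 'rbd';
  'libvirt/images_rbd_pool':      value => 'nova';
  'libvirt/images_rbd_ceph_conf': value => '/etc/ceph/ceph.conf';
  'libvirt/rbd_user':             value => 'openstack';
  'libvirt/rbd_secret_uuid':      value => '7200aea0-2ddd-4a32-aa2a-d49f66ab554c';
}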

@@ -64,47 +64,49 @@ class openstack_integration::tempest (
 ) {
   class { '::tempest':
     debug                  => true,
     use_stderr             => false,
     log_file               => 'tempest.log',
     tempest_clone_owner    => $::id,
     git_clone              => false,
     tempest_clone_path     => '/tmp/openstack/tempest',
     lock_path              => '/tmp/openstack/tempest',
     tempest_config_file    => '/tmp/openstack/tempest/etc/tempest.conf',
     configure_images       => true,
     configure_networks     => true,
     identity_uri           => 'http://127.0.0.1:5000/v2.0',
     identity_uri_v3        => 'http://127.0.0.1:5000/v3',
     admin_username         => 'admin',
     admin_tenant_name      => 'openstack',
     admin_password         => 'a_big_secret',
     admin_domain_name      => 'Default',
     auth_version           => 'v3',
     image_name             => 'cirros',
     image_name_alt         => 'cirros_alt',
     cinder_available       => $cinder,
     glance_available       => $glance,
     horizon_available      => $horizon,
     nova_available         => $nova,
     neutron_available      => $neutron,
     ceilometer_available   => $ceilometer,
     aodh_available         => $aodh,
     trove_available        => $trove,
     sahara_available       => $sahara,
     heat_available         => $heat,
     swift_available        => $swift,
     ironic_available       => $ironic,
     public_network_name    => 'public',
     dashboard_url          => "http://${::hostname}/",
     flavor_ref             => '42',
     flavor_ref_alt         => '84',
     image_ssh_user         => 'cirros',
     image_alt_ssh_user     => 'cirros',
     img_file               => 'cirros-0.3.4-x86_64-disk.img',
+    compute_build_interval => 60,
     # TODO(emilien) optimization by 1/ using Hiera to configure Glance image source
     # and 2/ if running in the gate, use /home/jenkins/cache/files/ cirros image.
     # img_dir => '/home/jenkins/cache/files',
     img_dir                => '/tmp/openstack/tempest',
   }
 }