Initial commit of puppet-cinder

This commit is contained in:
Ryan Hefner
2014-06-10 16:18:30 -04:00
parent eb60ddbfeb
commit 3d0f602557
103 changed files with 6490 additions and 143 deletions

19
.fixtures.yml Normal file

@@ -0,0 +1,19 @@
fixtures:
repositories:
'apt': 'git://github.com/puppetlabs/puppetlabs-apt.git'
'inifile': 'git://github.com/puppetlabs/puppetlabs-inifile'
'keystone': 'git://github.com/stackforge/puppet-keystone.git'
'mysql':
repo: 'git://github.com/puppetlabs/puppetlabs-mysql.git'
ref: 'origin/0.x'
'postgresql':
repo: 'git://github.com/puppetlabs/puppet-postgresql.git'
ref: '2.5.0'
'qpid': 'git://github.com/dprince/puppet-qpid.git'
'rabbitmq':
repo: 'git://github.com/puppetlabs/puppetlabs-rabbitmq'
ref: 'origin/2.x'
'stdlib': 'git://github.com/puppetlabs/puppetlabs-stdlib.git'
'sysctl': 'git://github.com/duritong/puppet-sysctl.git'
symlinks:
'cinder': "#{source_dir}"

5
.gitignore vendored Normal file

@@ -0,0 +1,5 @@
Gemfile.lock
spec/fixtures/modules/*
spec/fixtures/manifests/site.pp
*.swp
pkg

4
.gitreview Normal file

@@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=stackforge/puppet-cinder.git

15
Gemfile Normal file

@@ -0,0 +1,15 @@
source 'https://rubygems.org'
group :development, :test do
gem 'puppetlabs_spec_helper', :require => false
gem 'puppet-lint', '~> 0.3.2'
gem 'rake', '10.1.1'
end
if puppetversion = ENV['PUPPET_GEM_VERSION']
gem 'puppet', puppetversion, :require => false
else
gem 'puppet', :require => false
end
# vim:ft=ruby

201
LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

15
Modulefile Normal file

@@ -0,0 +1,15 @@
name 'puppetlabs-cinder'
version '4.0.0'
source 'https://github.com/stackforge/puppet-cinder'
author 'Puppet Labs'
license 'Apache License 2.0'
summary 'Puppet Labs Cinder Module'
description 'Puppet module to install and configure the Openstack Cinder block storage service'
project_page 'https://launchpad.net/puppet-cinder'
dependency 'dprince/qpid', '>=1.0.0 <2.0.0'
dependency 'puppetlabs/inifile', '>=1.0.0 <2.0.0'
dependency 'puppetlabs/keystone', '>=4.0.0 <5.0.0'
dependency 'puppetlabs/mysql', '>=0.9.0 <3.0.0'
dependency 'puppetlabs/rabbitmq', '>=2.0.2 <4.0.0'
dependency 'puppetlabs/stdlib', '>=3.2.0'

249
README.md Normal file

@@ -0,0 +1,249 @@
cinder
=======
4.0.0 - 2014.1.0 - Icehouse
#### Table of Contents
1. [Overview - What is the cinder module?](#overview)
2. [Module Description - What does the module do?](#module-description)
3. [Setup - The basics of getting started with cinder](#setup)
4. [Implementation - An under-the-hood peek at what the module is doing](#implementation)
5. [Limitations - OS compatibility, etc.](#limitations)
6. [Development - Guide for contributing to the module](#development)
7. [Contributors - Those with commits](#contributors)
8. [Release Notes - Notes on the most recent updates to the module](#release-notes)
Overview
--------
The cinder module is a part of [Stackforge](https://github.com/stackforge), an effort by the Openstack infrastructure team to provide continuous integration testing and code review for Openstack and Openstack community projects not part of the core software. The module itself is used to flexibly configure and manage the block storage service for Openstack.
Module Description
------------------
The cinder module is a thorough attempt to make Puppet capable of managing the entirety of cinder. This includes manifests to provision such things as keystone endpoints, RPC configurations specific to cinder, and database connections. Types are shipped as part of the cinder module to assist in manipulation of configuration files.
This module is tested in combination with other modules needed to build and leverage an entire Openstack software stack. These modules can be found, all pulled together, in the [openstack module](https://github.com/stackforge/puppet-openstack).
Setup
-----
**What the cinder module affects**
* cinder, the block storage service for Openstack.
### Installing cinder
puppet module install puppetlabs/cinder
### Beginning with cinder
To utilize the cinder module's functionality you will need to declare multiple resources. The following is a modified excerpt from the [openstack module](https://github.com/stackforge/puppet-openstack). This is not an exhaustive list of all the components needed; we recommend you consult and understand the [openstack module](https://github.com/stackforge/puppet-openstack) and the [core openstack](http://docs.openstack.org) documentation.
**Define a cinder control node**
```puppet
class { 'cinder':
database_connection => 'mysql://cinder:secret_block_password@openstack-controller.example.com/cinder',
rabbit_password => 'secret_rpc_password_for_blocks',
rabbit_host => 'openstack-controller.example.com',
verbose => true,
}
class { 'cinder::api':
keystone_password => $keystone_password,
keystone_enabled => $keystone_enabled,
keystone_user => $keystone_user,
keystone_auth_host => $keystone_auth_host,
keystone_auth_port => $keystone_auth_port,
keystone_auth_protocol => $keystone_auth_protocol,
service_port => $keystone_service_port,
package_ensure => $cinder_api_package_ensure,
bind_host => $cinder_bind_host,
enabled => $cinder_api_enabled,
}
class { 'cinder::scheduler':
scheduler_driver => 'cinder.scheduler.simple.SimpleScheduler',
}
```
**Define a cinder storage node**
```puppet
class { 'cinder':
database_connection => 'mysql://cinder:secret_block_password@openstack-controller.example.com/cinder',
rabbit_password => 'secret_rpc_password_for_blocks',
rabbit_host => 'openstack-controller.example.com',
verbose => true,
}
class { 'cinder::volume': }
class { 'cinder::volume::iscsi':
iscsi_ip_address => '10.0.0.2',
}
```
**Define a cinder storage node with multiple backends**
```puppet
class { 'cinder':
database_connection => 'mysql://cinder:secret_block_password@openstack-controller.example.com/cinder',
rabbit_password => 'secret_rpc_password_for_blocks',
rabbit_host => 'openstack-controller.example.com',
verbose => true,
}
class { 'cinder::volume': }
cinder::backend::iscsi {'iscsi1':
iscsi_ip_address => '10.0.0.2',
}
cinder::backend::iscsi {'iscsi2':
iscsi_ip_address => '10.0.0.3',
}
cinder::backend::iscsi {'iscsi3':
iscsi_ip_address => '10.0.0.4',
volume_backend_name => 'iscsi',
}
cinder::backend::iscsi {'iscsi4':
iscsi_ip_address => '10.0.0.5',
volume_backend_name => 'iscsi',
}
cinder::backend::rbd {'rbd-images':
rbd_pool => 'images',
rbd_user => 'images',
}
# Cinder::Type requires keystone credentials
Cinder::Type {
os_password => 'admin',
os_tenant_name => 'admin',
os_username => 'admin',
os_auth_url => 'http://127.0.0.1:5000/v2.0/',
}
cinder::type {'iscsi':
set_key => 'volume_backend_name',
set_value => ['iscsi1', 'iscsi2', 'iscsi']
}
cinder::type {'rbd':
set_key => 'volume_backend_name',
set_value => 'rbd-images',
}
class { 'cinder::backends':
enabled_backends => ['iscsi1', 'iscsi2', 'rbd-images']
}
```
Note that the name passed to any backend resource must be unique across all backends, otherwise a duplicate resource will be defined.
**Using type and type_set**
Cinder allows the use of volume types to set extended information that can be used for various purposes. We provide resources for ``type`` and ``type_set``. Since types are rarely defined without also setting attributes on them, the resource for ``type`` can also call ``type_set`` if you pass ``set_key`` and ``set_value``.
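For example, a minimal sketch (the type name `gold` and the backend value are illustrative, and keystone credentials are assumed to come from the `Cinder::Type` resource defaults shown above) that creates a volume type and binds it to a backend in one declaration:
```puppet
cinder::type { 'gold':
  set_key   => 'volume_backend_name',
  set_value => 'rbd-images',
}
```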
Implementation
--------------
### cinder
cinder is a combination of Puppet manifests and Ruby code that delivers configuration and extra functionality through types and providers.
Limitations
------------
* Setup of storage nodes is limited to Linux and LVM, i.e. Puppet won't configure a Nexenta appliance, but cinder can be configured to use the Nexenta driver with Class['cinder::volume::nexenta'].
* The Cinder Openstack service depends on a sqlalchemy database. If you are using puppetlabs-mysql to achieve this, there is a parameter called mysql_module that can be used to swap between the two supported versions: 0.9 and 2.2. This is needed because the puppetlabs-mysql module was rewritten and the custom type names have changed between versions.
Development
-----------
Developer documentation for the entire puppet-openstack project.
* https://wiki.openstack.org/wiki/Puppet-openstack#Developer_documentation
Contributors
------------
* https://github.com/stackforge/puppet-cinder/graphs/contributors
Release Notes
-------------
**4.0.0**
* Stable Icehouse release.
* Updated NetApp unified driver config options.
* Updated support for latest RabbitMQ module.
* Added Glance support.
* Added GlusterFS driver support.
* Added region support.
* Added support for MySQL module (>= 2.2).
* Added support for Swift and Ceph backup backend.
* Added cinder::config to handle additional custom options.
* Refactored duplicate code for single and multiple backends.
* Removed control exchange flag.
* Removed deprecated cinder::base class.
**3.1.1**
* Fixed resource duplication bug.
**3.1.0**
* Added default_volume_type as a Cinder API parameter.
* Added parameter for endpoint protocols.
* Deprecated glance_api_version.
* Added support for VMDK.
* Added support for Cinder multi backend.
* Added support for https authentication endpoints.
* Replaced pip with native package manager (VMDK).
**3.0.0**
* Major release for OpenStack Havana.
* Added support for SolidFire.
* Added support for ceilometer.
* Fixed bug for cinder-volume requirement.
**2.2.0**
* Added support for rate limiting via api-paste.ini
* Added support to configure control_exchange.
* Added parameter check to enable or disable db_sync.
* Added syslog support.
* Added default auth_uri setting for auth token.
* Set package defaults to present.
* Fixed a bug to create empty init script when necessary.
* Various lint fixes.
**2.1.0**
* Added configuration of Cinder quotas.
* Added support for NetApp direct driver backend.
* Added support for ceph backend.
* Added support for SQL idle timeout.
* Added support for RabbitMQ clustering with single IP.
* Fixed allowed_hosts/database connection bug.
* Fixed lvm2 setup failure for Ubuntu.
* Removed unnecessary mysql::server dependency.
* Pinned RabbitMQ and database module versions.
* Various lint and bug fixes.
**2.0.0**
* Upstream is now part of stackforge.
* Nexenta, NFS, and SAN support added as cinder volume drivers.
* Postgres support added.
* The Apache Qpid and the RabbitMQ message brokers available as RPC backends.
* Configurability of scheduler_driver.
* Various cleanups and bug fixes.

6
Rakefile Normal file

@@ -0,0 +1,6 @@
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'
PuppetLint.configuration.fail_on_warnings = true
PuppetLint.configuration.send('disable_80chars')
PuppetLint.configuration.send('disable_class_parameter_defaults')


@@ -0,0 +1,40 @@
# Example: managing cinder controller services with pacemaker
#
# By setting enabled to false, these services will not be started at boot. By setting
# manage_service to false, puppet will not kill these services on every run. This
# allows the Pacemaker resource manager to dynamically determine on which node each
# service should run.
#
# The puppet commands below would ideally be applied to at least three nodes.
#
# Note that cinder-api is associated with the virtual IP address as
# it is called from external services. The remaining services connect to the
# database and/or message broker independently.
#
# Example pacemaker resource configuration commands (configured once per cluster):
#
# sudo pcs resource create cinder_vip ocf:heartbeat:IPaddr2 params ip=192.0.2.3 \
# cidr_netmask=24 op monitor interval=10s
#
# sudo pcs resource create cinder_api_service lsb:openstack-cinder-api
# sudo pcs resource create cinder_scheduler_service lsb:openstack-cinder-scheduler
#
# sudo pcs constraint colocation add cinder_api_service with cinder_vip
class { 'cinder':
database_connection => 'mysql://cinder:secret_block_password@openstack-controller.example.com/cinder',
}
class { 'cinder::api':
keystone_password => 'CINDER_PW',
keystone_user => 'cinder',
enabled => false,
manage_service => false,
}
class { 'cinder::scheduler':
scheduler_driver => 'cinder.scheduler.simple.SimpleScheduler',
enabled => false,
manage_service => false,
}


@@ -0,0 +1,27 @@
Puppet::Type.type(:cinder_api_paste_ini).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def self.file_path
'/etc/cinder/api-paste.ini'
end
# added for backwards compatibility with older versions of inifile
def file_path
self.class.file_path
end
end


@@ -0,0 +1,27 @@
Puppet::Type.type(:cinder_config).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def self.file_path
'/etc/cinder/cinder.conf'
end
# added for backwards compatibility with older versions of inifile
def file_path
self.class.file_path
end
end


@@ -0,0 +1,42 @@
Puppet::Type.newtype(:cinder_api_paste_ini) do
ensurable
newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from /etc/cinder/api-paste.ini'
newvalues(/\S+\/\S+/)
end
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
def is_to_s( currentvalue )
if resource.secret?
return '[old secret redacted]'
else
return currentvalue
end
end
def should_to_s( newvalue )
if resource.secret?
return '[new secret redacted]'
else
return newvalue
end
end
end
newparam(:secret, :boolean => true) do
desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
newvalues(:true, :false)
defaultto false
end
end
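As a usage sketch, a `cinder_api_paste_ini` resource manages one `section/setting` pair in /etc/cinder/api-paste.ini; the setting below mirrors what the cinder::api manifest in this commit configures, and the URI is a placeholder:
```puppet
cinder_api_paste_ini { 'filter:authtoken/auth_uri':
  value => 'http://keystone.example.com:5000/',
}
```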


@@ -0,0 +1,42 @@
Puppet::Type.newtype(:cinder_config) do
ensurable
newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from /etc/cinder/cinder.conf'
newvalues(/\S+\/\S+/)
end
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
def is_to_s( currentvalue )
if resource.secret?
return '[old secret redacted]'
else
return currentvalue
end
end
def should_to_s( newvalue )
if resource.secret?
return '[new secret redacted]'
else
return newvalue
end
end
end
newparam(:secret, :boolean => true) do
desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
newvalues(:true, :false)
defaultto false
end
end
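For illustration, each `cinder_config` resource manages a single `section/setting` pair in /etc/cinder/cinder.conf, and `secret => true` keeps the value out of Puppet logs; the values below are placeholders:
```puppet
cinder_config { 'DEFAULT/osapi_volume_listen':
  value => '0.0.0.0',
}
cinder_config { 'DEFAULT/rabbit_password':
  value  => 'an_example_password',
  secret => true,
}
```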


@@ -1,27 +1,208 @@
# == Class: cinder::api
#
# Setup and configure the cinder API endpoint
#
# === Parameters
#
# [*keystone_password*]
# The password to use for authentication (keystone)
#
# [*keystone_enabled*]
# (optional) Use keystone for authentication
# Defaults to true
#
# [*keystone_tenant*]
# (optional) The tenant of the auth user
# Defaults to services
#
# [*keystone_user*]
# (optional) The name of the auth user
# Defaults to cinder
#
# [*keystone_auth_host*]
# (optional) The keystone host
# Defaults to localhost
#
# [*keystone_auth_port*]
# (optional) The keystone auth port
# Defaults to 35357
#
# [*keystone_auth_protocol*]
# (optional) The protocol used to access the auth host
# Defaults to http.
#
# [*os_region_name*]
# (optional) Some operations require cinder to make API requests
# to Nova. This sets the keystone region to be used for these
# requests. For example, boot-from-volume.
# Defaults to undef.
#
# [*keystone_auth_admin_prefix*]
# (optional) The admin_prefix used for the admin endpoint of the auth host.
# This allows admin auth URIs like http://auth_host:35357/keystone.
# (where '/keystone' is the admin prefix)
# Defaults to false for empty. If defined, should be a string with a
# leading '/' and no trailing '/'.
#
# [*service_port*]
# (optional) The cinder api port
# Defaults to 5000
#
# [*package_ensure*]
# (optional) The state of the package
# Defaults to present
#
# [*bind_host*]
# (optional) The cinder api bind address
# Defaults to 0.0.0.0
#
# [*enabled*]
# (optional) The state of the service
# Defaults to true
#
# [*manage_service*]
# (optional) Whether to start/stop the service
# Defaults to true
#
# [*ratelimits*]
# (optional) Rate limits for the API, applied via the api-paste ratelimit filter.
# Defaults to undef. If undefined the default ratelimiting values are used.
#
# [*ratelimits_factory*]
# (optional) Factory to use for ratelimiting
# Defaults to 'cinder.api.v1.limits:RateLimitingMiddleware.factory'
#
# [*default_volume_type*]
# (optional) default volume type to use.
# This should contain the name of the default volume type to use.
# If not configured, it produces an error when creating a volume
# without specifying a type.
# Defaults to 'false'.
class cinder::api (
$keystone_password,
$keystone_enabled = true,
$keystone_tenant = 'services',
$keystone_user = 'cinder',
$keystone_auth_host = 'localhost',
$keystone_auth_port = '35357',
$keystone_auth_protocol = 'http',
$keystone_auth_admin_prefix = false,
$keystone_auth_uri = false,
$os_region_name = undef,
$service_port = '5000',
$package_ensure = 'present',
$bind_host = '0.0.0.0',
$enabled = true,
$manage_service = true,
$ratelimits = undef,
$default_volume_type = false,
$ratelimits_factory =
'cinder.api.v1.limits:RateLimitingMiddleware.factory'
) {
include cinder::params
Cinder_config<||> ~> Service['cinder-api']
Cinder_api_paste_ini<||> ~> Service['cinder-api']
if $::cinder::params::api_package {
Package['cinder-api'] -> Cinder_config<||>
Package['cinder-api'] -> Cinder_api_paste_ini<||>
Package['cinder-api'] -> Service['cinder-api']
package { 'cinder-api':
ensure => $package_ensure,
name => $::cinder::params::api_package,
}
}
if $enabled {
Cinder_config<||> ~> Exec['cinder-manage db_sync']
exec { 'cinder-manage db_sync':
command => $::cinder::params::db_sync_command,
path => '/usr/bin',
user => 'cinder',
refreshonly => true,
logoutput => 'on_failure',
require => Package['cinder'],
}
if $manage_service {
$ensure = 'running'
}
} else {
if $manage_service {
$ensure = 'stopped'
}
}
service { 'cinder-api':
ensure => $ensure,
name => $::cinder::params::api_service,
enable => $enabled,
hasstatus => true,
require => Package['cinder'],
}
cinder_config {
'DEFAULT/osapi_volume_listen': value => $bind_host
}
if $os_region_name {
cinder_config {
'DEFAULT/os_region_name': value => $os_region_name;
}
}
if $keystone_auth_uri {
cinder_api_paste_ini { 'filter:authtoken/auth_uri': value => $keystone_auth_uri; }
} else {
cinder_api_paste_ini { 'filter:authtoken/auth_uri': value => "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/"; }
}
if $keystone_enabled {
cinder_config {
'DEFAULT/auth_strategy': value => 'keystone' ;
}
cinder_api_paste_ini {
'filter:authtoken/service_protocol': value => $keystone_auth_protocol;
'filter:authtoken/service_host': value => $keystone_auth_host;
'filter:authtoken/service_port': value => $service_port;
'filter:authtoken/auth_protocol': value => $keystone_auth_protocol;
'filter:authtoken/auth_host': value => $keystone_auth_host;
'filter:authtoken/auth_port': value => $keystone_auth_port;
'filter:authtoken/admin_tenant_name': value => $keystone_tenant;
'filter:authtoken/admin_user': value => $keystone_user;
'filter:authtoken/admin_password': value => $keystone_password, secret => true;
}
if ($ratelimits != undef) {
cinder_api_paste_ini {
'filter:ratelimit/paste.filter_factory': value => $ratelimits_factory;
'filter:ratelimit/limits': value => $ratelimits;
}
}
if $keystone_auth_admin_prefix {
validate_re($keystone_auth_admin_prefix, '^(/.+[^/])?$')
cinder_api_paste_ini {
'filter:authtoken/auth_admin_prefix': value => $keystone_auth_admin_prefix;
}
} else {
cinder_api_paste_ini {
'filter:authtoken/auth_admin_prefix': ensure => absent;
}
}
}
if $default_volume_type {
cinder_config {
'DEFAULT/default_volume_type': value => $default_volume_type;
}
} else {
cinder_config {
'DEFAULT/default_volume_type': ensure => absent;
}
}
}

86
manifests/backend/eqlx.pp Normal file

@@ -0,0 +1,86 @@
# == define: cinder::backend::eqlx
#
# Configure the Dell EqualLogic driver for cinder.
#
# === Parameters
#
# [*san_ip*]
# (required) The IP address of the Dell EqualLogic array.
#
# [*san_login*]
# (required) The account to use for issuing SSH commands.
#
# [*san_password*]
# (required) The password for the specified SSH account.
#
# [*san_thin_provision*]
# (optional) Whether or not to use thin provisioning for volumes.
# Defaults to false
#
# [*volume_backend_name*]
# (optional) The backend name.
# Defaults to the name of the resource
#
# [*eqlx_group_name*]
# (optional) The CLI prompt message without '>'.
# Defaults to 'group-0'
#
# [*eqlx_pool*]
# (optional) The pool in which volumes will be created.
# Defaults to 'default'
#
# [*eqlx_use_chap*]
# (optional) Use CHAP authentication for targets?
# Defaults to false
#
# [*eqlx_chap_login*]
# (optional) An existing CHAP account name.
# Defaults to 'chapadmin'
#
# [*eqlx_chap_password*]
# (optional) The password for the specified CHAP account name.
# Defaults to '12345'
#
# [*eqlx_cli_timeout*]
# (optional) The timeout for the Group Manager cli command execution.
# Defaults to 30 seconds
#
# [*eqlx_cli_max_retries*]
# (optional) The maximum retry count for reconnection.
# Defaults to 5
#
define cinder::backend::eqlx (
$san_ip,
$san_login,
$san_password,
$san_thin_provision = false,
$volume_backend_name = $name,
$eqlx_group_name = 'group-0',
$eqlx_pool = 'default',
$eqlx_use_chap = false,
$eqlx_chap_login = 'chapadmin',
$eqlx_chap_password = '12345',
$eqlx_cli_timeout = 30,
$eqlx_cli_max_retries = 5,
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => 'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver';
"${name}/san_ip": value => $san_ip;
"${name}/san_login": value => $san_login;
"${name}/san_password": value => $san_password;
"${name}/san_thin_provision": value => $san_thin_provision;
"${name}/eqlx_group_name": value => $eqlx_group_name;
"${name}/eqlx_use_chap": value => $eqlx_use_chap;
"${name}/eqlx_cli_timeout": value => $eqlx_cli_timeout;
"${name}/eqlx_cli_max_retries": value => $eqlx_cli_max_retries;
"${name}/eqlx_pool": value => $eqlx_pool;
}
if(str2bool($eqlx_use_chap)) {
cinder_config {
"${name}/eqlx_chap_login": value => $eqlx_chap_login;
"${name}/eqlx_chap_password": value => $eqlx_chap_password;
}
}
}
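A minimal usage sketch for this backend (the address, credentials, group, and pool names are placeholders):
```puppet
cinder::backend::eqlx { 'eqlx-1':
  san_ip             => '192.0.2.10',
  san_login          => 'grpadmin',
  san_password       => 'an_example_password',
  eqlx_group_name    => 'group-0',
  eqlx_pool          => 'apool',
  eqlx_use_chap      => true,
  eqlx_chap_login    => 'chapadmin',
  eqlx_chap_password => 'an_example_chap_password',
}
```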


@@ -0,0 +1,66 @@
#
# == Class: cinder::backend::glusterfs
#
# Configures Cinder to use GlusterFS as a volume driver
#
# === Parameters
#
# [*glusterfs_shares*]
# (required) An array of GlusterFS volume locations.
# Must be an array even if there is only one volume.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*glusterfs_disk_util*]
# Removed in Icehouse.
#
# [*glusterfs_sparsed_volumes*]
# (optional) Whether or not to use sparse (thin) volumes.
# Defaults to undef which uses the driver's default of "true".
#
# [*glusterfs_mount_point_base*]
# (optional) Where to mount the Gluster volumes.
# Defaults to undef which uses the driver's default of "$state_path/mnt".
#
# [*glusterfs_shares_config*]
# (optional) The config file to store the given $glusterfs_shares.
# Defaults to '/etc/cinder/shares.conf'
#
# === Examples
#
# cinder::backend::glusterfs { 'myGluster':
# glusterfs_shares => ['192.168.1.1:/volumes'],
# }
#
define cinder::backend::glusterfs (
$glusterfs_shares,
$volume_backend_name = $name,
$glusterfs_disk_util = false,
$glusterfs_sparsed_volumes = undef,
$glusterfs_mount_point_base = undef,
$glusterfs_shares_config = '/etc/cinder/shares.conf'
) {
if $glusterfs_disk_util {
fail('glusterfs_disk_util is removed in Icehouse.')
}
$content = join($glusterfs_shares, "\n")
file { $glusterfs_shares_config:
content => "${content}\n",
require => Package['cinder'],
notify => Service['cinder-volume']
}
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value =>
'cinder.volume.drivers.glusterfs.GlusterfsDriver';
"${name}/glusterfs_shares_config": value => $glusterfs_shares_config;
"${name}/glusterfs_sparsed_volumes": value => $glusterfs_sparsed_volumes;
"${name}/glusterfs_mount_point_base": value => $glusterfs_mount_point_base;
}
}


@@ -0,0 +1,63 @@
#
# Define: cinder::backend::iscsi
# Parameters:
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
#
define cinder::backend::iscsi (
$iscsi_ip_address,
$volume_backend_name = $name,
$volume_group = 'cinder-volumes',
$iscsi_helper = $::cinder::params::iscsi_helper,
) {
include cinder::params
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/iscsi_ip_address": value => $iscsi_ip_address;
"${name}/iscsi_helper": value => $iscsi_helper;
"${name}/volume_group": value => $volume_group;
}
case $iscsi_helper {
'tgtadm': {
package { 'tgt':
ensure => present,
name => $::cinder::params::tgt_package_name,
}
if($::osfamily == 'RedHat') {
file_line { 'cinder include':
path => '/etc/tgt/targets.conf',
line => 'include /etc/cinder/volumes/*',
match => '#?include /',
require => Package['tgt'],
notify => Service['tgtd'],
}
}
service { 'tgtd':
ensure => running,
name => $::cinder::params::tgt_service_name,
enable => true,
require => Class['cinder::volume'],
}
}
'lioadm': {
package { 'targetcli':
ensure => present,
name => $::cinder::params::lio_package_name,
}
}
default: {
fail("Unsupported iscsi helper: ${iscsi_helper}.")
}
}
}

201
manifests/backend/netapp.pp Normal file

@@ -0,0 +1,201 @@
# == define: cinder::backend::netapp
#
# Configures Cinder to use the NetApp unified volume driver
# Compatible for multiple backends
#
# === Parameters
#
# [*netapp_login*]
# (required) Administrative user account name used to access the storage
# system or proxy server.
#
# [*netapp_password*]
# (required) Password for the administrative user account specified in the
# netapp_login parameter.
#
# [*netapp_server_hostname*]
# (required) The hostname (or IP address) for the storage system or proxy
# server.
#
# [*netapp_server_port*]
# (optional) The TCP port to use for communication with ONTAPI on the
# storage system. Traditionally, port 80 is used for HTTP and port 443 is
# used for HTTPS; however, this value should be changed if an alternate
# port has been configured on the storage system or proxy server.
# Defaults to 80
#
# [*netapp_size_multiplier*]
# (optional) The quantity to be multiplied by the requested volume size to
# ensure enough space is available on the virtual storage server (Vserver) to
# fulfill the volume creation request.
# Defaults to 1.2
#
# [*netapp_storage_family*]
# (optional) The storage family type used on the storage system; valid values
# are ontap_7mode for using Data ONTAP operating in 7-Mode or ontap_cluster
# for using clustered Data ONTAP, or eseries for NetApp E-Series.
# Defaults to ontap_cluster
#
# [*netapp_storage_protocol*]
# (optional) The storage protocol to be used on the data path with the storage
# system; valid values are iscsi or nfs.
# Defaults to nfs
#
# [*netapp_transport_type*]
# (optional) The transport protocol used when communicating with ONTAPI on the
# storage system or proxy server. Valid values are http or https.
# Defaults to http
#
# [*netapp_vfiler*]
# (optional) The vFiler unit on which provisioning of block storage volumes
# will be done. This parameter is only used by the driver when connecting to
# an instance with a storage family of Data ONTAP operating in 7-Mode and the
# storage protocol selected is iSCSI. Only use this parameter when utilizing
# the MultiStore feature on the NetApp storage system.
# Defaults to ''
#
# [*netapp_volume_list*]
# (optional) This parameter is only utilized when the storage protocol is
# configured to use iSCSI. This parameter is used to restrict provisioning to
# the specified controller volumes. Specify the value of this parameter to be
# a comma separated list of NetApp controller volume names to be used for
# provisioning.
# Defaults to ''
#
# [*netapp_vserver*]
# (optional) This parameter specifies the virtual storage server (Vserver)
# name on the storage cluster on which provisioning of block storage volumes
# should occur. If using the NFS storage protocol, this parameter is mandatory
# for storage service catalog support (utilized by Cinder volume type
# extra_specs support). If this parameter is specified, the exports belonging
# to the Vserver will only be used for provisioning in the future. Block
# storage volumes on exports not belonging to the Vserver specified by
# this parameter will continue to function normally.
# Defaults to ''
#
# [*expiry_thres_minutes*]
# (optional) This parameter specifies the threshold for last access time for
# images in the NFS image cache. When a cache cleaning cycle begins, images
# in the cache that have not been accessed in the last M minutes, where M is
# the value of this parameter, will be deleted from the cache to create free
# space on the NFS share.
# Defaults to 720
#
# [*thres_avl_size_perc_start*]
# (optional) If the percentage of available space for an NFS share has
# dropped below the value specified by this parameter, the NFS image cache
# will be cleaned.
# Defaults to 20
#
# [*thres_avl_size_perc_stop*]
# (optional) When the percentage of available space on an NFS share has
# reached the percentage specified by this parameter, the driver will stop
# clearing files from the NFS image cache that have not been accessed in the
# last M minutes, where M is the value of the expiry_thres_minutes parameter.
# Defaults to 60
#
# [*nfs_shares_config*]
# (optional) File with the list of available NFS shares
# Defaults to ''
#
# [*netapp_copyoffload_tool_path*]
# (optional) This option specifies the path of the NetApp Copy Offload tool
# binary. Ensure that the binary has execute permissions set which allow the
# effective user of the cinder-volume process to execute the file.
# Defaults to ''
#
# [*netapp_controller_ips*]
# (optional) This option is only utilized when the storage family is
# configured to eseries. This option is used to restrict provisioning to the
# specified controllers. Specify the value of this option to be a comma
# separated list of controller hostnames or IP addresses to be used for
# provisioning.
# Defaults to ''
#
# [*netapp_sa_password*]
# (optional) Password for the NetApp E-Series storage array.
# Defaults to ''
#
# [*netapp_storage_pools*]
# (optional) This option is used to restrict provisioning to the specified
# storage pools. Only dynamic disk pools are currently supported. Specify the
# value of this option to be a comma separated list of disk pool names to be
# used for provisioning.
# Defaults to ''
#
# [*netapp_webservice_path*]
# (optional) This option is used to specify the path to the E-Series proxy
# application on a proxy server. The value is combined with the value of the
# netapp_transport_type, netapp_server_hostname, and netapp_server_port
# options to create the URL used by the driver to connect to the proxy
# application.
# Defaults to '/devmgr/v2'
#
# === Examples
#
# cinder::backend::netapp { 'myBackend':
# netapp_login => 'clusterAdmin',
# netapp_password => 'password',
# netapp_server_hostname => 'netapp.mycorp.com',
# netapp_server_port => '443',
# netapp_transport_type => 'https',
# netapp_vserver => 'openstack-vserver',
# }
#
# === Authors
#
# Bob Callaway <bob.callaway@netapp.com>
#
# === Copyright
#
# Copyright 2014 NetApp, Inc.
#
define cinder::backend::netapp (
$netapp_login,
$netapp_password,
$netapp_server_hostname,
$volume_backend_name = $name,
$netapp_server_port = '80',
$netapp_size_multiplier = '1.2',
$netapp_storage_family = 'ontap_cluster',
$netapp_storage_protocol = 'nfs',
$netapp_transport_type = 'http',
$netapp_vfiler = '',
$netapp_volume_list = '',
$netapp_vserver = '',
$expiry_thres_minutes = '720',
$thres_avl_size_perc_start = '20',
$thres_avl_size_perc_stop = '60',
$nfs_shares_config = '',
$netapp_copyoffload_tool_path = '',
$netapp_controller_ips = '',
$netapp_sa_password = '',
$netapp_storage_pools = '',
$netapp_webservice_path = '/devmgr/v2',
) {
cinder_config {
"${volume_backend_name}/volume_backend_name": value => $volume_backend_name;
"${volume_backend_name}/volume_driver": value => 'cinder.volume.drivers.netapp.common.NetAppDriver';
"${volume_backend_name}/netapp_login": value => $netapp_login;
"${volume_backend_name}/netapp_password": value => $netapp_password, secret => true;
"${volume_backend_name}/netapp_server_hostname": value => $netapp_server_hostname;
"${volume_backend_name}/netapp_server_port": value => $netapp_server_port;
"${volume_backend_name}/netapp_size_multiplier": value => $netapp_size_multiplier;
"${volume_backend_name}/netapp_storage_family": value => $netapp_storage_family;
"${volume_backend_name}/netapp_storage_protocol": value => $netapp_storage_protocol;
"${volume_backend_name}/netapp_transport_type": value => $netapp_transport_type;
"${volume_backend_name}/netapp_vfiler": value => $netapp_vfiler;
"${volume_backend_name}/netapp_volume_list": value => $netapp_volume_list;
"${volume_backend_name}/netapp_vserver": value => $netapp_vserver;
"${volume_backend_name}/expiry_thres_minutes": value => $expiry_thres_minutes;
"${volume_backend_name}/thres_avl_size_perc_start": value => $thres_avl_size_perc_start;
"${volume_backend_name}/thres_avl_size_perc_stop": value => $thres_avl_size_perc_stop;
"${volume_backend_name}/nfs_shares_config": value => $nfs_shares_config;
"${volume_backend_name}/netapp_copyoffload_tool_path": value => $netapp_copyoffload_tool_path;
"${volume_backend_name}/netapp_controller_ips": value => $netapp_controller_ips;
"${volume_backend_name}/netapp_sa_password": value => $netapp_sa_password;
"${volume_backend_name}/netapp_storage_pools": value => $netapp_storage_pools;
"${volume_backend_name}/netapp_webservice_path": value => $netapp_webservice_path;
}
}


@@ -0,0 +1,59 @@
# == Class: cinder::backend::nexenta
#
# Sets up Cinder with the Nexenta volume driver.
#
# === Parameters
#
# [*nexenta_user*]
# (required) User name to connect to Nexenta SA.
#
# [*nexenta_password*]
# (required) Password to connect to Nexenta SA.
#
# [*nexenta_host*]
# (required) IP address of Nexenta SA.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*nexenta_volume*]
# (optional) Pool on SA that will hold all volumes. Defaults to 'cinder'.
#
# [*nexenta_target_prefix*]
# (optional) IQN prefix for iSCSI targets. Defaults to 'iqn:'.
#
# [*nexenta_target_group_prefix*]
# (optional) Prefix for iSCSI target groups on SA. Defaults to 'cinder/'.
#
# [*nexenta_blocksize*]
# (optional) Block size for volumes. Defaults to '8k'.
#
# [*nexenta_sparse*]
# (optional) Flag to create sparse volumes. Defaults to true.
#
define cinder::backend::nexenta (
$nexenta_user,
$nexenta_password,
$nexenta_host,
$volume_backend_name = $name,
$nexenta_volume = 'cinder',
$nexenta_target_prefix = 'iqn:',
$nexenta_target_group_prefix = 'cinder/',
$nexenta_blocksize = '8k',
$nexenta_sparse = true
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/nexenta_user": value => $nexenta_user;
"${name}/nexenta_password": value => $nexenta_password;
"${name}/nexenta_host": value => $nexenta_host;
"${name}/nexenta_volume": value => $nexenta_volume;
"${name}/nexenta_target_prefix": value => $nexenta_target_prefix;
"${name}/nexenta_target_group_prefix": value => $nexenta_target_group_prefix;
"${name}/nexenta_blocksize": value => $nexenta_blocksize;
"${name}/nexenta_sparse": value => $nexenta_sparse;
"${name}/volume_driver": value => 'cinder.volume.drivers.nexenta.volume.NexentaDriver';
}
}
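A minimal usage sketch (host and credentials are placeholders); the resource title becomes the backend section name and, by default, the volume_backend_name:
```puppet
cinder::backend::nexenta { 'nexenta-1':
  nexenta_user     => 'admin',
  nexenta_password => 'an_example_password',
  nexenta_host     => '192.0.2.20',
}
```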

39
manifests/backend/nfs.pp Normal file

@@ -0,0 +1,39 @@
# == define: cinder::backend::nfs
#
# === Parameters
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
#
define cinder::backend::nfs (
$volume_backend_name = $name,
$nfs_servers = [],
$nfs_mount_options = undef,
$nfs_disk_util = undef,
$nfs_sparsed_volumes = undef,
$nfs_mount_point_base = undef,
$nfs_shares_config = '/etc/cinder/shares.conf',
$nfs_used_ratio = '0.95',
$nfs_oversub_ratio = '1.0',
) {
file {$nfs_shares_config:
content => join($nfs_servers, "\n"),
require => Package['cinder'],
notify => Service['cinder-volume']
}
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value =>
'cinder.volume.drivers.nfs.NfsDriver';
"${name}/nfs_shares_config": value => $nfs_shares_config;
"${name}/nfs_mount_options": value => $nfs_mount_options;
"${name}/nfs_disk_util": value => $nfs_disk_util;
"${name}/nfs_sparsed_volumes": value => $nfs_sparsed_volumes;
"${name}/nfs_mount_point_base": value => $nfs_mount_point_base;
"${name}/nfs_used_ratio": value => $nfs_used_ratio;
"${name}/nfs_oversub_ratio": value => $nfs_oversub_ratio;
}
}
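A minimal usage sketch (the share locations and mount options are placeholders); the listed servers are written to the shares config file declared above:
```puppet
cinder::backend::nfs { 'nfs-1':
  nfs_servers       => ['198.51.100.1:/srv/cinder', '198.51.100.2:/srv/cinder'],
  nfs_mount_options => 'vers=3',
}
```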

109
manifests/backend/rbd.pp Normal file

@@ -0,0 +1,109 @@
# == define: cinder::backend::rbd
#
# Setup Cinder to use the RBD driver.
# Compatible for multiple backends
#
# === Parameters
#
# [*rbd_pool*]
# (required) Specifies the pool name for the block device driver.
#
# [*rbd_user*]
# (required) A required parameter to configure OS init scripts and cephx.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*rbd_ceph_conf*]
# (optional) Path to the ceph configuration file to use
# Defaults to '/etc/ceph/ceph.conf'
#
# [*rbd_flatten_volume_from_snapshot*]
# (optional) Enable flatten volumes created from snapshots.
# Defaults to false
#
# [*rbd_secret_uuid*]
# (optional) A required parameter to use cephx.
# Defaults to false
#
# [*volume_tmp_dir*]
# (optional) Location to store temporary image files if the volume
# driver does not write them directly to the volume
# Defaults to false
#
# [*rbd_max_clone_depth*]
# (optional) Maximum number of nested clones that can be taken of a
# volume before enforcing a flatten prior to next clone.
# A value of zero disables cloning
# Defaults to '5'
#
# [*glance_api_version*]
# (optional) DEPRECATED: Use cinder::glance Class instead.
# Glance API version. (Defaults to '2')
# Setting this parameter causes a duplicate resource declaration
# with cinder::glance
#
define cinder::backend::rbd (
$rbd_pool,
$rbd_user,
$volume_backend_name = $name,
$rbd_ceph_conf = '/etc/ceph/ceph.conf',
$rbd_flatten_volume_from_snapshot = false,
$rbd_secret_uuid = false,
$volume_tmp_dir = false,
$rbd_max_clone_depth = '5',
# DEPRECATED PARAMETERS
$glance_api_version = undef,
) {
include cinder::params
if $glance_api_version {
warning('The glance_api_version parameter is deprecated, use glance_api_version of cinder::glance class instead.')
}
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => 'cinder.volume.drivers.rbd.RBDDriver';
"${name}/rbd_ceph_conf": value => $rbd_ceph_conf;
"${name}/rbd_user": value => $rbd_user;
"${name}/rbd_pool": value => $rbd_pool;
"${name}/rbd_max_clone_depth": value => $rbd_max_clone_depth;
"${name}/rbd_flatten_volume_from_snapshot": value => $rbd_flatten_volume_from_snapshot;
}
if $rbd_secret_uuid {
cinder_config {"${name}/rbd_secret_uuid": value => $rbd_secret_uuid;}
} else {
cinder_config {"${name}/rbd_secret_uuid": ensure => absent;}
}
if $volume_tmp_dir {
cinder_config {"${name}/volume_tmp_dir": value => $volume_tmp_dir;}
} else {
cinder_config {"${name}/volume_tmp_dir": ensure => absent;}
}
case $::osfamily {
'Debian': {
$override_line = "env CEPH_ARGS=\"--id ${rbd_user}\""
}
'RedHat': {
$override_line = "export CEPH_ARGS=\"--id ${rbd_user}\""
}
default: {
fail("unsuported osfamily ${::osfamily}, currently Debian and Redhat are the only supported platforms")
}
}
# Creates an empty file if it doesn't yet exist
ensure_resource('file', $::cinder::params::ceph_init_override, {'ensure' => 'present'})
ensure_resource('file_line', 'set initscript env', {
line => $override_line,
path => $::cinder::params::ceph_init_override,
notify => Service['cinder-volume']
})
}

80
manifests/backend/san.pp Normal file

@@ -0,0 +1,80 @@
# == Class: cinder::backend::san
#
# Configures Cinder volume SAN driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_driver*]
# (required) Setup cinder-volume to use volume driver.
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*san_thin_provision*]
# (optional) Use thin provisioning for SAN volumes? Defaults to true.
#
# [*san_ip*]
# (optional) IP address of SAN controller.
#
# [*san_login*]
# (optional) Username for SAN controller. Defaults to 'admin'.
#
# [*san_password*]
# (optional) Password for SAN controller.
#
# [*san_private_key*]
# (optional) Filename of private key to use for SSH authentication.
#
# [*san_clustername*]
# (optional) Cluster name to use for creating volumes.
#
# [*san_ssh_port*]
# (optional) SSH port to use with SAN. Defaults to 22.
#
# [*san_is_local*]
# (optional) Execute commands locally instead of over SSH;
# use if the volume service is running on the SAN device.
#
# [*ssh_conn_timeout*]
# (optional) SSH connection timeout in seconds. Defaults to 30.
#
# [*ssh_min_pool_conn*]
# (optional) Minimum ssh connections in the pool.
#
# [*ssh_max_pool_conn*]
# (optional) Maximum ssh connections in the pool.
#
define cinder::backend::san (
$volume_driver,
$volume_backend_name = $name,
$san_thin_provision = true,
$san_ip = undef,
$san_login = 'admin',
$san_password = undef,
$san_private_key = undef,
$san_clustername = undef,
$san_ssh_port = 22,
$san_is_local = false,
$ssh_conn_timeout = 30,
$ssh_min_pool_conn = 1,
$ssh_max_pool_conn = 5
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => $volume_driver;
"${name}/san_thin_provision": value => $san_thin_provision;
"${name}/san_ip": value => $san_ip;
"${name}/san_login": value => $san_login;
"${name}/san_password": value => $san_password;
"${name}/san_private_key": value => $san_private_key;
"${name}/san_clustername": value => $san_clustername;
"${name}/san_ssh_port": value => $san_ssh_port;
"${name}/san_is_local": value => $san_is_local;
"${name}/ssh_conn_timeout": value => $ssh_conn_timeout;
"${name}/ssh_min_pool_conn": value => $ssh_min_pool_conn;
"${name}/ssh_max_pool_conn": value => $ssh_max_pool_conn;
}
}
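A minimal usage sketch; the driver class path, address, and credentials below are placeholders and should be replaced with the values for your SAN driver:
```puppet
cinder::backend::san { 'san-1':
  volume_driver => 'cinder.volume.drivers.san.san.SanISCSIDriver',
  san_ip        => '192.0.2.30',
  san_login     => 'admin',
  san_password  => 'an_example_password',
}
```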


@@ -0,0 +1,64 @@
# == Class: cinder::backend::solidfire
#
# Configures Cinder volume SolidFire driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_backend_name*]
# (optional) Allows for the volume_backend_name to be separate from $name.
# Defaults to: $name
#
# [*volume_driver*]
# (optional) Setup cinder-volume to use SolidFire volume driver.
# Defaults to 'cinder.volume.drivers.solidfire.SolidFire'
#
# [*san_ip*]
# (required) IP address of SolidFire clusters MVIP.
#
# [*san_login*]
# (required) Username for SolidFire admin account.
#
# [*san_password*]
# (required) Password for SolidFire admin account.
#
# [*sf_emulate_512*]
# (optional) Use 512 byte emulation for volumes.
# Defaults to True
#
# [*sf_allow_tenant_qos*]
# (optional) Allow tenants to specify QoS via volume metadata.
# Defaults to False
#
# [*sf_account_prefix*]
# (optional) Prefix to use when creating tenant accounts on SolidFire Cluster.
# Defaults to None, so account name is simply the tenant-uuid
#
# [*sf_api_port*]
# (optional) Port ID to use to connect to SolidFire API.
# Defaults to 443
#
define cinder::backend::solidfire(
$san_ip,
$san_login,
$san_password,
$volume_backend_name = $name,
$volume_driver = 'cinder.volume.drivers.solidfire.SolidFire',
$sf_emulate_512 = true,
$sf_allow_tenant_qos = false,
$sf_account_prefix = '',
$sf_api_port = '443'
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => $volume_driver;
"${name}/san_ip": value => $san_ip;
"${name}/san_login": value => $san_login;
"${name}/san_password": value => $san_password;
"${name}/sf_emulate_512": value => $sf_emulate_512;
"${name}/sf_allow_tenant_qos": value => $sf_allow_tenant_qos;
"${name}/sf_account_prefix": value => $sf_account_prefix;
"${name}/sf_api_port": value => $sf_api_port;
}
}
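A minimal usage sketch (the MVIP address and credentials are placeholders):
```puppet
cinder::backend::solidfire { 'solidfire-1':
  san_ip       => '192.0.2.40',
  san_login    => 'clusteradmin',
  san_password => 'an_example_password',
}
```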

87
manifests/backend/vmdk.pp Normal file

@@ -0,0 +1,87 @@
# == define: cinder::backend::vmdk
#
# Configure the VMware VMDK driver for cinder.
#
# === Parameters
#
# [*host_ip*]
# The IP address of the VMware vCenter server.
#
# [*host_username*]
# The username for connection to VMware vCenter server.
#
# [*host_password*]
# The password for connection to VMware vCenter server.
#
# [*volume_backend_name*]
# Used to set the volume_backend_name in multiple backends.
# Defaults to $name as passed in the title.
#
# [*api_retry_count*]
# (optional) The number of times we retry on failures,
# e.g., socket error, etc.
# Defaults to 10.
#
# [*max_object_retrieval*]
# (optional) The maximum number of ObjectContent data objects that should
# be returned in a single result. A positive value will cause
# the operation to suspend the retrieval when the count of
# objects reaches the specified maximum. The server may still
# limit the count to something less than the configured value.
# Any remaining objects may be retrieved with additional requests.
# Defaults to 100.
#
# [*task_poll_interval*]
# (optional) The interval in seconds used for polling of remote tasks.
# Defaults to 5.
#
# [*image_transfer_timeout_secs*]
# (optional) The timeout in seconds for VMDK volume transfer between Cinder and Glance.
# Defaults to 7200.
#
# [*wsdl_location*]
# (optional) VIM Service WSDL Location e.g
# http://<server>/vimService.wsdl. Optional over-ride to
# default location for bug work-arounds.
# Defaults to None.
#
# [*volume_folder*]
# (optional) The name for the folder in the VC datacenter that will contain cinder volumes.
# Defaults to 'cinder-volumes'.
#
define cinder::backend::vmdk (
$host_ip,
$host_username,
$host_password,
$volume_backend_name = $name,
$volume_folder = 'cinder-volumes',
$api_retry_count = 10,
$max_object_retrieval = 100,
$task_poll_interval = 5,
$image_transfer_timeout_secs = 7200,
$wsdl_location = undef
) {
cinder_config {
"${name}/volume_backend_name": value => $volume_backend_name;
"${name}/volume_driver": value => 'cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver';
"${name}/vmware_host_ip": value => $host_ip;
"${name}/vmware_host_username": value => $host_username;
"${name}/vmware_host_password": value => $host_password;
"${name}/vmware_volume_folder": value => $volume_folder;
"${name}/vmware_api_retry_count": value => $api_retry_count;
"${name}/vmware_max_object_retrieval": value => $max_object_retrieval;
"${name}/vmware_task_poll_interval": value => $task_poll_interval;
"${name}/vmware_image_transfer_timeout_secs": value => $image_transfer_timeout_secs;
}
if $wsdl_location {
cinder_config {
"${name}/vmware_wsdl_location": value => $wsdl_location;
}
}
package { 'python-suds':
ensure => present
}
}
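
A minimal usage sketch; the vCenter address and credentials are illustrative placeholders:

cinder::backend::vmdk { 'vmware1':
  host_ip       => '172.16.0.20',
  host_username => 'administrator',
  host_password => 'vcenterpassword',
}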

28
manifests/backends.pp Normal file
View File

@@ -0,0 +1,28 @@
# == Class: cinder::backends
#
# Class to set the enabled_backends list
#
# === Parameters
#
# [*enabled_backends*]
# (required) a list of ini sections to enable.
#  This should contain names used in cinder::backend::* resources.
# Example: ['volume1', 'volume2', 'sata3']
#
# Author: Andrew Woodward <awoodward@mirantis.com>
class cinder::backends (
$enabled_backends = undef,
# DEPRECATED
$default_volume_type = false
){
# Maybe this could be extended to dynamically find the enabled names
cinder_config {
'DEFAULT/enabled_backends': value => join($enabled_backends, ',');
}
if $default_volume_type {
fail('The default_volume_type parameter is deprecated in this class, you should declare it in cinder::api.')
}
}
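
A minimal usage sketch; the section names are illustrative and must match the titles of cinder::backend::* resources declared elsewhere in the catalog:

class { 'cinder::backends':
  enabled_backends => ['iscsi1', 'solidfire'],
}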

84
manifests/backup.pp Normal file
View File

@@ -0,0 +1,84 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: cinder::backup
#
# Setup Cinder backup service
#
# === Parameters
#
# [*backup_topic*]
# (optional) The topic volume backup nodes listen on.
# Defaults to 'cinder-backup'
#
# [*backup_manager*]
# (optional) Full class name for the Manager for volume backup.
# Defaults to 'cinder.backup.manager.BackupManager'
#
# [*backup_api_class*]
# (optional) The full class name of the volume backup API class.
# Defaults to 'cinder.backup.api.API'
#
# [*backup_name_template*]
#   (optional) Template string to be used to generate backup names.
#   Defaults to 'backup-%s'
#
# [*enabled*]
#   (optional) Whether to run the cinder-backup service.
#   Defaults to true
#
# [*package_ensure*]
#   (optional) Ensure state for the cinder-backup package.
#   Defaults to 'present'
#
class cinder::backup (
$enabled = true,
$package_ensure = 'present',
$backup_topic = 'cinder-backup',
$backup_manager = 'cinder.backup.manager.BackupManager',
$backup_api_class = 'cinder.backup.api.API',
$backup_name_template = 'backup-%s'
) {
include cinder::params
Cinder_config<||> ~> Service['cinder-backup']
if $::cinder::params::backup_package {
Package['cinder-backup'] -> Cinder_config<||>
Package['cinder-backup'] -> Service['cinder-backup']
package { 'cinder-backup':
ensure => $package_ensure,
name => $::cinder::params::backup_package,
}
}
if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
}
service { 'cinder-backup':
ensure => $ensure,
name => $::cinder::params::backup_service,
enable => $enabled,
hasstatus => true,
require => Package['cinder'],
}
cinder_config {
'DEFAULT/backup_topic': value => $backup_topic;
'DEFAULT/backup_manager': value => $backup_manager;
'DEFAULT/backup_api_class': value => $backup_api_class;
'DEFAULT/backup_name_template': value => $backup_name_template;
}
}
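
A minimal usage sketch; the defaults above are usually sufficient, so only the service toggle is shown explicitly:

class { 'cinder::backup':
  enabled => true,
}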

76
manifests/backup/ceph.pp Normal file
View File

@@ -0,0 +1,76 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: cinder::backup::ceph
#
# Setup Cinder to backup volumes into Ceph
#
# === Parameters
#
# [*backup_ceph_conf*]
# (optional) Ceph config file to use.
# Should be a valid ceph configuration file
# Defaults to '/etc/ceph/ceph.conf'
#
# [*backup_ceph_user*]
# (optional) The Ceph user to connect with.
# Should be a valid user
# Defaults to 'cinder'
#
# [*backup_ceph_chunk_size*]
# (optional) The chunk size in bytes that a backup will be broken into
# before transfer to backup store.
# Should be a valid integer
# Defaults to '134217728'
#
# [*backup_ceph_pool*]
# (optional) The Ceph pool to backup to.
# Should be a valid ceph pool
# Defaults to 'backups'
#
# [*backup_ceph_stripe_unit*]
# (optional) RBD stripe unit to use when creating a backup image.
# Should be a valid integer
# Defaults to '0'
#
# [*backup_ceph_stripe_count*]
# (optional) RBD stripe count to use when creating a backup image.
# Should be a valid integer
# Defaults to '0'
#
class cinder::backup::ceph (
$backup_driver            = 'cinder.backup.drivers.ceph',
$backup_ceph_conf = '/etc/ceph/ceph.conf',
$backup_ceph_user = 'cinder',
$backup_ceph_chunk_size = '134217728',
$backup_ceph_pool = 'backups',
$backup_ceph_stripe_unit = '0',
$backup_ceph_stripe_count = '0'
) {
cinder_config {
'DEFAULT/backup_driver': value => $backup_driver;
'DEFAULT/backup_ceph_conf': value => $backup_ceph_conf;
'DEFAULT/backup_ceph_user': value => $backup_ceph_user;
'DEFAULT/backup_ceph_chunk_size': value => $backup_ceph_chunk_size;
'DEFAULT/backup_ceph_pool': value => $backup_ceph_pool;
'DEFAULT/backup_ceph_stripe_unit': value => $backup_ceph_stripe_unit;
'DEFAULT/backup_ceph_stripe_count': value => $backup_ceph_stripe_count;
}
}
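
A minimal usage sketch, assuming a Ceph user and pool already exist with the (illustrative) names below:

class { 'cinder::backup::ceph':
  backup_ceph_user => 'cinder-backup',
  backup_ceph_pool => 'backups',
}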

64
manifests/backup/swift.pp Normal file
View File

@@ -0,0 +1,64 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: cinder::backup::swift
#
# Setup Cinder to backup volumes into Swift
#
# === Parameters
#
# [*backup_swift_url*]
# (optional) The URL of the Swift endpoint.
# Should be a valid Swift URL
# Defaults to 'http://localhost:8080/v1/AUTH_'
#
# [*backup_swift_container*]
# (optional) The default Swift container to use.
# Defaults to 'volumes_backup'
#
# [*backup_swift_object_size*]
# (optional) The size in bytes of Swift backup objects.
# Defaults to '52428800'
#
# [*backup_swift_retry_attempts*]
# (optional) The number of retries to make for Swift operations.
# Defaults to '3'
#
# [*backup_swift_retry_backoff*]
# (optional) The backoff time in seconds between Swift retries.
# Defaults to '2'
#
class cinder::backup::swift (
$backup_driver = 'cinder.backup.drivers.swift',
$backup_swift_url = 'http://localhost:8080/v1/AUTH_',
$backup_swift_container = 'volumes_backup',
$backup_swift_object_size = '52428800',
$backup_swift_retry_attempts = '3',
$backup_swift_retry_backoff = '2'
) {
cinder_config {
'DEFAULT/backup_driver': value => $backup_driver;
'DEFAULT/backup_swift_url': value => $backup_swift_url;
'DEFAULT/backup_swift_container': value => $backup_swift_container;
'DEFAULT/backup_swift_object_size': value => $backup_swift_object_size;
'DEFAULT/backup_swift_retry_attempts': value => $backup_swift_retry_attempts;
'DEFAULT/backup_swift_retry_backoff': value => $backup_swift_retry_backoff;
}
}
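
A minimal usage sketch; the proxy URL is an illustrative placeholder for an existing Swift endpoint:

class { 'cinder::backup::swift':
  backup_swift_url => 'http://swift-proxy.example.com:8080/v1/AUTH_',
}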

22
manifests/ceilometer.pp Normal file
View File

@@ -0,0 +1,22 @@
# == Class: cinder::ceilometer
#
# Setup Cinder so that Ceilometer can retrieve volume samples
# Ref: http://docs.openstack.org/developer/ceilometer/install/manual.html
#
# === Parameters
#
# [*notification_driver*]
#   (optional) Driver or drivers to handle sending notifications.
# Notice: rabbit_notifier has been deprecated in Grizzly, use rpc_notifier instead.
#
class cinder::ceilometer (
$notification_driver = 'cinder.openstack.common.notifier.rpc_notifier'
) {
cinder_config {
'DEFAULT/notification_driver': value => $notification_driver;
}
}

20
manifests/client.pp Normal file
View File

@@ -0,0 +1,20 @@
# == Class: cinder::client
#
# Installs Cinder python client.
#
# === Parameters
#
# [*package_ensure*]
#   Ensure state for the package. Defaults to 'present'.
#
class cinder::client(
$package_ensure = 'present'
) {
include cinder::params
package { 'python-cinderclient':
ensure => $package_ensure,
name => $::cinder::params::client_package,
}
}

39
manifests/config.pp Normal file
View File

@@ -0,0 +1,39 @@
# == Class: cinder::config
#
# This class is used to manage arbitrary cinder configurations.
#
# === Parameters
#
# [*xxx_config*]
# (optional) Allow configuration of arbitrary cinder configurations.
#   The value is a hash of xxx_config resources. Example:
# { 'DEFAULT/foo' => { value => 'fooValue'},
# 'DEFAULT/bar' => { value => 'barValue'}
# }
#
# In yaml format, Example:
# xxx_config:
# DEFAULT/foo:
# value: fooValue
# DEFAULT/bar:
# value: barValue
#
# [*cinder_config*]
# (optional) Allow configuration of cinder.conf configurations.
#
# [*api_paste_ini_config*]
# (optional) Allow configuration of /etc/cinder/api-paste.ini configurations.
#
# NOTE: The configuration MUST NOT be already handled by this module
# or Puppet catalog compilation will fail with duplicate resources.
#
class cinder::config (
$cinder_config = {},
$api_paste_ini_config = {},
) {
validate_hash($cinder_config)
validate_hash($api_paste_ini_config)
create_resources('cinder_config', $cinder_config)
create_resources('cinder_api_paste_ini', $api_paste_ini_config)
}
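
A minimal usage sketch matching the hash format documented above; the option name and value are illustrative and must not collide with settings already managed by this module:

class { 'cinder::config':
  cinder_config => {
    'DEFAULT/my_custom_option' => { value => 'custom_value' },
  },
}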

View File

@@ -1,18 +1,62 @@
# [*mysql_module*]
# (optional) The puppet-mysql module version to use.
# Tested versions include 0.9 and 2.2
# Defaults to '0.9'
#
class cinder::db::mysql (
$password,
$dbname = 'cinder',
$user = 'cinder',
$dbname = 'cinder',
$user = 'cinder',
$host = '127.0.0.1',
$allowed_hosts = undef,
$charset = 'utf8',
$collate = 'utf8_unicode_ci',
$cluster_id = 'localzone',
$mysql_module = '0.9'
) {
include cinder::params
Class['cinder::db::mysql'] -> Exec<| title == 'cinder-manage db_sync' |>
Class['cinder::db::mysql'] -> Class['cinder::db::sync']
Database[$dbname] ~> Exec<| title == 'cinder-manage db_sync' |>
if ($mysql_module >= 2.2) {
Mysql_database[$dbname] ~> Exec<| title == 'cinder-manage db_sync' |>
mysql::db { $dbname:
host => '127.0.0.1',
user => $user,
password => $password,
mysql::db { $dbname:
user => $user,
password => $password,
host => $host,
charset => $charset,
collate => $collate,
require => Class['mysql::server'],
}
} else {
Database[$dbname] ~> Exec<| title == 'cinder-manage db_sync' |>
mysql::db { $dbname:
user => $user,
password => $password,
host => $host,
charset => $charset,
require => Class['mysql::config'],
}
}
# Check allowed_hosts to avoid duplicate resource declarations
if is_array($allowed_hosts) and delete($allowed_hosts,$host) != [] {
$real_allowed_hosts = delete($allowed_hosts,$host)
} elsif is_string($allowed_hosts) and ($allowed_hosts != $host) {
$real_allowed_hosts = $allowed_hosts
}
if $real_allowed_hosts {
# TODO this class should be in the mysql namespace
cinder::db::mysql::host_access { $real_allowed_hosts:
user => $user,
password => $password,
database => $dbname,
mysql_module => $mysql_module,
}
}
}

View File

@@ -0,0 +1,33 @@
#
# Used to grant access to the cinder mysql DB
#
define cinder::db::mysql::host_access ($user, $password, $database, $mysql_module = '0.9') {
if ($mysql_module >= 2.2) {
mysql_user { "${user}@${name}":
password_hash => mysql_password($password),
provider => 'mysql',
require => Mysql_database[$database],
}
mysql_grant { "${user}@${name}/${database}.*":
privileges => ['ALL'],
options => ['GRANT'],
provider => 'mysql',
table => "${database}.*",
require => Mysql_user["${user}@${name}"],
user => "${user}@${name}"
}
} else {
database_user { "${user}@${name}":
password_hash => mysql_password($password),
provider => 'mysql',
require => Database[$database],
}
database_grant { "${user}@${name}/${database}":
# TODO figure out which privileges to grant.
privileges => 'all',
provider => 'mysql',
require => Database_user["${user}@${name}"]
}
}
}

View File

@@ -0,0 +1,21 @@
#
# Class that configures postgresql for cinder
#
# Requires the Puppetlabs postgresql module.
class cinder::db::postgresql(
$password,
$dbname = 'cinder',
$user = 'cinder'
) {
require postgresql::python
Postgresql::Db[$dbname] ~> Exec<| title == 'cinder-manage db_sync' |>
Package['python-psycopg2'] -> Exec<| title == 'cinder-manage db_sync' |>
postgresql::db { $dbname:
user => $user,
password => $password,
}
}

View File

@@ -9,5 +9,6 @@ class cinder::db::sync {
user => 'cinder',
refreshonly => true,
require => [File[$::cinder::params::cinder_conf], Class['cinder']],
logoutput => 'on_failure',
}
}

82
manifests/glance.pp Normal file
View File

@@ -0,0 +1,82 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# == Class: cinder::glance
#
# Configures how Cinder talks to the Glance API, which Cinder uses to
# retrieve and store image data.
#
# === Parameters
#
# [*glance_api_servers*]
# (optional) A list of the glance api servers available to cinder.
# Should be an array with [hostname|ip]:port
# Defaults to undef
#
# [*glance_api_version*]
# (optional) Glance API version.
# Should be 1 or 2
# Defaults to 2 (current version)
#
# [*glance_num_retries*]
#   (optional) Number of retries when downloading an image from glance.
# Defaults to 0
#
# [*glance_api_insecure*]
#   (optional) Allow insecure SSL (https) requests to glance.
# Defaults to false
#
# [*glance_api_ssl_compression*]
# (optional) Whether to attempt to negotiate SSL layer compression when
# using SSL (https) requests. Set to False to disable SSL
# layer compression. In some cases disabling this may improve
# data throughput, eg when high network bandwidth is available
# and you are using already compressed image formats such as qcow2.
# Defaults to false
#
# [*glance_request_timeout*]
# (optional) http/https timeout value for glance operations.
# Defaults to undef
#
class cinder::glance (
$glance_api_servers = undef,
$glance_api_version = '2',
$glance_num_retries = '0',
$glance_api_insecure = false,
$glance_api_ssl_compression = false,
$glance_request_timeout = undef
) {
if is_array($glance_api_servers) {
cinder_config {
'DEFAULT/glance_api_servers': value => join($glance_api_servers, ',');
}
} elsif is_string($glance_api_servers) {
cinder_config {
'DEFAULT/glance_api_servers': value => $glance_api_servers;
}
}
cinder_config {
'DEFAULT/glance_api_version': value => $glance_api_version;
'DEFAULT/glance_num_retries': value => $glance_num_retries;
'DEFAULT/glance_api_insecure': value => $glance_api_insecure;
'DEFAULT/glance_api_ssl_compression': value => $glance_api_ssl_compression;
'DEFAULT/glance_request_timeout': value => $glance_request_timeout;
}
}
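
A minimal usage sketch; the API server addresses are illustrative:

class { 'cinder::glance':
  glance_api_servers => ['10.0.0.1:9292', '10.0.0.2:9292'],
}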

View File

@@ -1,52 +1,360 @@
#
# == Parameters
# [database_connection]
#   URL used to connect to the database.
# (Optional) Defaults to
# 'sqlite:////var/lib/cinder/cinder.sqlite'
#
# [database_idle_timeout]
# Timeout when db connections should be reaped.
# (Optional) Defaults to 3600.
#
# [*rabbit_use_ssl*]
# (optional) Connect over SSL for RabbitMQ
# Defaults to false
#
# [*kombu_ssl_ca_certs*]
# (optional) SSL certification authority file (valid only if SSL enabled).
# Defaults to undef
#
# [*kombu_ssl_certfile*]
# (optional) SSL cert file (valid only if SSL enabled).
# Defaults to undef
#
# [*kombu_ssl_keyfile*]
# (optional) SSL key file (valid only if SSL enabled).
# Defaults to undef
#
# [*kombu_ssl_version*]
# (optional) SSL version to use (valid only if SSL enabled).
# Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be
# available on some distributions.
# Defaults to 'SSLv3'
#
# [amqp_durable_queues]
# Use durable queues in amqp.
# (Optional) Defaults to false.
#
# [use_syslog]
# Use syslog for logging.
# (Optional) Defaults to false.
#
# [log_facility]
# Syslog facility to receive log lines.
# (Optional) Defaults to LOG_USER.
#
# [*log_dir*]
# (optional) Directory where logs should be stored.
# If set to boolean false, it will not log to any directory.
# Defaults to '/var/log/cinder'
#
# [*use_ssl*]
# (optional) Enable SSL on the API server
# Defaults to false, not set
#
# [*cert_file*]
#   (optional) Certificate file to use when starting API server securely
# Defaults to false, not set
#
# [*key_file*]
# (optional) Private key file to use when starting API server securely
# Defaults to false, not set
#
# [*ca_file*]
# (optional) CA certificate file to use to verify connecting clients
#   Defaults to false, not set
#
# [*mysql_module*]
# (optional) Puppetlabs-mysql module version to use
# Tested versions include 0.9 and 2.2
# Defaults to '0.9'
#
# [*storage_availability_zone*]
# (optional) Availability zone of the node.
# Defaults to 'nova'
#
# [*default_availability_zone*]
# (optional) Default availability zone for new volumes.
# If not set, the storage_availability_zone option value is used as
# the default for new volumes.
# Defaults to false
#
# [sql_connection]
# DEPRECATED
# [sql_idle_timeout]
# DEPRECATED
#
class cinder (
$keystone_password,
$cinder_settings = false,
$keystone_enabled = true,
$keystone_tenant = 'services',
$keystone_user = 'cinder',
$keystone_auth_host = 'localhost',
$keystone_auth_port = '35357',
$keystone_auth_protocol = 'http',
$package_ensure = 'latest',
$database_connection = 'sqlite:////var/lib/cinder/cinder.sqlite',
$database_idle_timeout = '3600',
$rpc_backend = 'cinder.openstack.common.rpc.impl_kombu',
$control_exchange = 'openstack',
$rabbit_host = '127.0.0.1',
$rabbit_port = 5672,
$rabbit_hosts = false,
$rabbit_virtual_host = '/',
$rabbit_userid = 'guest',
$rabbit_password = false,
$rabbit_use_ssl = false,
$kombu_ssl_ca_certs = undef,
$kombu_ssl_certfile = undef,
$kombu_ssl_keyfile = undef,
$kombu_ssl_version = 'SSLv3',
$amqp_durable_queues = false,
$qpid_hostname = 'localhost',
$qpid_port = '5672',
$qpid_username = 'guest',
$qpid_password = false,
$qpid_sasl_mechanisms = false,
$qpid_reconnect = true,
$qpid_reconnect_timeout = 0,
$qpid_reconnect_limit = 0,
$qpid_reconnect_interval_min = 0,
$qpid_reconnect_interval_max = 0,
$qpid_reconnect_interval = 0,
$qpid_heartbeat = 60,
$qpid_protocol = 'tcp',
$qpid_tcp_nodelay = true,
$package_ensure = 'present',
$use_ssl = false,
$ca_file = false,
$cert_file = false,
$key_file = false,
$api_paste_config = '/etc/cinder/api-paste.ini',
$use_syslog = false,
$log_facility = 'LOG_USER',
$log_dir = '/var/log/cinder',
$verbose = false,
$debug = false,
$mysql_module = '0.9',
$storage_availability_zone = 'nova',
$default_availability_zone = false,
# DEPRECATED PARAMETERS
$sql_connection = undef,
$sql_idle_timeout = undef,
) {
include cinder::params
package { 'cinder':
name => $::cinder::params::package_name,
ensure => $package_ensure,
Package['cinder'] -> Cinder_config<||>
Package['cinder'] -> Cinder_api_paste_ini<||>
if $sql_connection {
warning('The sql_connection parameter is deprecated, use database_connection instead.')
$database_connection_real = $sql_connection
} else {
$database_connection_real = $database_connection
}
File {
if $sql_idle_timeout {
warning('The sql_idle_timeout parameter is deprecated, use database_idle_timeout instead.')
$database_idle_timeout_real = $sql_idle_timeout
} else {
$database_idle_timeout_real = $database_idle_timeout
}
if $use_ssl {
if !$cert_file {
fail('The cert_file parameter is required when use_ssl is set to true')
}
if !$key_file {
fail('The key_file parameter is required when use_ssl is set to true')
}
}
if $rabbit_use_ssl {
if !$kombu_ssl_ca_certs {
fail('The kombu_ssl_ca_certs parameter is required when rabbit_use_ssl is set to true')
}
if !$kombu_ssl_certfile {
fail('The kombu_ssl_certfile parameter is required when rabbit_use_ssl is set to true')
}
if !$kombu_ssl_keyfile {
fail('The kombu_ssl_keyfile parameter is required when rabbit_use_ssl is set to true')
}
}
# this anchor is used to simplify the graph between cinder components by
# allowing a resource to serve as a point where the configuration of cinder begins
anchor { 'cinder-start': }
package { 'cinder':
ensure => $package_ensure,
name => $::cinder::params::package_name,
require => Anchor['cinder-start'],
}
file { $::cinder::params::cinder_conf:
ensure => present,
owner => 'cinder',
group => 'cinder',
mode => '0644',
require => Package[$::cinder::params::package_name],
mode => '0600',
require => Package['cinder'],
}
file { $::cinder::params::cinder_conf: }
file { $::cinder::params::cinder_paste_api_ini: }
if $cinder_settings {
multini($::cinder::params::cinder_conf, $cinder_settings)
file { $::cinder::params::cinder_paste_api_ini:
ensure => present,
owner => 'cinder',
group => 'cinder',
mode => '0600',
require => Package['cinder'],
}
if $keystone_enabled {
multini($::cinder::params::cinder_conf, { 'DEFAULT' => { 'auth_strategy' => 'keystone' } })
$keystone_settings = {
'filter:authtoken' => {
'auth_host' => $keystone_auth_host,
'auth_port' => $keystone_auth_port,
'auth_protocol' => $keystone_auth_protocol,
'admin_user' => $keystone_user,
'admin_password' => $keystone_password,
'admin_tenant_name' => $keystone_tenant
if $rpc_backend == 'cinder.openstack.common.rpc.impl_kombu' {
if ! $rabbit_password {
fail('Please specify a rabbit_password parameter.')
}
cinder_config {
'DEFAULT/rabbit_password': value => $rabbit_password, secret => true;
'DEFAULT/rabbit_userid': value => $rabbit_userid;
'DEFAULT/rabbit_virtual_host': value => $rabbit_virtual_host;
'DEFAULT/rabbit_use_ssl': value => $rabbit_use_ssl;
'DEFAULT/control_exchange': value => $control_exchange;
'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues;
}
if $rabbit_hosts {
cinder_config { 'DEFAULT/rabbit_hosts': value => join($rabbit_hosts, ',') }
cinder_config { 'DEFAULT/rabbit_ha_queues': value => true }
} else {
cinder_config { 'DEFAULT/rabbit_host': value => $rabbit_host }
cinder_config { 'DEFAULT/rabbit_port': value => $rabbit_port }
cinder_config { 'DEFAULT/rabbit_hosts': value => "${rabbit_host}:${rabbit_port}" }
cinder_config { 'DEFAULT/rabbit_ha_queues': value => false }
}
if $rabbit_use_ssl {
cinder_config {
'DEFAULT/kombu_ssl_ca_certs': value => $kombu_ssl_ca_certs;
'DEFAULT/kombu_ssl_certfile': value => $kombu_ssl_certfile;
'DEFAULT/kombu_ssl_keyfile': value => $kombu_ssl_keyfile;
'DEFAULT/kombu_ssl_version': value => $kombu_ssl_version;
}
} else {
cinder_config {
'DEFAULT/kombu_ssl_ca_certs': ensure => absent;
'DEFAULT/kombu_ssl_certfile': ensure => absent;
'DEFAULT/kombu_ssl_keyfile': ensure => absent;
'DEFAULT/kombu_ssl_version': ensure => absent;
}
}
multini($::cinder::params::cinder_paste_api_ini, $keystone_settings)
}
if $rpc_backend == 'cinder.openstack.common.rpc.impl_qpid' {
if ! $qpid_password {
fail('Please specify a qpid_password parameter.')
}
cinder_config {
'DEFAULT/qpid_hostname': value => $qpid_hostname;
'DEFAULT/qpid_port': value => $qpid_port;
'DEFAULT/qpid_username': value => $qpid_username;
'DEFAULT/qpid_password': value => $qpid_password, secret => true;
'DEFAULT/qpid_reconnect': value => $qpid_reconnect;
'DEFAULT/qpid_reconnect_timeout': value => $qpid_reconnect_timeout;
'DEFAULT/qpid_reconnect_limit': value => $qpid_reconnect_limit;
'DEFAULT/qpid_reconnect_interval_min': value => $qpid_reconnect_interval_min;
'DEFAULT/qpid_reconnect_interval_max': value => $qpid_reconnect_interval_max;
'DEFAULT/qpid_reconnect_interval': value => $qpid_reconnect_interval;
'DEFAULT/qpid_heartbeat': value => $qpid_heartbeat;
'DEFAULT/qpid_protocol': value => $qpid_protocol;
'DEFAULT/qpid_tcp_nodelay': value => $qpid_tcp_nodelay;
'DEFAULT/amqp_durable_queues': value => $amqp_durable_queues;
}
if is_array($qpid_sasl_mechanisms) {
cinder_config {
'DEFAULT/qpid_sasl_mechanisms': value => join($qpid_sasl_mechanisms, ' ');
}
} elsif $qpid_sasl_mechanisms {
cinder_config {
'DEFAULT/qpid_sasl_mechanisms': value => $qpid_sasl_mechanisms;
}
} else {
cinder_config {
'DEFAULT/qpid_sasl_mechanisms': ensure => absent;
}
}
}
if ! $default_availability_zone {
$default_availability_zone_real = $storage_availability_zone
} else {
$default_availability_zone_real = $default_availability_zone
}
cinder_config {
'database/connection': value => $database_connection_real, secret => true;
'database/idle_timeout': value => $database_idle_timeout_real;
'DEFAULT/verbose': value => $verbose;
'DEFAULT/debug': value => $debug;
'DEFAULT/api_paste_config': value => $api_paste_config;
'DEFAULT/rpc_backend': value => $rpc_backend;
'DEFAULT/storage_availability_zone': value => $storage_availability_zone;
'DEFAULT/default_availability_zone': value => $default_availability_zone_real;
}
if($database_connection_real =~ /mysql:\/\/\S+:\S+@\S+\/\S+/) {
if ($mysql_module >= 2.2) {
require 'mysql::bindings'
require 'mysql::bindings::python'
} else {
require 'mysql::python'
}
} elsif($database_connection_real =~ /postgresql:\/\/\S+:\S+@\S+\/\S+/) {
} elsif($database_connection_real =~ /sqlite:\/\//) {
} else {
fail("Invalid db connection ${database_connection_real}")
}
if $log_dir {
cinder_config {
'DEFAULT/log_dir': value => $log_dir;
}
} else {
cinder_config {
'DEFAULT/log_dir': ensure => absent;
}
}
# SSL Options
if $use_ssl {
cinder_config {
'DEFAULT/ssl_cert_file' : value => $cert_file;
'DEFAULT/ssl_key_file' : value => $key_file;
}
if $ca_file {
cinder_config { 'DEFAULT/ssl_ca_file' :
value => $ca_file,
}
} else {
cinder_config { 'DEFAULT/ssl_ca_file' :
ensure => absent,
}
}
} else {
cinder_config {
'DEFAULT/ssl_cert_file' : ensure => absent;
'DEFAULT/ssl_key_file' : ensure => absent;
'DEFAULT/ssl_ca_file' : ensure => absent;
}
}
if $use_syslog {
cinder_config {
'DEFAULT/use_syslog': value => true;
'DEFAULT/syslog_log_facility': value => $log_facility;
}
} else {
cinder_config {
'DEFAULT/use_syslog': value => false;
}
}
}
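
A minimal usage sketch for the main class with the default kombu RPC backend; the connection string and password are illustrative placeholders:

class { 'cinder':
  database_connection => 'mysql://cinder:secret@127.0.0.1/cinder',
  rabbit_password     => 'an_even_bigger_secret',
}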

View File

@@ -1,21 +1,86 @@
# == Class: cinder::keystone::auth
#
# Configures Cinder user, service and endpoint in Keystone.
#
# === Parameters
#
# [*password*]
# Password for Cinder user. Required.
#
# [*email*]
# Email for Cinder user. Optional. Defaults to 'cinder@localhost'.
#
# [*auth_name*]
# Username for Cinder service. Optional. Defaults to 'cinder'.
#
# [*auth_name_v2*]
# Username for Cinder v2 service. Optional. Defaults to 'cinder2'.
#   Username for Cinder v2 service. Optional. Defaults to 'cinderv2'.
# [*configure_endpoint*]
# Should Cinder endpoint be configured? Optional. Defaults to 'true'.
# API v1 endpoint should be enabled in Icehouse for compatibility with Nova.
#
# [*configure_endpoint_v2*]
# Should Cinder v2 endpoint be configured? Optional. Defaults to 'true'.
#
# [*service_type*]
# Type of service. Optional. Defaults to 'volume'.
#
# [*service_type_v2*]
# Type of API v2 service. Optional. Defaults to 'volume2'.
#
# [*public_address*]
# Public address for endpoint. Optional. Defaults to '127.0.0.1'.
#
# [*admin_address*]
# Admin address for endpoint. Optional. Defaults to '127.0.0.1'.
#
# [*internal_address*]
# Internal address for endpoint. Optional. Defaults to '127.0.0.1'.
#
# [*port*]
# Port for endpoint. Optional. Defaults to '8776'.
#
# [*volume_version*]
# Cinder API version. Optional. Defaults to 'v1'.
#
# [*region*]
# Region for endpoint. Optional. Defaults to 'RegionOne'.
#
# [*tenant*]
# Tenant for Cinder user. Optional. Defaults to 'services'.
#
# [*public_protocol*]
# Protocol for public endpoint. Optional. Defaults to 'http'.
#
# [*internal_protocol*]
# Protocol for internal endpoint. Optional. Defaults to 'http'.
#
# [*admin_protocol*]
# Protocol for admin endpoint. Optional. Defaults to 'http'.
#
class cinder::keystone::auth (
$password,
$auth_name = 'cinder',
$email = 'cinder@localhost',
$tenant = 'services',
$configure_endpoint = true,
$service_type = 'volume',
$public_address = '127.0.0.1',
$admin_address = '127.0.0.1',
$internal_address = '127.0.0.1',
$port = '8776',
$volume_version = 'v1',
$region = 'RegionOne'
$auth_name = 'cinder',
$auth_name_v2 = 'cinderv2',
$email = 'cinder@localhost',
$tenant = 'services',
$configure_endpoint = true,
$configure_endpoint_v2 = true,
$service_type = 'volume',
$service_type_v2 = 'volumev2',
$public_address = '127.0.0.1',
$admin_address = '127.0.0.1',
$internal_address = '127.0.0.1',
$port = '8776',
$volume_version = 'v1',
$region = 'RegionOne',
$public_protocol = 'http',
$admin_protocol = 'http',
$internal_protocol = 'http'
) {
Class['keystone::db::sync'] -> Class['cinder::keystone::auth']
Keystone_user_role["${auth_name}@services"] ~> Service <| name == 'cinder-api' |>
Keystone_user_role["${auth_name}@${tenant}"] ~> Service <| name == 'cinder-api' |>
keystone_user { $auth_name:
ensure => present,
@@ -23,23 +88,35 @@ class cinder::keystone::auth (
email => $email,
tenant => $tenant,
}
keystone_user_role { "${auth_name}@services":
keystone_user_role { "${auth_name}@${tenant}":
ensure => present,
roles => 'admin',
}
keystone_service { $auth_name:
ensure => present,
type => $service_type,
description => "Cinder Service",
description => 'Cinder Service',
}
keystone_service { $auth_name_v2:
ensure => present,
type => $service_type_v2,
description => 'Cinder Service v2',
}
if $configure_endpoint {
keystone_endpoint { $auth_name:
keystone_endpoint { "${region}/${auth_name}":
ensure => present,
region => $region,
public_url => "http://${public_address}:${port}/${volume_version}/%(tenant_id)s",
admin_url => "http://${admin_address}:${port}/${volume_version}/%(tenant_id)s",
internal_url => "http://${internal_address}:${port}/${volume_version}/%(tenant_id)s",
public_url => "${public_protocol}://${public_address}:${port}/${volume_version}/%(tenant_id)s",
admin_url => "${admin_protocol}://${admin_address}:${port}/${volume_version}/%(tenant_id)s",
internal_url => "${internal_protocol}://${internal_address}:${port}/${volume_version}/%(tenant_id)s",
}
}
if $configure_endpoint_v2 {
keystone_endpoint { "${region}/${auth_name_v2}":
ensure => present,
public_url => "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s",
admin_url => "http://${admin_address}:${port}/v2/%(tenant_id)s",
internal_url => "http://${internal_address}:${port}/v2/%(tenant_id)s",
}
}
}
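
A minimal usage sketch; the password and endpoint address are illustrative placeholders:

class { 'cinder::keystone::auth':
  password       => 'a_big_secret',
  public_address => '10.0.0.10',
}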

View File

@@ -4,20 +4,49 @@ class cinder::params {
$cinder_conf = '/etc/cinder/cinder.conf'
$cinder_paste_api_ini = '/etc/cinder/api-paste.ini'
case $::osfamily {
'Debian': {
$package_name = 'cinder-common'
$api_package = 'cinder-api'
$api_service = 'cinder-api'
$scheduler_package = 'cinder-scheduler'
$scheduler_service = 'cinder-scheduler'
$volume_package = 'cinder-volume'
$volume_service = 'cinder-volume'
$db_sync_command = 'cinder-manage db sync'
if $::osfamily == 'Debian' {
$package_name = 'cinder-common'
$client_package = 'python-cinderclient'
$api_package = 'cinder-api'
$api_service = 'cinder-api'
$backup_package = 'cinder-backup'
$backup_service = 'cinder-backup'
$scheduler_package = 'cinder-scheduler'
$scheduler_service = 'cinder-scheduler'
$volume_package = 'cinder-volume'
$volume_service = 'cinder-volume'
$db_sync_command = 'cinder-manage db sync'
$tgt_package_name = 'tgt'
$tgt_service_name = 'tgt'
$ceph_init_override = '/etc/init/cinder-volume.override'
$iscsi_helper = 'tgtadm'
$lio_package_name = 'targetcli'
$tgt_package_name = 'tgt'
$tgt_service_name = 'tgt'
} elsif($::osfamily == 'RedHat') {
$package_name = 'openstack-cinder'
$client_package = 'python-cinderclient'
$api_package = false
$api_service = 'openstack-cinder-api'
$backup_package = false
$backup_service = 'openstack-cinder-backup'
$scheduler_package = false
$scheduler_service = 'openstack-cinder-scheduler'
$volume_package = false
$volume_service = 'openstack-cinder-volume'
$db_sync_command = 'cinder-manage db sync'
$tgt_package_name = 'scsi-target-utils'
$tgt_service_name = 'tgtd'
$ceph_init_override = '/etc/sysconfig/openstack-cinder-volume'
$lio_package_name = 'targetcli'
if $::operatingsystem == 'RedHat' and $::operatingsystemrelease >= 7 {
$iscsi_helper = 'lioadm'
} else {
$iscsi_helper = 'tgtadm'
}
} else {
fail("unsuported osfamily ${::osfamily}, currently Debian and Redhat are the only supported platforms")
}
}

35
manifests/qpid.pp Normal file
View File

@@ -0,0 +1,35 @@
#
# class for installing qpid server for cinder
#
#
class cinder::qpid(
$enabled = true,
$user='guest',
$password='guest',
$file='/var/lib/qpidd/qpidd.sasldb',
$realm='OPENSTACK'
) {
# only configure cinder after the queue is up
Class['qpid::server'] -> Package<| title == 'cinder' |>
if ($enabled) {
$service_ensure = 'running'
qpid_user { $user:
password => $password,
file => $file,
realm => $realm,
provider => 'saslpasswd2',
require => Class['qpid::server'],
}
} else {
$service_ensure = 'stopped'
}
class { 'qpid::server':
service_ensure => $service_ensure
}
}

34
manifests/quota.pp Normal file
View File

@@ -0,0 +1,34 @@
# == Class: cinder::quota
#
# Setup and configure Cinder quotas.
#
# === Parameters
#
# [*quota_volumes*]
# (optional) Number of volumes allowed per project. Defaults to 10.
#
# [*quota_snapshots*]
# (optional) Number of volume snapshots allowed per project. Defaults to 10.
#
# [*quota_gigabytes*]
# (optional) Number of volume gigabytes (snapshots are also included)
# allowed per project. Defaults to 1000.
#
# [*quota_driver*]
# (optional) Default driver to use for quota checks.
# Defaults to 'cinder.quota.DbQuotaDriver'.
#
class cinder::quota (
$quota_volumes = 10,
$quota_snapshots = 10,
$quota_gigabytes = 1000,
$quota_driver = 'cinder.quota.DbQuotaDriver'
) {
cinder_config {
'DEFAULT/quota_volumes': value => $quota_volumes;
'DEFAULT/quota_snapshots': value => $quota_snapshots;
'DEFAULT/quota_gigabytes': value => $quota_gigabytes;
'DEFAULT/quota_driver': value => $quota_driver;
}
}
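
A minimal usage sketch overriding two of the defaults; the numbers are illustrative:

class { 'cinder::quota':
  quota_volumes   => 20,
  quota_gigabytes => 2000,
}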

81
manifests/rabbitmq.pp Normal file
View File

@@ -0,0 +1,81 @@
# == Class: cinder::rabbitmq
#
# Installs and manages rabbitmq server for cinder
#
# == Parameters:
#
# [*userid*]
# (optional) The username to use when connecting to Rabbit
# Defaults to 'guest'
#
# [*password*]
# (optional) The password to use when connecting to Rabbit
# Defaults to 'guest'
#
# [*port*]
# (optional) The port to use when connecting to Rabbit
# Defaults to '5672'
#
# [*virtual_host*]
# (optional) The virtual host to use when connecting to Rabbit
# Defaults to '/'
#
# [*enabled*]
# (optional) Whether to enable the Rabbit service
#   Defaults to true
#
# [*rabbitmq_class*]
# (optional) The rabbitmq puppet class to depend on,
# which is dependent on the puppet-rabbitmq version.
# Use the default for 1.x, use 'rabbitmq' for 3.x
# Defaults to 'rabbitmq::server'
#
class cinder::rabbitmq(
$userid = 'guest',
$password = 'guest',
$port = '5672',
$virtual_host = '/',
$enabled = true,
$rabbitmq_class = 'rabbitmq::server',
) {
# only configure cinder after the queue is up
Class[$rabbitmq_class] -> Anchor<| title == 'cinder-start' |>
if ($enabled) {
if $userid == 'guest' {
$delete_guest_user = false
} else {
$delete_guest_user = true
rabbitmq_user { $userid:
admin => true,
password => $password,
provider => 'rabbitmqctl',
require => Class[$rabbitmq_class],
}
# I need to figure out the appropriate permissions
rabbitmq_user_permissions { "${userid}@${virtual_host}":
configure_permission => '.*',
write_permission => '.*',
read_permission => '.*',
provider => 'rabbitmqctl',
}->Anchor<| title == 'cinder-start' |>
}
$service_ensure = 'running'
} else {
$service_ensure = 'stopped'
}
class { $rabbitmq_class:
service_ensure => $service_ensure,
port => $port,
delete_guest_user => $delete_guest_user,
}
if ($enabled) {
rabbitmq_vhost { $virtual_host:
provider => 'rabbitmqctl',
require => Class[$rabbitmq_class],
}
}
}
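
A minimal usage sketch creating a dedicated (illustrative) RabbitMQ user instead of guest:

class { 'cinder::rabbitmq':
  userid   => 'cinder',
  password => 'an_even_bigger_secret',
}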

View File

@@ -1,27 +1,46 @@
#
class cinder::scheduler (
$package_ensure = 'latest',
$enabled = true
$scheduler_driver = false,
$package_ensure = 'present',
$enabled = true,
$manage_service = true
) {
include cinder::params
package { 'cinder-scheduler':
name => $::cinder::params::scheduler_package,
ensure => $package_ensure,
require => Class['cinder'],
Cinder_config<||> ~> Service['cinder-scheduler']
Cinder_api_paste_ini<||> ~> Service['cinder-scheduler']
Exec<| title == 'cinder-manage db_sync' |> ~> Service['cinder-scheduler']
if $scheduler_driver {
cinder_config {
'DEFAULT/scheduler_driver': value => $scheduler_driver;
}
}
if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
if $::cinder::params::scheduler_package {
Package['cinder-scheduler'] -> Cinder_config<||>
Package['cinder-scheduler'] -> Cinder_api_paste_ini<||>
Package['cinder-scheduler'] -> Service['cinder-scheduler']
package { 'cinder-scheduler':
ensure => $package_ensure,
name => $::cinder::params::scheduler_package,
}
}
service { $::cinder::params::scheduler_service:
enable => $enabled,
if $manage_service {
if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
}
}
service { 'cinder-scheduler':
ensure => $ensure,
require => Package[$::cinder::params::scheduler_package],
subscribe => File[$::cinder::params::cinder_conf],
name => $::cinder::params::scheduler_service,
enable => $enabled,
hasstatus => true,
require => Package['cinder'],
}
}

View File

@@ -0,0 +1,47 @@
# == Class: cinder::setup_test_volume
#
# Setup a volume group on a loop device for test purposes.
#
# === Parameters
#
# [*volume_name*]
# Volume group name. Defaults to 'cinder-volumes'.
#
# [*size*]
# Volume group size. Defaults to '4G'.
#
# [*loopback_device*]
# Loop device name. Defaults to '/dev/loop2'.
#
class cinder::setup_test_volume(
$volume_name = 'cinder-volumes',
$size = '4G',
$loopback_device = '/dev/loop2'
) {
Exec {
cwd => '/tmp/',
}
package { 'lvm2':
ensure => present,
} ~>
exec { "/bin/dd if=/dev/zero of=${volume_name} bs=1 count=0 seek=${size}":
unless => "/sbin/vgdisplay ${volume_name}"
} ~>
exec { "/sbin/losetup ${loopback_device} ${volume_name}":
refreshonly => true,
} ~>
exec { "/sbin/pvcreate ${loopback_device}":
refreshonly => true,
} ~>
exec { "/sbin/vgcreate ${volume_name} ${loopback_device}":
refreshonly => true,
}
}

67
manifests/type.pp Normal file
View File

@@ -0,0 +1,67 @@
# == Define: cinder::type
#
# Creates cinder type and assigns backends.
#
# === Parameters
#
# [*os_password*]
# (required) The keystone tenant:username password.
#
# [*set_key*]
#   (optional) Must be used with set_value. Accepts a single string to be used
# as the key in type_set
#
# [*set_value*]
# (optional) Accepts list of strings or singular string. A list of values
# passed to type_set
#
# [*os_tenant_name*]
# (optional) The keystone tenant name. Defaults to 'admin'.
#
# [*os_username*]
#   (optional) The keystone user name. Defaults to 'admin'.
#
# [*os_auth_url*]
# (optional) The keystone auth url. Defaults to 'http://127.0.0.1:5000/v2.0/'.
#
# Author: Andrew Woodward <awoodward@mirantis.com>
define cinder::type (
$os_password,
$set_key = undef,
$set_value = undef,
$os_tenant_name = 'admin',
$os_username = 'admin',
$os_auth_url = 'http://127.0.0.1:5000/v2.0/',
) {
$volume_name = $name
# TODO: (xarses) This should be moved to a ruby provider so that among other
# reasons, the credential discovery magic can occur like in neutron.
exec {"cinder type-create ${volume_name}":
command => "cinder type-create ${volume_name}",
unless => "cinder type-list | grep ${volume_name}",
environment => [
"OS_TENANT_NAME=${os_tenant_name}",
"OS_USERNAME=${os_username}",
"OS_PASSWORD=${os_password}",
"OS_AUTH_URL=${os_auth_url}",
],
require => Package['python-cinderclient']
}
if ($set_value and $set_key) {
Exec["cinder type-create ${volume_name}"] ->
cinder::type_set { $set_value:
type => $volume_name,
key => $set_key,
os_password => $os_password,
os_tenant_name => $os_tenant_name,
os_username => $os_username,
os_auth_url => $os_auth_url,
}
}
}
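
A minimal usage sketch, assuming cinder::client (python-cinderclient) is also declared; the type name, backend name, and credentials are illustrative placeholders:

cinder::type { 'fast':
  set_key     => 'volume_backend_name',
  set_value   => 'solidfire',
  os_password => 'admin_password',
}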

51
manifests/type_set.pp Normal file
View File

@@ -0,0 +1,51 @@
# == Define: cinder::type_set
#
# Assigns keys after the volume type is set.
#
# === Parameters
#
# [*os_password*]
# (required) The keystone tenant:username password.
#
# [*type*]
# (required) Accepts single name of type to set.
#
# [*key*]
# (required) the key name that we are setting the value for.
#
# [*os_tenant_name*]
# (optional) The keystone tenant name. Defaults to 'admin'.
#
# [*os_username*]
#   (optional) The keystone user name. Defaults to 'admin'.
#
# [*os_auth_url*]
# (optional) The keystone auth url. Defaults to 'http://127.0.0.1:5000/v2.0/'.
#
# Author: Andrew Woodward <awoodward@mirantis.com>
define cinder::type_set (
$type,
$key,
$os_password,
$os_tenant_name = 'admin',
$os_username = 'admin',
$os_auth_url = 'http://127.0.0.1:5000/v2.0/',
) {
# TODO: (xarses) This should be moved to a ruby provider so that among other
# reasons, the credential discovery magic can occur like in neutron.
exec {"cinder type-key ${type} set ${key}=${name}":
path => '/usr/bin',
command => "cinder type-key ${type} set ${key}=${name}",
environment => [
"OS_TENANT_NAME=${os_tenant_name}",
"OS_USERNAME=${os_username}",
"OS_PASSWORD=${os_password}",
"OS_AUTH_URL=${os_auth_url}",
],
require => Package['python-cinderclient']
}
}

53
manifests/vmware.pp Normal file
View File

@@ -0,0 +1,53 @@
# == Class: cinder::vmware
#
# Creates vmdk specific disk file type & clone type.
#
# === Parameters
#
# [*os_password*]
# (required) The keystone tenant:username password.
#
# [*os_tenant_name*]
# (optional) The keystone tenant name. Defaults to 'admin'.
#
# [*os_username*]
#   (optional) The keystone user name. Defaults to 'admin'.
#
# [*os_auth_url*]
# (optional) The keystone auth url. Defaults to 'http://127.0.0.1:5000/v2.0/'.
#
class cinder::vmware (
$os_password,
$os_tenant_name = 'admin',
$os_username = 'admin',
$os_auth_url = 'http://127.0.0.1:5000/v2.0/'
) {
Cinder::Type {
os_password => $os_password,
os_tenant_name => $os_tenant_name,
os_username => $os_username,
os_auth_url => $os_auth_url
}
cinder::type {'vmware-thin':
set_value => 'thin',
set_key => 'vmware:vmdk_type'
}
cinder::type {'vmware-thick':
set_value => 'thick',
set_key => 'vmware:vmdk_type'
}
cinder::type {'vmware-eagerZeroedThick':
set_value => 'eagerZeroedThick',
set_key => 'vmware:vmdk_type'
}
cinder::type {'vmware-full':
set_value => 'full',
set_key => 'vmware:clone_type'
}
cinder::type {'vmware-linked':
set_value => 'linked',
set_key => 'vmware:clone_type'
}
}
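
A minimal usage sketch; only the admin password is required, and the value is an illustrative placeholder:

class { 'cinder::vmware':
  os_password => 'admin_password',
}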

View File

@@ -1,27 +1,40 @@
#
# $volume_name_template = volume-%s
class cinder::volume (
$package_ensure = 'latest',
$enabled = true
$package_ensure = 'present',
$enabled = true,
$manage_service = true
) {
include cinder::params
package { 'cinder-volume':
name => $::cinder::params::volume_package,
ensure => $package_ensure,
require => Class['cinder'],
Cinder_config<||> ~> Service['cinder-volume']
Cinder_api_paste_ini<||> ~> Service['cinder-volume']
Exec<| title == 'cinder-manage db_sync' |> ~> Service['cinder-volume']
if $::cinder::params::volume_package {
Package['cinder-volume'] -> Cinder_config<||>
Package['cinder-volume'] -> Cinder_api_paste_ini<||>
Package['cinder'] -> Package['cinder-volume']
Package['cinder-volume'] -> Service['cinder-volume']
package { 'cinder-volume':
ensure => $package_ensure,
name => $::cinder::params::volume_package,
}
}
if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
if $manage_service {
if $enabled {
$ensure = 'running'
} else {
$ensure = 'stopped'
}
}
service { $::cinder::params::volume_service:
enable => $enabled,
service { 'cinder-volume':
ensure => $ensure,
require => Package[$::cinder::params::volume_package],
subscribe => File[$::cinder::params::cinder_conf],
name => $::cinder::params::volume_service,
enable => $enabled,
hasstatus => true,
require => Package['cinder'],
}
}

74
manifests/volume/eqlx.pp Normal file
View File

@@ -0,0 +1,74 @@
# == Class: cinder::volume::eqlx
#
# Configure the Dell EqualLogic driver for cinder.
#
# === Parameters
#
# [*san_ip*]
# (required) The IP address of the Dell EqualLogic array.
#
# [*san_login*]
# (required) The account to use for issuing SSH commands.
#
# [*san_password*]
# (required) The password for the specified SSH account.
#
# [*san_thin_provision*]
# (optional) Whether or not to use thin provisioning for volumes.
# Defaults to false
#
# [*eqlx_group_name*]
# (optional) The CLI prompt message without '>'.
# Defaults to 'group-0'
#
# [*eqlx_pool*]
# (optional) The pool in which volumes will be created.
# Defaults to 'default'
#
# [*eqlx_use_chap*]
#   (optional) Use CHAP authentication for targets?
# Defaults to false
#
# [*eqlx_chap_login*]
# (optional) An existing CHAP account name.
# Defaults to 'chapadmin'
#
# [*eqlx_chap_password*]
# (optional) The password for the specified CHAP account name.
# Defaults to '12345'
#
# [*eqlx_cli_timeout*]
# (optional) The timeout for the Group Manager cli command execution.
# Defaults to 30 seconds
#
# [*eqlx_cli_max_retries*]
# (optional) The maximum retry count for reconnection.
# Defaults to 5
#
class cinder::volume::eqlx (
$san_ip,
$san_login,
$san_password,
$san_thin_provision = false,
$eqlx_group_name = 'group-0',
$eqlx_pool = 'default',
$eqlx_use_chap = false,
$eqlx_chap_login = 'chapadmin',
$eqlx_chap_password = '12345',
$eqlx_cli_timeout = 30,
$eqlx_cli_max_retries = 5,
) {
cinder::backend::eqlx { 'DEFAULT':
san_ip => $san_ip,
san_login => $san_login,
san_password => $san_password,
san_thin_provision => $san_thin_provision,
eqlx_group_name => $eqlx_group_name,
eqlx_pool => $eqlx_pool,
eqlx_use_chap => $eqlx_use_chap,
eqlx_chap_login => $eqlx_chap_login,
eqlx_chap_password => $eqlx_chap_password,
eqlx_cli_timeout => $eqlx_cli_timeout,
eqlx_cli_max_retries => $eqlx_cli_max_retries,
}
}

View File

@@ -0,0 +1,48 @@
#
# == Class: cinder::volume::glusterfs
#
# Configures Cinder to use GlusterFS as a volume driver
#
# === Parameters
#
# [*glusterfs_shares*]
# (required) An array of GlusterFS volume locations.
# Must be an array even if there is only one volume.
#
# [*glusterfs_disk_util*]
# Removed in Icehouse.
#
# [*glusterfs_sparsed_volumes*]
# (optional) Whether or not to use sparse (thin) volumes.
# Defaults to undef which uses the driver's default of "true".
#
# [*glusterfs_mount_point_base*]
# (optional) Where to mount the Gluster volumes.
# Defaults to undef which uses the driver's default of "$state_path/mnt".
#
# [*glusterfs_shares_config*]
# (optional) The config file to store the given $glusterfs_shares.
# Defaults to '/etc/cinder/shares.conf'
#
# === Examples
#
# class { 'cinder::volume::glusterfs':
# glusterfs_shares = ['192.168.1.1:/volumes'],
# }
#
class cinder::volume::glusterfs (
$glusterfs_shares,
$glusterfs_disk_util = false,
$glusterfs_sparsed_volumes = undef,
$glusterfs_mount_point_base = undef,
$glusterfs_shares_config = '/etc/cinder/shares.conf'
) {
cinder::backend::glusterfs { 'DEFAULT':
glusterfs_shares => $glusterfs_shares,
glusterfs_disk_util => $glusterfs_disk_util,
glusterfs_sparsed_volumes => $glusterfs_sparsed_volumes,
glusterfs_mount_point_base => $glusterfs_mount_point_base,
glusterfs_shares_config => $glusterfs_shares_config,
}
}

View File

@@ -1,34 +1,13 @@
#
class cinder::volume::iscsi (
$iscsi_settings = false,
$iscsi_helper = 'tgtadm'
$iscsi_ip_address,
$volume_group = 'cinder-volumes',
$iscsi_helper = $cinder::params::iscsi_helper,
) {
include cinder::params
if $iscsi_settings {
multini($::cinder::params::cinder_conf, $iscsi_settings)
cinder::backend::iscsi { 'DEFAULT':
iscsi_ip_address => $iscsi_ip_address,
volume_group => $volume_group,
iscsi_helper => $iscsi_helper
}
case $iscsi_helper {
'tgtadm': {
package { 'tgt':
name => $::cinder::params::tgt_package_name,
ensure => present,
}
service { 'tgtd':
name => $::cinder::params::tgt_service_name,
ensure => running,
enable => true,
require => Class['cinder::volume'],
}
multini($::cinder::params::cinder_conf, { 'DEFAULT' => { 'iscsi_helper' => 'tgtadm' } } )
}
default: {
fail("Unsupported iscsi helper: ${iscsi_helper}.")
}
}
}

196
manifests/volume/netapp.pp Normal file
View File

@@ -0,0 +1,196 @@
# == Class: cinder::volume::netapp
#
# Configures Cinder to use the NetApp unified volume driver
#
# === Parameters
#
# [*netapp_login*]
# (required) Administrative user account name used to access the storage
# system.
#
# [*netapp_password*]
# (required) Password for the administrative user account specified in the
# netapp_login parameter.
#
# [*netapp_server_hostname*]
# (required) The hostname (or IP address) for the storage system.
#
# [*netapp_server_port*]
# (optional) The TCP port to use for communication with ONTAPI on the
# storage system. Traditionally, port 80 is used for HTTP and port 443 is
# used for HTTPS; however, this value should be changed if an alternate
# port has been configured on the storage system.
# Defaults to 80
#
# [*netapp_size_multiplier*]
# (optional) The quantity to be multiplied by the requested volume size to
# ensure enough space is available on the virtual storage server (Vserver) to
# fulfill the volume creation request.
# Defaults to 1.2
#
# [*netapp_storage_family*]
# (optional) The storage family type used on the storage system; valid values
# are ontap_7mode for using Data ONTAP operating in 7-Mode or ontap_cluster
# for using clustered Data ONTAP.
# Defaults to ontap_cluster
#
# [*netapp_storage_protocol*]
# (optional) The storage protocol to be used on the data path with the storage
# system; valid values are iscsi or nfs.
# Defaults to nfs
#
# [*netapp_transport_type*]
# (optional) The transport protocol used when communicating with ONTAPI on the
# storage system. Valid values are http or https.
# Defaults to http
#
# [*netapp_vfiler*]
# (optional) The vFiler unit on which provisioning of block storage volumes
# will be done. This parameter is only used by the driver when connecting to
# an instance with a storage family of Data ONTAP operating in 7-Mode and the
# storage protocol selected is iSCSI. Only use this parameter when utilizing
# the MultiStore feature on the NetApp storage system.
# Defaults to ''
#
# [*netapp_volume_list*]
# (optional) This parameter is only utilized when the storage protocol is
# configured to use iSCSI. This parameter is used to restrict provisioning to
# the specified controller volumes. Specify the value of this parameter to be
# a comma separated list of NetApp controller volume names to be used for
# provisioning.
# Defaults to ''
#
# [*netapp_vserver*]
# (optional) This parameter specifies the virtual storage server (Vserver)
# name on the storage cluster on which provisioning of block storage volumes
# should occur. If using the NFS storage protocol, this parameter is mandatory
# for storage service catalog support (utilized by Cinder volume type
# extra_specs support). If this parameter is specified, the exports belonging
# to the Vserver will only be used for provisioning in the future. Block
# storage volumes on exports not belonging to the Vserver specified by
# this parameter will continue to function normally.
# Defaults to ''
#
# [*expiry_thres_minutes*]
# (optional) This parameter specifies the threshold for last access time for
# images in the NFS image cache. When a cache cleaning cycle begins, images
# in the cache that have not been accessed in the last M minutes, where M is
# the value of this parameter, will be deleted from the cache to create free
# space on the NFS share.
# Defaults to 720
#
# [*thres_avl_size_perc_start*]
# (optional) If the percentage of available space for an NFS share has
# dropped below the value specified by this parameter, the NFS image cache
# will be cleaned.
# Defaults to 20
#
# [*thres_avl_size_perc_stop*]
# (optional) When the percentage of available space on an NFS share has reached the
# percentage specified by this parameter, the driver will stop clearing files
# from the NFS image cache that have not been accessed in the last M
#   minutes, where M is the value of the expiry_thres_minutes parameter.
# Defaults to 60
#
# [*nfs_shares_config*]
# (optional) File with the list of available NFS shares
# Defaults to ''
#
# [*netapp_copyoffload_tool_path*]
# (optional) This option specifies the path of the NetApp Copy Offload tool
# binary. Ensure that the binary has execute permissions set which allow the
# effective user of the cinder-volume process to execute the file.
# Defaults to ''
#
# [*netapp_controller_ips*]
# (optional) This option is only utilized when the storage family is
# configured to eseries. This option is used to restrict provisioning to the
# specified controllers. Specify the value of this option to be a comma
# separated list of controller hostnames or IP addresses to be used for
# provisioning.
# Defaults to ''
#
# [*netapp_sa_password*]
# (optional) Password for the NetApp E-Series storage array.
# Defaults to ''
#
# [*netapp_storage_pools*]
# (optional) This option is used to restrict provisioning to the specified
# storage pools. Only dynamic disk pools are currently supported. Specify the
# value of this option to be a comma separated list of disk pool names to be
# used for provisioning.
# Defaults to ''
#
# [*netapp_webservice_path*]
# (optional) This option is used to specify the path to the E-Series proxy
# application on a proxy server. The value is combined with the value of the
# netapp_transport_type, netapp_server_hostname, and netapp_server_port
# options to create the URL used by the driver to connect to the proxy
# application.
# Defaults to '/devmgr/v2'
#
# === Examples
#
# class { 'cinder::volume::netapp':
# netapp_login => 'clusterAdmin',
# netapp_password => 'password',
# netapp_server_hostname => 'netapp.mycorp.com',
# netapp_server_port => '443',
# netapp_transport_type => 'https',
# netapp_vserver => 'openstack-vserver',
# }
#
# === Authors
#
# Bob Callaway <bob.callaway@netapp.com>
#
# === Copyright
#
# Copyright 2013 NetApp, Inc.
#
class cinder::volume::netapp (
$netapp_login,
$netapp_password,
$netapp_server_hostname,
$netapp_server_port = '80',
$netapp_size_multiplier = '1.2',
$netapp_storage_family = 'ontap_cluster',
$netapp_storage_protocol = 'nfs',
$netapp_transport_type = 'http',
$netapp_vfiler = '',
$netapp_volume_list = '',
$netapp_vserver = '',
$expiry_thres_minutes = '720',
$thres_avl_size_perc_start = '20',
$thres_avl_size_perc_stop = '60',
$nfs_shares_config = '',
$netapp_copyoffload_tool_path = '',
$netapp_controller_ips = '',
$netapp_sa_password = '',
$netapp_storage_pools = '',
$netapp_webservice_path = '/devmgr/v2',
) {
cinder::backend::netapp { 'DEFAULT':
netapp_login => $netapp_login,
netapp_password => $netapp_password,
netapp_server_hostname => $netapp_server_hostname,
netapp_server_port => $netapp_server_port,
netapp_size_multiplier => $netapp_size_multiplier,
netapp_storage_family => $netapp_storage_family,
netapp_storage_protocol => $netapp_storage_protocol,
netapp_transport_type => $netapp_transport_type,
netapp_vfiler => $netapp_vfiler,
netapp_volume_list => $netapp_volume_list,
netapp_vserver => $netapp_vserver,
expiry_thres_minutes => $expiry_thres_minutes,
thres_avl_size_perc_start => $thres_avl_size_perc_start,
thres_avl_size_perc_stop => $thres_avl_size_perc_stop,
nfs_shares_config => $nfs_shares_config,
netapp_copyoffload_tool_path => $netapp_copyoffload_tool_path,
netapp_controller_ips => $netapp_controller_ips,
netapp_sa_password => $netapp_sa_password,
netapp_storage_pools => $netapp_storage_pools,
netapp_webservice_path => $netapp_webservice_path,
}
}

View File

@@ -0,0 +1,52 @@
# == Class: cinder::volume::nexenta
#
# Sets up Cinder with the Nexenta volume driver.
#
# === Parameters
#
# [*nexenta_user*]
# (required) User name to connect to Nexenta SA.
#
# [*nexenta_password*]
# (required) Password to connect to Nexenta SA.
#
# [*nexenta_host*]
# (required) IP address of Nexenta SA.
#
# [*nexenta_volume*]
# (optional) Pool on SA that will hold all volumes. Defaults to 'cinder'.
#
# [*nexenta_target_prefix*]
# (optional) IQN prefix for iSCSI targets. Defaults to 'iqn:'.
#
# [*nexenta_target_group_prefix*]
# (optional) Prefix for iSCSI target groups on SA. Defaults to 'cinder/'.
#
# [*nexenta_blocksize*]
# (optional) Block size for volumes. Defaults to '8k'.
#
# [*nexenta_sparse*]
# (optional) Flag to create sparse volumes. Defaults to true.
#
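# === Examples
#
# A minimal usage sketch; the credentials and host below mirror the values
# used in the unit tests and are illustrative placeholders only.
#
#  class { 'cinder::volume::nexenta':
#    nexenta_user     => 'nexenta',
#    nexenta_password => 'password',
#    nexenta_host     => '127.0.0.2',
#  }
#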
class cinder::volume::nexenta (
$nexenta_user,
$nexenta_password,
$nexenta_host,
$nexenta_volume = 'cinder',
$nexenta_target_prefix = 'iqn:',
$nexenta_target_group_prefix = 'cinder/',
$nexenta_blocksize = '8k',
$nexenta_sparse = true
) {
cinder::backend::nexenta { 'DEFAULT':
nexenta_user => $nexenta_user,
nexenta_password => $nexenta_password,
nexenta_host => $nexenta_host,
nexenta_volume => $nexenta_volume,
nexenta_target_prefix => $nexenta_target_prefix,
nexenta_target_group_prefix => $nexenta_target_group_prefix,
nexenta_blocksize => $nexenta_blocksize,
nexenta_sparse => $nexenta_sparse,
}
}

23
manifests/volume/nfs.pp Normal file
View File

@@ -0,0 +1,23 @@
#
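# === Examples
#
# A minimal usage sketch; the share addresses below mirror the values used
# in the unit tests and are illustrative only.
#
#  class { 'cinder::volume::nfs':
#    nfs_servers => ['10.10.10.10:/shares', '10.10.10.10:/shares2'],
#  }
#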
class cinder::volume::nfs (
$nfs_servers = [],
$nfs_mount_options = undef,
$nfs_disk_util = undef,
$nfs_sparsed_volumes = undef,
$nfs_mount_point_base = undef,
$nfs_shares_config = '/etc/cinder/shares.conf',
$nfs_used_ratio = '0.95',
$nfs_oversub_ratio = '1.0',
) {
cinder::backend::nfs { 'DEFAULT':
nfs_servers => $nfs_servers,
nfs_mount_options => $nfs_mount_options,
nfs_disk_util => $nfs_disk_util,
nfs_sparsed_volumes => $nfs_sparsed_volumes,
nfs_mount_point_base => $nfs_mount_point_base,
nfs_shares_config => $nfs_shares_config,
nfs_used_ratio => $nfs_used_ratio,
nfs_oversub_ratio => $nfs_oversub_ratio,
}
}

64
manifests/volume/rbd.pp Normal file
View File

@@ -0,0 +1,64 @@
# == Class: cinder::volume::rbd
#
# Sets up Cinder to use the RBD driver.
#
# === Parameters
#
# [*rbd_pool*]
# (required) Specifies the pool name for the block device driver.
#
# [*rbd_user*]
#  (required) The RBD user used to configure OS init scripts and cephx.
#
# [*rbd_ceph_conf*]
# (optional) Path to the ceph configuration file to use
# Defaults to '/etc/ceph/ceph.conf'
#
# [*rbd_flatten_volume_from_snapshot*]
# (optional) Enable flatten volumes created from snapshots.
# Defaults to false
#
# [*rbd_secret_uuid*]
#  (optional) The secret UUID used for cephx authentication; required when cephx is in use.
# Defaults to false
#
# [*volume_tmp_dir*]
# (optional) Location to store temporary image files if the volume
# driver does not write them directly to the volume
# Defaults to false
#
# [*rbd_max_clone_depth*]
# (optional) Maximum number of nested clones that can be taken of a
# volume before enforcing a flatten prior to next clone.
# A value of zero disables cloning
# Defaults to '5'
#
# [*glance_api_version*]
#  (optional) DEPRECATED: Use the cinder::glance class instead.
#  Glance API version. (Defaults to '2')
#  Setting this parameter causes a duplicate resource declaration
#  with cinder::glance.
#
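# === Examples
#
# A minimal usage sketch; the pool, user and secret UUID below mirror the
# values used in the unit tests and are illustrative placeholders only.
#
#  class { 'cinder::volume::rbd':
#    rbd_pool        => 'volumes',
#    rbd_user        => 'test',
#    rbd_secret_uuid => '0123456789',
#  }
#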
class cinder::volume::rbd (
$rbd_pool,
$rbd_user,
$rbd_ceph_conf = '/etc/ceph/ceph.conf',
$rbd_flatten_volume_from_snapshot = false,
$rbd_secret_uuid = false,
$volume_tmp_dir = false,
$rbd_max_clone_depth = '5',
# DEPRECATED PARAMETERS
$glance_api_version = undef,
) {
cinder::backend::rbd { 'DEFAULT':
rbd_pool => $rbd_pool,
rbd_user => $rbd_user,
rbd_ceph_conf => $rbd_ceph_conf,
rbd_flatten_volume_from_snapshot => $rbd_flatten_volume_from_snapshot,
rbd_secret_uuid => $rbd_secret_uuid,
volume_tmp_dir => $volume_tmp_dir,
rbd_max_clone_depth => $rbd_max_clone_depth,
glance_api_version => $glance_api_version,
}
}

74
manifests/volume/san.pp Normal file
View File

@@ -0,0 +1,74 @@
# == Class: cinder::volume::san
#
# Configures Cinder volume SAN driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_driver*]
#  (required) The volume driver for cinder-volume to use.
#
# [*san_thin_provision*]
#  (optional) Use thin provisioning for SAN volumes. Defaults to true.
#
# [*san_ip*]
# (optional) IP address of SAN controller.
#
# [*san_login*]
# (optional) Username for SAN controller. Defaults to 'admin'.
#
# [*san_password*]
# (optional) Password for SAN controller.
#
# [*san_private_key*]
# (optional) Filename of private key to use for SSH authentication.
#
# [*san_clustername*]
# (optional) Cluster name to use for creating volumes.
#
# [*san_ssh_port*]
# (optional) SSH port to use with SAN. Defaults to 22.
#
# [*san_is_local*]
#  (optional) Execute commands locally instead of over SSH;
#  use this if the volume service is running on the SAN device.
#
# [*ssh_conn_timeout*]
# (optional) SSH connection timeout in seconds. Defaults to 30.
#
# [*ssh_min_pool_conn*]
# (optional) Minimum ssh connections in the pool.
#
# [*ssh_max_pool_conn*]
# (optional) Maximum ssh connections in the pool.
#
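# === Examples
#
# A minimal usage sketch; the driver and credentials below mirror the values
# used in the unit tests and are illustrative placeholders only.
#
#  class { 'cinder::volume::san':
#    volume_driver   => 'cinder.volume.san.SolarisISCSIDriver',
#    san_ip          => '127.0.0.1',
#    san_login       => 'cluster_operator',
#    san_password    => '007',
#    san_clustername => 'storage_cluster',
#  }
#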
class cinder::volume::san (
$volume_driver,
$san_thin_provision = true,
$san_ip = undef,
$san_login = 'admin',
$san_password = undef,
$san_private_key = undef,
$san_clustername = undef,
$san_ssh_port = 22,
$san_is_local = false,
$ssh_conn_timeout = 30,
$ssh_min_pool_conn = 1,
$ssh_max_pool_conn = 5
) {
cinder::backend::san { 'DEFAULT':
volume_driver => $volume_driver,
san_thin_provision => $san_thin_provision,
san_ip => $san_ip,
san_login => $san_login,
san_password => $san_password,
san_private_key => $san_private_key,
san_clustername => $san_clustername,
san_ssh_port => $san_ssh_port,
san_is_local => $san_is_local,
ssh_conn_timeout => $ssh_conn_timeout,
ssh_min_pool_conn => $ssh_min_pool_conn,
ssh_max_pool_conn => $ssh_max_pool_conn,
}
}

View File

@@ -0,0 +1,58 @@
# == Class: cinder::volume::solidfire
#
# Configures Cinder volume SolidFire driver.
# Parameters are particular to each volume driver.
#
# === Parameters
#
# [*volume_driver*]
#  (optional) Sets up cinder-volume to use the SolidFire volume driver.
# Defaults to 'cinder.volume.drivers.solidfire.SolidFire'
#
# [*san_ip*]
#  (required) IP address of the SolidFire cluster's MVIP.
#
# [*san_login*]
# (required) Username for SolidFire admin account.
#
# [*san_password*]
# (required) Password for SolidFire admin account.
#
# [*sf_emulate_512*]
# (optional) Use 512 byte emulation for volumes.
# Defaults to True
#
# [*sf_allow_tenant_qos*]
# (optional) Allow tenants to specify QoS via volume metadata.
# Defaults to False
#
# [*sf_account_prefix*]
#  (optional) Prefix to use when creating tenant accounts on the SolidFire cluster.
#  Defaults to None, so the account name is simply the tenant UUID.
#
# [*sf_api_port*]
# (optional) Port ID to use to connect to SolidFire API.
# Defaults to 443
#
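# === Examples
#
# A minimal usage sketch; the MVIP and credentials below mirror the values
# used in the unit tests and are illustrative placeholders only.
#
#  class { 'cinder::volume::solidfire':
#    san_ip       => '127.0.0.2',
#    san_login    => 'solidfire',
#    san_password => 'password',
#  }
#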
class cinder::volume::solidfire(
$san_ip,
$san_login,
$san_password,
$volume_driver = 'cinder.volume.drivers.solidfire.SolidFire',
$sf_emulate_512 = true,
$sf_allow_tenant_qos = false,
$sf_account_prefix = '',
$sf_api_port = '443'
) {
cinder::backend::solidfire { 'DEFAULT':
san_ip => $san_ip,
san_login => $san_login,
san_password => $san_password,
volume_driver => $volume_driver,
sf_emulate_512 => $sf_emulate_512,
sf_allow_tenant_qos => $sf_allow_tenant_qos,
sf_account_prefix => $sf_account_prefix,
sf_api_port => $sf_api_port,
}
}

72
manifests/volume/vmdk.pp Normal file
View File

@@ -0,0 +1,72 @@
# == Class: cinder::volume::vmdk
#
# Configure the VMware VMDK driver for cinder.
#
# === Parameters
#
# [*host_ip*]
#  (required) The IP address of the VMware vCenter server.
#
# [*host_username*]
#  (required) The username for connecting to the VMware vCenter server.
#
# [*host_password*]
#  (required) The password for connecting to the VMware vCenter server.
#
# [*api_retry_count*]
#  (optional) The number of times to retry on failures,
#  e.g. socket errors.
# Defaults to 10.
#
# [*max_object_retrieval*]
# (optional) The maximum number of ObjectContent data objects that should
# be returned in a single result. A positive value will cause
# the operation to suspend the retrieval when the count of
# objects reaches the specified maximum. The server may still
# limit the count to something less than the configured value.
# Any remaining objects may be retrieved with additional requests.
# Defaults to 100.
#
# [*task_poll_interval*]
# (optional) The interval in seconds used for polling of remote tasks.
# Defaults to 5.
#
# [*image_transfer_timeout_secs*]
# (optional) The timeout in seconds for VMDK volume transfer between Cinder and Glance.
# Defaults to 7200.
#
# [*wsdl_location*]
#  (optional) VIM Service WSDL location, e.g.
#  http://<server>/vimService.wsdl. Optional override of the
#  default location for bug workarounds.
# Defaults to None.
#
# [*volume_folder*]
# (optional) The name for the folder in the VC datacenter that will contain cinder volumes.
# Defaults to 'cinder-volumes'.
#
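# === Examples
#
# A minimal usage sketch; the vCenter address and credentials below mirror
# the values used in the unit tests and are illustrative placeholders only.
#
#  class { 'cinder::volume::vmdk':
#    host_ip       => '172.16.16.16',
#    host_username => 'user',
#    host_password => 'asdf',
#  }
#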
class cinder::volume::vmdk(
$host_ip,
$host_username,
$host_password,
$volume_folder = 'cinder-volumes',
$api_retry_count = 10,
$max_object_retrieval = 100,
$task_poll_interval = 5,
$image_transfer_timeout_secs = 7200,
$wsdl_location = undef
) {
cinder::backend::vmdk { 'DEFAULT':
host_ip => $host_ip,
host_username => $host_username,
host_password => $host_password,
volume_folder => $volume_folder,
api_retry_count => $api_retry_count,
max_object_retrieval => $max_object_retrieval,
task_poll_interval => $task_poll_interval,
image_transfer_timeout_secs => $image_transfer_timeout_secs,
wsdl_location => $wsdl_location,
}
}

View File

@@ -0,0 +1,183 @@
require 'spec_helper'
describe 'cinder::api' do
let :req_params do
{:keystone_password => 'foo'}
end
let :facts do
{:osfamily => 'Debian'}
end
describe 'with only required params' do
let :params do
req_params
end
it { should contain_service('cinder-api').with(
'hasstatus' => true,
'ensure' => 'running'
)}
it 'should configure cinder api correctly' do
should contain_cinder_config('DEFAULT/auth_strategy').with(
:value => 'keystone'
)
should contain_cinder_config('DEFAULT/osapi_volume_listen').with(
:value => '0.0.0.0'
)
should contain_cinder_config('DEFAULT/default_volume_type').with(
:ensure => 'absent'
)
should contain_cinder_api_paste_ini('filter:authtoken/service_protocol').with(
:value => 'http'
)
should contain_cinder_api_paste_ini('filter:authtoken/service_host').with(
:value => 'localhost'
)
should contain_cinder_api_paste_ini('filter:authtoken/service_port').with(
:value => '5000'
)
should contain_cinder_api_paste_ini('filter:authtoken/auth_protocol').with(
:value => 'http'
)
should contain_cinder_api_paste_ini('filter:authtoken/auth_host').with(
:value => 'localhost'
)
should contain_cinder_api_paste_ini('filter:authtoken/auth_port').with(
:value => '35357'
)
should contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix').with(
:ensure => 'absent'
)
should contain_cinder_api_paste_ini('filter:authtoken/admin_tenant_name').with(
:value => 'services'
)
should contain_cinder_api_paste_ini('filter:authtoken/admin_user').with(
:value => 'cinder'
)
should contain_cinder_api_paste_ini('filter:authtoken/admin_password').with(
:value => 'foo',
:secret => true
)
should contain_cinder_api_paste_ini('filter:authtoken/auth_uri').with(
:value => 'http://localhost:5000/'
)
should_not contain_cinder_config('DEFAULT/os_region_name')
end
end
describe 'with a custom region for nova' do
let :params do
req_params.merge({'os_region_name' => 'MyRegion'})
end
it 'should configure the region for nova' do
should contain_cinder_config('DEFAULT/os_region_name').with(
:value => 'MyRegion'
)
end
end
describe 'with a default volume type' do
let :params do
req_params.merge({'default_volume_type' => 'foo'})
end
it 'should configure the default volume type for cinder' do
should contain_cinder_config('DEFAULT/default_volume_type').with(
:value => 'foo'
)
end
end
describe 'with custom auth_uri' do
let :params do
req_params.merge({'keystone_auth_uri' => 'http://foo.bar:8080/v2.0/'})
end
it 'should configure cinder auth_uri correctly' do
should contain_cinder_api_paste_ini('filter:authtoken/auth_uri').with(
:value => 'http://foo.bar:8080/v2.0/'
)
end
end
describe 'with a custom bind_host' do
let :params do
req_params.merge({'bind_host' => '192.168.1.3'})
end
it 'should configure cinder api correctly' do
should contain_cinder_config('DEFAULT/osapi_volume_listen').with(
:value => '192.168.1.3'
)
end
end
[ '/keystone', '/keystone/admin', '' ].each do |keystone_auth_admin_prefix|
describe "with keystone_auth_admin_prefix containing correct value #{keystone_auth_admin_prefix}" do
let :params do
{
:keystone_auth_admin_prefix => keystone_auth_admin_prefix,
:keystone_password => 'dummy'
}
end
it { should contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix').with(
:value => keystone_auth_admin_prefix
)}
end
end
[
'/keystone/',
'keystone/',
'keystone',
'/keystone/admin/',
'keystone/admin/',
'keystone/admin'
].each do |keystone_auth_admin_prefix|
describe "with keystone_auth_admin_prefix containing incorrect value #{keystone_auth_admin_prefix}" do
let :params do
{
:keystone_auth_admin_prefix => keystone_auth_admin_prefix,
:keystone_password => 'dummy'
}
end
it { expect { should contain_cinder_api_paste_ini('filter:authtoken/auth_admin_prefix') }.to \
raise_error(Puppet::Error, /validate_re\(\): "#{keystone_auth_admin_prefix}" does not match/) }
end
end
describe 'with enabled false' do
let :params do
req_params.merge({'enabled' => false})
end
it 'should stop the service' do
should contain_service('cinder-api').with_ensure('stopped')
end
it 'should not contain db_sync exec' do
should_not contain_exec('cinder-manage db_sync')
end
end
describe 'with manage_service false' do
let :params do
req_params.merge({'manage_service' => false})
end
it 'should not change the state of the service' do
should contain_service('cinder-api').without_ensure
end
end
describe 'with ratelimits' do
let :params do
req_params.merge({ :ratelimits => '(GET, "*", .*, 100, MINUTE);(POST, "*", .*, 200, MINUTE)' })
end
it { should contain_cinder_api_paste_ini('filter:ratelimit/limits').with(
:value => '(GET, "*", .*, 100, MINUTE);(POST, "*", .*, 200, MINUTE)'
)}
end
end

View File

@@ -0,0 +1,83 @@
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Unit tests for cinder::backends class
#
require 'spec_helper'
describe 'cinder::backends' do
let :default_params do
{}
end
let :params do
{}
end
shared_examples_for 'cinder backends' do
let :p do
default_params.merge(params)
end
context 'configure cinder with default parameters' do
before :each do
params.merge!(
:enabled_backends => ['lowcost', 'regular', 'premium'],
:default_volume_type => false
)
end
it 'configures cinder.conf with default params' do
should contain_cinder_config('DEFAULT/enabled_backends').with_value(p[:enabled_backends].join(','))
end
end
context 'configure cinder with a default volume type' do
before :each do
params.merge!(
:enabled_backends => ['foo', 'bar'],
:default_volume_type => 'regular'
)
end
it 'should fail to configure default volume type' do
expect { subject }.to raise_error(Puppet::Error, /The default_volume_type parameter is deprecated in this class, you should declare it in cinder::api./)
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'cinder backends'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'cinder backends'
end
end

View File

@@ -0,0 +1,89 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Unit tests for cinder::ceph class
#
require 'spec_helper'
describe 'cinder::backup::ceph' do
let :default_params do
{ :backup_ceph_conf => '/etc/ceph/ceph.conf',
:backup_ceph_user => 'cinder',
:backup_ceph_chunk_size => '134217728',
:backup_ceph_pool => 'backups',
:backup_ceph_stripe_unit => '0',
:backup_ceph_stripe_count => '0' }
end
let :params do
{}
end
shared_examples_for 'cinder backup with ceph' do
let :p do
default_params.merge(params)
end
it 'configures cinder.conf' do
should contain_cinder_config('DEFAULT/backup_driver').with_value('cinder.backup.driver.ceph')
should contain_cinder_config('DEFAULT/backup_ceph_conf').with_value(p[:backup_ceph_conf])
should contain_cinder_config('DEFAULT/backup_ceph_user').with_value(p[:backup_ceph_user])
should contain_cinder_config('DEFAULT/backup_ceph_chunk_size').with_value(p[:backup_ceph_chunk_size])
should contain_cinder_config('DEFAULT/backup_ceph_pool').with_value(p[:backup_ceph_pool])
should contain_cinder_config('DEFAULT/backup_ceph_stripe_unit').with_value(p[:backup_ceph_stripe_unit])
should contain_cinder_config('DEFAULT/backup_ceph_stripe_count').with_value(p[:backup_ceph_stripe_count])
end
context 'when overriding default parameters' do
before :each do
params.merge!(:backup_ceph_conf => '/tmp/ceph.conf')
params.merge!(:backup_ceph_user => 'toto')
params.merge!(:backup_ceph_chunk_size => '123')
params.merge!(:backup_ceph_pool => 'foo')
params.merge!(:backup_ceph_stripe_unit => '56')
params.merge!(:backup_ceph_stripe_count => '67')
end
it 'should replace default parameters with new values' do
should contain_cinder_config('DEFAULT/backup_ceph_conf').with_value(p[:backup_ceph_conf])
should contain_cinder_config('DEFAULT/backup_ceph_user').with_value(p[:backup_ceph_user])
should contain_cinder_config('DEFAULT/backup_ceph_chunk_size').with_value(p[:backup_ceph_chunk_size])
should contain_cinder_config('DEFAULT/backup_ceph_pool').with_value(p[:backup_ceph_pool])
should contain_cinder_config('DEFAULT/backup_ceph_stripe_unit').with_value(p[:backup_ceph_stripe_unit])
should contain_cinder_config('DEFAULT/backup_ceph_stripe_count').with_value(p[:backup_ceph_stripe_count])
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'cinder backup with ceph'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'cinder backup with ceph'
end
end

View File

@@ -0,0 +1,101 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Unit tests for cinder::backup class
#
require 'spec_helper'
describe 'cinder::backup' do
let :default_params do
{ :enable => true,
:backup_topic => 'cinder-backup',
:backup_manager => 'cinder.backup.manager.BackupManager',
:backup_api_class => 'cinder.backup.api.API',
:backup_name_template => 'backup-%s' }
end
let :params do
{}
end
shared_examples_for 'cinder backup' do
let :p do
default_params.merge(params)
end
it { should contain_class('cinder::params') }
it 'installs cinder backup package' do
if platform_params.has_key?(:backup_package)
should contain_package('cinder-backup').with(
:name => platform_params[:backup_package],
:ensure => 'present'
)
should contain_package('cinder-backup').with_before(/Cinder_config\[.+\]/)
should contain_package('cinder-backup').with_before(/Service\[cinder-backup\]/)
end
end
it 'ensures cinder backup service is running' do
should contain_service('cinder-backup').with('hasstatus' => true)
end
it 'configures cinder.conf' do
should contain_cinder_config('DEFAULT/backup_topic').with_value(p[:backup_topic])
should contain_cinder_config('DEFAULT/backup_manager').with_value(p[:backup_manager])
should contain_cinder_config('DEFAULT/backup_api_class').with_value(p[:backup_api_class])
should contain_cinder_config('DEFAULT/backup_name_template').with_value(p[:backup_name_template])
end
context 'when overriding backup_name_template' do
before :each do
params.merge!(:backup_name_template => 'foo-bar-%s')
end
it 'should replace default parameter with new value' do
should contain_cinder_config('DEFAULT/backup_name_template').with_value(p[:backup_name_template])
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
let :platform_params do
{ :backup_package => 'cinder-backup',
:backup_service => 'cinder-backup' }
end
it_configures 'cinder backup'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
let :platform_params do
{ :backup_service => 'cinder-backup' }
end
it_configures 'cinder backup'
end
end

View File

@@ -0,0 +1,85 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Unit tests for cinder::backup::swift class
#
require 'spec_helper'
describe 'cinder::backup::swift' do
let :default_params do
{ :backup_swift_url => 'http://localhost:8080/v1/AUTH_',
:backup_swift_container => 'volumes_backup',
:backup_swift_object_size => '52428800',
:backup_swift_retry_attempts => '3',
:backup_swift_retry_backoff => '2' }
end
let :params do
{}
end
shared_examples_for 'cinder backup with swift' do
let :p do
default_params.merge(params)
end
it 'configures cinder.conf' do
should contain_cinder_config('DEFAULT/backup_driver').with_value('cinder.backup.drivers.swift')
should contain_cinder_config('DEFAULT/backup_swift_url').with_value(p[:backup_swift_url])
should contain_cinder_config('DEFAULT/backup_swift_container').with_value(p[:backup_swift_container])
should contain_cinder_config('DEFAULT/backup_swift_object_size').with_value(p[:backup_swift_object_size])
should contain_cinder_config('DEFAULT/backup_swift_retry_attempts').with_value(p[:backup_swift_retry_attempts])
should contain_cinder_config('DEFAULT/backup_swift_retry_backoff').with_value(p[:backup_swift_retry_backoff])
end
context 'when overriding default parameters' do
before :each do
params.merge!(:backup_swift_url => 'https://controller2:8080/v1/AUTH_')
params.merge!(:backup_swift_container => 'toto')
params.merge!(:backup_swift_object_size => '123')
params.merge!(:backup_swift_retry_attempts => '99')
params.merge!(:backup_swift_retry_backoff => '56')
end
it 'should replace default parameters with new values' do
should contain_cinder_config('DEFAULT/backup_swift_url').with_value(p[:backup_swift_url])
should contain_cinder_config('DEFAULT/backup_swift_container').with_value(p[:backup_swift_container])
should contain_cinder_config('DEFAULT/backup_swift_object_size').with_value(p[:backup_swift_object_size])
should contain_cinder_config('DEFAULT/backup_swift_retry_attempts').with_value(p[:backup_swift_retry_attempts])
should contain_cinder_config('DEFAULT/backup_swift_retry_backoff').with_value(p[:backup_swift_retry_backoff])
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'cinder backup with swift'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'cinder backup with swift'
end
end

View File

@@ -0,0 +1,11 @@
require 'spec_helper'
describe 'cinder::ceilometer' do
describe 'with default parameters' do
it 'contains default values' do
should contain_cinder_config('DEFAULT/notification_driver').with(
:value => 'cinder.openstack.common.notifier.rpc_notifier')
end
end
end

View File

@@ -0,0 +1,14 @@
require 'spec_helper'
describe 'cinder::client' do
it { should contain_package('python-cinderclient').with_ensure('present') }
let :facts do
{:osfamily => 'Debian'}
end
context 'with params' do
let :params do
{:package_ensure => 'latest'}
end
it { should contain_package('python-cinderclient').with_ensure('latest') }
end
end

View File

@@ -0,0 +1,77 @@
require 'spec_helper'
describe 'cinder::db::mysql' do
let :req_params do
{:password => 'pw',
:mysql_module => '0.9'}
end
let :facts do
{:osfamily => 'Debian'}
end
let :pre_condition do
'include mysql::server'
end
describe 'with only required params' do
let :params do
req_params
end
it { should contain_mysql__db('cinder').with(
:user => 'cinder',
:password => 'pw',
:host => '127.0.0.1',
:charset => 'utf8'
) }
end
describe "overriding allowed_hosts param to array" do
let :params do
{
:password => 'cinderpass',
:allowed_hosts => ['127.0.0.1','%']
}
end
it {should_not contain_cinder__db__mysql__host_access("127.0.0.1").with(
:user => 'cinder',
:password => 'cinderpass',
:database => 'cinder'
)}
it {should contain_cinder__db__mysql__host_access("%").with(
:user => 'cinder',
:password => 'cinderpass',
:database => 'cinder'
)}
end
describe "overriding allowed_hosts param to string" do
let :params do
{
:password => 'cinderpass2',
:allowed_hosts => '192.168.1.1'
}
end
it {should contain_cinder__db__mysql__host_access("192.168.1.1").with(
:user => 'cinder',
:password => 'cinderpass2',
:database => 'cinder'
)}
end
describe "overriding allowed_hosts param to equal the host param" do
let :params do
{
:password => 'cinderpass2',
:allowed_hosts => '127.0.0.1'
}
end
it {should_not contain_cinder__db__mysql__host_access("127.0.0.1").with(
:user => 'cinder',
:password => 'cinderpass2',
:database => 'cinder'
)}
end
end

View File

@@ -0,0 +1,26 @@
require 'spec_helper'
describe 'cinder::db::postgresql' do
let :req_params do
{:password => 'pw'}
end
let :facts do
{
:postgres_default_version => '8.4',
:osfamily => 'RedHat',
}
end
describe 'with only required params' do
let :params do
req_params
end
it { should contain_postgresql__db('cinder').with(
:user => 'cinder',
:password => 'pw'
) }
end
end

View File

@@ -0,0 +1,16 @@
require 'spec_helper'
describe 'cinder::db::sync' do
let :facts do
{:osfamily => 'Debian'}
end
it { should contain_exec('cinder-manage db_sync').with(
:command => 'cinder-manage db sync',
:path => '/usr/bin',
:user => 'cinder',
:refreshonly => true,
:logoutput => 'on_failure'
) }
end

View File

@@ -0,0 +1,82 @@
#
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Unit tests for cinder::glance class
#
require 'spec_helper'
describe 'cinder::glance' do
let :default_params do
{ :glance_api_version => '2',
:glance_num_retries => '0',
:glance_api_insecure => false,
:glance_api_ssl_compression => false }
end
let :params do
{}
end
shared_examples_for 'cinder with glance' do
let :p do
default_params.merge(params)
end
it 'configures cinder.conf with default params' do
should contain_cinder_config('DEFAULT/glance_api_version').with_value(p[:glance_api_version])
should contain_cinder_config('DEFAULT/glance_num_retries').with_value(p[:glance_num_retries])
should contain_cinder_config('DEFAULT/glance_api_insecure').with_value(p[:glance_api_insecure])
end
context 'configure cinder with one glance server' do
before :each do
params.merge!(:glance_api_servers => '10.0.0.1:9292')
end
it 'should configure one glance server' do
should contain_cinder_config('DEFAULT/glance_api_servers').with_value(p[:glance_api_servers])
end
end
context 'configure cinder with two glance servers' do
before :each do
params.merge!(:glance_api_servers => ['10.0.0.1:9292','10.0.0.2:9292'])
end
it 'should configure two glance servers' do
should contain_cinder_config('DEFAULT/glance_api_servers').with_value(p[:glance_api_servers].join(','))
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'cinder with glance'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'cinder with glance'
end
end

View File

@@ -0,0 +1,90 @@
require 'spec_helper'
describe 'cinder::keystone::auth' do
let :req_params do
{:password => 'pw'}
end
describe 'with only required params' do
let :params do
req_params
end
it 'should contain auth info' do
should contain_keystone_user('cinder').with(
:ensure => 'present',
:password => 'pw',
:email => 'cinder@localhost',
:tenant => 'services'
)
should contain_keystone_user_role('cinder@services').with(
:ensure => 'present',
:roles => 'admin'
)
should contain_keystone_service('cinder').with(
:ensure => 'present',
:type => 'volume',
:description => 'Cinder Service'
)
should contain_keystone_service('cinderv2').with(
:ensure => 'present',
:type => 'volumev2',
:description => 'Cinder Service v2'
)
end
it { should contain_keystone_endpoint('RegionOne/cinder').with(
:ensure => 'present',
:public_url => 'http://127.0.0.1:8776/v1/%(tenant_id)s',
:admin_url => 'http://127.0.0.1:8776/v1/%(tenant_id)s',
:internal_url => 'http://127.0.0.1:8776/v1/%(tenant_id)s'
) }
it { should contain_keystone_endpoint('RegionOne/cinderv2').with(
:ensure => 'present',
:public_url => 'http://127.0.0.1:8776/v2/%(tenant_id)s',
:admin_url => 'http://127.0.0.1:8776/v2/%(tenant_id)s',
:internal_url => 'http://127.0.0.1:8776/v2/%(tenant_id)s'
) }
end
context 'when overriding endpoint params' do
let :params do
req_params.merge(
:public_address => '10.0.42.1',
:admin_address => '10.0.42.2',
:internal_address => '10.0.42.3',
:region => 'RegionThree',
:port => '4242',
:admin_protocol => 'https',
:internal_protocol => 'https',
:public_protocol => 'https',
:volume_version => 'v42'
)
end
it { should contain_keystone_endpoint('RegionThree/cinder').with(
:ensure => 'present',
:public_url => 'https://10.0.42.1:4242/v42/%(tenant_id)s',
:admin_url => 'https://10.0.42.2:4242/v42/%(tenant_id)s',
:internal_url => 'https://10.0.42.3:4242/v42/%(tenant_id)s'
)}
end
describe 'when endpoint should not be configured' do
let :params do
req_params.merge(
:configure_endpoint => false,
:configure_endpoint_v2 => false
)
end
it { should_not contain_keystone_endpoint('RegionOne/cinder') }
it { should_not contain_keystone_endpoint('RegionOne/cinderv2') }
end
end

View File

@@ -0,0 +1,12 @@
require 'spec_helper'
describe 'cinder::params' do
let :facts do
{:osfamily => 'Debian'}
end
it 'should compile' do
subject
end
end

View File

@@ -0,0 +1,51 @@
require 'spec_helper'
describe 'cinder::qpid' do
let :facts do
{:puppetversion => '2.7',
:osfamily => 'RedHat'}
end
describe 'with defaults' do
it 'should contain all of the default resources' do
should contain_class('qpid::server').with(
:service_ensure => 'running',
:port => '5672'
)
end
it 'should contain user' do
should contain_qpid_user('guest').with(
:password => 'guest',
:file => '/var/lib/qpidd/qpidd.sasldb',
:realm => 'OPENSTACK',
:provider => 'saslpasswd2'
)
end
end
describe 'when disabled' do
let :params do
{
:enabled => false
}
end
it 'should be disabled' do
should_not contain_qpid_user('guest')
should contain_class('qpid::server').with(
:service_ensure => 'stopped'
)
end
end
end

View File

@@ -0,0 +1,35 @@
require 'spec_helper'
describe 'cinder::quota' do
describe 'with default parameters' do
it 'contains default values' do
should contain_cinder_config('DEFAULT/quota_volumes').with(
:value => 10)
should contain_cinder_config('DEFAULT/quota_snapshots').with(
:value => 10)
should contain_cinder_config('DEFAULT/quota_gigabytes').with(
:value => 1000)
should contain_cinder_config('DEFAULT/quota_driver').with(
:value => 'cinder.quota.DbQuotaDriver')
end
end
describe 'with overridden parameters' do
let :params do
{ :quota_volumes => 1000,
:quota_snapshots => 1000,
:quota_gigabytes => 100000 }
end
it 'contains overridden values' do
should contain_cinder_config('DEFAULT/quota_volumes').with(
:value => 1000)
should contain_cinder_config('DEFAULT/quota_snapshots').with(
:value => 1000)
should contain_cinder_config('DEFAULT/quota_gigabytes').with(
:value => 100000)
should contain_cinder_config('DEFAULT/quota_driver').with(
:value => 'cinder.quota.DbQuotaDriver')
end
end
end

View File

@@ -0,0 +1,81 @@
require 'spec_helper'
describe 'cinder::rabbitmq' do
let :facts do
{ :puppetversion => '2.7',
:osfamily => 'Debian',
}
end
describe 'with defaults' do
it 'should contain all of the default resources' do
should contain_class('rabbitmq::server').with(
:service_ensure => 'running',
:port => '5672',
:delete_guest_user => false
)
should contain_rabbitmq_vhost('/').with(
:provider => 'rabbitmqctl'
)
end
end
describe 'when a rabbitmq user is specified' do
let :params do
{
:userid => 'dan',
:password => 'pass'
}
end
it 'should contain user and permissions' do
should contain_rabbitmq_user('dan').with(
:admin => true,
:password => 'pass',
:provider => 'rabbitmqctl'
)
should contain_rabbitmq_user_permissions('dan@/').with(
:configure_permission => '.*',
:write_permission => '.*',
:read_permission => '.*',
:provider => 'rabbitmqctl'
)
end
end
describe 'when disabled' do
let :params do
{
:userid => 'dan',
:password => 'pass',
:enabled => false
}
end
it 'should be disabled' do
should_not contain_rabbitmq_user('dan')
should_not contain_rabbitmq_user_permissions('dan@/')
should contain_class('rabbitmq::server').with(
:service_ensure => 'stopped',
:port => '5672',
:delete_guest_user => false
)
should_not contain_rabbitmq_vhost('/')
end
end
end

View File

@@ -0,0 +1,81 @@
require 'spec_helper'
describe 'cinder::scheduler' do
describe 'on debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
describe 'with default parameters' do
it { should contain_class('cinder::params') }
it { should contain_package('cinder-scheduler').with(
:name => 'cinder-scheduler',
:ensure => 'present',
:before => 'Service[cinder-scheduler]'
) }
it { should contain_service('cinder-scheduler').with(
:name => 'cinder-scheduler',
:enable => true,
:ensure => 'running',
:require => 'Package[cinder]',
:hasstatus => true
) }
end
describe 'with parameters' do
let :params do
{ :scheduler_driver => 'cinder.scheduler.filter_scheduler.FilterScheduler',
:package_ensure => 'present'
}
end
it { should contain_cinder_config('DEFAULT/scheduler_driver').with_value('cinder.scheduler.filter_scheduler.FilterScheduler') }
it { should contain_package('cinder-scheduler').with_ensure('present') }
end
describe 'with manage_service false' do
let :params do
{ 'manage_service' => false
}
end
it 'should not change the state of the service' do
should contain_service('cinder-scheduler').without_ensure
end
end
end
describe 'on rhel platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
describe 'with default parameters' do
it { should contain_class('cinder::params') }
it { should contain_service('cinder-scheduler').with(
:name => 'openstack-cinder-scheduler',
:enable => true,
:ensure => 'running',
:require => 'Package[cinder]'
) }
end
describe 'with parameters' do
let :params do
{ :scheduler_driver => 'cinder.scheduler.filter_scheduler.FilterScheduler' }
end
it { should contain_cinder_config('DEFAULT/scheduler_driver').with_value('cinder.scheduler.filter_scheduler.FilterScheduler') }
end
end
end

View File

@@ -0,0 +1,15 @@
require 'spec_helper'
describe 'cinder::setup_test_volume' do
it { should contain_package('lvm2').with(
:ensure => 'present'
) }
it 'should contain volume creation execs' do
should contain_exec('/bin/dd if=/dev/zero of=cinder-volumes bs=1 count=0 seek=4G')
should contain_exec('/sbin/losetup /dev/loop2 cinder-volumes')
should contain_exec('/sbin/pvcreate /dev/loop2')
should contain_exec('/sbin/vgcreate cinder-volumes /dev/loop2')
end
end

347
spec/classes/cinder_spec.rb Normal file
View File

@@ -0,0 +1,347 @@
require 'spec_helper'
describe 'cinder' do
let :req_params do
{:rabbit_password => 'guest', :database_connection => 'mysql://user:password@host/database'}
end
let :facts do
{:osfamily => 'Debian'}
end
describe 'with only required params' do
let :params do
req_params
end
it { should contain_class('cinder::params') }
it { should contain_class('mysql::python') }
it 'should contain default config' do
should contain_cinder_config('DEFAULT/rpc_backend').with(
:value => 'cinder.openstack.common.rpc.impl_kombu'
)
should contain_cinder_config('DEFAULT/control_exchange').with(
:value => 'openstack'
)
should contain_cinder_config('DEFAULT/rabbit_password').with(
:value => 'guest',
:secret => true
)
should contain_cinder_config('DEFAULT/rabbit_host').with(
:value => '127.0.0.1'
)
should contain_cinder_config('DEFAULT/rabbit_port').with(
:value => '5672'
)
should contain_cinder_config('DEFAULT/rabbit_hosts').with(
:value => '127.0.0.1:5672'
)
should contain_cinder_config('DEFAULT/rabbit_ha_queues').with(
:value => false
)
should contain_cinder_config('DEFAULT/rabbit_virtual_host').with(
:value => '/'
)
should contain_cinder_config('DEFAULT/rabbit_userid').with(
:value => 'guest'
)
should contain_cinder_config('database/connection').with(
:value => 'mysql://user:password@host/database',
:secret => true
)
should contain_cinder_config('database/idle_timeout').with(
:value => '3600'
)
should contain_cinder_config('DEFAULT/verbose').with(
:value => false
)
should contain_cinder_config('DEFAULT/debug').with(
:value => false
)
should contain_cinder_config('DEFAULT/storage_availability_zone').with(
:value => 'nova'
)
should contain_cinder_config('DEFAULT/default_availability_zone').with(
:value => 'nova'
)
should contain_cinder_config('DEFAULT/api_paste_config').with(
:value => '/etc/cinder/api-paste.ini'
)
should contain_cinder_config('DEFAULT/log_dir').with(:value => '/var/log/cinder')
end
it { should contain_file('/etc/cinder/cinder.conf').with(
:owner => 'cinder',
:group => 'cinder',
:mode => '0600',
:require => 'Package[cinder]'
) }
it { should contain_file('/etc/cinder/api-paste.ini').with(
:owner => 'cinder',
:group => 'cinder',
:mode => '0600',
:require => 'Package[cinder]'
) }
end
describe 'with modified rabbit_hosts' do
let :params do
req_params.merge({'rabbit_hosts' => ['rabbit1:5672', 'rabbit2:5672']})
end
it 'should configure multiple rabbit hosts' do
should_not contain_cinder_config('DEFAULT/rabbit_host')
should_not contain_cinder_config('DEFAULT/rabbit_port')
should contain_cinder_config('DEFAULT/rabbit_hosts').with(
:value => 'rabbit1:5672,rabbit2:5672'
)
should contain_cinder_config('DEFAULT/rabbit_ha_queues').with(
:value => true
)
end
end
describe 'with a single rabbit_hosts entry' do
let :params do
req_params.merge({'rabbit_hosts' => ['rabbit1:5672']})
end
it 'should configure a single rabbit host with HA queues' do
should_not contain_cinder_config('DEFAULT/rabbit_host')
should_not contain_cinder_config('DEFAULT/rabbit_port')
should contain_cinder_config('DEFAULT/rabbit_hosts').with(
:value => 'rabbit1:5672'
)
should contain_cinder_config('DEFAULT/rabbit_ha_queues').with(
:value => true
)
end
end
describe 'with qpid rpc supplied' do
let :params do
{
:database_connection => 'mysql://user:password@host/database',
:qpid_password => 'guest',
:rpc_backend => 'cinder.openstack.common.rpc.impl_qpid'
}
end
it { should contain_cinder_config('database/connection').with_value('mysql://user:password@host/database') }
it { should contain_cinder_config('DEFAULT/rpc_backend').with_value('cinder.openstack.common.rpc.impl_qpid') }
it { should contain_cinder_config('DEFAULT/qpid_hostname').with_value('localhost') }
it { should contain_cinder_config('DEFAULT/qpid_port').with_value('5672') }
it { should contain_cinder_config('DEFAULT/qpid_username').with_value('guest') }
it { should contain_cinder_config('DEFAULT/qpid_password').with_value('guest').with_secret(true) }
it { should contain_cinder_config('DEFAULT/qpid_reconnect').with_value(true) }
it { should contain_cinder_config('DEFAULT/qpid_reconnect_timeout').with_value('0') }
it { should contain_cinder_config('DEFAULT/qpid_reconnect_limit').with_value('0') }
it { should contain_cinder_config('DEFAULT/qpid_reconnect_interval_min').with_value('0') }
it { should contain_cinder_config('DEFAULT/qpid_reconnect_interval_max').with_value('0') }
it { should contain_cinder_config('DEFAULT/qpid_reconnect_interval').with_value('0') }
it { should contain_cinder_config('DEFAULT/qpid_heartbeat').with_value('60') }
it { should contain_cinder_config('DEFAULT/qpid_protocol').with_value('tcp') }
it { should contain_cinder_config('DEFAULT/qpid_tcp_nodelay').with_value(true) }
end
describe 'with qpid rpc and no qpid_sasl_mechanisms' do
let :params do
{
:database_connection => 'mysql://user:password@host/database',
:qpid_password => 'guest',
:rpc_backend => 'cinder.openstack.common.rpc.impl_qpid'
}
end
it { should contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_ensure('absent') }
end
describe 'with qpid rpc and qpid_sasl_mechanisms string' do
let :params do
{
:database_connection => 'mysql://user:password@host/database',
:qpid_password => 'guest',
:qpid_sasl_mechanisms => 'PLAIN',
:rpc_backend => 'cinder.openstack.common.rpc.impl_qpid'
}
end
it { should contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_value('PLAIN') }
end
describe 'with qpid rpc and qpid_sasl_mechanisms array' do
let :params do
{
:database_connection => 'mysql://user:password@host/database',
:qpid_password => 'guest',
:qpid_sasl_mechanisms => [ 'DIGEST-MD5', 'GSSAPI', 'PLAIN' ],
:rpc_backend => 'cinder.openstack.common.rpc.impl_qpid'
}
end
it { should contain_cinder_config('DEFAULT/qpid_sasl_mechanisms').with_value('DIGEST-MD5 GSSAPI PLAIN') }
end
describe 'with SSL enabled' do
let :params do
req_params.merge!({
:rabbit_use_ssl => true,
:kombu_ssl_ca_certs => '/path/to/ssl/ca/certs',
:kombu_ssl_certfile => '/path/to/ssl/cert/file',
:kombu_ssl_keyfile => '/path/to/ssl/keyfile',
:kombu_ssl_version => 'SSLv3'
})
end
it do
should contain_cinder_config('DEFAULT/rabbit_use_ssl').with_value('true')
should contain_cinder_config('DEFAULT/kombu_ssl_ca_certs').with_value('/path/to/ssl/ca/certs')
should contain_cinder_config('DEFAULT/kombu_ssl_certfile').with_value('/path/to/ssl/cert/file')
should contain_cinder_config('DEFAULT/kombu_ssl_keyfile').with_value('/path/to/ssl/keyfile')
should contain_cinder_config('DEFAULT/kombu_ssl_version').with_value('SSLv3')
end
end
describe 'with SSL disabled' do
let :params do
req_params.merge!({
:rabbit_use_ssl => false,
:kombu_ssl_ca_certs => 'undef',
:kombu_ssl_certfile => 'undef',
:kombu_ssl_keyfile => 'undef',
:kombu_ssl_version => 'SSLv3'
})
end
it do
should contain_cinder_config('DEFAULT/rabbit_use_ssl').with_value('false')
should contain_cinder_config('DEFAULT/kombu_ssl_ca_certs').with_ensure('absent')
should contain_cinder_config('DEFAULT/kombu_ssl_certfile').with_ensure('absent')
should contain_cinder_config('DEFAULT/kombu_ssl_keyfile').with_ensure('absent')
should contain_cinder_config('DEFAULT/kombu_ssl_version').with_ensure('absent')
end
end
describe 'with syslog disabled' do
let :params do
req_params
end
it { should contain_cinder_config('DEFAULT/use_syslog').with_value(false) }
end
describe 'with syslog enabled' do
let :params do
req_params.merge({
:use_syslog => 'true',
})
end
it { should contain_cinder_config('DEFAULT/use_syslog').with_value(true) }
it { should contain_cinder_config('DEFAULT/syslog_log_facility').with_value('LOG_USER') }
end
describe 'with syslog enabled and custom settings' do
let :params do
req_params.merge({
:use_syslog => 'true',
:log_facility => 'LOG_LOCAL0'
})
end
it { should contain_cinder_config('DEFAULT/use_syslog').with_value(true) }
it { should contain_cinder_config('DEFAULT/syslog_log_facility').with_value('LOG_LOCAL0') }
end
describe 'with log_dir disabled' do
let(:params) { req_params.merge!({:log_dir => false}) }
it { should contain_cinder_config('DEFAULT/log_dir').with_ensure('absent') }
end
describe 'with amqp_durable_queues disabled' do
let :params do
req_params
end
it { should contain_cinder_config('DEFAULT/amqp_durable_queues').with_value(false) }
end
describe 'with amqp_durable_queues enabled' do
let :params do
req_params.merge({
:amqp_durable_queues => true,
})
end
it { should contain_cinder_config('DEFAULT/amqp_durable_queues').with_value(true) }
end
describe 'with postgresql' do
let :params do
{
:database_connection => 'postgresql://user:drowssap@host/database',
:rabbit_password => 'guest',
}
end
it { should contain_cinder_config('database/connection').with(
:value => 'postgresql://user:drowssap@host/database',
:secret => true
) }
it { should_not contain_class('mysql::python') }
it { should_not contain_class('mysql::bindings') }
it { should_not contain_class('mysql::bindings::python') }
end
describe 'with SSL socket options set' do
let :params do
{
:use_ssl => true,
:cert_file => '/path/to/cert',
:ca_file => '/path/to/ca',
:key_file => '/path/to/key',
:rabbit_password => 'guest',
}
end
it { should contain_cinder_config('DEFAULT/ssl_ca_file').with_value('/path/to/ca') }
it { should contain_cinder_config('DEFAULT/ssl_cert_file').with_value('/path/to/cert') }
it { should contain_cinder_config('DEFAULT/ssl_key_file').with_value('/path/to/key') }
end
describe 'with SSL socket options set to false' do
let :params do
{
:use_ssl => false,
:cert_file => false,
:ca_file => false,
:key_file => false,
:rabbit_password => 'guest',
}
end
it { should contain_cinder_config('DEFAULT/ssl_ca_file').with_ensure('absent') }
it { should contain_cinder_config('DEFAULT/ssl_cert_file').with_ensure('absent') }
it { should contain_cinder_config('DEFAULT/ssl_key_file').with_ensure('absent') }
end
describe 'with SSL socket options set wrongly configured' do
let :params do
{
:use_ssl => true,
:ca_file => '/path/to/ca',
:key_file => '/path/to/key',
:rabbit_password => 'guest',
}
end
it 'should raise an error' do
expect {
should compile
}.to raise_error Puppet::Error, /The cert_file parameter is required when use_ssl is set to true/
end
end
end

View File

@@ -0,0 +1,35 @@
require 'spec_helper'
describe 'cinder::vmware' do
let :params do
{:os_password => 'asdf',
:os_tenant_name => 'admin',
:os_username => 'admin',
:os_auth_url => 'http://127.127.127.1:5000/v2.0/'}
end
describe 'with defaults' do
it 'should create vmware special types' do
should contain_cinder__type('vmware-thin').with(
:set_key => 'vmware:vmdk_type',
:set_value => 'thin')
should contain_cinder__type('vmware-thick').with(
:set_key => 'vmware:vmdk_type',
:set_value => 'thick')
should contain_cinder__type('vmware-eagerZeroedThick').with(
:set_key => 'vmware:vmdk_type',
:set_value => 'eagerZeroedThick')
should contain_cinder__type('vmware-full').with(
:set_key => 'vmware:clone_type',
:set_value => 'full')
should contain_cinder__type('vmware-linked').with(
:set_key => 'vmware:clone_type',
:set_value => 'linked')
end
end
end

View File

@@ -0,0 +1,33 @@
require 'spec_helper'
describe 'cinder::volume::eqlx' do
let :params do {
:san_ip => '192.168.100.10',
:san_login => 'grpadmin',
:san_password => '12345',
:san_thin_provision => true,
:eqlx_group_name => 'group-a',
:eqlx_pool => 'apool',
:eqlx_use_chap => true,
:eqlx_chap_login => 'chapadm',
:eqlx_chap_password => '56789',
:eqlx_cli_timeout => 31,
:eqlx_cli_max_retries => 6,
}
end
describe 'eqlx volume driver' do
it 'configures eqlx volume driver' do
should contain_cinder_config(
"DEFAULT/volume_driver").with_value(
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver')
should contain_cinder_config(
"DEFAULT/volume_backend_name").with_value('DEFAULT')
params.each_pair do |config,value|
should contain_cinder_config("DEFAULT/#{config}").with_value(value)
end
end
end
end

View File

@@ -0,0 +1,59 @@
require 'spec_helper'
describe 'cinder::volume::glusterfs' do
shared_examples_for 'glusterfs volume driver' do
let :params do
{
:glusterfs_shares => ['10.10.10.10:/volumes', '10.10.10.11:/volumes'],
:glusterfs_shares_config => '/etc/cinder/other_shares.conf',
:glusterfs_sparsed_volumes => true,
:glusterfs_mount_point_base => '/cinder_mount_point',
}
end
it 'configures glusterfs volume driver' do
should contain_cinder_config('DEFAULT/volume_driver').with_value(
'cinder.volume.drivers.glusterfs.GlusterfsDriver')
should contain_cinder_config('DEFAULT/glusterfs_shares_config').with_value(
'/etc/cinder/other_shares.conf')
should contain_cinder_config('DEFAULT/glusterfs_sparsed_volumes').with_value(
true)
should contain_cinder_config('DEFAULT/glusterfs_mount_point_base').with_value(
'/cinder_mount_point')
should contain_file('/etc/cinder/other_shares.conf').with(
:content => "10.10.10.10:/volumes\n10.10.10.11:/volumes\n",
:require => 'Package[cinder]',
:notify => 'Service[cinder-volume]'
)
end
context "with a parameter which has been removed" do
before do
params.merge!({
:glusterfs_disk_util => 'foo',
})
end
it 'should fail' do
expect { subject }.to raise_error(Puppet::Error, /glusterfs_disk_util is removed in Icehouse./)
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'glusterfs volume driver'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'glusterfs volume driver'
end
end

View File

@@ -0,0 +1,66 @@
require 'spec_helper'
describe 'cinder::volume::iscsi' do
let :req_params do {
:iscsi_ip_address => '127.0.0.2',
:iscsi_helper => 'tgtadm'
}
end
let :facts do
{:osfamily => 'Debian'}
end
describe 'with default params' do
let :params do
req_params
end
it { should contain_cinder_config('DEFAULT/iscsi_ip_address').with(
:value => '127.0.0.2'
) }
it { should contain_cinder_config('DEFAULT/iscsi_helper').with(
:value => 'tgtadm'
) }
it { should contain_cinder_config('DEFAULT/volume_group').with(
:value => 'cinder-volumes'
) }
end
describe 'with RedHat' do
let :params do
req_params
end
let :facts do
{:osfamily => 'RedHat'}
end
it { should contain_file_line('cinder include').with(
:line => 'include /etc/cinder/volumes/*',
:path => '/etc/tgt/targets.conf'
) }
end
describe 'with lioadm' do
let :params do {
:iscsi_ip_address => '127.0.0.2',
:iscsi_helper => 'lioadm'
}
end
let :facts do
{:osfamily => 'RedHat'}
end
it { should contain_package('targetcli').with_ensure('present')}
end
end

View File

@@ -0,0 +1,66 @@
require 'spec_helper'
describe 'cinder::volume::netapp' do
let :params do
{
:netapp_login => 'netapp',
:netapp_password => 'password',
:netapp_server_hostname => '127.0.0.2',
}
end
let :default_params do
{
:netapp_server_port => '80',
:netapp_size_multiplier => '1.2',
:netapp_storage_family => 'ontap_cluster',
:netapp_storage_protocol => 'nfs',
:netapp_transport_type => 'http',
:netapp_vfiler => '',
:netapp_volume_list => '',
:netapp_vserver => '',
:expiry_thres_minutes => '720',
:thres_avl_size_perc_start => '20',
:thres_avl_size_perc_stop => '60',
:nfs_shares_config => '',
:netapp_copyoffload_tool_path => '',
:netapp_controller_ips => '',
:netapp_sa_password => '',
:netapp_storage_pools => '',
:netapp_webservice_path => '/devmgr/v2',
}
end
shared_examples_for 'netapp volume driver' do
let :params_hash do
default_params.merge(params)
end
it 'configures netapp volume driver' do
should contain_cinder_config('DEFAULT/volume_driver').with_value(
'cinder.volume.drivers.netapp.common.NetAppDriver')
params_hash.each_pair do |config,value|
should contain_cinder_config("DEFAULT/#{config}").with_value( value )
end
end
it 'marks netapp_password as secret' do
should contain_cinder_config('DEFAULT/netapp_password').with_secret( true )
end
end
context 'with default parameters' do
before do
params = {}
end
it_configures 'netapp volume driver'
end
context 'with provided parameters' do
it_configures 'netapp volume driver'
end
end

View File

@@ -0,0 +1,38 @@
# author 'Aimon Bustardo <abustardo at morphlabs dot com>'
# license 'Apache License 2.0'
# description 'configures openstack cinder nexenta driver'
require 'spec_helper'
describe 'cinder::volume::nexenta' do
let :params do
{ :nexenta_user => 'nexenta',
:nexenta_password => 'password',
:nexenta_host => '127.0.0.2' }
end
let :default_params do
{ :nexenta_volume => 'cinder',
:nexenta_target_prefix => 'iqn:',
:nexenta_target_group_prefix => 'cinder/',
:nexenta_blocksize => '8k',
:nexenta_sparse => true }
end
let :facts do
{ :osfamily => 'Debian' }
end
context 'with required params' do
let :params_hash do
default_params.merge(params)
end
it 'configures nexenta volume driver' do
params_hash.each_pair do |config, value|
should contain_cinder_config("DEFAULT/#{config}").with_value(value)
end
end
end
end

View File

@@ -0,0 +1,43 @@
require 'spec_helper'
describe 'cinder::volume::nfs' do
let :params do
{
:nfs_servers => ['10.10.10.10:/shares', '10.10.10.10:/shares2'],
:nfs_mount_options => 'vers=3',
:nfs_shares_config => '/etc/cinder/other_shares.conf',
:nfs_disk_util => 'du',
:nfs_sparsed_volumes => true,
:nfs_mount_point_base => '/cinder_mount_point',
:nfs_used_ratio => '0.95',
:nfs_oversub_ratio => '1.0',
}
end
describe 'nfs volume driver' do
it 'configures nfs volume driver' do
should contain_cinder_config('DEFAULT/volume_driver').with_value(
'cinder.volume.drivers.nfs.NfsDriver')
should contain_cinder_config('DEFAULT/nfs_shares_config').with_value(
'/etc/cinder/other_shares.conf')
should contain_cinder_config('DEFAULT/nfs_mount_options').with_value(
'vers=3')
should contain_cinder_config('DEFAULT/nfs_sparsed_volumes').with_value(
true)
should contain_cinder_config('DEFAULT/nfs_mount_point_base').with_value(
'/cinder_mount_point')
should contain_cinder_config('DEFAULT/nfs_disk_util').with_value(
'du')
should contain_cinder_config('DEFAULT/nfs_used_ratio').with_value(
'0.95')
should contain_cinder_config('DEFAULT/nfs_oversub_ratio').with_value(
'1.0')
should contain_file('/etc/cinder/other_shares.conf').with(
:content => "10.10.10.10:/shares\n10.10.10.10:/shares2",
:require => 'Package[cinder]',
:notify => 'Service[cinder-volume]'
)
end
end
end

View File

@@ -0,0 +1,82 @@
require 'spec_helper'
describe 'cinder::volume::rbd' do
let :req_params do
{
:rbd_pool => 'volumes',
:glance_api_version => '2',
:rbd_user => 'test',
:rbd_secret_uuid => '0123456789',
:rbd_ceph_conf => '/foo/boo/zoo/ceph.conf',
:rbd_flatten_volume_from_snapshot => true,
:volume_tmp_dir => '/foo/tmp',
:rbd_max_clone_depth => '0'
}
end
it { should contain_class('cinder::params') }
let :params do
req_params
end
let :facts do
{:osfamily => 'Debian'}
end
describe 'rbd volume driver' do
it 'configures rbd volume driver' do
should contain_cinder_config('DEFAULT/volume_driver').with_value('cinder.volume.drivers.rbd.RBDDriver')
should contain_cinder_config('DEFAULT/rbd_ceph_conf').with_value(req_params[:rbd_ceph_conf])
should contain_cinder_config('DEFAULT/rbd_flatten_volume_from_snapshot').with_value(req_params[:rbd_flatten_volume_from_snapshot])
should contain_cinder_config('DEFAULT/volume_tmp_dir').with_value(req_params[:volume_tmp_dir])
should contain_cinder_config('DEFAULT/rbd_max_clone_depth').with_value(req_params[:rbd_max_clone_depth])
should contain_cinder_config('DEFAULT/rbd_pool').with_value(req_params[:rbd_pool])
should contain_cinder_config('DEFAULT/rbd_user').with_value(req_params[:rbd_user])
should contain_cinder_config('DEFAULT/rbd_secret_uuid').with_value(req_params[:rbd_secret_uuid])
should contain_file('/etc/init/cinder-volume.override').with(:ensure => 'present')
should contain_file_line('set initscript env').with(
:line => /env CEPH_ARGS=\"--id test\"/,
:path => '/etc/init/cinder-volume.override',
:notify => 'Service[cinder-volume]')
end
context 'with rbd_secret_uuid disabled' do
let(:params) { req_params.merge!({:rbd_secret_uuid => false}) }
it { should contain_cinder_config('DEFAULT/rbd_secret_uuid').with_ensure('absent') }
end
context 'with volume_tmp_dir disabled' do
let(:params) { req_params.merge!({:volume_tmp_dir => false}) }
it { should contain_cinder_config('DEFAULT/volume_tmp_dir').with_ensure('absent') }
end
end
describe 'with RedHat' do
let :facts do
{ :osfamily => 'RedHat' }
end
let :params do
req_params
end
it 'should ensure that the cinder-volume sysconfig file is present' do
should contain_file('/etc/sysconfig/openstack-cinder-volume').with(
:ensure => 'present'
)
end
it 'should configure RedHat init override' do
should contain_file_line('set initscript env').with(
:line => /export CEPH_ARGS=\"--id test\"/,
:path => '/etc/sysconfig/openstack-cinder-volume',
:notify => 'Service[cinder-volume]')
end
end
end
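
For orientation, a minimal sketch of how the class exercised above might be declared in a node manifest. The parameter names and values are taken from req_params in this spec; which parameters the class actually requires, and what its defaults are, is not shown here and is assumed.

class { 'cinder::volume::rbd':
  rbd_pool        => 'volumes',
  rbd_user        => 'test',
  rbd_secret_uuid => '0123456789',
}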


@@ -0,0 +1,39 @@
require 'spec_helper'
describe 'cinder::volume::san' do
let :params do
{ :volume_driver => 'cinder.volume.san.SolarisISCSIDriver',
:san_ip => '127.0.0.1',
:san_login => 'cluster_operator',
:san_password => '007',
:san_clustername => 'storage_cluster' }
end
let :default_params do
{ :san_thin_provision => true,
:san_login => 'admin',
:san_ssh_port => 22,
:san_is_local => false,
:ssh_conn_timeout => 30,
:ssh_min_pool_conn => 1,
:ssh_max_pool_conn => 5 }
end
shared_examples_for 'a san volume driver' do
let :params_hash do
default_params.merge(params)
end
it 'configures cinder volume driver' do
params_hash.each_pair do |config,value|
should contain_cinder_config("DEFAULT/#{config}").with_value( value )
end
end
end
context 'with parameters' do
it_configures 'a san volume driver'
end
end


@@ -0,0 +1,28 @@
require 'spec_helper'
describe 'cinder::volume::solidfire' do
let :req_params do
{
:san_ip => '127.0.0.2',
:san_login => 'solidfire',
:san_password => 'password',
}
end
let :params do
req_params
end
describe 'solidfire volume driver' do
it 'configures solidfire volume driver' do
should contain_cinder_config('DEFAULT/volume_driver').with_value(
'cinder.volume.drivers.solidfire.SolidFire')
should contain_cinder_config('DEFAULT/san_ip').with_value(
'127.0.0.2')
should contain_cinder_config('DEFAULT/san_login').with_value(
'solidfire')
should contain_cinder_config('DEFAULT/san_password').with_value(
'password')
end
end
end


@@ -0,0 +1,26 @@
require 'spec_helper'
describe 'cinder::volume' do
let :pre_condition do
'class { "cinder": rabbit_password => "fpp", database_connection => "mysql://a:b@c/d" }'
end
let :facts do
{:osfamily => 'Debian'}
end
it { should contain_package('cinder-volume').with_ensure('present') }
it { should contain_service('cinder-volume').with(
'hasstatus' => true
)}
describe 'with manage_service false' do
let :params do
{ 'manage_service' => false }
end
it 'should not change the state of the service' do
should contain_service('cinder-volume').without_ensure
end
end
end


@@ -0,0 +1,57 @@
require 'spec_helper'
describe 'cinder::volume::vmdk' do
let :params do
{
:host_ip => '172.16.16.16',
:host_password => 'asdf',
:host_username => 'user'
}
end
let :optional_params do
{
:volume_folder => 'cinder-volume-folder',
:api_retry_count => 5,
:max_object_retrieval => 200,
:task_poll_interval => 10,
:image_transfer_timeout_secs => 3600,
:wsdl_location => 'http://127.0.0.1:8080/vmware/SDK/wsdl/vim25/vimService.wsdl'
}
end
it 'should configure vmdk driver in cinder.conf' do
should contain_cinder_config('DEFAULT/volume_driver').with_value('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver')
should contain_cinder_config('DEFAULT/vmware_host_ip').with_value(params[:host_ip])
should contain_cinder_config('DEFAULT/vmware_host_username').with_value(params[:host_username])
should contain_cinder_config('DEFAULT/vmware_host_password').with_value(params[:host_password])
should contain_cinder_config('DEFAULT/vmware_volume_folder').with_value('cinder-volumes')
should contain_cinder_config('DEFAULT/vmware_api_retry_count').with_value(10)
should contain_cinder_config('DEFAULT/vmware_max_object_retrieval').with_value(100)
should contain_cinder_config('DEFAULT/vmware_task_poll_interval').with_value(5)
should contain_cinder_config('DEFAULT/vmware_image_transfer_timeout_secs').with_value(7200)
should_not contain_cinder_config('DEFAULT/vmware_wsdl_location')
end
it 'installs vmdk python driver' do
should contain_package('python-suds').with(
:ensure => 'present'
)
end
context 'with optional parameters' do
before :each do
params.merge!(optional_params)
end
it 'should configure vmdk driver in cinder.conf' do
should contain_cinder_config('DEFAULT/vmware_volume_folder').with_value(params[:volume_folder])
should contain_cinder_config('DEFAULT/vmware_api_retry_count').with_value(params[:api_retry_count])
should contain_cinder_config('DEFAULT/vmware_max_object_retrieval').with_value(params[:max_object_retrieval])
should contain_cinder_config('DEFAULT/vmware_task_poll_interval').with_value(params[:task_poll_interval])
should contain_cinder_config('DEFAULT/vmware_image_transfer_timeout_secs').with_value(params[:image_transfer_timeout_secs])
should contain_cinder_config('DEFAULT/vmware_wsdl_location').with_value(params[:wsdl_location])
end
end
end


@@ -0,0 +1,36 @@
require 'spec_helper'
describe 'cinder::backend::eqlx' do
let (:config_group_name) { 'eqlx-1' }
let (:title) { config_group_name }
let :params do
{
:san_ip => '192.168.100.10',
:san_login => 'grpadmin',
:san_password => '12345',
:volume_backend_name => 'Dell_EQLX',
:san_thin_provision => true,
:eqlx_group_name => 'group-a',
:eqlx_pool => 'apool',
:eqlx_use_chap => true,
:eqlx_chap_login => 'chapadm',
:eqlx_chap_password => '56789',
:eqlx_cli_timeout => 31,
:eqlx_cli_max_retries => 6,
}
end
describe 'eqlx volume driver' do
it 'configures eqlx volume driver' do
should contain_cinder_config(
"#{config_group_name}/volume_driver").with_value(
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver')
params.each_pair do |config,value|
should contain_cinder_config(
"#{config_group_name}/#{config}").with_value(value)
end
end
end
end
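
As a reference point, the define under test could be declared roughly as below; the title and values mirror this spec, and any parameter omitted is assumed to fall back to the module's defaults.

cinder::backend::eqlx { 'eqlx-1':
  san_ip              => '192.168.100.10',
  san_login           => 'grpadmin',
  san_password        => '12345',
  volume_backend_name => 'Dell_EQLX',
  eqlx_group_name     => 'group-a',
  eqlx_pool           => 'apool',
}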


@@ -0,0 +1,61 @@
require 'spec_helper'
describe 'cinder::backend::glusterfs' do
shared_examples_for 'glusterfs volume driver' do
let(:title) {'mygluster'}
let :params do
{
:glusterfs_shares => ['10.10.10.10:/volumes', '10.10.10.11:/volumes'],
:glusterfs_shares_config => '/etc/cinder/other_shares.conf',
:glusterfs_sparsed_volumes => true,
:glusterfs_mount_point_base => '/cinder_mount_point',
}
end
it 'configures glusterfs volume driver' do
should contain_cinder_config('mygluster/volume_driver').with_value(
'cinder.volume.drivers.glusterfs.GlusterfsDriver')
should contain_cinder_config('mygluster/glusterfs_shares_config').with_value(
'/etc/cinder/other_shares.conf')
should contain_cinder_config('mygluster/glusterfs_sparsed_volumes').with_value(
true)
should contain_cinder_config('mygluster/glusterfs_mount_point_base').with_value(
'/cinder_mount_point')
should contain_file('/etc/cinder/other_shares.conf').with(
:content => "10.10.10.10:/volumes\n10.10.10.11:/volumes\n",
:require => 'Package[cinder]',
:notify => 'Service[cinder-volume]'
)
end
context "with an parameter which has been removed" do
before do
params.merge!({
:glusterfs_disk_util => 'foo',
})
end
it 'should fail' do
expect { subject }.to raise_error(Puppet::Error, /glusterfs_disk_util is removed in Icehouse./)
end
end
end
context 'on Debian platforms' do
let :facts do
{ :osfamily => 'Debian' }
end
it_configures 'glusterfs volume driver'
end
context 'on RedHat platforms' do
let :facts do
{ :osfamily => 'RedHat' }
end
it_configures 'glusterfs volume driver'
end
end


@@ -0,0 +1,48 @@
require 'spec_helper'
describe 'cinder::backend::iscsi' do
let(:title) {'hippo'}
let :req_params do {
:iscsi_ip_address => '127.0.0.2',
:iscsi_helper => 'tgtadm',
}
end
let :facts do
{:osfamily => 'Debian'}
end
let :params do
req_params
end
describe 'with default params' do
it 'should configure iscsi driver' do
should contain_cinder_config('hippo/volume_backend_name').with(
:value => 'hippo')
should contain_cinder_config('hippo/iscsi_ip_address').with(
:value => '127.0.0.2')
should contain_cinder_config('hippo/iscsi_helper').with(
:value => 'tgtadm')
should contain_cinder_config('hippo/volume_group').with(
:value => 'cinder-volumes')
end
end
describe 'with RedHat' do
let :facts do
{:osfamily => 'RedHat'}
end
it { should contain_file_line('cinder include').with(
:line => 'include /etc/cinder/volumes/*',
:path => '/etc/tgt/targets.conf'
) }
end
end


@@ -0,0 +1,69 @@
require 'spec_helper'
describe 'cinder::backend::netapp' do
let(:title) {'hippo'}
let :params do
{
:volume_backend_name => 'netapp-cdot-nfs',
:netapp_login => 'netapp',
:netapp_password => 'password',
:netapp_server_hostname => '127.0.0.2',
}
end
let :default_params do
{
:netapp_server_port => '80',
:netapp_size_multiplier => '1.2',
:netapp_storage_family => 'ontap_cluster',
:netapp_storage_protocol => 'nfs',
:netapp_transport_type => 'http',
:netapp_vfiler => '',
:netapp_volume_list => '',
:netapp_vserver => '',
:expiry_thres_minutes => '720',
:thres_avl_size_perc_start => '20',
:thres_avl_size_perc_stop => '60',
:nfs_shares_config => '',
:netapp_copyoffload_tool_path => '',
:netapp_controller_ips => '',
:netapp_sa_password => '',
:netapp_storage_pools => '',
:netapp_webservice_path => '/devmgr/v2',
}
end
shared_examples_for 'netapp volume driver' do
let :params_hash do
default_params.merge(params)
end
it 'configures netapp volume driver' do
should contain_cinder_config("#{params_hash[:volume_backend_name]}/volume_driver").with_value(
'cinder.volume.drivers.netapp.common.NetAppDriver')
params_hash.each_pair do |config,value|
should contain_cinder_config("#{params_hash[:volume_backend_name]}/#{config}").with_value( value )
end
end
it 'marks netapp_password as secret' do
should contain_cinder_config("#{params_hash[:volume_backend_name]}/netapp_password").with_secret( true )
end
end
context 'with default parameters' do
# the let-defined params above already contain only the required
# parameters, so no overrides are needed for the defaults case
# (a bare `params = {}` in a before block would only assign a local
# variable and have no effect)
it_configures 'netapp volume driver'
end
context 'with provided parameters' do
it_configures 'netapp volume driver'
end
end
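
A corresponding manifest sketch, built only from the required parameters this spec passes in; the 'hippo' resource title comes from the spec, and everything else is left to the defaults the examples above assume.

cinder::backend::netapp { 'hippo':
  volume_backend_name    => 'netapp-cdot-nfs',
  netapp_login           => 'netapp',
  netapp_password        => 'password',
  netapp_server_hostname => '127.0.0.2',
}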


@@ -0,0 +1,39 @@
# author 'Aimon Bustardo <abustardo at morphlabs dot com>'
# license 'Apache License 2.0'
# description 'configures openstack cinder nexenta driver'
require 'spec_helper'
describe 'cinder::backend::nexenta' do
let (:title) { 'nexenta' }
let :params do
{ :nexenta_user => 'nexenta',
:nexenta_password => 'password',
:nexenta_host => '127.0.0.2' }
end
let :default_params do
{ :nexenta_volume => 'cinder',
:nexenta_target_prefix => 'iqn:',
:nexenta_target_group_prefix => 'cinder/',
:nexenta_blocksize => '8k',
:nexenta_sparse => true }
end
let :facts do
{ :osfamily => 'Debian' }
end
context 'with required params' do
let :params_hash do
default_params.merge(params)
end
it 'configures nexenta volume driver' do
params_hash.each_pair do |config, value|
should contain_cinder_config("nexenta/#{config}").with_value(value)
end
end
end
end


@@ -0,0 +1,48 @@
require 'spec_helper'
describe 'cinder::backend::nfs' do
let(:title) {'hippo'}
let :params do
{
:nfs_servers => ['10.10.10.10:/shares', '10.10.10.10:/shares2'],
:nfs_mount_options => 'vers=3',
:nfs_shares_config => '/etc/cinder/other_shares.conf',
:nfs_disk_util => 'du',
:nfs_sparsed_volumes => true,
:nfs_mount_point_base => '/cinder_mount_point',
:nfs_used_ratio => '0.7',
:nfs_oversub_ratio => '0.9'
}
end
describe 'nfs volume driver' do
it 'configures nfs volume driver' do
should contain_cinder_config('hippo/volume_backend_name').with(
:value => 'hippo')
should contain_cinder_config('hippo/volume_driver').with_value(
'cinder.volume.drivers.nfs.NfsDriver')
should contain_cinder_config('hippo/nfs_shares_config').with_value(
'/etc/cinder/other_shares.conf')
should contain_cinder_config('hippo/nfs_mount_options').with_value(
'vers=3')
should contain_cinder_config('hippo/nfs_sparsed_volumes').with_value(
true)
should contain_cinder_config('hippo/nfs_mount_point_base').with_value(
'/cinder_mount_point')
should contain_cinder_config('hippo/nfs_disk_util').with_value(
'du')
should contain_cinder_config('hippo/nfs_used_ratio').with_value(
'0.7')
should contain_cinder_config('hippo/nfs_oversub_ratio').with_value(
'0.9')
should contain_file('/etc/cinder/other_shares.conf').with(
:content => "10.10.10.10:/shares\n10.10.10.10:/shares2",
:require => 'Package[cinder]',
:notify => 'Service[cinder-volume]'
)
end
end
end


@@ -0,0 +1,98 @@
require 'spec_helper'
describe 'cinder::backend::rbd' do
let(:title) {'rbd-ssd'}
let :req_params do
{
:volume_backend_name => 'rbd-ssd',
:rbd_pool => 'volumes',
:glance_api_version => '2',
:rbd_user => 'test',
:rbd_secret_uuid => '0123456789',
:rbd_ceph_conf => '/foo/boo/zoo/ceph.conf',
:rbd_flatten_volume_from_snapshot => true,
:volume_tmp_dir => '/foo/tmp',
:rbd_max_clone_depth => '0'
}
end
it { should contain_class('cinder::params') }
let :params do
req_params
end
let :facts do
{:osfamily => 'Debian'}
end
describe 'rbd backend volume driver' do
it 'configures rbd volume driver' do
should contain_cinder_config("#{req_params[:volume_backend_name]}/volume_backend_name").with_value(req_params[:volume_backend_name])
should contain_cinder_config("#{req_params[:volume_backend_name]}/volume_driver").with_value('cinder.volume.drivers.rbd.RBDDriver')
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_ceph_conf").with_value(req_params[:rbd_ceph_conf])
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_flatten_volume_from_snapshot").with_value(req_params[:rbd_flatten_volume_from_snapshot])
should contain_cinder_config("#{req_params[:volume_backend_name]}/volume_tmp_dir").with_value(req_params[:volume_tmp_dir])
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_max_clone_depth").with_value(req_params[:rbd_max_clone_depth])
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_pool").with_value(req_params[:rbd_pool])
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_user").with_value(req_params[:rbd_user])
should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_secret_uuid").with_value(req_params[:rbd_secret_uuid])
should contain_file('/etc/init/cinder-volume.override').with(:ensure => 'present')
should contain_file_line('set initscript env').with(
:line => /env CEPH_ARGS=\"--id test\"/,
:path => '/etc/init/cinder-volume.override',
:notify => 'Service[cinder-volume]')
end
context 'with rbd_secret_uuid disabled' do
let(:params) { req_params.merge!({:rbd_secret_uuid => false}) }
it { should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_secret_uuid").with_ensure('absent') }
end
context 'with volume_tmp_dir disabled' do
let(:params) { req_params.merge!({:volume_tmp_dir => false}) }
it { should contain_cinder_config("#{req_params[:volume_backend_name]}/volume_tmp_dir").with_ensure('absent') }
end
context 'with another RBD backend' do
let :pre_condition do
"cinder::backend::rbd { 'ceph2':
rbd_pool => 'volumes2',
rbd_user => 'test'
}"
end
it { should contain_cinder_config("#{req_params[:volume_backend_name]}/volume_driver").with_value('cinder.volume.drivers.rbd.RBDDriver') }
it { should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_pool").with_value(req_params[:rbd_pool]) }
it { should contain_cinder_config("#{req_params[:volume_backend_name]}/rbd_user").with_value(req_params[:rbd_user]) }
it { should contain_cinder_config("ceph2/volume_driver").with_value('cinder.volume.drivers.rbd.RBDDriver') }
it { should contain_cinder_config("ceph2/rbd_pool").with_value('volumes2') }
it { should contain_cinder_config("ceph2/rbd_user").with_value('test') }
end
end
describe 'with RedHat' do
let :facts do
{ :osfamily => 'RedHat' }
end
let :params do
req_params
end
it 'should ensure that the cinder-volume sysconfig file is present' do
should contain_file('/etc/sysconfig/openstack-cinder-volume').with(
:ensure => 'present'
)
end
it 'should configure RedHat init override' do
should contain_file_line('set initscript env').with(
:line => /export CEPH_ARGS=\"--id test\"/,
:path => '/etc/sysconfig/openstack-cinder-volume',
:notify => 'Service[cinder-volume]')
end
end
end


@@ -0,0 +1,40 @@
require 'spec_helper'
describe 'cinder::backend::san' do
let (:title) { 'mysan' }
let :params do
{ :volume_driver => 'cinder.volume.san.SolarisISCSIDriver',
:san_ip => '127.0.0.1',
:san_login => 'cluster_operator',
:san_password => '007',
:san_clustername => 'storage_cluster' }
end
let :default_params do
{ :san_thin_provision => true,
:san_login => 'admin',
:san_ssh_port => 22,
:san_is_local => false,
:ssh_conn_timeout => 30,
:ssh_min_pool_conn => 1,
:ssh_max_pool_conn => 5 }
end
shared_examples_for 'a san volume driver' do
let :params_hash do
default_params.merge(params)
end
it 'configures cinder volume driver' do
params_hash.each_pair do |config,value|
should contain_cinder_config("mysan/#{config}").with_value( value )
end
end
end
context 'with parameters' do
it_configures 'a san volume driver'
end
end


@@ -0,0 +1,30 @@
require 'spec_helper'
describe 'cinder::backend::solidfire' do
let (:title) { 'solidfire' }
let :req_params do
{
:san_ip => '127.0.0.2',
:san_login => 'solidfire',
:san_password => 'password',
}
end
let :params do
req_params
end
describe 'solidfire volume driver' do
it 'configures solidfire volume driver' do
should contain_cinder_config('solidfire/volume_driver').with_value(
'cinder.volume.drivers.solidfire.SolidFire')
should contain_cinder_config('solidfire/san_ip').with_value(
'127.0.0.2')
should contain_cinder_config('solidfire/san_login').with_value(
'solidfire')
should contain_cinder_config('solidfire/san_password').with_value(
'password')
end
end
end


@@ -0,0 +1,59 @@
require 'spec_helper'
describe 'cinder::backend::vmdk' do
let(:title) { 'hippo' }
let :params do
{
:host_ip => '172.16.16.16',
:host_password => 'asdf',
:host_username => 'user'
}
end
let :optional_params do
{
:volume_folder => 'cinder-volume-folder',
:api_retry_count => 5,
:max_object_retrieval => 200,
:task_poll_interval => 10,
:image_transfer_timeout_secs => 3600,
:wsdl_location => 'http://127.0.0.1:8080/vmware/SDK/wsdl/vim25/vimService.wsdl'
}
end
it 'should configure vmdk driver in cinder.conf' do
should contain_cinder_config('hippo/volume_backend_name').with_value('hippo')
should contain_cinder_config('hippo/volume_driver').with_value('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver')
should contain_cinder_config('hippo/vmware_host_ip').with_value(params[:host_ip])
should contain_cinder_config('hippo/vmware_host_username').with_value(params[:host_username])
should contain_cinder_config('hippo/vmware_host_password').with_value(params[:host_password])
should contain_cinder_config('hippo/vmware_volume_folder').with_value('cinder-volumes')
should contain_cinder_config('hippo/vmware_api_retry_count').with_value(10)
should contain_cinder_config('hippo/vmware_max_object_retrieval').with_value(100)
should contain_cinder_config('hippo/vmware_task_poll_interval').with_value(5)
should contain_cinder_config('hippo/vmware_image_transfer_timeout_secs').with_value(7200)
should_not contain_cinder_config('hippo/vmware_wsdl_location')
end
it 'installs suds python package' do
should contain_package('python-suds').with(
:ensure => 'present')
end
context 'with optional parameters' do
before :each do
params.merge!(optional_params)
end
it 'should configure vmdk driver in cinder.conf' do
should contain_cinder_config('hippo/vmware_volume_folder').with_value(params[:volume_folder])
should contain_cinder_config('hippo/vmware_api_retry_count').with_value(params[:api_retry_count])
should contain_cinder_config('hippo/vmware_max_object_retrieval').with_value(params[:max_object_retrieval])
should contain_cinder_config('hippo/vmware_task_poll_interval').with_value(params[:task_poll_interval])
should contain_cinder_config('hippo/vmware_image_transfer_timeout_secs').with_value(params[:image_transfer_timeout_secs])
should contain_cinder_config('hippo/vmware_wsdl_location').with_value(params[:wsdl_location])
end
end
end


@@ -0,0 +1,29 @@
# Author: Andrew Woodward <awoodward@mirantis.com>
require 'spec_helper'
describe 'cinder::type_set' do
let(:title) {'hippo'}
let :params do {
:type => 'sith',
:key => 'monchichi',
:os_password => 'asdf',
:os_tenant_name => 'admin',
:os_username => 'admin',
:os_auth_url => 'http://127.127.127.1:5000/v2.0/',
}
end
it 'should have its execs' do
should contain_exec('cinder type-key sith set monchichi=hippo').with(
:command => 'cinder type-key sith set monchichi=hippo',
:environment => [
'OS_TENANT_NAME=admin',
'OS_USERNAME=admin',
'OS_PASSWORD=asdf',
'OS_AUTH_URL=http://127.127.127.1:5000/v2.0/'],
:require => 'Package[python-cinderclient]')
end
end
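
The exec asserted above corresponds to a declaration along these lines; the values are copied from the spec, and the credentials are placeholders.

cinder::type_set { 'hippo':
  type           => 'sith',
  key            => 'monchichi',
  os_password    => 'asdf',
  os_tenant_name => 'admin',
  os_username    => 'admin',
  os_auth_url    => 'http://127.127.127.1:5000/v2.0/',
}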
