Fix puppet-lint warnings and errors

This update addresses the following errors and warnings from puppet-lint,
with most corrections done automatically using puppet-lint --fix:

- 2sp_soft_tabs
- arrow_alignment
- arrow_on_right_operand_line
- double_quoted_strings
- hard_tabs
- only_variable_string
- quoted_booleans
- star_comments
- trailing_whitespace
- variables_not_enclosed

Change-Id: I7a2b0109534dd4715d459635fa33b09e7fd0a6a6
Story: 2004515
Task: 28683
Signed-off-by: Don Penney <don.penney@windriver.com>
parent 4bf75f16ce
commit e6c0e0af8c
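Most of the checks listed above correspond to mechanical rewrites. As an illustrative sketch (the resource title, path and variable names here are hypothetical, not taken from this change), the double_quoted_strings, only_variable_string and arrow_alignment fixes turn Puppet code such as

    # hypothetical example, before puppet-lint --fix
    file { "/etc/example/example.conf":
      ensure => present,
      content => "${example_content}",
      owner => "root",
      mode => "0640",
    } ->
    service { 'example-service':
      ensure => running,
    }

into

    # hypothetical example, after puppet-lint --fix
    file { '/etc/example/example.conf':
      ensure  => present,
      content => $example_content,
      owner   => 'root',
      mode    => '0640',
    }
    -> service { 'example-service':
      ensure => running,
    }

while variables_not_enclosed rewrites "$var" inside longer double-quoted strings as "${var}", and arrow_on_right_operand_line moves the chaining arrow from the end of one resource onto the line of the resource it points at, the same pattern that repeats throughout the hunks below.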
@@ -118,7 +118,7 @@ include ::openstack::ironic
include ::openstack::ironic::api

include ::platform::dcmanager
include ::platform::dcmanager::manager
include ::platform::dcmanager::manager

include ::platform::dcorch
include ::platform::dcorch::engine

@@ -31,18 +31,18 @@ class openstack::aodh
}

class { '::aodh':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}

# WRS register aodh-expirer-active in cron to run daily at the 35 minute mark
cron { 'aodh-expirer':
ensure => 'present',
command => '/usr/bin/aodh-expirer-active',
ensure => 'present',
command => '/usr/bin/aodh-expirer-active',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '35',
hour => '*/24',
user => 'root',
minute => '35',
hour => '*/24',
user => 'root',
}
}
}
@@ -62,8 +62,8 @@ class openstack::aodh::haproxy
inherits ::openstack::aodh::params {

platform::haproxy::proxy { 'aodh-restapi':
server_name => 's-aodh-restapi',
public_port => $api_port,
server_name => 's-aodh-restapi',
public_port => $api_port,
private_port => $api_port,
}
}

@@ -96,11 +96,11 @@ class openstack::aodh::api
owner => 'root',
group => 'root',
mode => '0640',
} ->
class { '::aodh::api':
host => $api_host,
sync_db => $::platform::params::init_database,
enable_proxy_headers_parsing => true,
}
-> class { '::aodh::api':
host => $api_host,
sync_db => $::platform::params::init_database,
enable_proxy_headers_parsing => true,
}

include ::openstack::aodh::firewall

@@ -113,7 +113,7 @@ class openstack::aodh::runtime {
include ::platform::amqp::params

class { '::aodh':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}
}
@@ -28,12 +28,12 @@ class openstack::barbican
}

cron { 'barbican-cleaner':
ensure => 'present',
command => '/usr/bin/barbican-manage db clean -p -e -L /var/log/barbican/barbican-clean.log',
ensure => 'present',
command => '/usr/bin/barbican-manage db clean -p -e -L /var/log/barbican/barbican-clean.log',
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
minute => '50',
hour => '*/24',
user => 'root',
minute => '50',
hour => '*/24',
user => 'root',
}
}
}

@@ -53,8 +53,8 @@ class openstack::barbican::haproxy
inherits ::openstack::barbican::params {

platform::haproxy::proxy { 'barbican-restapi':
server_name => 's-barbican-restapi',
public_port => $api_port,
server_name => 's-barbican-restapi',
public_port => $api_port,
private_port => $api_port,
}
}

@@ -104,13 +104,13 @@ class openstack::barbican::api
include ::platform::amqp::params

class { '::barbican::api':
bind_host => $api_host,
bind_port => $api_port,
host_href => $url_host,
sync_db => $::platform::params::init_database,
enable_proxy_headers_parsing => true,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
bind_host => $api_host,
bind_port => $api_port,
host_href => $url_host,
sync_db => $::platform::params::init_database,
enable_proxy_headers_parsing => true,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
}

class { '::barbican::keystone::notification':
@@ -13,8 +13,8 @@ class openstack::ceilometer {
include ::platform::kubernetes::params

class { '::ceilometer':
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
default_transport_url => $::platform::amqp::params::transport_url,
rabbit_qos_prefetch_count => 100,
}

@@ -28,7 +28,7 @@ class openstack::ceilometer {

class { '::ceilometer::db::sync':
extra_params => '--skip-metering-database',
require => [Keystone::Resource::Service_identity["ceilometer", "gnocchi"]]
require => [Keystone::Resource::Service_identity['ceilometer', 'gnocchi']]
}

if $::platform::params::vswitch_type !~ '^ovs' {

@@ -44,16 +44,16 @@ class openstack::ceilometer {
$os_password = $::gnocchi::keystone::authtoken::password
$os_interface = 'internalURL'

Class['::ceilometer::db::sync'] ->
exec { 'Creating vswitch resource types':
command => 'gnocchi resource-type create vswitch_engine \
-a cpu_id:number:true:min=0 \
-a host:string:true:max_length=64;
gnocchi resource-type create vswitch_interface_and_port \
-a host:string:false:max_length=64 \
-a network_uuid:string:false:max_length=255 \
-a network_id:string:false:max_length=255 \
-a link-speed:number:false:min=0',
Class['::ceilometer::db::sync']
-> exec { 'Creating vswitch resource types':
command => 'gnocchi resource-type create vswitch_engine \
-a cpu_id:number:true:min=0 \
-a host:string:true:max_length=64;
gnocchi resource-type create vswitch_interface_and_port \
-a host:string:false:max_length=64 \
-a network_uuid:string:false:max_length=255 \
-a network_id:string:false:max_length=255 \
-a link-speed:number:false:min=0',
environment => ["OS_AUTH_URL=${os_auth_url}",
"OS_USERNAME=${os_username}",
"OS_USER_DOMAIN_NAME=${os_user_domain}",
@@ -92,15 +92,15 @@ class openstack::ceilometer {
$memcache_ip_version = $::platform::memcached::params::listen_ip_version

$memcache_servers = $memcache_ip_version ? {
4 => "'$memcache_ip:$memcache_port'",
6 => "'inet6:[$memcache_ip]:$memcache_port'",
4 => "'${memcache_ip}:${memcache_port}'",
6 => "'inet6:[${memcache_ip}]:${memcache_port}'",
}

oslo::cache { 'ceilometer_config':
enabled => true,
backend => 'dogpile.cache.memcached',
enabled => true,
backend => 'dogpile.cache.memcached',
memcache_servers => $memcache_servers,
expiration_time => 86400,
expiration_time => 86400,
}
}

@@ -113,8 +113,8 @@ class openstack::ceilometer {
# skip the check if cinder region name has not been configured
if ($::openstack::cinder::params::region_name != undef and
$::openstack::cinder::params::region_name != $::platform::params::region_2_name) {
$shared_service_cinder = [$::openstack::cinder::params::service_type,
$::openstack::cinder::params::service_type_v2,
$shared_service_cinder = [$::openstack::cinder::params::service_type,
$::openstack::cinder::params::service_type_v2,
$::openstack::cinder::params::service_type_v3]
} else {
$shared_service_cinder = []
@@ -137,41 +137,41 @@ class openstack::ceilometer::agent::notification {
$ceilometer_directory_csv = "${ceilometer_directory}/csv"
$ceilometer_directory_versioned = "${ceilometer_directory}/${::platform::params::software_version}"

file { "/etc/ceilometer/pipeline.yaml":
file { '/etc/ceilometer/pipeline.yaml':
ensure => 'present',
content => template('openstack/pipeline.yaml.erb'),
mode => '0640',
owner => 'root',
group => 'ceilometer',
tag => 'ceilometer-yamls',
} ->
file { "${ceilometer_directory}":
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} ->
file { "${ceilometer_directory_csv}":
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} ->
file { "${ceilometer_directory_versioned}":
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} ->
file { "${ceilometer_directory_versioned}/pipeline.yaml":
}
-> file { $ceilometer_directory:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $ceilometer_directory_csv:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { $ceilometer_directory_versioned:
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { "${ceilometer_directory_versioned}/pipeline.yaml":
ensure => 'file',
source => '/etc/ceilometer/pipeline.yaml',
ensure => 'file',
owner => 'root',
group => 'root',
mode => '0640',
owner => 'root',
group => 'root',
mode => '0640',
}

file { "/etc/ceilometer/gnocchi_resources.yaml":
file { '/etc/ceilometer/gnocchi_resources.yaml':
ensure => 'present',
content => template('openstack/gnocchi_resources.yaml.erb'),
mode => '0640',

@@ -191,7 +191,7 @@ class openstack::ceilometer::agent::notification {

# FIXME(mpeters): generic parameter can be moved to the puppet module
ceilometer_config {
'DEFAULT/csv_location': value => "${ceilometer_directory_csv}";
'DEFAULT/csv_location': value => $ceilometer_directory_csv;
'DEFAULT/csv_location_strict': value => true;
'notification/workers': value => $agent_workers_count;
'notification/batch_size': value => 100;
@ -209,55 +209,55 @@ class openstack::ceilometer::polling (
|
||||
$image_polling_interval = 600,
|
||||
$volume_polling_interval = 600,
|
||||
) {
|
||||
include ::platform::params
|
||||
include ::platform::kubernetes::params
|
||||
include ::platform::params
|
||||
include ::platform::kubernetes::params
|
||||
|
||||
file { "/etc/ceilometer/polling.yaml":
|
||||
ensure => 'present',
|
||||
content => template('openstack/polling.yaml.erb'),
|
||||
mode => '0640',
|
||||
owner => 'root',
|
||||
group => 'ceilometer',
|
||||
tag => 'ceilometer-yamls',
|
||||
}
|
||||
file { '/etc/ceilometer/polling.yaml':
|
||||
ensure => 'present',
|
||||
content => template('openstack/polling.yaml.erb'),
|
||||
mode => '0640',
|
||||
owner => 'root',
|
||||
group => 'ceilometer',
|
||||
tag => 'ceilometer-yamls',
|
||||
}
|
||||
|
||||
if $::personality == 'controller' {
|
||||
$central_namespace = true
|
||||
} else {
|
||||
$central_namespace = false
|
||||
}
|
||||
if $::personality == 'controller' {
|
||||
$central_namespace = true
|
||||
} else {
|
||||
$central_namespace = false
|
||||
}
|
||||
|
||||
if (str2bool($::disable_worker_services) or
|
||||
$::platform::kubernetes::params::enabled) {
|
||||
$agent_enable = false
|
||||
$compute_namespace = false
|
||||
if (str2bool($::disable_worker_services) or
|
||||
$::platform::kubernetes::params::enabled) {
|
||||
$agent_enable = false
|
||||
$compute_namespace = false
|
||||
|
||||
file { '/etc/pmon.d/ceilometer-polling.conf':
|
||||
ensure => absent,
|
||||
}
|
||||
} else {
|
||||
$agent_enable = true
|
||||
file { '/etc/pmon.d/ceilometer-polling.conf':
|
||||
ensure => absent,
|
||||
}
|
||||
} else {
|
||||
$agent_enable = true
|
||||
|
||||
if str2bool($::is_worker_subfunction) {
|
||||
$pmon_target = "/etc/ceilometer/ceilometer-polling-compute.conf.pmon"
|
||||
$compute_namespace = true
|
||||
} else {
|
||||
$pmon_target = "/etc/ceilometer/ceilometer-polling.conf.pmon"
|
||||
$compute_namespace = false
|
||||
}
|
||||
if str2bool($::is_worker_subfunction) {
|
||||
$pmon_target = '/etc/ceilometer/ceilometer-polling-compute.conf.pmon'
|
||||
$compute_namespace = true
|
||||
} else {
|
||||
$pmon_target = '/etc/ceilometer/ceilometer-polling.conf.pmon'
|
||||
$compute_namespace = false
|
||||
}
|
||||
|
||||
file { "/etc/pmon.d/ceilometer-polling.conf":
|
||||
ensure => link,
|
||||
target => $pmon_target,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
}
|
||||
}
|
||||
file { '/etc/pmon.d/ceilometer-polling.conf':
|
||||
ensure => link,
|
||||
target => $pmon_target,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
}
|
||||
}
|
||||
|
||||
class { '::ceilometer::agent::polling':
|
||||
enabled => $agent_enable,
|
||||
central_namespace => $central_namespace,
|
||||
compute_namespace => $compute_namespace,
|
||||
}
|
||||
class { '::ceilometer::agent::polling':
|
||||
enabled => $agent_enable,
|
||||
central_namespace => $central_namespace,
|
||||
compute_namespace => $compute_namespace,
|
||||
}
|
||||
}
|
||||
|
@ -77,7 +77,7 @@ class openstack::cinder::params (
|
||||
} else {
|
||||
$is_initial_cinder_ceph = false
|
||||
}
|
||||
|
||||
|
||||
# Cinder needs to be running on initial configuration of either Ceph or LVM
|
||||
if str2bool($::is_controller_active) and ($is_initial_cinder_lvm or $is_initial_cinder_ceph) {
|
||||
$enable_cinder_service = true
|
||||
@ -139,32 +139,32 @@ class openstack::cinder
|
||||
}
|
||||
|
||||
if $service_enabled {
|
||||
file { "${cinder_directory}":
|
||||
file { $cinder_directory:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${cinder_image_conversion_dir}":
|
||||
}
|
||||
-> file { $cinder_image_conversion_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${cinder_directory}/data":
|
||||
}
|
||||
-> file { "${cinder_directory}/data":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
} else {
|
||||
file { "${cinder_directory}":
|
||||
file { $cinder_directory:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${cinder_directory}/data":
|
||||
}
|
||||
-> file { "${cinder_directory}/data":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -189,7 +189,7 @@ class openstack::cinder
|
||||
|
||||
include ::openstack::cinder::backup
|
||||
include ::platform::multipath::params
|
||||
|
||||
|
||||
# TODO(mpeters): move to puppet module formal parameters
|
||||
cinder_config {
|
||||
'DEFAULT/my_ip': value => $controller_address;
|
||||
@ -294,24 +294,24 @@ class openstack::cinder::lvm::filesystem::drbd (
|
||||
$ha_primary = true
|
||||
$initial_setup = true
|
||||
$service_enable = true
|
||||
$service_ensure = "running"
|
||||
$service_ensure = 'running'
|
||||
} else {
|
||||
$ha_primary = false
|
||||
$initial_setup = false
|
||||
$service_enable = false
|
||||
$service_ensure = "stopped"
|
||||
$service_ensure = 'stopped'
|
||||
}
|
||||
|
||||
if $is_node_cinder_lvm {
|
||||
|
||||
# prepare disk for drbd
|
||||
file { '/etc/udev/mount.blacklist':
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
file_line { 'blacklist ${cinder_disk} automount':
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
-> file_line { 'blacklist ${cinder_disk} automount':
|
||||
ensure => present,
|
||||
line => $cinder_disk,
|
||||
path => '/etc/udev/mount.blacklist',
|
||||
@ -357,33 +357,33 @@ class openstack::cinder::lvm::filesystem::drbd (
|
||||
# Note: Cinder disk replacement is triggered from sysinv by removing
|
||||
# the checkpoint file behind is_node_cinder_lvm.
|
||||
physical_volume { $device:
|
||||
ensure => present,
|
||||
ensure => present,
|
||||
require => Drbd::Resource[$drbd_resource]
|
||||
} ->
|
||||
volume_group { $vg_name:
|
||||
}
|
||||
-> volume_group { $vg_name:
|
||||
ensure => present,
|
||||
physical_volumes => $device,
|
||||
} ->
|
||||
}
|
||||
# Create an initial LV, because the LVM ocf resource does not work with
|
||||
# an empty VG.
|
||||
logical_volume { 'anchor-lv':
|
||||
-> logical_volume { 'anchor-lv':
|
||||
ensure => present,
|
||||
volume_group => $vg_name,
|
||||
size => '1M',
|
||||
size_is_minsize => true,
|
||||
} ->
|
||||
}
|
||||
# Deactivate the VG now. If this isn't done, it prevents DRBD from
|
||||
# being stopped later by the SM.
|
||||
exec { 'Deactivate VG':
|
||||
-> exec { 'Deactivate VG':
|
||||
command => "vgchange -a ln ${vg_name}",
|
||||
} ->
|
||||
}
|
||||
# Make sure the primary resource is in the correct state so that on swact to
|
||||
# controller-1 sm has the resource in an acceptable state to become managed
|
||||
# and primary. But, if this primary is a single controller we will restart
|
||||
# SM so keep it primary
|
||||
|
||||
# TODO (rchurch): fix up the drbd_handoff logic.
|
||||
exec { 'Set $drbd_resource role':
|
||||
-> exec { 'Set $drbd_resource role':
|
||||
command => str2bool($drbd_handoff) ? {true => "drbdadm secondary ${drbd_resource}", default => '/bin/true'},
|
||||
unless => "drbdadm role ${drbd_resource} | egrep '^Secondary'",
|
||||
}
|
||||
@ -420,8 +420,8 @@ class openstack::cinder::lvm(
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
require => File[$cinder_directory],
|
||||
} ->
|
||||
file { "${cinder_directory}/iscsi-target/saveconfig.json":
|
||||
}
|
||||
-> file { "${cinder_directory}/iscsi-target/saveconfig.json":
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -434,23 +434,23 @@ class openstack::cinder::lvm(
|
||||
}
|
||||
|
||||
if $lvm_type == 'thin' {
|
||||
$iscsi_lvm_config = {
|
||||
'lvm/iscsi_target_flags' => {'value' => 'direct'},
|
||||
'lvm/lvm_type' => {'value' => 'thin'},
|
||||
'DEFAULT/max_over_subscription_ratio' => {'value' => 1.0}
|
||||
}
|
||||
$iscsi_lvm_config = {
|
||||
'lvm/iscsi_target_flags' => {'value' => 'direct'},
|
||||
'lvm/lvm_type' => {'value' => 'thin'},
|
||||
'DEFAULT/max_over_subscription_ratio' => {'value' => 1.0}
|
||||
}
|
||||
} else {
|
||||
$iscsi_lvm_config = {
|
||||
'lvm/iscsi_target_flags' => {'value' => 'direct'},
|
||||
'lvm/lvm_type' => {'value' => 'default'},
|
||||
'lvm/volume_clear' => {'value' => 'none'}
|
||||
}
|
||||
$iscsi_lvm_config = {
|
||||
'lvm/iscsi_target_flags' => {'value' => 'direct'},
|
||||
'lvm/lvm_type' => {'value' => 'default'},
|
||||
'lvm/volume_clear' => {'value' => 'none'}
|
||||
}
|
||||
}
|
||||
|
||||
cinder::backend::iscsi { 'lvm':
|
||||
iscsi_ip_address => $iscsi_ip_address,
|
||||
extra_options => $iscsi_lvm_config ,
|
||||
volumes_dir => "${cinder_directory}/data/volumes",
|
||||
extra_options => $iscsi_lvm_config ,
|
||||
volumes_dir => "${cinder_directory}/data/volumes",
|
||||
}
|
||||
}
|
||||
|
||||
@ -464,9 +464,9 @@ define openstack::cinder::backend::ceph(
|
||||
|
||||
if $backend_enabled {
|
||||
cinder::backend::rbd {$backend_name:
|
||||
backend_host => '$host',
|
||||
rbd_pool => $rbd_pool,
|
||||
rbd_user => $rbd_user,
|
||||
backend_host => '$host',
|
||||
rbd_pool => $rbd_pool,
|
||||
rbd_user => $rbd_user,
|
||||
rbd_ceph_conf => $rbd_ceph_conf,
|
||||
}
|
||||
} else {
|
||||
@ -521,11 +521,11 @@ define openstack::cinder::backend::hpe3par
|
||||
$feature_enabled = "openstack::cinder::${name}::feature_enabled"
|
||||
|
||||
create_resources('cinder_config', hiera_hash($hiera_params, {}))
|
||||
|
||||
|
||||
if $feature_enabled {
|
||||
exec {"Including $name configuration":
|
||||
exec {"Including ${name} configuration":
|
||||
path => [ '/usr/bin', '/usr/sbin', '/bin', '/sbin' ],
|
||||
command => "echo Including $name configuration",
|
||||
command => "echo Including ${name} configuration",
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -561,7 +561,7 @@ class openstack::cinder::firewall
|
||||
if $service_enabled {
|
||||
platform::firewall::rule { 'cinder-api':
|
||||
service_name => 'cinder',
|
||||
ports => $api_port,
|
||||
ports => $api_port,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -572,8 +572,8 @@ class openstack::cinder::haproxy
|
||||
|
||||
if $service_enabled {
|
||||
platform::haproxy::proxy { 'cinder-restapi':
|
||||
server_name => 's-cinder',
|
||||
public_port => $api_port,
|
||||
server_name => 's-cinder',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -644,10 +644,10 @@ class openstack::cinder::api
|
||||
}
|
||||
|
||||
class { '::cinder::api':
|
||||
bind_host => $api_host,
|
||||
service_workers => $api_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
enabled => str2bool($enable_cinder_service)
|
||||
bind_host => $api_host,
|
||||
service_workers => $api_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
enabled => str2bool($enable_cinder_service)
|
||||
}
|
||||
|
||||
if $::openstack::cinder::params::configure_endpoint {
|
||||
@ -674,7 +674,7 @@ class openstack::cinder::pre {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' and $enabled {
|
||||
# need to enable cinder-api-proxy in order to apply the cinder manifest
|
||||
exec { 'Enable Dcorch Cinder API Proxy':
|
||||
command => "systemctl enable dcorch-cinder-api-proxy; systemctl start dcorch-cinder-api-proxy",
|
||||
command => 'systemctl enable dcorch-cinder-api-proxy; systemctl start dcorch-cinder-api-proxy',
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -704,7 +704,7 @@ class openstack::cinder::post
|
||||
# To workaround an upstream bug in rbd code, we need to create
|
||||
# an empty file /etc/ceph/ceph.client.None.keyring in order to
|
||||
# do cinder backup and restore.
|
||||
file { "/etc/ceph/ceph.client.None.keyring":
|
||||
file { '/etc/ceph/ceph.client.None.keyring':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -723,14 +723,14 @@ class openstack::cinder::post
|
||||
# To allow for the transition it must be explicitly stopped. Once puppet
|
||||
# can directly handle SM managed services, then this can be removed.
|
||||
exec { 'Disable OpenStack - Cinder API':
|
||||
command => "systemctl stop openstack-cinder-api; systemctl disable openstack-cinder-api",
|
||||
command => 'systemctl stop openstack-cinder-api; systemctl disable openstack-cinder-api',
|
||||
require => Class['openstack::cinder'],
|
||||
}
|
||||
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
# stop and disable the cinder api proxy to allow SM to manage the service
|
||||
exec { 'Disable Dcorch Cinder API Proxy':
|
||||
command => "systemctl stop dcorch-cinder-api-proxy; systemctl disable dcorch-cinder-api-proxy",
|
||||
command => 'systemctl stop dcorch-cinder-api-proxy; systemctl disable dcorch-cinder-api-proxy',
|
||||
require => Class['openstack::cinder'],
|
||||
}
|
||||
}
|
||||
|
@ -4,22 +4,22 @@ class openstack::client
|
||||
include ::platform::client::credentials::params
|
||||
$keyring_file = $::platform::client::credentials::params::keyring_file
|
||||
|
||||
file {"/etc/nova/openrc":
|
||||
ensure => "present",
|
||||
file {'/etc/nova/openrc':
|
||||
ensure => 'present',
|
||||
mode => '0640',
|
||||
owner => 'nova',
|
||||
group => 'root',
|
||||
content => template('openstack/openrc.admin.erb'),
|
||||
}
|
||||
|
||||
file {"/etc/nova/ldap_openrc_template":
|
||||
ensure => "present",
|
||||
file {'/etc/nova/ldap_openrc_template':
|
||||
ensure => 'present',
|
||||
mode => '0644',
|
||||
content => template('openstack/openrc.ldap.erb'),
|
||||
}
|
||||
|
||||
file {"/etc/bash_completion.d/openstack":
|
||||
ensure => "present",
|
||||
file {'/etc/bash_completion.d/openstack':
|
||||
ensure => 'present',
|
||||
mode => '0644',
|
||||
content => generate('/usr/bin/openstack', 'complete'),
|
||||
}
|
||||
|
@ -24,25 +24,25 @@ class openstack::glance
|
||||
include ::platform::params
|
||||
include ::platform::amqp::params
|
||||
|
||||
file { "${glance_directory}":
|
||||
file { $glance_directory:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${glance_directory}/image-cache":
|
||||
}
|
||||
-> file { "${glance_directory}/image-cache":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${glance_directory}/images":
|
||||
}
|
||||
-> file { "${glance_directory}/images":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${glance_image_conversion_dir}":
|
||||
}
|
||||
-> file { $glance_image_conversion_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -55,7 +55,7 @@ class openstack::glance
|
||||
}
|
||||
|
||||
if $::platform::params::init_database {
|
||||
class { "::glance::db::postgresql":
|
||||
class { '::glance::db::postgresql':
|
||||
encoding => 'UTF8',
|
||||
}
|
||||
}
|
||||
@ -79,13 +79,13 @@ class openstack::glance
|
||||
}
|
||||
|
||||
cron { 'glance-cleaner':
|
||||
ensure => 'present',
|
||||
command => "/usr/bin/glance-cleaner --config-file /etc/glance/glance-api.conf --delete-interval $glance_delete_interval",
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => '35',
|
||||
hour => "*/$glance_delete_interval",
|
||||
user => 'root',
|
||||
}
|
||||
ensure => 'present',
|
||||
command => "/usr/bin/glance-cleaner --config-file /etc/glance/glance-api.conf --delete-interval ${glance_delete_interval}",
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => '35',
|
||||
hour => "*/${glance_delete_interval}",
|
||||
user => 'root',
|
||||
}
|
||||
|
||||
# In glance cached mode run the pruner once every 6 hours to clean
|
||||
# stale or orphaned images
|
||||
@ -101,7 +101,7 @@ class openstack::glance
|
||||
}
|
||||
|
||||
class { '::glance::notify::rabbitmq':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
@ -117,7 +117,7 @@ class openstack::glance::firewall
|
||||
|
||||
platform::firewall::rule { 'glance-api':
|
||||
service_name => 'glance',
|
||||
ports => $api_port,
|
||||
ports => $api_port,
|
||||
}
|
||||
}
|
||||
|
||||
@ -126,9 +126,9 @@ class openstack::glance::haproxy
|
||||
inherits ::openstack::glance::params {
|
||||
|
||||
platform::haproxy::proxy { 'glance-restapi':
|
||||
server_name => 's-glance',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
server_name => 's-glance',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
private_ip_address => $api_host,
|
||||
}
|
||||
}
|
||||
@ -139,7 +139,7 @@ class openstack::glance::api
|
||||
include ::platform::params
|
||||
|
||||
if $service_enabled {
|
||||
if ($::openstack::glance::params::service_create and
|
||||
if ($::openstack::glance::params::service_create and
|
||||
$::platform::params::init_keystone) {
|
||||
include ::glance::keystone::auth
|
||||
}
|
||||
@ -170,19 +170,19 @@ class openstack::glance::api
|
||||
}
|
||||
|
||||
class { '::glance::api':
|
||||
bind_host => $api_host,
|
||||
use_user_token => $api_use_user_token,
|
||||
registry_host => $registry_host,
|
||||
bind_host => $api_host,
|
||||
use_user_token => $api_use_user_token,
|
||||
registry_host => $registry_host,
|
||||
remote_registry_region_name => $remote_registry_region_name,
|
||||
workers => $api_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
show_image_direct_url => $show_image_direct_url,
|
||||
workers => $api_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
show_image_direct_url => $show_image_direct_url,
|
||||
}
|
||||
|
||||
if 'rbd' in $enabled_backends {
|
||||
class { '::glance::backend::rbd':
|
||||
rbd_store_pool => $rbd_store_pool,
|
||||
rbd_store_ceph_conf => $rbd_store_ceph_conf,
|
||||
rbd_store_pool => $rbd_store_pool,
|
||||
rbd_store_ceph_conf => $rbd_store_ceph_conf,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -40,8 +40,8 @@ class openstack::gnocchi::haproxy
|
||||
inherits ::openstack::gnocchi::params {
|
||||
|
||||
platform::haproxy::proxy { 'gnocchi-restapi':
|
||||
server_name => 's-gnocchi-restapi',
|
||||
public_port => $api_port,
|
||||
server_name => 's-gnocchi-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -79,7 +79,7 @@ class openstack::gnocchi::api
|
||||
# gnocchi::keystone::auth::configure_endpoint which is
|
||||
# set via sysinv puppet
|
||||
if $::openstack::gnocchi::params::service_create and
|
||||
$::platform::params::init_keystone {
|
||||
$::platform::params::init_keystone {
|
||||
include ::gnocchi::keystone::auth
|
||||
}
|
||||
|
||||
@ -104,9 +104,9 @@ class openstack::gnocchi::api
|
||||
$sacks_number = $::openstack::gnocchi::metricd::metricd_workers + 2
|
||||
|
||||
if $::platform::params::init_database {
|
||||
$options = "--sacks-number $sacks_number"
|
||||
$options = "--sacks-number ${sacks_number}"
|
||||
} else {
|
||||
$options = "--sacks-number $sacks_number --skip-index --skip-archive-policies-creation"
|
||||
$options = "--sacks-number ${sacks_number} --skip-index --skip-archive-policies-creation"
|
||||
}
|
||||
|
||||
class { '::gnocchi::db::sync':
|
||||
|
@ -8,7 +8,7 @@ class openstack::heat::params (
|
||||
$domain_pwd = undef,
|
||||
$service_name = 'openstack-heat',
|
||||
$service_tenant = undef,
|
||||
$default_endpoint_type = "internalURL",
|
||||
$default_endpoint_type = 'internalURL',
|
||||
$service_create = false,
|
||||
$service_enabled = true,
|
||||
) {
|
||||
@ -34,10 +34,10 @@ class openstack::heat
|
||||
include ::heat::keystone::authtoken
|
||||
|
||||
class { '::heat':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
heat_clients_endpoint_type => $default_endpoint_type,
|
||||
sync_db => $::platform::params::init_database,
|
||||
sync_db => $::platform::params::init_database,
|
||||
}
|
||||
|
||||
class { '::heat::engine':
|
||||
@ -69,7 +69,7 @@ class openstack::heat
|
||||
keystone_tenant { $service_tenant:
|
||||
ensure => present,
|
||||
enabled => true,
|
||||
description => "Tenant for $::platform::params::region_2_name",
|
||||
description => "Tenant for ${::platform::params::region_2_name}",
|
||||
}
|
||||
class { '::heat::keystone::domain':
|
||||
domain_name => $domain_name,
|
||||
@ -91,8 +91,8 @@ class openstack::heat
|
||||
}
|
||||
} else {
|
||||
keystone_user_role { 'admin@admin':
|
||||
ensure => present,
|
||||
roles => ['admin', '_member_', 'heat_stack_owner'],
|
||||
ensure => present,
|
||||
roles => ['admin', '_member_', 'heat_stack_owner'],
|
||||
}
|
||||
}
|
||||
|
||||
@ -103,15 +103,15 @@ class openstack::heat
|
||||
|
||||
class { '::heat::keystone::domain':
|
||||
manage_domain => true,
|
||||
manage_user => true,
|
||||
manage_role => true,
|
||||
manage_user => true,
|
||||
manage_role => true,
|
||||
}
|
||||
} else {
|
||||
# Second controller does not invoke keystone, but does need configuration
|
||||
class { '::heat::keystone::domain':
|
||||
manage_domain => false,
|
||||
manage_user => false,
|
||||
manage_role => false,
|
||||
manage_user => false,
|
||||
manage_role => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -124,18 +124,18 @@ class openstack::heat
|
||||
'clients_glance/endpoint_type': value => $default_endpoint_type;
|
||||
'clients_cinder/endpoint_type': value => $default_endpoint_type;
|
||||
'clients_ceilometer/endpoint_type':value => $default_endpoint_type;
|
||||
'clients_heat/endpoint_type': value => "publicURL";
|
||||
'clients_heat/endpoint_type': value => 'publicURL';
|
||||
'clients_keystone/endpoint_type': value => $default_endpoint_type;
|
||||
}
|
||||
|
||||
# Run heat-manage purge_deleted daily at the 20 minute mark
|
||||
cron { 'heat-purge-deleted':
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/heat-purge-deleted-active',
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/heat-purge-deleted-active',
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => '20',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
minute => '20',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -166,20 +166,20 @@ class openstack::heat::haproxy
|
||||
inherits ::openstack::heat::params {
|
||||
|
||||
platform::haproxy::proxy { 'heat-restapi':
|
||||
server_name => 's-heat',
|
||||
public_port => $api_port,
|
||||
server_name => 's-heat',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'heat-cfn-restapi':
|
||||
server_name => 's-heat-cfn',
|
||||
public_port => $cfn_port,
|
||||
server_name => 's-heat-cfn',
|
||||
public_port => $cfn_port,
|
||||
private_port => $cfn_port,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'heat-cloudwatch':
|
||||
server_name => 's-heat-cloudwatch',
|
||||
public_port => $cloudwatch_port,
|
||||
server_name => 's-heat-cloudwatch',
|
||||
public_port => $cloudwatch_port,
|
||||
private_port => $cloudwatch_port,
|
||||
}
|
||||
}
|
||||
@ -203,17 +203,17 @@ class openstack::heat::api
|
||||
if $service_enabled {
|
||||
class { '::heat::api':
|
||||
bind_host => $api_host,
|
||||
workers => $api_workers,
|
||||
workers => $api_workers,
|
||||
}
|
||||
|
||||
class { '::heat::api_cfn':
|
||||
bind_host => $api_host,
|
||||
workers => $api_workers,
|
||||
workers => $api_workers,
|
||||
}
|
||||
|
||||
class { '::heat::api_cloudwatch':
|
||||
bind_host => $api_host,
|
||||
workers => $api_workers,
|
||||
workers => $api_workers,
|
||||
}
|
||||
|
||||
include ::openstack::heat::firewall
|
||||
|
@ -47,33 +47,33 @@ class openstack::horizon
|
||||
groups => ['wrs_protected'],
|
||||
}
|
||||
|
||||
file { "/www/tmp":
|
||||
path => "/www/tmp",
|
||||
file { '/www/tmp':
|
||||
ensure => directory,
|
||||
path => '/www/tmp',
|
||||
mode => '1700',
|
||||
}
|
||||
|
||||
file {"/www/var":
|
||||
path => "/www/var",
|
||||
ensure => directory,
|
||||
owner => "www",
|
||||
file {'/www/var':
|
||||
ensure => directory,
|
||||
path => '/www/var',
|
||||
owner => 'www',
|
||||
require => User['www']
|
||||
}
|
||||
|
||||
file {"/www/var/log":
|
||||
path => "/www/var/log",
|
||||
ensure => directory,
|
||||
owner => "www",
|
||||
file {'/www/var/log':
|
||||
ensure => directory,
|
||||
path => '/www/var/log',
|
||||
owner => 'www',
|
||||
require => User['www']
|
||||
}
|
||||
|
||||
file {"/etc/lighttpd/lighttpd.conf":
|
||||
ensure => present,
|
||||
file {'/etc/lighttpd/lighttpd.conf':
|
||||
ensure => present,
|
||||
content => template('openstack/lighttpd.conf.erb')
|
||||
}
|
||||
|
||||
file {"/etc/lighttpd/lighttpd-inc.conf":
|
||||
ensure => present,
|
||||
file {'/etc/lighttpd/lighttpd-inc.conf':
|
||||
ensure => present,
|
||||
content => template('openstack/lighttpd-inc.conf.erb')
|
||||
}
|
||||
|
||||
@ -95,7 +95,7 @@ class openstack::horizon
|
||||
|
||||
if str2bool($::is_initial_config) {
|
||||
exec { 'Stop lighttpd':
|
||||
command => "systemctl stop lighttpd; systemctl disable lighttpd",
|
||||
command => 'systemctl stop lighttpd; systemctl disable lighttpd',
|
||||
require => User['www']
|
||||
}
|
||||
}
|
||||
@ -112,8 +112,8 @@ class openstack::horizon
|
||||
|
||||
include ::horizon::params
|
||||
file { '/etc/openstack-dashboard/horizon-config.ini':
|
||||
content => template('openstack/horizon-params.erb'),
|
||||
ensure => present,
|
||||
content => template('openstack/horizon-params.erb'),
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => $::horizon::params::apache_group,
|
||||
@ -132,8 +132,8 @@ class openstack::horizon
|
||||
$region_2_name = $::platform::params::region_2_name
|
||||
$region_openstack_host = $openstack_host
|
||||
file { '/etc/openstack-dashboard/region-config.ini':
|
||||
content => template('openstack/horizon-region-config.erb'),
|
||||
ensure => present,
|
||||
content => template('openstack/horizon-region-config.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
} else {
|
||||
@ -162,8 +162,8 @@ class openstack::horizon
|
||||
'enable_firewall' => $neutron_enable_firewall,
|
||||
'enable_vpn' => $neutron_enable_vpn
|
||||
},
|
||||
configure_apache => false,
|
||||
compress_offline => false,
|
||||
configure_apache => false,
|
||||
compress_offline => false,
|
||||
}
|
||||
|
||||
# hack for memcached, for now we bind to localhost on ipv6
|
||||
@ -177,12 +177,12 @@ class openstack::horizon
|
||||
|
||||
# Run clearsessions daily at the 40 minute mark
|
||||
cron { 'clearsessions':
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/horizon-clearsessions',
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/horizon-clearsessions',
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => '40',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
minute => '40',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
}
|
||||
|
||||
include ::openstack::horizon::firewall
|
||||
@ -216,11 +216,11 @@ class openstack::horizon::reload {
|
||||
# Remove all active Horizon user sessions
|
||||
# so that we don't use any stale cached data
|
||||
# such as endpoints
|
||||
exec { "remove-Horizon-user-sessions":
|
||||
exec { 'remove-Horizon-user-sessions':
|
||||
path => ['/usr/bin'],
|
||||
command => "/usr/bin/rm -f /var/tmp/sessionid*",
|
||||
command => '/usr/bin/rm -f /var/tmp/sessionid*',
|
||||
}
|
||||
|
||||
|
||||
platform::sm::restart {'horizon': }
|
||||
platform::sm::restart {'lighttpd': }
|
||||
}
|
||||
|
@ -2,8 +2,8 @@ class openstack::ironic::params (
|
||||
$api_port = 6485,
|
||||
$service_enabled = false,
|
||||
$service_name = 'openstack-ironic',
|
||||
$region_name = undef,
|
||||
$default_endpoint_type = "internalURL",
|
||||
$region_name = undef,
|
||||
$default_endpoint_type = 'internalURL',
|
||||
$tftp_server = undef,
|
||||
$provisioning_network = undef,
|
||||
$controller_0_if = undef,
|
||||
@ -15,7 +15,7 @@ class openstack::ironic::params (
|
||||
|
||||
include ::platform::params
|
||||
$sw_version = $::platform::params::software_version
|
||||
$ironic_basedir = "/opt/cgcs/ironic"
|
||||
$ironic_basedir = '/opt/cgcs/ironic'
|
||||
$ironic_versioned_dir = "${ironic_basedir}/${sw_version}"
|
||||
$ironic_tftpboot_dir = "${ironic_versioned_dir}/tftpboot"
|
||||
}
|
||||
@ -37,17 +37,17 @@ class openstack::ironic::haproxy
|
||||
|
||||
if $service_enabled {
|
||||
platform::haproxy::proxy { 'ironic-restapi':
|
||||
server_name => 's-ironic-restapi',
|
||||
public_port => $api_port,
|
||||
server_name => 's-ironic-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'ironic-tftp-restapi':
|
||||
server_name => 's-ironic-tftp-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
server_name => 's-ironic-tftp-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
public_ip_address => $tftp_server,
|
||||
enable_https => false,
|
||||
enable_https => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -70,13 +70,13 @@ class openstack::ironic
|
||||
}
|
||||
|
||||
class {'::ironic':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
sync_db => false,
|
||||
my_ip => $api_host,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
sync_db => false,
|
||||
my_ip => $api_host,
|
||||
}
|
||||
if $tftp_server != undef {
|
||||
$ipa_api_url = "http://$tftp_server:$api_port"
|
||||
$ipa_api_url = "http://${tftp_server}:${api_port}"
|
||||
}
|
||||
else {
|
||||
$ipa_api_url = undef
|
||||
@ -84,53 +84,53 @@ class openstack::ironic
|
||||
|
||||
# provisioning and cleaning networks are intentionally the same
|
||||
class {'::ironic::conductor':
|
||||
provisioning_network => $provisioning_network,
|
||||
cleaning_network => $provisioning_network,
|
||||
api_url => $ipa_api_url,
|
||||
provisioning_network => $provisioning_network,
|
||||
cleaning_network => $provisioning_network,
|
||||
api_url => $ipa_api_url,
|
||||
}
|
||||
|
||||
$tftp_master_path = "${ironic_tftpboot_dir}/master_images"
|
||||
class {'::ironic::drivers::pxe':
|
||||
tftp_server => $tftp_server,
|
||||
tftp_root => $ironic_tftpboot_dir,
|
||||
tftp_master_path => $tftp_master_path,
|
||||
tftp_server => $tftp_server,
|
||||
tftp_root => $ironic_tftpboot_dir,
|
||||
tftp_master_path => $tftp_master_path,
|
||||
pxe_append_params => 'nofb nomodeset vga=normal console=ttyS0,115200n8',
|
||||
}
|
||||
|
||||
# configure tftp root directory
|
||||
if $::platform::params::init_database {
|
||||
$ironic_tftp_root_dir = "/opt/cgcs/ironic/${sw_version}"
|
||||
file { "${$ironic_basedir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${ironic_versioned_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${ironic_tftpboot_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $ironic_basedir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $ironic_versioned_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $ironic_tftpboot_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
}
|
||||
if str2bool($::is_controller_active) {
|
||||
file { "${ironic_tftpboot_dir}/pxelinux.0":
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
source => "/usr/share/syslinux/pxelinux.0"
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
source => '/usr/share/syslinux/pxelinux.0'
|
||||
}
|
||||
file { "${ironic_tftpboot_dir}/chain.c32":
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
source => "/usr/share/syslinux/chain.c32"
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
source => '/usr/share/syslinux/chain.c32'
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -152,25 +152,25 @@ class openstack::ironic::api
|
||||
|
||||
}
|
||||
|
||||
class openstack::ironic::upgrade
|
||||
inherits ::openstack::ironic::params{
|
||||
class openstack::ironic::upgrade
|
||||
inherits ::openstack::ironic::params{
|
||||
|
||||
file { "${$ironic_basedir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${ironic_versioned_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${ironic_tftpboot_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $ironic_basedir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $ironic_versioned_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $ironic_tftpboot_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'ironic',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ class openstack::keystone (
|
||||
# In the case of a Distributed Cloud deployment, apply the Keystone
|
||||
# controller configuration for each SubCloud, since Keystone is also
|
||||
# a localized service.
|
||||
if (!$::platform::params::region_config or
|
||||
if (!$::platform::params::region_config or
|
||||
$::platform::params::distributed_cloud_role == 'subcloud') {
|
||||
include ::platform::amqp::params
|
||||
include ::platform::network::mgmt::params
|
||||
@ -55,12 +55,12 @@ class openstack::keystone (
|
||||
Class[$name] -> Class['::platform::client'] -> Class['::openstack::client']
|
||||
|
||||
include ::keystone::client
|
||||
|
||||
|
||||
|
||||
# Configure keystone graceful shutdown timeout
|
||||
# TODO(mpeters): move to puppet-keystone for module configuration
|
||||
keystone_config {
|
||||
"DEFAULT/graceful_shutdown_timeout": value => 15;
|
||||
'DEFAULT/graceful_shutdown_timeout': value => 15;
|
||||
}
|
||||
|
||||
# (Pike Rebase) Disable token post expiration window since this
|
||||
@ -68,28 +68,28 @@ class openstack::keystone (
|
||||
# TODO(knasim): move this to puppet-keystone along with graceful
|
||||
# shutdown timeout param
|
||||
keystone_config {
|
||||
"token/allow_expired_window": value => 0;
|
||||
'token/allow_expired_window': value => 0;
|
||||
}
|
||||
|
||||
|
||||
file { "/etc/keystone/keystone-extra.conf":
|
||||
file { '/etc/keystone/keystone-extra.conf':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'keystone',
|
||||
mode => '0640',
|
||||
content => template('openstack/keystone-extra.conf.erb'),
|
||||
} ->
|
||||
class { '::keystone':
|
||||
enabled => $enabled,
|
||||
enable_fernet_setup => false,
|
||||
fernet_key_repository => "$keystone_key_repo_path/fernet-keys",
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
service_name => $service_name,
|
||||
token_expiration => $token_expiration,
|
||||
}
|
||||
-> class { '::keystone':
|
||||
enabled => $enabled,
|
||||
enable_fernet_setup => false,
|
||||
fernet_key_repository => "${keystone_key_repo_path}/fernet-keys",
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
service_name => $service_name,
|
||||
token_expiration => $token_expiration,
|
||||
}
|
||||
|
||||
# create keystone policy configuration
|
||||
file { "/etc/keystone/policy.json":
|
||||
file { '/etc/keystone/policy.json':
|
||||
ensure => present,
|
||||
owner => 'keystone',
|
||||
group => 'keystone',
|
||||
@ -97,7 +97,7 @@ class openstack::keystone (
|
||||
content => template('openstack/keystone-policy.json.erb'),
|
||||
}
|
||||
|
||||
# Keystone users can only be added to the SQL backend (write support for
|
||||
# Keystone users can only be added to the SQL backend (write support for
|
||||
# the LDAP backend has been removed). We can therefore set password rules
|
||||
# irrespective of the backend
|
||||
if ! str2bool($::is_restore_in_progress) {
|
||||
@ -175,15 +175,15 @@ class openstack::keystone::api
|
||||
# the subcloud region.
|
||||
if ($::platform::params::distributed_cloud_role == 'subcloud' and
|
||||
$::platform::params::region_2_name != 'RegionOne') {
|
||||
Keystone_endpoint["${platform::params::region_2_name}/keystone::identity"] -> Keystone_endpoint["RegionOne/keystone::identity"]
|
||||
keystone_endpoint { "RegionOne/keystone::identity":
|
||||
ensure => "absent",
|
||||
name => "keystone",
|
||||
type => "identity",
|
||||
region => "RegionOne",
|
||||
public_url => "http://127.0.0.1:5000/v3",
|
||||
admin_url => "http://127.0.0.1:5000/v3",
|
||||
internal_url => "http://127.0.0.1:5000/v3"
|
||||
Keystone_endpoint["${platform::params::region_2_name}/keystone::identity"] -> Keystone_endpoint['RegionOne/keystone::identity']
|
||||
keystone_endpoint { 'RegionOne/keystone::identity':
|
||||
ensure => 'absent',
|
||||
name => 'keystone',
|
||||
type => 'identity',
|
||||
region => 'RegionOne',
|
||||
public_url => 'http://127.0.0.1:5000/v3',
|
||||
admin_url => 'http://127.0.0.1:5000/v3',
|
||||
internal_url => 'http://127.0.0.1:5000/v3'
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -203,7 +203,7 @@ class openstack::keystone::bootstrap(
|
||||
$keystone_key_repo_path = "${::platform::drbd::cgcs::params::mountpoint}/keystone"
|
||||
$eng_workers = $::platform::params::eng_workers
|
||||
$bind_host = '0.0.0.0'
|
||||
|
||||
|
||||
# In the case of a classical Multi-Region deployment, apply the Keystone
|
||||
# controller configuration for Primary Region ONLY
|
||||
# (i.e. on which region_config is False), since Keystone is a Shared service
|
||||
@ -212,35 +212,35 @@ class openstack::keystone::bootstrap(
|
||||
# controller configuration for each SubCloud, since Keystone is also
|
||||
# a localized service.
|
||||
if ($::platform::params::init_keystone and
|
||||
(!$::platform::params::region_config or
|
||||
$::platform::params::distributed_cloud_role == 'subcloud')) {
|
||||
(!$::platform::params::region_config or
|
||||
$::platform::params::distributed_cloud_role == 'subcloud')) {
|
||||
|
||||
include ::keystone::db::postgresql
|
||||
|
||||
Class[$name] -> Class['::platform::client'] -> Class['::openstack::client']
|
||||
|
||||
# Create the parent directory for fernet keys repository
|
||||
file { "${keystone_key_repo_path}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $keystone_key_repo_path:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
require => Class['::platform::drbd::cgcs'],
|
||||
} ->
|
||||
file { "/etc/keystone/keystone-extra.conf":
|
||||
}
|
||||
-> file { '/etc/keystone/keystone-extra.conf':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'keystone',
|
||||
mode => '0640',
|
||||
content => template('openstack/keystone-extra.conf.erb'),
|
||||
} ->
|
||||
class { '::keystone':
|
||||
enabled => true,
|
||||
enable_bootstrap => true,
|
||||
fernet_key_repository => "$keystone_key_repo_path/fernet-keys",
|
||||
sync_db => true,
|
||||
default_domain => $default_domain,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
-> class { '::keystone':
|
||||
enabled => true,
|
||||
enable_bootstrap => true,
|
||||
fernet_key_repository => "${keystone_key_repo_path}/fernet-keys",
|
||||
sync_db => true,
|
||||
default_domain => $default_domain,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
include ::keystone::client
|
||||
@ -290,29 +290,29 @@ class openstack::keystone::endpointgroup
|
||||
group => 'keystone',
|
||||
mode => '0640',
|
||||
content => template('openstack/keystone-defaultregion-filter.erb'),
|
||||
} ->
|
||||
file { "/etc/keystone/keystone-${system_controller_region}-filter.conf":
|
||||
}
|
||||
-> file { "/etc/keystone/keystone-${system_controller_region}-filter.conf":
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'keystone',
|
||||
mode => '0640',
|
||||
content => template('openstack/keystone-systemcontroller-filter.erb'),
|
||||
} ->
|
||||
exec { 'endpointgroup-${reference_region}-command':
|
||||
cwd => '/etc/keystone',
|
||||
}
|
||||
-> exec { 'endpointgroup-${reference_region}-command':
|
||||
cwd => '/etc/keystone',
|
||||
logoutput => true,
|
||||
provider => shell,
|
||||
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
|
||||
command => template('openstack/keystone-defaultregion.erb'),
|
||||
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
|
||||
} ->
|
||||
exec { 'endpointgroup-${system_controller_region}-command':
|
||||
cwd => '/etc/keystone',
|
||||
provider => shell,
|
||||
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
|
||||
command => template('openstack/keystone-defaultregion.erb'),
|
||||
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
|
||||
}
|
||||
-> exec { 'endpointgroup-${system_controller_region}-command':
|
||||
cwd => '/etc/keystone',
|
||||
logoutput => true,
|
||||
provider => shell,
|
||||
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
|
||||
command => template('openstack/keystone-systemcontroller.erb'),
|
||||
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
|
||||
provider => shell,
|
||||
require => [ Class['openstack::keystone::api'], Class['::keystone::endpoint'] ],
|
||||
command => template('openstack/keystone-systemcontroller.erb'),
|
||||
path => ['/usr/bin/', '/bin/', '/sbin/', '/usr/sbin/'],
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -438,28 +438,28 @@ class openstack::keystone::upgrade (
|
||||
|
||||
# Need to create the parent directory for fernet keys repository
|
||||
# This is a workaround to a puppet bug.
|
||||
file { "${keystone_key_repo}":
|
||||
file { $keystone_key_repo:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755'
|
||||
} ->
|
||||
file { "/etc/keystone/keystone-extra.conf":
|
||||
}
|
||||
-> file { '/etc/keystone/keystone-extra.conf':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'keystone',
|
||||
mode => '0640',
|
||||
content => template('openstack/keystone-extra.conf.erb'),
|
||||
} ->
|
||||
class { '::keystone':
|
||||
upgrade_token_cmd => $upgrade_token_cmd,
|
||||
upgrade_token_file => $upgrade_token_file,
|
||||
enable_fernet_setup => true,
|
||||
enable_bootstrap => false,
|
||||
fernet_key_repository => "$keystone_key_repo/fernet-keys",
|
||||
sync_db => false,
|
||||
default_domain => undef,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
-> class { '::keystone':
|
||||
upgrade_token_cmd => $upgrade_token_cmd,
|
||||
upgrade_token_file => $upgrade_token_file,
|
||||
enable_fernet_setup => true,
|
||||
enable_bootstrap => false,
|
||||
fernet_key_repository => "${keystone_key_repo}/fernet-keys",
|
||||
sync_db => false,
|
||||
default_domain => undef,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
# Add service account and endpoints for any new R6 services...
|
||||
|
@ -27,8 +27,8 @@ class openstack::magnum
|
||||
include ::magnum::certificates
|
||||
|
||||
class {'::magnum':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
if $::platform::params::init_database {
|
||||
@ -53,8 +53,8 @@ class openstack::magnum::haproxy
|
||||
|
||||
if $service_enabled {
|
||||
platform::haproxy::proxy { 'magnum-restapi':
|
||||
server_name => 's-magnum',
|
||||
public_port => $api_port,
|
||||
server_name => 's-magnum',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -74,7 +74,7 @@ class openstack::magnum::api
|
||||
|
||||
class { '::magnum::api':
|
||||
enabled => false,
|
||||
host => $api_host,
|
||||
host => $api_host,
|
||||
sync_db => false,
|
||||
}
|
||||
|
||||
|
@ -38,9 +38,9 @@ class openstack::murano::firewall
|
||||
ports => 5671,
|
||||
}
|
||||
platform::firewall::rule { 'murano-rabbit-regular':
|
||||
service_name => 'murano-rabbit-regular',
|
||||
ports => 5672,
|
||||
ensure => absent,
|
||||
ports => 5672,
|
||||
service_name => 'murano-rabbit-regular',
|
||||
}
|
||||
} else {
|
||||
platform::firewall::rule { 'murano-rabbit-regular':
|
||||
@ -48,21 +48,21 @@ class openstack::murano::firewall
|
||||
ports => 5672,
|
||||
}
|
||||
platform::firewall::rule { 'murano-rabbit-ssl':
|
||||
service_name => 'murano-rabbit-ssl',
|
||||
ports => 5671,
|
||||
ensure => absent,
|
||||
ports => 5671,
|
||||
service_name => 'murano-rabbit-ssl',
|
||||
}
|
||||
}
|
||||
} else {
|
||||
platform::firewall::rule { 'murano-rabbit-regular':
|
||||
service_name => 'murano-rabbit-regular',
|
||||
ports => 5672,
|
||||
ensure => absent,
|
||||
ports => 5672,
|
||||
service_name => 'murano-rabbit-regular',
|
||||
}
|
||||
platform::firewall::rule { 'murano-rabbit-ssl':
|
||||
service_name => 'murano-rabbit-ssl',
|
||||
ports => 5671,
|
||||
ensure => absent,
|
||||
ports => 5671,
|
||||
service_name => 'murano-rabbit-ssl',
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -73,8 +73,8 @@ class openstack::murano::haproxy
|
||||
|
||||
if $service_enabled {
|
||||
platform::haproxy::proxy { 'murano-restapi':
|
||||
server_name => 's-murano-restapi',
|
||||
public_port => $api_port,
|
||||
server_name => 's-murano-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@@ -115,24 +115,24 @@ class openstack::murano
|
||||
include ::murano::params
|
||||
|
||||
class {'::murano':
|
||||
use_syslog => true,
|
||||
log_facility => 'local2',
|
||||
service_host => $::platform::network::mgmt::params::controller_address,
|
||||
service_port => '8082',
|
||||
database_idle_timeout => $database_idle_timeout,
|
||||
database_max_pool_size => $database_max_pool_size,
|
||||
database_max_overflow => $database_max_overflow,
|
||||
sync_db => false,
|
||||
rabbit_own_user => $::openstack::murano::params::auth_user,
|
||||
rabbit_own_password => $::openstack::murano::params::auth_password,
|
||||
rabbit_own_host => $::platform::network::oam::params::controller_address,
|
||||
rabbit_own_port => $murano_rabbit_port,
|
||||
rabbit_own_vhost => "/",
|
||||
rabbit_own_use_ssl => $ssl,
|
||||
rabbit_own_ca_certs => $murano_cacert,
|
||||
disable_murano_agent => $disable_murano_agent,
|
||||
api_workers => $::platform::params::eng_workers_by_4,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
use_syslog => true,
|
||||
log_facility => 'local2',
|
||||
service_host => $::platform::network::mgmt::params::controller_address,
|
||||
service_port => '8082',
|
||||
database_idle_timeout => $database_idle_timeout,
|
||||
database_max_pool_size => $database_max_pool_size,
|
||||
database_max_overflow => $database_max_overflow,
|
||||
sync_db => false,
|
||||
rabbit_own_user => $::openstack::murano::params::auth_user,
|
||||
rabbit_own_password => $::openstack::murano::params::auth_password,
|
||||
rabbit_own_host => $::platform::network::oam::params::controller_address,
|
||||
rabbit_own_port => $murano_rabbit_port,
|
||||
rabbit_own_vhost => '/',
|
||||
rabbit_own_use_ssl => $ssl,
|
||||
rabbit_own_ca_certs => $murano_cacert,
|
||||
disable_murano_agent => $disable_murano_agent,
|
||||
api_workers => $::platform::params::eng_workers_by_4,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
# this rabbitmq is separate from the main one and used only for murano
|
||||
@@ -169,11 +169,11 @@ define enable_murano_agent_rabbitmq {
|
||||
|
||||
# Rabbit configuration parameters
|
||||
$amqp_platform_sw_version = $::platform::params::software_version
|
||||
$kombu_ssl_ca_certs = "$::openstack::murano::params::rabbit_certs_dir/ca-cert.pem"
|
||||
$kombu_ssl_keyfile = "$::openstack::murano::params::rabbit_certs_dir/key.pem"
|
||||
$kombu_ssl_certfile = "$::openstack::murano::params::rabbit_certs_dir/cert.pem"
|
||||
$kombu_ssl_ca_certs = "${::openstack::murano::params::rabbit_certs_dir}/ca-cert.pem"
|
||||
$kombu_ssl_keyfile = "${::openstack::murano::params::rabbit_certs_dir}/key.pem"
|
||||
$kombu_ssl_certfile = "${::openstack::murano::params::rabbit_certs_dir}/cert.pem"
|
||||
|
||||
$murano_rabbit_dir = "/var/lib/rabbitmq/murano"
|
||||
$murano_rabbit_dir = '/var/lib/rabbitmq/murano'
|
||||
$rabbit_home = "${murano_rabbit_dir}/${amqp_platform_sw_version}"
|
||||
$mnesia_base = "${rabbit_home}/mnesia"
|
||||
$rabbit_node = $::platform::amqp::params::node
|
||||
@@ -196,33 +196,33 @@ define enable_murano_agent_rabbitmq {
|
||||
$rabbit_tcp_listen_options = $::openstack::murano::params::rabbit_tcp_listen_options
|
||||
|
||||
# murano rabbit ssl certificates are placed here
|
||||
file { "$::openstack::murano::params::rabbit_certs_dir":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $::openstack::murano::params::rabbit_certs_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
if $::platform::params::init_database {
|
||||
file { "${murano_rabbit_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { $murano_rabbit_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "${rabbit_home}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
-> file { $rabbit_home:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "${mnesia_base}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
-> file { $mnesia_base:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} -> Class['::rabbitmq']
|
||||
}
|
||||
|
||||
@@ -240,7 +240,7 @@ define enable_murano_agent_rabbitmq {
|
||||
$rabbitmq_conf_template= 'openstack/murano-rabbitmq.config.erb'
|
||||
}
|
||||
|
||||
file { "/etc/rabbitmq/murano-rabbitmq.config":
|
||||
file { '/etc/rabbitmq/murano-rabbitmq.config':
|
||||
ensure => present,
|
||||
owner => 'rabbitmq',
|
||||
group => 'rabbitmq',
|
||||
@@ -248,7 +248,7 @@ define enable_murano_agent_rabbitmq {
|
||||
content => template($rabbitmq_conf_template),
|
||||
}
|
||||
|
||||
file { "/etc/rabbitmq/murano-rabbitmq-env.conf":
|
||||
file { '/etc/rabbitmq/murano-rabbitmq-env.conf':
|
||||
ensure => present,
|
||||
owner => 'rabbitmq',
|
||||
group => 'rabbitmq',
|
||||
@@ -261,28 +261,28 @@ class openstack::murano::upgrade {
|
||||
include ::platform::params
|
||||
|
||||
$amqp_platform_sw_version = $::platform::params::software_version
|
||||
$murano_rabbit_dir = "/var/lib/rabbitmq/murano"
|
||||
$murano_rabbit_dir = '/var/lib/rabbitmq/murano'
|
||||
$rabbit_home = "${murano_rabbit_dir}/${amqp_platform_sw_version}"
|
||||
$mnesia_base = "${rabbit_home}/mnesia"
|
||||
|
||||
file { "${murano_rabbit_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { $murano_rabbit_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "${rabbit_home}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
-> file { $rabbit_home:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "${mnesia_base}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
-> file { $mnesia_base:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
}
|
||||
|
@@ -18,7 +18,7 @@ class openstack::neutron
|
||||
include ::neutron::logging
|
||||
|
||||
class { '::neutron':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
}
|
||||
@@ -50,14 +50,14 @@ define openstack::neutron::sdn::controller (
|
||||
|
||||
platform::firewall::rule { $name:
|
||||
service_name => $name,
|
||||
table => 'nat',
|
||||
chain => 'POSTROUTING',
|
||||
proto => $firewall_proto_transport,
|
||||
outiface => $oam_interface,
|
||||
tosource => $oam_address,
|
||||
destination => $ip_address,
|
||||
host => $mgmt_subnet,
|
||||
jump => 'SNAT',
|
||||
table => 'nat',
|
||||
chain => 'POSTROUTING',
|
||||
proto => $firewall_proto_transport,
|
||||
outiface => $oam_interface,
|
||||
tosource => $oam_address,
|
||||
destination => $ip_address,
|
||||
host => $mgmt_subnet,
|
||||
jump => 'SNAT',
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,9 +80,9 @@ class openstack::neutron::odl
|
||||
create_resources('openstack::neutron::sdn::controller', $controller_config, {})
|
||||
}
|
||||
class {'::neutron::plugins::ml2::opendaylight':
|
||||
odl_username => $username,
|
||||
odl_password => $password,
|
||||
odl_url => $url,
|
||||
odl_username => $username,
|
||||
odl_password => $password,
|
||||
odl_url => $url,
|
||||
port_binding_controller => $port_binding_controller,
|
||||
}
|
||||
}
|
||||
@@ -91,7 +91,7 @@ class openstack::neutron::odl
|
||||
class openstack::neutron::bgp
|
||||
inherits ::openstack::neutron::params {
|
||||
|
||||
if $bgp_router_id {
|
||||
if $bgp_router_id {
|
||||
class {'::neutron::bgp':
|
||||
bgp_router_id => $bgp_router_id,
|
||||
}
|
||||
@@ -100,38 +100,38 @@ class openstack::neutron::bgp
|
||||
}
|
||||
|
||||
exec { 'systemctl enable neutron-bgp-dragent.service':
|
||||
command => "systemctl enable neutron-bgp-dragent.service",
|
||||
command => 'systemctl enable neutron-bgp-dragent.service',
|
||||
}
|
||||
|
||||
exec { 'systemctl restart neutron-bgp-dragent.service':
|
||||
command => "systemctl restart neutron-bgp-dragent.service",
|
||||
command => 'systemctl restart neutron-bgp-dragent.service',
|
||||
}
|
||||
|
||||
file { '/etc/pmon.d/':
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "/etc/pmon.d/neutron-bgp-dragent.conf":
|
||||
file { '/etc/pmon.d/neutron-bgp-dragent.conf':
|
||||
ensure => link,
|
||||
target => "/etc/neutron/pmon/neutron-bgp-dragent.conf",
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
target => '/etc/neutron/pmon/neutron-bgp-dragent.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
} else {
|
||||
exec { 'pmon-stop neutron-bgp-dragent':
|
||||
command => "pmon-stop neutron-bgp-dragent",
|
||||
} ->
|
||||
exec { 'rm -f /etc/pmon.d/neutron-bgp-dragent.conf':
|
||||
command => "rm -f /etc/pmon.d/neutron-bgp-dragent.conf",
|
||||
} ->
|
||||
exec { 'systemctl disable neutron-bgp-dragent.service':
|
||||
command => "systemctl disable neutron-bgp-dragent.service",
|
||||
} ->
|
||||
exec { 'systemctl stop neutron-bgp-dragent.service':
|
||||
command => "systemctl stop neutron-bgp-dragent.service",
|
||||
command => 'pmon-stop neutron-bgp-dragent',
|
||||
}
|
||||
-> exec { 'rm -f /etc/pmon.d/neutron-bgp-dragent.conf':
|
||||
command => 'rm -f /etc/pmon.d/neutron-bgp-dragent.conf',
|
||||
}
|
||||
-> exec { 'systemctl disable neutron-bgp-dragent.service':
|
||||
command => 'systemctl disable neutron-bgp-dragent.service',
|
||||
}
|
||||
-> exec { 'systemctl stop neutron-bgp-dragent.service':
|
||||
command => 'systemctl stop neutron-bgp-dragent.service',
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -148,12 +148,12 @@ class openstack::neutron::sfc (
|
||||
|
||||
if $sfc_drivers {
|
||||
class {'::neutron::sfc':
|
||||
sfc_drivers => $sfc_drivers,
|
||||
sfc_drivers => $sfc_drivers,
|
||||
flowclassifier_drivers => $flowclassifier_drivers,
|
||||
quota_flow_classifier => $sfc_quota_flow_classifier,
|
||||
quota_port_chain => $sfc_quota_port_chain,
|
||||
quota_port_pair_group => $sfc_quota_port_pair_group,
|
||||
quota_port_pair => $sfc_quota_port_pair,
|
||||
quota_flow_classifier => $sfc_quota_flow_classifier,
|
||||
quota_port_chain => $sfc_quota_port_chain,
|
||||
quota_port_pair_group => $sfc_quota_port_pair_group,
|
||||
quota_port_pair => $sfc_quota_port_pair,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -174,12 +174,12 @@ class openstack::neutron::server {
|
||||
class { '::neutron::server':
|
||||
api_workers => $::platform::params::eng_workers_by_2,
|
||||
rpc_workers => $::platform::params::eng_workers_by_2,
|
||||
sync_db => $::platform::params::init_database,
|
||||
sync_db => $::platform::params::init_database,
|
||||
}
|
||||
|
||||
file { '/etc/neutron/api-paste.ini':
|
||||
ensure => file,
|
||||
mode => '0640',
|
||||
ensure => file,
|
||||
mode => '0640',
|
||||
}
|
||||
|
||||
Class['::neutron::server'] -> File['/etc/neutron/api-paste.ini']
|
||||
@@ -238,28 +238,28 @@ class openstack::neutron::agents
|
||||
}
|
||||
}
|
||||
|
||||
file { "/etc/pmon.d/neutron-dhcp-agent.conf":
|
||||
file { '/etc/pmon.d/neutron-dhcp-agent.conf':
|
||||
ensure => $pmon_ensure,
|
||||
target => "/etc/neutron/pmon/neutron-dhcp-agent.conf",
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
target => '/etc/neutron/pmon/neutron-dhcp-agent.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "/etc/pmon.d/neutron-metadata-agent.conf":
|
||||
file { '/etc/pmon.d/neutron-metadata-agent.conf':
|
||||
ensure => $pmon_ensure,
|
||||
target => "/etc/neutron/pmon/neutron-metadata-agent.conf",
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
target => '/etc/neutron/pmon/neutron-metadata-agent.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
file { "/etc/pmon.d/neutron-sriov-nic-agent.conf":
|
||||
file { '/etc/pmon.d/neutron-sriov-nic-agent.conf':
|
||||
ensure => $pmon_ensure,
|
||||
target => "/etc/neutron/pmon/neutron-sriov-nic-agent.conf",
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
target => '/etc/neutron/pmon/neutron-sriov-nic-agent.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -272,19 +272,18 @@ class openstack::neutron::firewall
|
||||
ports => $api_port,
|
||||
}
|
||||
|
||||
if $bgp_router_id {
|
||||
platform::firewall::rule { 'ryu-bgp-port':
|
||||
service_name => 'neutron',
|
||||
ports => $bgp_port,
|
||||
}
|
||||
} else {
|
||||
platform::firewall::rule { 'ryu-bgp-port':
|
||||
service_name => 'neutron',
|
||||
ports => $bgp_port,
|
||||
ensure => absent
|
||||
}
|
||||
}
|
||||
|
||||
if $bgp_router_id {
|
||||
platform::firewall::rule { 'ryu-bgp-port':
|
||||
service_name => 'neutron',
|
||||
ports => $bgp_port,
|
||||
}
|
||||
} else {
|
||||
platform::firewall::rule { 'ryu-bgp-port':
|
||||
service_name => 'neutron',
|
||||
ports => $bgp_port,
|
||||
ensure => absent
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -292,8 +291,8 @@ class openstack::neutron::haproxy
|
||||
inherits ::openstack::neutron::params {
|
||||
|
||||
platform::haproxy::proxy { 'neutron-restapi':
|
||||
server_name => 's-neutron',
|
||||
public_port => $api_port,
|
||||
server_name => 's-neutron',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
|
@@ -42,7 +42,7 @@ class openstack::nova {
|
||||
$metadata_host = $::platform::network::mgmt::params::controller_address
|
||||
|
||||
class { '::nova':
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
rabbit_use_ssl => $::platform::amqp::params::ssl_enabled,
|
||||
default_transport_url => $::platform::amqp::params::transport_url,
|
||||
}
|
||||
|
||||
@@ -68,9 +68,9 @@ class openstack::nova::sshd
|
||||
enable => true,
|
||||
}
|
||||
|
||||
file { "/etc/ssh/sshd_config":
|
||||
notify => Service['sshd'],
|
||||
file { '/etc/ssh/sshd_config':
|
||||
ensure => 'present' ,
|
||||
notify => Service['sshd'],
|
||||
mode => '0600',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@@ -79,7 +79,7 @@ class openstack::nova::sshd
|
||||
|
||||
}
|
||||
|
||||
class openstack::nova::controller
|
||||
class openstack::nova::controller
|
||||
inherits ::openstack::nova::params {
|
||||
|
||||
include ::platform::params
|
||||
@@ -108,12 +108,12 @@ class openstack::nova::controller
|
||||
|
||||
# Run nova-manage to purge deleted rows daily at 15 minute mark
|
||||
cron { 'nova-purge-deleted':
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/nova-purge-deleted-active',
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/nova-purge-deleted-active',
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => '15',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
minute => '15',
|
||||
hour => '*/24',
|
||||
user => 'root',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,9 +142,9 @@ class openstack::nova::compute (
|
||||
include ::openstack::nova::sshd
|
||||
|
||||
$host_private_key_file = $host_key_type ? {
|
||||
'ssh-rsa' => "/etc/ssh/ssh_host_rsa_key",
|
||||
'ssh-dsa' => "/etc/ssh/ssh_host_dsa_key",
|
||||
'ssh-ecdsa' => "/etc/ssh/ssh_host_ecdsa_key",
|
||||
'ssh-rsa' => '/etc/ssh/ssh_host_rsa_key',
|
||||
'ssh-dsa' => '/etc/ssh/ssh_host_dsa_key',
|
||||
'ssh-ecdsa' => '/etc/ssh/ssh_host_ecdsa_key',
|
||||
default => undef
|
||||
}
|
||||
|
||||
@@ -153,9 +153,9 @@ class openstack::nova::compute (
|
||||
}
|
||||
|
||||
$host_public_key_file = $host_key_type ? {
|
||||
'ssh-rsa' => "/etc/ssh/ssh_host_rsa_key.pub",
|
||||
'ssh-dsa' => "/etc/ssh/ssh_host_dsa_key.pub",
|
||||
'ssh-ecdsa' => "/etc/ssh/ssh_host_ecdsa_key.pub",
|
||||
'ssh-rsa' => '/etc/ssh/ssh_host_rsa_key.pub',
|
||||
'ssh-dsa' => '/etc/ssh/ssh_host_dsa_key.pub',
|
||||
'ssh-ecdsa' => '/etc/ssh/ssh_host_ecdsa_key.pub',
|
||||
default => undef
|
||||
}
|
||||
|
||||
@@ -164,20 +164,20 @@ class openstack::nova::compute (
|
||||
}
|
||||
|
||||
file { '/etc/ssh':
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
|
||||
file { $host_private_key_file:
|
||||
-> file { $host_private_key_file:
|
||||
content => $host_private_key,
|
||||
mode => '0600',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
}
|
||||
|
||||
file { $host_public_key_file:
|
||||
-> file { $host_public_key_file:
|
||||
content => "${host_public_header} ${host_public_key}",
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
@@ -200,20 +200,20 @@ class openstack::nova::compute (
|
||||
"command=\"/usr/bin/nova_authorized_cmds\"" ]
|
||||
|
||||
file { '/root/.ssh':
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
|
||||
file { $migration_private_key_file:
|
||||
-> file { $migration_private_key_file:
|
||||
content => $migration_private_key,
|
||||
mode => '0600',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
}
|
||||
|
||||
ssh_authorized_key { 'nova-migration-key-authorization':
|
||||
-> ssh_authorized_key { 'nova-migration-key-authorization':
|
||||
ensure => present,
|
||||
key => $migration_public_key,
|
||||
type => $migration_key_type,
|
||||
@@ -253,17 +253,17 @@ class openstack::nova::compute (
|
||||
}
|
||||
|
||||
include ::openstack::glance::params
|
||||
if "rbd" in $::openstack::glance::params::enabled_backends {
|
||||
$libvirt_inject_partition = "-2"
|
||||
$libvirt_images_type = "rbd"
|
||||
if 'rbd' in $::openstack::glance::params::enabled_backends {
|
||||
$libvirt_inject_partition = '-2'
|
||||
$libvirt_images_type = 'rbd'
|
||||
} else {
|
||||
$libvirt_inject_partition = "-1"
|
||||
$libvirt_images_type = "default"
|
||||
$libvirt_inject_partition = '-1'
|
||||
$libvirt_images_type = 'default'
|
||||
}
|
||||
|
||||
class { '::nova::compute::libvirt':
|
||||
libvirt_virt_type => $libvirt_virt_type,
|
||||
vncserver_listen => $libvirt_vnc_bind_host,
|
||||
libvirt_virt_type => $libvirt_virt_type,
|
||||
vncserver_listen => $libvirt_vnc_bind_host,
|
||||
libvirt_inject_partition => $libvirt_inject_partition,
|
||||
}
|
||||
|
||||
@@ -277,32 +277,32 @@ class openstack::nova::compute (
|
||||
'libvirt/volume_use_multipath': value => $::platform::multipath::params::enabled;
|
||||
|
||||
# enable auto-converge by default
|
||||
'libvirt/live_migration_permit_auto_converge': value => "True";
|
||||
'libvirt/live_migration_permit_auto_converge': value => 'True';
|
||||
|
||||
# Change the nfs mount options to provide faster detection of unclean
|
||||
# shutdown (e.g. if controller is powered down).
|
||||
"DEFAULT/nfs_mount_options": value => $::platform::params::nfs_mount_options;
|
||||
'DEFAULT/nfs_mount_options': value => $::platform::params::nfs_mount_options;
|
||||
|
||||
# WRS extension: compute_resource_debug
|
||||
"DEFAULT/compute_resource_debug": value => "False";
|
||||
'DEFAULT/compute_resource_debug': value => 'False';
|
||||
|
||||
# WRS extension: reap running deleted VMs
|
||||
"DEFAULT/running_deleted_instance_action": value => "reap";
|
||||
"DEFAULT/running_deleted_instance_poll_interval": value => "60";
|
||||
'DEFAULT/running_deleted_instance_action': value => 'reap';
|
||||
'DEFAULT/running_deleted_instance_poll_interval': value => '60';
|
||||
|
||||
# Delete rbd_user, for now
|
||||
"DEFAULT/rbd_user": ensure => 'absent';
|
||||
'DEFAULT/rbd_user': ensure => 'absent';
|
||||
|
||||
# write metadata to a special configuration drive
|
||||
"DEFAULT/mkisofs_cmd": value => "/usr/bin/genisoimage";
|
||||
'DEFAULT/mkisofs_cmd': value => '/usr/bin/genisoimage';
|
||||
|
||||
# configure metrics
|
||||
"DEFAULT/compute_available_monitors":
|
||||
value => "nova.compute.monitors.all_monitors";
|
||||
"DEFAULT/compute_monitors": value => $compute_monitors;
|
||||
'DEFAULT/compute_available_monitors':
|
||||
value => 'nova.compute.monitors.all_monitors';
|
||||
'DEFAULT/compute_monitors': value => $compute_monitors;
|
||||
|
||||
# need retries under heavy I/O loads
|
||||
"DEFAULT/network_allocate_retries": value => 2;
|
||||
'DEFAULT/network_allocate_retries': value => 2;
|
||||
|
||||
# TODO(mpeters): confirm if this is still required - deprecated
|
||||
'DEFAULT/volume_api_class': value => 'nova.volume.cinder.API';
|
||||
@@ -310,7 +310,7 @@ class openstack::nova::compute (
|
||||
'DEFAULT/default_ephemeral_format': value => 'ext4';
|
||||
|
||||
# turn on service tokens
|
||||
'service_user/send_service_user_token': value => 'true';
|
||||
'service_user/send_service_user_token': value => true;
|
||||
'service_user/project_name': value => $::nova::keystone::authtoken::project_name;
|
||||
'service_user/password': value => $::nova::keystone::authtoken::password;
|
||||
'service_user/username': value => $::nova::keystone::authtoken::username;
|
||||
@@ -323,57 +323,57 @@ class openstack::nova::compute (
|
||||
|
||||
file_line {'cgroup_controllers':
|
||||
ensure => present,
|
||||
path => '/etc/libvirt/qemu.conf',
|
||||
line => 'cgroup_controllers = [ "cpu", "cpuacct" ]',
|
||||
match => '^cgroup_controllers = .*',
|
||||
path => '/etc/libvirt/qemu.conf',
|
||||
line => 'cgroup_controllers = [ "cpu", "cpuacct" ]',
|
||||
match => '^cgroup_controllers = .*',
|
||||
}
|
||||
|
||||
if $iscsi_initiator_name {
|
||||
$initiator_content = "InitiatorName=${iscsi_initiator_name}\n"
|
||||
file { "/etc/iscsi/initiatorname.iscsi":
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
file { '/etc/iscsi/initiatorname.iscsi':
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
content => $initiator_content,
|
||||
} ->
|
||||
exec { "Restart iscsid.service":
|
||||
}
|
||||
-> exec { 'Restart iscsid.service':
|
||||
command => "bash -c 'systemctl restart iscsid.service'",
|
||||
onlyif => "systemctl status iscsid.service",
|
||||
onlyif => 'systemctl status iscsid.service',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
define openstack::nova::storage::wipe_new_pv {
|
||||
$cmd = join(["/sbin/pvs --nosuffix --noheadings ",$name," 2>/dev/null | grep nova-local || true"])
|
||||
$result = generate("/bin/sh", "-c", $cmd)
|
||||
$cmd = join(['/sbin/pvs --nosuffix --noheadings ',$name,' 2>/dev/null | grep nova-local || true'])
|
||||
$result = generate('/bin/sh', '-c', $cmd)
|
||||
if $result !~ /nova-local/ {
|
||||
exec { "Wipe New PV not in VG - $name":
|
||||
exec { "Wipe New PV not in VG - ${name}":
|
||||
provider => shell,
|
||||
command => "wipefs -a $name",
|
||||
before => Lvm::Volume[instances_lv],
|
||||
require => Exec['remove device mapper mapping']
|
||||
command => "wipefs -a ${name}",
|
||||
before => Lvm::Volume[instances_lv],
|
||||
require => Exec['remove device mapper mapping']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
define openstack::nova::storage::wipe_pv_and_format {
|
||||
if $name !~ /part/ {
|
||||
exec { "Wipe removing PV $name":
|
||||
exec { "Wipe removing PV ${name}":
|
||||
provider => shell,
|
||||
command => "wipefs -a $name",
|
||||
require => File_line[disable_old_lvg_disks]
|
||||
} ->
|
||||
exec { "GPT format disk PV - $name":
|
||||
command => "wipefs -a ${name}",
|
||||
require => File_line[disable_old_lvg_disks]
|
||||
}
|
||||
-> exec { "GPT format disk PV - ${name}":
|
||||
provider => shell,
|
||||
command => "parted -a optimal --script $name -- mktable gpt",
|
||||
command => "parted -a optimal --script ${name} -- mktable gpt",
|
||||
}
|
||||
}
|
||||
else {
|
||||
exec { "Wipe removing PV $name":
|
||||
exec { "Wipe removing PV ${name}":
|
||||
provider => shell,
|
||||
command => "wipefs -a $name",
|
||||
require => File_line[disable_old_lvg_disks]
|
||||
command => "wipefs -a ${name}",
|
||||
require => File_line[disable_old_lvg_disks]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -389,8 +389,8 @@ class openstack::nova::storage (
|
||||
$images_rbd_pool = 'ephemeral',
|
||||
$images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
|
||||
) {
|
||||
$adding_pvs_str = join($adding_pvs," ")
|
||||
$removing_pvs_str = join($removing_pvs," ")
|
||||
$adding_pvs_str = join($adding_pvs,' ')
|
||||
$removing_pvs_str = join($removing_pvs,' ')
|
||||
|
||||
# Ensure partitions update prior to local storage configuration
|
||||
Class['::platform::partitions'] -> Class[$name]
|
||||
@@ -418,7 +418,7 @@ class openstack::nova::storage (
|
||||
}
|
||||
|
||||
nova_config {
|
||||
"DEFAULT/concurrent_disk_operations": value => $concurrent_disk_operations;
|
||||
'DEFAULT/concurrent_disk_operations': value => $concurrent_disk_operations;
|
||||
}
|
||||
|
||||
::openstack::nova::storage::wipe_new_pv { $adding_pvs: }
|
||||
@@ -428,56 +428,56 @@ class openstack::nova::storage (
|
||||
path => '/etc/lvm/lvm.conf',
|
||||
line => " global_filter = ${lvm_update_filter}",
|
||||
match => '^[ ]*global_filter =',
|
||||
} ->
|
||||
nova_config {
|
||||
"libvirt/images_type": value => $images_type;
|
||||
"libvirt/images_volume_group": value => $images_volume_group;
|
||||
"libvirt/images_rbd_pool": value => $images_rbd_pool_real;
|
||||
"libvirt/images_rbd_ceph_conf": value => $images_rbd_ceph_conf_real;
|
||||
} ->
|
||||
exec { 'umount /var/lib/nova/instances':
|
||||
}
|
||||
-> nova_config {
|
||||
'libvirt/images_type': value => $images_type;
|
||||
'libvirt/images_volume_group': value => $images_volume_group;
|
||||
'libvirt/images_rbd_pool': value => $images_rbd_pool_real;
|
||||
'libvirt/images_rbd_ceph_conf': value => $images_rbd_ceph_conf_real;
|
||||
}
|
||||
-> exec { 'umount /var/lib/nova/instances':
|
||||
command => 'umount /var/lib/nova/instances; true',
|
||||
} ->
|
||||
exec { 'umount /dev/nova-local/instances_lv':
|
||||
}
|
||||
-> exec { 'umount /dev/nova-local/instances_lv':
|
||||
command => 'umount /dev/nova-local/instances_lv; true',
|
||||
} ->
|
||||
exec { 'remove udev leftovers':
|
||||
}
|
||||
-> exec { 'remove udev leftovers':
|
||||
unless => 'vgs nova-local',
|
||||
command => 'rm -rf /dev/nova-local || true',
|
||||
} ->
|
||||
exec { 'remove device mapper mapping':
|
||||
command => "dmsetup remove /dev/mapper/nova--local-instances_lv || true",
|
||||
} ->
|
||||
file_line { 'disable_old_lvg_disks':
|
||||
}
|
||||
-> exec { 'remove device mapper mapping':
|
||||
command => 'dmsetup remove /dev/mapper/nova--local-instances_lv || true',
|
||||
}
|
||||
-> file_line { 'disable_old_lvg_disks':
|
||||
path => '/etc/lvm/lvm.conf',
|
||||
line => " global_filter = ${lvm_global_filter}",
|
||||
match => '^[ ]*global_filter =',
|
||||
} ->
|
||||
exec { 'add device mapper mapping':
|
||||
}
|
||||
-> exec { 'add device mapper mapping':
|
||||
command => 'lvchange -ay /dev/nova-local/instances_lv || true',
|
||||
} ->
|
||||
lvm::volume { 'instances_lv':
|
||||
ensure => 'present',
|
||||
vg => 'nova-local',
|
||||
pv => $final_pvs,
|
||||
size => 'max',
|
||||
round_to_extent => $round_to_extent,
|
||||
allow_reduce => true,
|
||||
}
|
||||
-> lvm::volume { 'instances_lv':
|
||||
ensure => 'present',
|
||||
vg => 'nova-local',
|
||||
pv => $final_pvs,
|
||||
size => 'max',
|
||||
round_to_extent => $round_to_extent,
|
||||
allow_reduce => true,
|
||||
nuke_fs_on_resize_failure => true,
|
||||
} ->
|
||||
filesystem { '/dev/nova-local/instances_lv':
|
||||
}
|
||||
-> filesystem { '/dev/nova-local/instances_lv':
|
||||
ensure => present,
|
||||
fs_type => 'ext4',
|
||||
options => '-F -F',
|
||||
require => Logical_volume['instances_lv']
|
||||
} ->
|
||||
file { '/var/lib/nova/instances':
|
||||
}
|
||||
-> file { '/var/lib/nova/instances':
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
exec { 'mount /dev/nova-local/instances_lv':
|
||||
}
|
||||
-> exec { 'mount /dev/nova-local/instances_lv':
|
||||
unless => 'mount | grep -q /var/lib/nova/instances',
|
||||
command => 'mount -t ext4 /dev/nova-local/instances_lv /var/lib/nova/instances',
|
||||
}
|
||||
@@ -523,30 +523,30 @@ class openstack::nova::haproxy
|
||||
inherits ::openstack::nova::params {
|
||||
|
||||
platform::haproxy::proxy { 'nova-restapi':
|
||||
server_name => 's-nova',
|
||||
public_port => $nova_api_port,
|
||||
server_name => 's-nova',
|
||||
public_port => $nova_api_port,
|
||||
private_port => $nova_api_port,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'placement-restapi':
|
||||
server_name => 's-placement',
|
||||
public_port => $placement_port,
|
||||
server_name => 's-placement',
|
||||
public_port => $placement_port,
|
||||
private_port => $placement_port,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'nova-novnc':
|
||||
server_name => 's-nova-novnc',
|
||||
public_port => $nova_novnc_port,
|
||||
private_port => $nova_novnc_port,
|
||||
server_name => 's-nova-novnc',
|
||||
public_port => $nova_novnc_port,
|
||||
private_port => $nova_novnc_port,
|
||||
x_forwarded_proto => false,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'nova-serial':
|
||||
server_name => 's-nova-serial',
|
||||
public_port => $nova_serial_port,
|
||||
private_port => $nova_serial_port,
|
||||
server_timeout => $timeout,
|
||||
client_timeout => $timeout,
|
||||
server_name => 's-nova-serial',
|
||||
public_port => $nova_serial_port,
|
||||
private_port => $nova_serial_port,
|
||||
server_timeout => $timeout,
|
||||
client_timeout => $timeout,
|
||||
x_forwarded_proto => false,
|
||||
}
|
||||
}
|
||||
@@ -564,10 +564,10 @@ class openstack::nova::api::services
|
||||
include ::nova_api_proxy::config
|
||||
|
||||
class {'::nova::api':
|
||||
sync_db => $::platform::params::init_database,
|
||||
sync_db_api => $::platform::params::init_database,
|
||||
sync_db => $::platform::params::init_database,
|
||||
sync_db_api => $::platform::params::init_database,
|
||||
osapi_compute_workers => $::platform::params::eng_workers,
|
||||
metadata_workers => $::platform::params::eng_workers_by_2,
|
||||
metadata_workers => $::platform::params::eng_workers_by_2,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -597,7 +597,7 @@ class openstack::nova::api
|
||||
|
||||
class openstack::nova::conductor::reload {
|
||||
exec { 'signal-nova-conductor':
|
||||
command => "pkill -HUP nova-conductor",
|
||||
command => 'pkill -HUP nova-conductor',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -646,7 +646,7 @@ class openstack::nova::compute::pci
|
||||
# empty string if the list is empty, causing the nova-compute process to fail.
|
||||
if $pci_sriov_whitelist {
|
||||
class { '::nova::compute::pci':
|
||||
passthrough => generate("/usr/bin/nova-sriov",
|
||||
passthrough => generate('/usr/bin/nova-sriov',
|
||||
$pci_pt_whitelist, $pci_sriov_whitelist),
|
||||
}
|
||||
} else {
|
||||
@@ -662,7 +662,7 @@ class openstack::nova::compute::reload {
|
||||
|
||||
if $::platform::kubernetes::params::enabled != true {
|
||||
exec { 'pmon-restart-nova-compute':
|
||||
command => "pmon-restart nova-compute",
|
||||
command => 'pmon-restart nova-compute',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -30,13 +30,13 @@ class openstack::panko
|
||||
|
||||
# WRS register panko-expirer-active in cron to run once each hour
|
||||
cron { 'panko-expirer':
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/panko-expirer-active',
|
||||
ensure => 'present',
|
||||
command => '/usr/bin/panko-expirer-active',
|
||||
environment => 'PATH=/bin:/usr/bin:/usr/sbin',
|
||||
minute => 10,
|
||||
hour => '*',
|
||||
monthday => '*',
|
||||
user => 'root',
|
||||
minute => 10,
|
||||
hour => '*',
|
||||
monthday => '*',
|
||||
user => 'root',
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -55,8 +55,8 @@ class openstack::panko::haproxy
|
||||
inherits ::openstack::panko::params {
|
||||
|
||||
platform::haproxy::proxy { 'panko-restapi':
|
||||
server_name => 's-panko-restapi',
|
||||
public_port => $api_port,
|
||||
server_name => 's-panko-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@@ -74,7 +74,7 @@ class openstack::panko::api
|
||||
# panko::keystone::auth::configure_endpoint which is
|
||||
# set via sysinv puppet
|
||||
if $::openstack::panko::params::service_create and
|
||||
$::platform::params::init_keystone {
|
||||
$::platform::params::init_keystone {
|
||||
include ::panko::keystone::auth
|
||||
}
|
||||
|
||||
@@ -96,9 +96,9 @@ class openstack::panko::api
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
} ->
|
||||
class { '::panko::api':
|
||||
host => $api_host,
|
||||
}
|
||||
-> class { '::panko::api':
|
||||
host => $api_host,
|
||||
workers => $api_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
}
|
||||
|
@@ -13,7 +13,7 @@ class openstack::swift::firewall
|
||||
|
||||
platform::firewall::rule { 'swift-api':
|
||||
service_name => 'swift',
|
||||
ports => $api_port,
|
||||
ports => $api_port,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,8 +22,8 @@ class openstack::swift::haproxy
|
||||
inherits ::openstack::swift::params {
|
||||
|
||||
platform::haproxy::proxy { 'swift-restapi':
|
||||
server_name => 's-swift',
|
||||
public_port => $api_port,
|
||||
server_name => 's-swift',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@@ -43,7 +43,7 @@ class openstack::swift
|
||||
|
||||
if $service_enabled {
|
||||
if str2bool($::is_controller_active) or
|
||||
str2bool($::is_standalone_controller) {
|
||||
str2bool($::is_standalone_controller) {
|
||||
class { '::swift::keystone::auth':
|
||||
configure_s3_endpoint => false,
|
||||
}
|
||||
|
@@ -12,7 +12,7 @@
|
||||
{default_pass, <<"<%= @default_pass %>">>}
|
||||
]},
|
||||
{kernel, [
|
||||
|
||||
|
||||
]}
|
||||
].
|
||||
% EOF
|
@@ -24,7 +24,7 @@
|
||||
{default_pass, <<"<%= @default_pass %>">>}
|
||||
]},
|
||||
{kernel, [
|
||||
|
||||
|
||||
]}
|
||||
].
|
||||
% EOF
|
||||
|
@@ -15,7 +15,7 @@ status()
|
||||
RETVAL=0
|
||||
echo "$DESC is running"
|
||||
return
|
||||
else
|
||||
else
|
||||
echo "$DESC is Not running"
|
||||
RETVAL=1
|
||||
fi
|
||||
@@ -33,7 +33,7 @@ start()
|
||||
rm -f $PIDFILE
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
echo "Starting $SERVICE..."
|
||||
|
||||
systemctl start $SERVICE
|
||||
@@ -45,7 +45,7 @@ start()
|
||||
echo "$SERVICE failed!"
|
||||
RETVAL=1
|
||||
fi
|
||||
|
||||
|
||||
}
|
||||
|
||||
stop()
|
||||
|
@@ -27,7 +27,7 @@ status()
|
||||
RETVAL=0
|
||||
echo "$DESC is running"
|
||||
return
|
||||
else
|
||||
else
|
||||
echo "$DESC is Not running"
|
||||
RETVAL=1
|
||||
fi
|
||||
@@ -45,7 +45,7 @@ start()
|
||||
rm -f $PIDFILE
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
echo "Starting $SERVICE..."
|
||||
|
||||
systemctl start $SERVICE
|
||||
@@ -57,7 +57,7 @@ start()
|
||||
echo "$SERVICE failed!"
|
||||
RETVAL=1
|
||||
fi
|
||||
|
||||
|
||||
}
|
||||
|
||||
stop()
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Returns true if cinder ceph needs to be configured
|
||||
# Returns true if cinder ceph needs to be configured
|
||||
|
||||
Facter.add("is_initial_cinder_ceph_config") do
|
||||
setcode do
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Returns true if cinder lvm needs to be configured
|
||||
# Returns true if cinder lvm needs to be configured
|
||||
|
||||
Facter.add("is_initial_cinder_lvm_config") do
|
||||
setcode do
|
||||
|
@@ -1,5 +1,5 @@
|
||||
# Returns true is this is the only configured controller in the system else
|
||||
# return false if both controllers are configured.
|
||||
# return false if both controllers are configured.
|
||||
|
||||
Facter.add("is_standalone_controller") do
|
||||
setcode do
|
||||
|
@@ -50,20 +50,20 @@ class platform::amqp::rabbitmq (
|
||||
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
|
||||
|
||||
class { '::rabbitmq':
|
||||
port => $port,
|
||||
ssl => $ssl_enabled,
|
||||
default_user => $auth_user,
|
||||
default_pass => $auth_password,
|
||||
service_ensure => $service_ensure,
|
||||
rabbitmq_home => $rabbit_dbdir,
|
||||
port => $port,
|
||||
ssl => $ssl_enabled,
|
||||
default_user => $auth_user,
|
||||
default_pass => $auth_password,
|
||||
service_ensure => $service_ensure,
|
||||
rabbitmq_home => $rabbit_dbdir,
|
||||
environment_variables => {
|
||||
'RABBITMQ_NODENAME' => $node,
|
||||
'RABBITMQ_NODENAME' => $node,
|
||||
'RABBITMQ_MNESIA_BASE' => "${rabbit_dbdir}/mnesia",
|
||||
'HOME' => $rabbit_dbdir,
|
||||
'HOME' => $rabbit_dbdir,
|
||||
},
|
||||
config_variables => {
|
||||
'disk_free_limit' => '100000000',
|
||||
'heartbeat' => '30',
|
||||
config_variables => {
|
||||
'disk_free_limit' => '100000000',
|
||||
'heartbeat' => '30',
|
||||
'tcp_listen_options' => '[binary,
|
||||
{packet,raw},
|
||||
{reuseaddr,true},
|
||||
@@ -83,7 +83,7 @@ class platform::amqp::post {
|
||||
# To allow for the transition it must be explicitely stopped. Once puppet
|
||||
# can directly handle SM managed services, then this can be removed.
|
||||
exec { 'stop rabbitmq-server service':
|
||||
command => "systemctl stop rabbitmq-server; systemctl disable rabbitmq-server",
|
||||
command => 'systemctl stop rabbitmq-server; systemctl disable rabbitmq-server',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,38 +99,38 @@ class platform::amqp::bootstrap {
|
||||
|
||||
# Ensure the rabbit data directory is created in the rabbit filesystem.
|
||||
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
|
||||
file { "${rabbit_dbdir}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $rabbit_dbdir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} -> Class['::rabbitmq']
|
||||
|
||||
rabbitmq_policy {'notifications_queues_maxlen@/':
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*notifications.*',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*notifications.*',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
definition => {
|
||||
'max-length' => '10000',
|
||||
},
|
||||
}
|
||||
|
||||
rabbitmq_policy {'sample_queues_maxlen@/':
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*sample$',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*sample$',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
definition => {
|
||||
'max-length' => '100000',
|
||||
},
|
||||
}
|
||||
|
||||
rabbitmq_policy {'all_queues_ttl@/':
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
require => Class['::rabbitmq'],
|
||||
pattern => '.*',
|
||||
priority => 0,
|
||||
applyto => 'queues',
|
||||
definition => {
|
||||
'expires' => '14400000',
|
||||
}
|
||||
@@ -146,11 +146,11 @@ class platform::amqp::upgrade {
|
||||
|
||||
# Ensure the rabbit data directory is created in the rabbit filesystem.
|
||||
$rabbit_dbdir = "/var/lib/rabbitmq/${::platform::params::software_version}"
|
||||
file { "${rabbit_dbdir}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $rabbit_dbdir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} -> Class['::rabbitmq']
|
||||
|
||||
}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
class platform::anchors {
|
||||
anchor { 'platform::networking': } ->
|
||||
anchor { 'platform::services': }
|
||||
anchor { 'platform::networking': }
|
||||
-> anchor { 'platform::services': }
|
||||
}
|
||||
|
@@ -63,35 +63,35 @@ class platform::ceph
|
||||
}
|
||||
|
||||
class { '::ceph':
|
||||
fsid => $cluster_uuid,
|
||||
fsid => $cluster_uuid,
|
||||
authentication_type => $authentication_type,
|
||||
mon_initial_members => $mon_initial_members
|
||||
} ->
|
||||
ceph_config {
|
||||
"mon/mon clock drift allowed": value => ".1";
|
||||
"client.restapi/public_addr": value => $restapi_public_addr;
|
||||
}
|
||||
-> ceph_config {
|
||||
'mon/mon clock drift allowed': value => '.1';
|
||||
'client.restapi/public_addr': value => $restapi_public_addr;
|
||||
}
|
||||
if $system_type == 'All-in-one' {
|
||||
# 1 and 2 node configurations have a single monitor
|
||||
if 'duplex' in $system_mode {
|
||||
# Floating monitor, running on active controller.
|
||||
Class['::ceph'] ->
|
||||
ceph_config {
|
||||
Class['::ceph']
|
||||
-> ceph_config {
|
||||
"mon.${floating_mon_host}/host": value => $floating_mon_host;
|
||||
"mon.${floating_mon_host}/mon_addr": value => $floating_mon_addr;
|
||||
}
|
||||
} else {
|
||||
# Simplex case, a single monitor binded to the controller.
|
||||
Class['::ceph'] ->
|
||||
ceph_config {
|
||||
Class['::ceph']
|
||||
-> ceph_config {
|
||||
"mon.${mon_0_host}/host": value => $mon_0_host;
|
||||
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
# Multinode has 3 monitors.
|
||||
Class['::ceph'] ->
|
||||
ceph_config {
|
||||
Class['::ceph']
|
||||
-> ceph_config {
|
||||
"mon.${mon_0_host}/host": value => $mon_0_host;
|
||||
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
|
||||
"mon.${mon_1_host}/host": value => $mon_1_host;
|
||||
@@ -111,11 +111,11 @@ class platform::ceph::post
|
||||
inherits ::platform::ceph::params {
|
||||
# Enable ceph process recovery after all configuration is done
|
||||
file { $ceph_config_ready_path:
|
||||
ensure => present,
|
||||
ensure => present,
|
||||
content => '',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
}
|
||||
|
||||
if $service_enabled {
|
||||
@@ -134,19 +134,19 @@ class platform::ceph::monitor
|
||||
$system_type = $::platform::params::system_type
|
||||
|
||||
if $service_enabled {
|
||||
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
|
||||
if str2bool($::is_controller_active) {
|
||||
# Ceph mon is configured on a DRBD partition, on the active controller,
|
||||
# when 'ceph' storage backend is added in sysinv.
|
||||
# Then SM takes care of starting ceph after manifests are applied.
|
||||
$configure_ceph_mon = true
|
||||
} else {
|
||||
$configure_ceph_mon = false
|
||||
}
|
||||
} else {
|
||||
# Simplex, multinode. Ceph is pmon managed.
|
||||
$configure_ceph_mon = true
|
||||
}
|
||||
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
|
||||
if str2bool($::is_controller_active) {
|
||||
# Ceph mon is configured on a DRBD partition, on the active controller,
|
||||
# when 'ceph' storage backend is added in sysinv.
|
||||
# Then SM takes care of starting ceph after manifests are applied.
|
||||
$configure_ceph_mon = true
|
||||
} else {
|
||||
$configure_ceph_mon = false
|
||||
}
|
||||
} else {
|
||||
# Simplex, multinode. Ceph is pmon managed.
|
||||
$configure_ceph_mon = true
|
||||
}
|
||||
}
|
||||
else {
|
||||
$configure_ceph_mon = false
|
||||
@@ -154,18 +154,18 @@ class platform::ceph::monitor
|
||||
|
||||
if $configure_ceph_mon {
|
||||
file { '/var/lib/ceph':
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
|
||||
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
|
||||
# ensure DRBD config is complete before enabling the ceph monitor
|
||||
Drbd::Resource <| |> -> Class['::ceph']
|
||||
} else {
|
||||
File['/var/lib/ceph'] ->
|
||||
platform::filesystem { $mon_lv_name:
|
||||
File['/var/lib/ceph']
|
||||
-> platform::filesystem { $mon_lv_name:
|
||||
lv_name => $mon_lv_name,
|
||||
lv_size => $mon_lv_size,
|
||||
mountpoint => $mon_mountpoint,
|
||||
@@ -173,12 +173,12 @@ class platform::ceph::monitor
|
||||
fs_options => $mon_fs_options,
|
||||
} -> Class['::ceph']
|
||||
|
||||
file { "/etc/pmon.d/ceph.conf":
|
||||
ensure => link,
|
||||
target => "/etc/ceph/ceph.conf.pmon",
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
file { '/etc/pmon.d/ceph.conf':
|
||||
ensure => link,
|
||||
target => '/etc/ceph/ceph.conf.pmon',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,9 +188,9 @@ class platform::ceph::monitor
|
||||
# Start service on AIO SX and on active controller
|
||||
# to allow in-service configuration.
|
||||
if str2bool($::is_controller_active) or $system_type == 'All-in-one' {
|
||||
$service_ensure = "running"
|
||||
$service_ensure = 'running'
|
||||
} else {
|
||||
$service_ensure = "stopped"
|
||||
$service_ensure = 'stopped'
|
||||
}
|
||||
|
||||
# default configuration for all ceph monitor resources
|
||||
@@ -215,23 +215,23 @@ class platform::ceph::monitor
|
||||
# and set the drbd role to secondary, so that the handoff to
|
||||
# SM is done properly once we swact to the standby controller.
|
||||
# TODO: Remove this once SM supports in-service config reload.
|
||||
Ceph::Mon <| |> ->
|
||||
exec { "Stop Ceph monitor":
|
||||
command =>"/etc/init.d/ceph stop mon",
|
||||
onlyif => "/etc/init.d/ceph status mon",
|
||||
logoutput => true,
|
||||
} ->
|
||||
exec { "umount ceph-mon partition":
|
||||
command => "umount $mon_mountpoint",
|
||||
onlyif => "mount | grep -q $mon_mountpoint",
|
||||
logoutput => true,
|
||||
} ->
|
||||
exec { 'Set cephmon secondary':
|
||||
command => "drbdadm secondary drbd-cephmon",
|
||||
unless => "drbdadm role drbd-cephmon | egrep '^Secondary'",
|
||||
Ceph::Mon <| |>
|
||||
-> exec { 'Stop Ceph monitor':
|
||||
command =>'/etc/init.d/ceph stop mon',
|
||||
onlyif => '/etc/init.d/ceph status mon',
|
||||
logoutput => true,
|
||||
}
|
||||
}
|
||||
-> exec { 'umount ceph-mon partition':
|
||||
command => "umount ${mon_mountpoint}",
|
||||
onlyif => "mount | grep -q ${mon_mountpoint}",
|
||||
logoutput => true,
|
||||
}
|
||||
-> exec { 'Set cephmon secondary':
|
||||
command => 'drbdadm secondary drbd-cephmon',
|
||||
unless => "drbdadm role drbd-cephmon | egrep '^Secondary'",
|
||||
logoutput => true,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if $::hostname == $mon_0_host {
|
||||
ceph::mon { $mon_0_host:
|
||||
@@ -270,16 +270,16 @@ define platform_ceph_osd(
|
||||
}
|
||||
file { "/var/lib/ceph/osd/ceph-${osd_id}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
ceph::osd { $disk_path:
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> ceph::osd { $disk_path:
|
||||
uuid => $osd_uuid,
|
||||
} ->
|
||||
exec { "configure journal location ${name}":
|
||||
}
|
||||
-> exec { "configure journal location ${name}":
|
||||
logoutput => true,
|
||||
command => template('platform/ceph.journal.location.erb')
|
||||
command => template('platform/ceph.journal.location.erb')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,7 +290,7 @@ define platform_ceph_journal(
|
||||
) {
|
||||
exec { "configure journal partitions ${name}":
|
||||
logoutput => true,
|
||||
command => template('platform/ceph.journal.partitions.erb')
|
||||
command => template('platform/ceph.journal.partitions.erb')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -304,8 +304,8 @@ class platform::ceph::storage(
|
||||
Class['::platform::partitions'] -> Class[$name]
|
||||
|
||||
file { '/var/lib/ceph/osd':
|
||||
path => '/var/lib/ceph/osd',
|
||||
ensure => 'directory',
|
||||
path => '/var/lib/ceph/osd',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
@@ -390,12 +390,12 @@ class platform::ceph::rgw
|
||||
|
||||
ceph_config {
|
||||
# increase limit for single operation uploading to 50G (50*1024*1024*1024)
|
||||
"client.$rgw_client_name/rgw_max_put_size": value => $rgw_max_put_size;
|
||||
"client.${rgw_client_name}/rgw_max_put_size": value => $rgw_max_put_size;
|
||||
# increase frequency and scope of garbage collection
|
||||
"client.$rgw_client_name/rgw_gc_max_objs": value => $rgw_gc_max_objs;
|
||||
"client.$rgw_client_name/rgw_gc_obj_min_wait": value => $rgw_gc_obj_min_wait;
|
||||
"client.$rgw_client_name/rgw_gc_processor_max_time": value => $rgw_gc_processor_max_time;
|
||||
"client.$rgw_client_name/rgw_gc_processor_period": value => $rgw_gc_processor_period;
|
||||
"client.${rgw_client_name}/rgw_gc_max_objs": value => $rgw_gc_max_objs;
|
||||
"client.${rgw_client_name}/rgw_gc_obj_min_wait": value => $rgw_gc_obj_min_wait;
|
||||
"client.${rgw_client_name}/rgw_gc_processor_max_time": value => $rgw_gc_processor_max_time;
|
||||
"client.${rgw_client_name}/rgw_gc_processor_period": value => $rgw_gc_processor_period;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -446,9 +446,9 @@ class platform::ceph::controller::runtime {
|
||||
# Make sure ceph-rest-api is running as it is needed by sysinv config
|
||||
# TODO(oponcea): Remove when sm supports in-service config reload
|
||||
if str2bool($::is_controller_active) {
|
||||
Ceph::Mon <| |> ->
|
||||
exec { "/etc/init.d/ceph-rest-api start":
|
||||
command => "/etc/init.d/ceph-rest-api start"
|
||||
Ceph::Mon <| |>
|
||||
-> exec { '/etc/init.d/ceph-rest-api start':
|
||||
command => '/etc/init.d/ceph-rest-api start'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -15,8 +15,8 @@ class platform::client
|
||||
include ::platform::client::credentials::params
|
||||
$keyring_file = $::platform::client::credentials::params::keyring_file
|
||||
|
||||
file {"/etc/platform/openrc":
|
||||
ensure => "present",
|
||||
file {'/etc/platform/openrc':
|
||||
ensure => 'present',
|
||||
mode => '0640',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@@ -33,25 +33,25 @@ class platform::client::credentials::params (
|
||||
class platform::client::credentials
|
||||
inherits ::platform::client::credentials::params {
|
||||
|
||||
Class['::platform::drbd::platform'] ->
|
||||
file { "${keyring_base}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${keyring_directory}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { "${keyring_file}":
|
||||
Class['::platform::drbd::platform']
|
||||
-> file { $keyring_base:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $keyring_directory:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { $keyring_file:
|
||||
ensure => 'file',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
content => "keyring get CGCS admin"
|
||||
content => 'keyring get CGCS admin'
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,45 +1,45 @@
|
||||
class platform::collectd::params (
|
||||
$interval = undef,
|
||||
$timeout = undef,
|
||||
$read_threads = undef,
|
||||
$write_threads = undef,
|
||||
$write_queue_limit_high = undef,
|
||||
$write_queue_limit_low = undef,
|
||||
$server_addrs = [],
|
||||
$server_port = undef,
|
||||
$max_read_interval = undef,
|
||||
$interval = undef,
|
||||
$timeout = undef,
|
||||
$read_threads = undef,
|
||||
$write_threads = undef,
|
||||
$write_queue_limit_high = undef,
|
||||
$write_queue_limit_low = undef,
|
||||
$server_addrs = [],
|
||||
$server_port = undef,
|
||||
$max_read_interval = undef,
|
||||
|
||||
# python plugin controls
|
||||
$module_path = undef,
|
||||
$plugins = [],
|
||||
$mtce_notifier_port = undef,
|
||||
$log_traces = undef,
|
||||
$encoding = undef,
|
||||
# python plugin controls
|
||||
$module_path = undef,
|
||||
$plugins = [],
|
||||
$mtce_notifier_port = undef,
|
||||
$log_traces = undef,
|
||||
$encoding = undef,
|
||||
|
||||
$collectd_d_dir = undef,
|
||||
$collectd_d_dir = undef,
|
||||
) {}
|
||||
|
||||
|
||||
class platform::collectd
|
||||
inherits ::platform::collectd::params {
|
||||
|
||||
file { "/etc/collectd.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/collectd.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/collectd.conf.erb'),
|
||||
} -> # now start collectd
|
||||
} # now start collectd
|
||||
|
||||
# ensure that collectd is running
|
||||
service { 'collectd':
|
||||
ensure => running,
|
||||
enable => true,
|
||||
provider => 'systemd'
|
||||
} -> # now get pmond to monitor the process
|
||||
-> service { 'collectd':
|
||||
ensure => running,
|
||||
enable => true,
|
||||
provider => 'systemd'
|
||||
} # now get pmond to monitor the process
|
||||
|
||||
# ensure pmon soft link for process monitoring
|
||||
file { "/etc/pmon.d/collectd.conf":
|
||||
-> file { '/etc/pmon.d/collectd.conf':
|
||||
ensure => 'link',
|
||||
target => "/opt/collectd/extensions/config/collectd.conf.pmon",
|
||||
target => '/opt/collectd/extensions/config/collectd.conf.pmon',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
@@ -53,7 +53,7 @@ class platform::collectd::runtime {
|
||||
# restart target
|
||||
class platform::collectd::restart {
|
||||
include ::platform::collectd
|
||||
exec { "collectd-restart":
|
||||
exec { 'collectd-restart':
|
||||
command => '/usr/local/sbin/pmon-restart collect'
|
||||
}
|
||||
}
|
||||
|
@@ -10,8 +10,8 @@ class platform::compute::params (
|
||||
class platform::compute::config
|
||||
inherits ::platform::compute::params {
|
||||
|
||||
file { "/etc/platform/worker_reserved.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/platform/worker_reserved.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/worker_reserved.conf.erb')
|
||||
}
|
||||
@@ -32,7 +32,7 @@ class platform::compute::grub::params (
|
||||
}
|
||||
|
||||
if $::is_gb_page_supported {
|
||||
$gb_hugepages = "hugepagesz=1G hugepages=$::number_of_numa_nodes"
|
||||
$gb_hugepages = "hugepagesz=1G hugepages=${::number_of_numa_nodes}"
|
||||
} else {
|
||||
$gb_hugepages = ''
|
||||
}
|
||||
@@ -43,25 +43,25 @@ class platform::compute::grub::params (
|
||||
class platform::compute::grub::update
|
||||
inherits ::platform::compute::grub::params {
|
||||
|
||||
notice("Updating grub configuration")
|
||||
notice('Updating grub configuration')
|
||||
|
||||
$to_be_removed = join($keys, " ")
|
||||
exec { "Remove the cpu arguments":
|
||||
command => "grubby --update-kernel=ALL --remove-args='$to_be_removed'",
|
||||
} ->
|
||||
exec { "Add the cpu arguments":
|
||||
command => "grubby --update-kernel=ALL --args='$grub_updates'",
|
||||
$to_be_removed = join($keys, ' ')
|
||||
exec { 'Remove the cpu arguments':
|
||||
command => "grubby --update-kernel=ALL --remove-args='${to_be_removed}'",
|
||||
}
|
||||
-> exec { 'Add the cpu arguments':
|
||||
command => "grubby --update-kernel=ALL --args='${grub_updates}'",
|
||||
}
|
||||
}
|
||||
|
||||
class platform::compute::grub::recovery {
|
||||
|
||||
notice("Update Grub and Reboot")
|
||||
notice('Update Grub and Reboot')
|
||||
|
||||
class {'platform::compute::grub::update': } -> Exec['reboot-recovery']
|
||||
|
||||
exec { "reboot-recovery":
|
||||
command => "reboot",
|
||||
exec { 'reboot-recovery':
|
||||
command => 'reboot',
|
||||
}
|
||||
}
|
||||
|
||||
@ -70,29 +70,31 @@ class platform::compute::grub::audit
|
||||
|
||||
if ! str2bool($::is_initial_config_primary) {
|
||||
|
||||
notice("Audit CPU and Grub Configuration")
|
||||
notice('Audit CPU and Grub Configuration')
|
||||
|
||||
$expected_n_cpus = $::number_of_logical_cpus
|
||||
$n_cpus_ok = ("$n_cpus" == "$expected_n_cpus")
|
||||
$expected_n_cpus = Integer($::number_of_logical_cpus)
|
||||
$n_cpus_ok = ($n_cpus == $expected_n_cpus)
|
||||
|
||||
$cmd_ok = check_grub_config($grub_updates)
|
||||
|
||||
if $cmd_ok and $n_cpus_ok {
|
||||
$ensure = present
|
||||
notice("CPU and Boot Argument audit passed.")
|
||||
notice('CPU and Boot Argument audit passed.')
|
||||
} else {
|
||||
$ensure = absent
|
||||
if !$cmd_ok {
|
||||
notice("Kernel Boot Argument Mismatch")
|
||||
notice('Kernel Boot Argument Mismatch')
|
||||
include ::platform::compute::grub::recovery
|
||||
} else {
|
||||
notice("Mismatched CPUs: Found=${n_cpus}, Expected=${expected_n_cpus}")
|
||||
}
|
||||
}
|
||||
|
||||
file { "/var/run/worker_goenabled":
|
||||
ensure => $ensure,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
file { '/var/run/worker_goenabled':
|
||||
ensure => $ensure,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -106,47 +108,47 @@ class platform::compute::hugetlbf {
|
||||
|
||||
if str2bool($::is_hugetlbfs_enabled) {
|
||||
|
||||
$fs_list = generate("/bin/bash", "-c", "ls -1d /sys/kernel/mm/hugepages/hugepages-*")
|
||||
$fs_list = generate('/bin/bash', '-c', 'ls -1d /sys/kernel/mm/hugepages/hugepages-*')
|
||||
$array = split($fs_list, '\n')
|
||||
$array.each | String $val | {
|
||||
$page_name = generate("/bin/bash", "-c", "basename $val")
|
||||
$page_name = generate('/bin/bash', '-c', "basename ${val}")
|
||||
$page_size = strip(regsubst($page_name, 'hugepages-', ''))
|
||||
$hugemnt ="/mnt/huge-$page_size"
|
||||
$hugemnt ="/mnt/huge-${page_size}"
|
||||
$options = "pagesize=${page_size}"
|
||||
|
||||
# TODO: Once all the code is switched over to use the /dev
|
||||
# mount point we can get rid of this mount point.
|
||||
notice("Mounting hugetlbfs at: $hugemnt")
|
||||
exec { "create $hugemnt":
|
||||
notice("Mounting hugetlbfs at: ${hugemnt}")
|
||||
exec { "create ${hugemnt}":
|
||||
command => "mkdir -p ${hugemnt}",
|
||||
onlyif => "test ! -d ${hugemnt}",
|
||||
} ->
|
||||
mount { "${hugemnt}":
|
||||
name => "${hugemnt}",
|
||||
}
|
||||
-> mount { $hugemnt:
|
||||
ensure => 'mounted',
|
||||
device => 'none',
|
||||
fstype => 'hugetlbfs',
|
||||
ensure => 'mounted',
|
||||
options => "${options}",
|
||||
name => $hugemnt,
|
||||
options => $options,
|
||||
atboot => 'yes',
|
||||
remounts => true,
|
||||
}
|
||||
|
||||
# The libvirt helm chart expects hugepages to be mounted
|
||||
# under /dev so let's do that.
|
||||
$hugemnt2 ="/dev/huge-$page_size"
|
||||
notice("Mounting hugetlbfs at: $hugemnt2")
|
||||
file { "${hugemnt2}":
|
||||
$hugemnt2 ="/dev/huge-${page_size}"
|
||||
notice("Mounting hugetlbfs at: ${hugemnt2}")
|
||||
file { $hugemnt2:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}->
|
||||
mount { "${hugemnt2}":
|
||||
name => "${hugemnt2}",
|
||||
}
|
||||
-> mount { $hugemnt2:
|
||||
ensure => 'mounted',
|
||||
device => 'none',
|
||||
fstype => 'hugetlbfs',
|
||||
ensure => 'mounted',
|
||||
options => "${options}",
|
||||
name => $hugemnt2,
|
||||
options => $options,
|
||||
atboot => 'yes',
|
||||
remounts => true,
|
||||
}
|
||||
@ -157,20 +159,20 @@ class platform::compute::hugetlbf {
|
||||
# Once we upstream a fix to the helm chart to automatically determine
|
||||
# the mountpoint then we can remove this.
|
||||
$page_size = '2M'
|
||||
$hugemnt ="/dev/hugepages"
|
||||
$hugemnt ='/dev/hugepages'
|
||||
$options = "pagesize=${page_size}"
|
||||
|
||||
notice("Mounting hugetlbfs at: $hugemnt")
|
||||
exec { "create $hugemnt":
|
||||
notice("Mounting hugetlbfs at: ${hugemnt}")
|
||||
exec { "create ${hugemnt}":
|
||||
command => "mkdir -p ${hugemnt}",
|
||||
onlyif => "test ! -d ${hugemnt}",
|
||||
} ->
|
||||
mount { "${hugemnt}":
|
||||
name => "${hugemnt}",
|
||||
}
|
||||
-> mount { $hugemnt:
|
||||
ensure => 'mounted',
|
||||
device => 'none',
|
||||
fstype => 'hugetlbfs',
|
||||
ensure => 'mounted',
|
||||
options => "${options}",
|
||||
name => $hugemnt,
|
||||
options => $options,
|
||||
atboot => 'yes',
|
||||
remounts => true,
|
||||
}
|
||||
@ -193,8 +195,8 @@ define allocate_pages (
|
||||
$page_count,
|
||||
) {
|
||||
exec { "Allocate ${page_count} ${path}":
|
||||
command => "echo $page_count > $path",
|
||||
onlyif => "test -f $path",
|
||||
command => "echo ${page_count} > ${path}",
|
||||
onlyif => "test -f ${path}",
|
||||
}
|
||||
}
|
||||
|
||||
@ -218,7 +220,7 @@ class platform::compute::allocate
|
||||
$node = $per_node_2M[0]
|
||||
$page_size = $per_node_2M[1]
|
||||
allocate_pages { "Start ${node} ${page_size}":
|
||||
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
|
||||
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
|
||||
page_count => $per_node_2M[2],
|
||||
}
|
||||
}
|
||||
@ -233,7 +235,7 @@ class platform::compute::allocate
|
||||
$node = $per_node_1G[0]
|
||||
$page_size = $per_node_1G[1]
|
||||
allocate_pages { "Start ${node} ${page_size}":
|
||||
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
|
||||
path => "${nodefs}/${node}/hugepages/hugepages-${page_size}/nr_hugepages",
|
||||
page_count => $per_node_1G[2],
|
||||
}
|
||||
}
|
||||
@ -246,8 +248,8 @@ class platform::compute::extend
|
||||
|
||||
# nova-compute reads on init, extended nova compute options
|
||||
# used with nova accounting
|
||||
file { "/etc/nova/compute_extend.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/nova/compute_extend.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/compute_extend.conf.erb')
|
||||
}
|
||||
@ -257,11 +259,11 @@ class platform::compute::extend
|
||||
class platform::compute::resctrl {
|
||||
|
||||
if str2bool($::is_resctrl_supported) {
|
||||
mount { "/sys/fs/resctrl":
|
||||
name => '/sys/fs/resctrl',
|
||||
mount { '/sys/fs/resctrl':
|
||||
ensure => 'mounted',
|
||||
device => 'resctrl',
|
||||
fstype => 'resctrl',
|
||||
ensure => 'mounted',
|
||||
name => '/sys/fs/resctrl',
|
||||
atboot => 'yes',
|
||||
remounts => true,
|
||||
}
|
||||
@ -278,22 +280,22 @@ class platform::compute::pmqos (
|
||||
|
||||
if str2bool($::is_worker_subfunction) and str2bool($::is_lowlatency_subfunction) {
|
||||
|
||||
$script = "/usr/bin/set-cpu-wakeup-latency.sh"
|
||||
$script = '/usr/bin/set-cpu-wakeup-latency.sh'
|
||||
|
||||
if $low_wakeup_cpus != '""' {
|
||||
# Set low wakeup latency (shallow C-state) for vswitch CPUs using PM QoS interface
|
||||
exec { "low-wakeup-latency":
|
||||
command => "${script} low ${low_wakeup_cpus}",
|
||||
onlyif => "test -f ${script}",
|
||||
exec { 'low-wakeup-latency':
|
||||
command => "${script} low ${low_wakeup_cpus}",
|
||||
onlyif => "test -f ${script}",
|
||||
logoutput => true,
|
||||
}
|
||||
}
|
||||
|
||||
if $hight_wakeup_cpus != '""' {
|
||||
#Set high wakeup latency (deep C-state) for non-vswitch CPUs using PM QoS interface
|
||||
exec { "high-wakeup-latency":
|
||||
command => "${script} high ${hight_wakeup_cpus}",
|
||||
onlyif => "test -f ${script}",
|
||||
exec { 'high-wakeup-latency':
|
||||
command => "${script} high ${hight_wakeup_cpus}",
|
||||
onlyif => "test -f ${script}",
|
||||
logoutput => true,
|
||||
}
|
||||
}
|
||||
|
@ -4,18 +4,18 @@ class platform::config::params (
|
||||
$timezone = 'UTC',
|
||||
) { }
|
||||
|
||||
class platform::config
|
||||
class platform::config
|
||||
inherits ::platform::config::params {
|
||||
|
||||
include ::platform::params
|
||||
include ::platform::anchors
|
||||
|
||||
stage { 'pre':
|
||||
before => Stage["main"],
|
||||
before => Stage['main'],
|
||||
}
|
||||
|
||||
stage { 'post':
|
||||
require => Stage["main"],
|
||||
require => Stage['main'],
|
||||
}
|
||||
|
||||
class { '::platform::config::pre':
|
||||
@ -43,32 +43,32 @@ class platform::config::file {
|
||||
$platform_conf = '/etc/platform/platform.conf'
|
||||
|
||||
file_line { "${platform_conf} sw_version":
|
||||
path => $platform_conf,
|
||||
line => "sw_version=${::platform::params::software_version}",
|
||||
match => '^sw_version=',
|
||||
path => $platform_conf,
|
||||
line => "sw_version=${::platform::params::software_version}",
|
||||
match => '^sw_version=',
|
||||
}
|
||||
|
||||
if $management_interface {
|
||||
file_line { "${platform_conf} management_interface":
|
||||
path => $platform_conf,
|
||||
line => "management_interface=${management_interface}",
|
||||
match => '^management_interface=',
|
||||
path => $platform_conf,
|
||||
line => "management_interface=${management_interface}",
|
||||
match => '^management_interface=',
|
||||
}
|
||||
}
|
||||
|
||||
if $infrastructure_interface {
|
||||
file_line { "${platform_conf} infrastructure_interface":
|
||||
path => '/etc/platform/platform.conf',
|
||||
line => "infrastructure_interface=${infrastructure_interface}",
|
||||
match => '^infrastructure_interface=',
|
||||
path => '/etc/platform/platform.conf',
|
||||
line => "infrastructure_interface=${infrastructure_interface}",
|
||||
match => '^infrastructure_interface=',
|
||||
}
|
||||
}
|
||||
|
||||
if $oam_interface {
|
||||
file_line { "${platform_conf} oam_interface":
|
||||
path => $platform_conf,
|
||||
line => "oam_interface=${oam_interface}",
|
||||
match => '^oam_interface=',
|
||||
path => $platform_conf,
|
||||
line => "oam_interface=${oam_interface}",
|
||||
match => '^oam_interface=',
|
||||
}
|
||||
}
|
||||
|
||||
@ -82,80 +82,80 @@ class platform::config::file {
|
||||
|
||||
if $::platform::params::system_type {
|
||||
file_line { "${platform_conf} system_type":
|
||||
path => $platform_conf,
|
||||
line => "system_type=${::platform::params::system_type}",
|
||||
match => '^system_type=*',
|
||||
path => $platform_conf,
|
||||
line => "system_type=${::platform::params::system_type}",
|
||||
match => '^system_type=*',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::system_mode {
|
||||
file_line { "${platform_conf} system_mode":
|
||||
path => $platform_conf,
|
||||
line => "system_mode=${::platform::params::system_mode}",
|
||||
match => '^system_mode=*',
|
||||
path => $platform_conf,
|
||||
line => "system_mode=${::platform::params::system_mode}",
|
||||
match => '^system_mode=*',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::security_profile {
|
||||
file_line { "${platform_conf} security_profile":
|
||||
path => $platform_conf,
|
||||
line => "security_profile=${::platform::params::security_profile}",
|
||||
match => '^security_profile=*',
|
||||
path => $platform_conf,
|
||||
line => "security_profile=${::platform::params::security_profile}",
|
||||
match => '^security_profile=*',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::sdn_enabled {
|
||||
file_line { "${platform_conf}f sdn_enabled":
|
||||
path => $platform_conf,
|
||||
line => "sdn_enabled=yes",
|
||||
match => '^sdn_enabled=',
|
||||
path => $platform_conf,
|
||||
line => 'sdn_enabled=yes',
|
||||
match => '^sdn_enabled=',
|
||||
}
|
||||
}
|
||||
else {
|
||||
file_line { "${platform_conf} sdn_enabled":
|
||||
path => $platform_conf,
|
||||
line => 'sdn_enabled=no',
|
||||
match => '^sdn_enabled=',
|
||||
path => $platform_conf,
|
||||
line => 'sdn_enabled=no',
|
||||
match => '^sdn_enabled=',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::region_config {
|
||||
file_line { "${platform_conf} region_config":
|
||||
path => $platform_conf,
|
||||
line => 'region_config=yes',
|
||||
match => '^region_config=',
|
||||
path => $platform_conf,
|
||||
line => 'region_config=yes',
|
||||
match => '^region_config=',
|
||||
}
|
||||
file_line { "${platform_conf} region_1_name":
|
||||
path => $platform_conf,
|
||||
line => "region_1_name=${::platform::params::region_1_name}",
|
||||
match => '^region_1_name=',
|
||||
path => $platform_conf,
|
||||
line => "region_1_name=${::platform::params::region_1_name}",
|
||||
match => '^region_1_name=',
|
||||
}
|
||||
file_line { "${platform_conf} region_2_name":
|
||||
path => $platform_conf,
|
||||
line => "region_2_name=${::platform::params::region_2_name}",
|
||||
match => '^region_2_name=',
|
||||
path => $platform_conf,
|
||||
line => "region_2_name=${::platform::params::region_2_name}",
|
||||
match => '^region_2_name=',
|
||||
}
|
||||
} else {
|
||||
file_line { "${platform_conf} region_config":
|
||||
path => $platform_conf,
|
||||
line => 'region_config=no',
|
||||
match => '^region_config=',
|
||||
path => $platform_conf,
|
||||
line => 'region_config=no',
|
||||
match => '^region_config=',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::distributed_cloud_role {
|
||||
file_line { "${platform_conf} distributed_cloud_role":
|
||||
path => $platform_conf,
|
||||
line => "distributed_cloud_role=${::platform::params::distributed_cloud_role}",
|
||||
match => '^distributed_cloud_role=',
|
||||
path => $platform_conf,
|
||||
line => "distributed_cloud_role=${::platform::params::distributed_cloud_role}",
|
||||
match => '^distributed_cloud_role=',
|
||||
}
|
||||
}
|
||||
|
||||
if $::platform::params::security_feature {
|
||||
file_line { "${platform_conf} security_feature":
|
||||
path => $platform_conf,
|
||||
line => "security_feature=\"${::platform::params::security_feature}\"",
|
||||
match => '^security_feature=*',
|
||||
path => $platform_conf,
|
||||
line => "security_feature=\"${::platform::params::security_feature}\"",
|
||||
match => '^security_feature=*',
|
||||
}
|
||||
}
|
||||
|
||||
@ -165,18 +165,18 @@ class platform::config::file {
|
||||
class platform::config::hostname {
|
||||
include ::platform::params
|
||||
|
||||
file { "/etc/hostname":
|
||||
file { '/etc/hostname':
|
||||
ensure => present,
|
||||
owner => root,
|
||||
group => root,
|
||||
mode => '0644',
|
||||
content => "${::platform::params::hostname}\n",
|
||||
notify => Exec["set-hostname"],
|
||||
notify => Exec['set-hostname'],
|
||||
}
|
||||
|
||||
exec { "set-hostname":
|
||||
exec { 'set-hostname':
|
||||
command => 'hostname -F /etc/hostname',
|
||||
unless => "test `hostname` = `cat /etc/hostname`",
|
||||
unless => 'test `hostname` = `cat /etc/hostname`',
|
||||
}
|
||||
}
|
||||
|
||||
@ -214,11 +214,11 @@ class platform::config::tpm {
|
||||
# iterate through each tpm_cert creating it if it doesn't exist
|
||||
$tpm_certs.each |String $key, String $value| {
|
||||
file { "create-TPM-cert-${key}":
|
||||
path => $key,
|
||||
ensure => present,
|
||||
owner => root,
|
||||
group => root,
|
||||
mode => '0644',
|
||||
ensure => present,
|
||||
path => $key,
|
||||
owner => root,
|
||||
group => root,
|
||||
mode => '0644',
|
||||
content => $value,
|
||||
}
|
||||
}
|
||||
@ -280,44 +280,44 @@ class platform::config::controller::post
|
||||
}
|
||||
}
|
||||
|
||||
file { "/etc/platform/.initial_controller_config_complete":
|
||||
ensure => present,
|
||||
file { '/etc/platform/.initial_controller_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
|
||||
file { "/var/run/.controller_config_complete":
|
||||
ensure => present,
|
||||
file { '/var/run/.controller_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
}
|
||||
|
||||
class platform::config::worker::post
|
||||
{
|
||||
file { "/etc/platform/.initial_worker_config_complete":
|
||||
ensure => present,
|
||||
file { '/etc/platform/.initial_worker_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
|
||||
file { "/var/run/.worker_config_complete":
|
||||
ensure => present,
|
||||
file { '/var/run/.worker_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
}
|
||||
|
||||
class platform::config::storage::post
|
||||
{
|
||||
file { "/etc/platform/.initial_storage_config_complete":
|
||||
ensure => present,
|
||||
file { '/etc/platform/.initial_storage_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
|
||||
file { "/var/run/.storage_config_complete":
|
||||
ensure => present,
|
||||
file { '/var/run/.storage_config_complete':
|
||||
ensure => present,
|
||||
}
|
||||
}
|
||||
|
||||
class platform::config::bootstrap {
|
||||
stage { 'pre':
|
||||
before => Stage["main"],
|
||||
before => Stage['main'],
|
||||
}
|
||||
|
||||
stage { 'post':
|
||||
require => Stage["main"],
|
||||
require => Stage['main'],
|
||||
}
|
||||
|
||||
include ::platform::params
|
||||
|
@ -5,7 +5,7 @@ class platform::dcmanager::params (
|
||||
$domain_admin = undef,
|
||||
$domain_pwd = undef,
|
||||
$service_name = 'dcmanager',
|
||||
$default_endpoint_type = "internalURL",
|
||||
$default_endpoint_type = 'internalURL',
|
||||
$service_create = false,
|
||||
) {
|
||||
include ::platform::params
|
||||
@ -18,7 +18,7 @@ class platform::dcmanager::params (
|
||||
class platform::dcmanager
|
||||
inherits ::platform::dcmanager::params {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
include ::platform::params
|
||||
include ::platform::params
|
||||
include ::platform::amqp::params
|
||||
|
||||
if $::platform::params::init_database {
|
||||
@ -26,9 +26,9 @@ class platform::dcmanager
|
||||
}
|
||||
|
||||
class { '::dcmanager':
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_password => $::platform::amqp::params::auth_password,
|
||||
}
|
||||
}
|
||||
@ -37,7 +37,7 @@ class platform::dcmanager
|
||||
|
||||
class platform::dcmanager::firewall
|
||||
inherits ::platform::dcmanager::params {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
platform::firewall::rule { 'dcmanager-api':
|
||||
service_name => 'dcmanager',
|
||||
ports => $api_port,
|
||||
@ -50,8 +50,8 @@ class platform::dcmanager::haproxy
|
||||
inherits ::platform::dcmanager::params {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
platform::haproxy::proxy { 'dcmanager-restapi':
|
||||
server_name => 's-dcmanager',
|
||||
public_port => $api_port,
|
||||
server_name => 's-dcmanager',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -74,7 +74,7 @@ class platform::dcmanager::api
|
||||
class { '::dcmanager::api':
|
||||
bind_host => $api_host,
|
||||
}
|
||||
|
||||
|
||||
|
||||
include ::platform::dcmanager::firewall
|
||||
include ::platform::dcmanager::haproxy
|
||||
|
@ -5,7 +5,7 @@ class platform::dcorch::params (
|
||||
$domain_admin = undef,
|
||||
$domain_pwd = undef,
|
||||
$service_name = 'dcorch',
|
||||
$default_endpoint_type = "internalURL",
|
||||
$default_endpoint_type = 'internalURL',
|
||||
$service_create = false,
|
||||
$neutron_api_proxy_port = 29696,
|
||||
$nova_api_proxy_port = 28774,
|
||||
@ -33,11 +33,11 @@ class platform::dcorch
|
||||
}
|
||||
|
||||
class { '::dcorch':
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_password => $::platform::amqp::params::auth_password,
|
||||
proxy_bind_host => $api_host,
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_password => $::platform::amqp::params::auth_password,
|
||||
proxy_bind_host => $api_host,
|
||||
proxy_remote_host => $api_host,
|
||||
}
|
||||
}
|
||||
@ -54,29 +54,29 @@ class platform::dcorch::firewall
|
||||
}
|
||||
platform::firewall::rule { 'dcorch-sysinv-api-proxy':
|
||||
service_name => 'dcorch-sysinv-api-proxy',
|
||||
ports => $sysinv_api_proxy_port,
|
||||
ports => $sysinv_api_proxy_port,
|
||||
}
|
||||
platform::firewall::rule { 'dcorch-nova-api-proxy':
|
||||
service_name => 'dcorch-nova-api-proxy',
|
||||
ports => $nova_api_proxy_port,
|
||||
ports => $nova_api_proxy_port,
|
||||
}
|
||||
platform::firewall::rule { 'dcorch-neutron-api-proxy':
|
||||
service_name => 'dcorch-neutron-api-proxy',
|
||||
ports => $neutron_api_proxy_port,
|
||||
ports => $neutron_api_proxy_port,
|
||||
}
|
||||
if $::openstack::cinder::params::service_enabled {
|
||||
platform::firewall::rule { 'dcorch-cinder-api-proxy':
|
||||
service_name => 'dcorch-cinder-api-proxy',
|
||||
ports => $cinder_api_proxy_port,
|
||||
ports => $cinder_api_proxy_port,
|
||||
}
|
||||
}
|
||||
platform::firewall::rule { 'dcorch-patch-api-proxy':
|
||||
service_name => 'dcorch-patch-api-proxy',
|
||||
ports => $patch_api_proxy_port,
|
||||
ports => $patch_api_proxy_port,
|
||||
}
|
||||
platform::firewall::rule { 'dcorch-identity-api-proxy':
|
||||
service_name => 'dcorch-identity-api-proxy',
|
||||
ports => $identity_api_proxy_port,
|
||||
ports => $identity_api_proxy_port,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -87,48 +87,48 @@ class platform::dcorch::haproxy
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
include ::openstack::cinder::params
|
||||
platform::haproxy::proxy { 'dcorch-neutron-api-proxy':
|
||||
server_name => 's-dcorch-neutron-api-proxy',
|
||||
public_port => $neutron_api_proxy_port,
|
||||
server_name => 's-dcorch-neutron-api-proxy',
|
||||
public_port => $neutron_api_proxy_port,
|
||||
private_port => $neutron_api_proxy_port,
|
||||
}
|
||||
platform::haproxy::proxy { 'dcorch-nova-api-proxy':
|
||||
server_name => 's-dcorch-nova-api-proxy',
|
||||
public_port => $nova_api_proxy_port,
|
||||
server_name => 's-dcorch-nova-api-proxy',
|
||||
public_port => $nova_api_proxy_port,
|
||||
private_port => $nova_api_proxy_port,
|
||||
}
|
||||
platform::haproxy::proxy { 'dcorch-sysinv-api-proxy':
|
||||
server_name => 's-dcorch-sysinv-api-proxy',
|
||||
public_port => $sysinv_api_proxy_port,
|
||||
server_name => 's-dcorch-sysinv-api-proxy',
|
||||
public_port => $sysinv_api_proxy_port,
|
||||
private_port => $sysinv_api_proxy_port,
|
||||
}
|
||||
if $::openstack::cinder::params::service_enabled {
|
||||
platform::haproxy::proxy { 'dcorch-cinder-api-proxy':
|
||||
server_name => 's-cinder-dc-api-proxy',
|
||||
public_port => $cinder_api_proxy_port,
|
||||
server_name => 's-cinder-dc-api-proxy',
|
||||
public_port => $cinder_api_proxy_port,
|
||||
private_port => $cinder_api_proxy_port,
|
||||
}
|
||||
}
|
||||
platform::haproxy::proxy { 'dcorch-patch-api-proxy':
|
||||
server_name => 's-dcorch-patch-api-proxy',
|
||||
public_port => $patch_api_proxy_port,
|
||||
server_name => 's-dcorch-patch-api-proxy',
|
||||
public_port => $patch_api_proxy_port,
|
||||
private_port => $patch_api_proxy_port,
|
||||
}
|
||||
platform::haproxy::proxy { 'dcorch-identity-api-proxy':
|
||||
server_name => 's-dcorch-identity-api-proxy',
|
||||
public_port => $identity_api_proxy_port,
|
||||
server_name => 's-dcorch-identity-api-proxy',
|
||||
public_port => $identity_api_proxy_port,
|
||||
private_port => $identity_api_proxy_port,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class platform::dcorch::engine
|
||||
class platform::dcorch::engine
|
||||
inherits ::platform::dcorch::params {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
include ::dcorch::engine
|
||||
}
|
||||
}
|
||||
|
||||
class platform::dcorch::snmp
|
||||
class platform::dcorch::snmp
|
||||
inherits ::platform::dcorch::params {
|
||||
if $::platform::params::distributed_cloud_role =='systemcontroller' {
|
||||
class { '::dcorch::snmp':
|
||||
|
@ -2,23 +2,23 @@ define qat_device_files(
|
||||
$qat_idx,
|
||||
$device_id,
|
||||
) {
|
||||
if $device_id == "dh895xcc"{
|
||||
if $device_id == 'dh895xcc'{
|
||||
file { "/etc/dh895xcc_dev${qat_idx}.conf":
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
notify => Service['qat_service'],
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
notify => Service['qat_service'],
|
||||
}
|
||||
}
|
||||
|
||||
if $device_id == "c62x"{
|
||||
if $device_id == 'c62x'{
|
||||
file { "/etc/c62x_dev${qat_idx}.conf":
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
notify => Service['qat_service'],
|
||||
ensure => 'present',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
notify => Service['qat_service'],
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -35,7 +35,7 @@ class platform::devices::qat (
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
hasrestart => true,
|
||||
notify => Service['sysinv-agent'],
|
||||
notify => Service['sysinv-agent'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -10,11 +10,11 @@ class platform::dhclient
|
||||
$infra_interface = $::platform::network::infra::params::interface_name
|
||||
$infra_subnet_version = $::platform::network::infra::params::subnet_version
|
||||
|
||||
file { "/etc/dhcp/dhclient.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/dhcp/dhclient.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/dhclient.conf.erb'),
|
||||
before => Class['::platform::network::apply'],
|
||||
before => Class['::platform::network::apply'],
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -63,8 +63,8 @@ class platform::dns::dnsmasq {
|
||||
}
|
||||
}
|
||||
|
||||
file { "/etc/dnsmasq.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/dnsmasq.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/dnsmasq.conf.erb'),
|
||||
}
|
||||
@ -74,8 +74,8 @@ class platform::dns::dnsmasq {
|
||||
class platform::dns::resolv (
|
||||
$servers,
|
||||
) {
|
||||
file { "/etc/resolv.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/resolv.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/resolv.conf.erb')
|
||||
}
|
||||
|
@ -2,7 +2,7 @@ class platform::docker::params (
|
||||
$package_name = 'docker-ce',
|
||||
) { }
|
||||
|
||||
class platform::docker::config
|
||||
class platform::docker::config
|
||||
inherits ::platform::docker::params {
|
||||
|
||||
include ::platform::kubernetes::params
|
||||
@ -12,23 +12,23 @@ class platform::docker::config
|
||||
Class['::platform::filesystem::docker'] ~> Class[$name]
|
||||
|
||||
service { 'docker':
|
||||
ensure => 'running',
|
||||
name => 'docker',
|
||||
enable => true,
|
||||
require => Package['docker']
|
||||
} ->
|
||||
exec { 'enable-docker':
|
||||
ensure => 'running',
|
||||
name => 'docker',
|
||||
enable => true,
|
||||
require => Package['docker']
|
||||
}
|
||||
-> exec { 'enable-docker':
|
||||
command => '/usr/bin/systemctl enable docker.service',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class platform::docker::install
|
||||
class platform::docker::install
|
||||
inherits ::platform::docker::params {
|
||||
|
||||
package { 'docker':
|
||||
ensure => 'installed',
|
||||
name => $package_name,
|
||||
ensure => 'installed',
|
||||
name => $package_name,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -12,21 +12,21 @@ class platform::dockerdistribution::config
|
||||
|
||||
# currently docker registry is running insecure mode
|
||||
# when proper authentication is implemented, this would go away
|
||||
file { "/etc/docker":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
} ->
|
||||
file { "/etc/docker/daemon.json":
|
||||
file { '/etc/docker':
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
-> file { '/etc/docker/daemon.json':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
content => template('platform/insecuredockerregistry.conf.erb'),
|
||||
} ->
|
||||
}
|
||||
|
||||
file { "/etc/docker-distribution/registry/config.yml":
|
||||
-> file { '/etc/docker-distribution/registry/config.yml':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
@ -36,10 +36,10 @@ class platform::dockerdistribution::config
|
||||
|
||||
# copy the startup script to where it is supposed to be
|
||||
file {'docker_distribution_initd_script':
|
||||
path => '/etc/init.d/docker-distribution',
|
||||
ensure => 'present',
|
||||
mode => '0755',
|
||||
source => "puppet:///modules/${module_name}/docker-distribution"
|
||||
ensure => 'present',
|
||||
path => '/etc/init.d/docker-distribution',
|
||||
mode => '0755',
|
||||
source => "puppet:///modules/${module_name}/docker-distribution"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -57,13 +57,13 @@ class platform::dockerdistribution::compute
|
||||
|
||||
# currently docker registry is running insecure mode
|
||||
# when proper authentication is implemented, this would go away
|
||||
file { "/etc/docker":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
} ->
|
||||
file { "/etc/docker/daemon.json":
|
||||
file { '/etc/docker':
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
-> file { '/etc/docker/daemon.json':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
|
@ -76,10 +76,10 @@ define platform::drbd::filesystem (
|
||||
volume_group => $vg_name,
|
||||
size => "${lv_size}G",
|
||||
size_is_minsize => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
|
||||
drbd::resource { $title:
|
||||
-> drbd::resource { $title:
|
||||
disk => "/dev/${vg_name}/${lv_name}",
|
||||
port => $port,
|
||||
device => $device,
|
||||
@ -111,13 +111,13 @@ define platform::drbd::filesystem (
|
||||
# NOTE: The DRBD file system can only be resized immediately if not peering,
|
||||
# otherwise it must wait for the peer backing storage device to be
|
||||
# resized before issuing the resize locally.
|
||||
Drbd::Resource[$title] ->
|
||||
Drbd::Resource[$title]
|
||||
|
||||
exec { "drbd resize ${title}":
|
||||
-> exec { "drbd resize ${title}":
|
||||
command => "drbdadm -- --assume-peer-has-space resize ${title}",
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "resize2fs ${title}":
|
||||
-> exec { "resize2fs ${title}":
|
||||
command => "resize2fs ${device}",
|
||||
}
|
||||
}
|
||||
@ -163,12 +163,12 @@ class platform::drbd::rabbit ()
|
||||
inherits ::platform::drbd::rabbit::params {
|
||||
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => 'drbd-pgsql',
|
||||
}
|
||||
}
|
||||
@ -188,12 +188,12 @@ class platform::drbd::platform ()
|
||||
inherits ::platform::drbd::platform::params {
|
||||
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => 'drbd-rabbit',
|
||||
}
|
||||
}
|
||||
@ -213,12 +213,12 @@ class platform::drbd::cgcs ()
|
||||
inherits ::platform::drbd::cgcs::params {
|
||||
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => 'drbd-platform',
|
||||
}
|
||||
}
|
||||
@ -251,12 +251,12 @@ class platform::drbd::extension (
|
||||
}
|
||||
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => $resync_after,
|
||||
}
|
||||
}
|
||||
@ -289,17 +289,17 @@ class platform::drbd::patch_vault (
|
||||
|
||||
if $service_enabled {
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => 'drbd-extension',
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => 'drbd-extension',
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
initial_setup_override => $drbd_initial,
|
||||
automount_override => $drbd_automount,
|
||||
automount_override => $drbd_automount,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -335,17 +335,17 @@ class platform::drbd::etcd (
|
||||
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
initial_setup_override => $drbd_initial,
|
||||
automount_override => $drbd_automount,
|
||||
automount_override => $drbd_automount,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -379,17 +379,17 @@ class platform::drbd::dockerdistribution ()
|
||||
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => $drbd_manage,
|
||||
ha_primary_override => $drbd_primary,
|
||||
initial_setup_override => $drbd_initial,
|
||||
automount_override => $drbd_automount,
|
||||
automount_override => $drbd_automount,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -436,17 +436,17 @@ class platform::drbd::cephmon ()
|
||||
if ($::platform::ceph::params::service_enabled and
|
||||
$system_type == 'All-in-one' and 'duplex' in $system_mode) {
|
||||
platform::drbd::filesystem { $resource_name:
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $::platform::ceph::params::mon_lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => true,
|
||||
ha_primary_override => $drbd_primary,
|
||||
vg_name => $vg_name,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $::platform::ceph::params::mon_lv_size,
|
||||
port => $port,
|
||||
device => $device,
|
||||
mountpoint => $mountpoint,
|
||||
resync_after => undef,
|
||||
manage_override => true,
|
||||
ha_primary_override => $drbd_primary,
|
||||
initial_setup_override => $drbd_initial,
|
||||
automount_override => $drbd_automount,
|
||||
automount_override => $drbd_automount,
|
||||
} -> Class['::ceph']
|
||||
}
|
||||
}
|
||||
@ -488,9 +488,9 @@ class platform::drbd(
|
||||
include ::platform::drbd::cephmon
|
||||
|
||||
# network changes need to be applied prior to DRBD resources
|
||||
Anchor['platform::networking'] ->
|
||||
Drbd::Resource <| |> ->
|
||||
Anchor['platform::services']
|
||||
Anchor['platform::networking']
|
||||
-> Drbd::Resource <| |>
|
||||
-> Anchor['platform::services']
|
||||
}
|
||||
|
||||
|
||||
@ -503,9 +503,9 @@ class platform::drbd::bootstrap {
|
||||
|
||||
# override the defaults to initialize and activate the file systems
|
||||
class { '::platform::drbd::params':
|
||||
ha_primary => true,
|
||||
ha_primary => true,
|
||||
initial_setup => true,
|
||||
automount => true,
|
||||
automount => true,
|
||||
}
|
||||
|
||||
include ::platform::drbd::pgsql
|
||||
|
@ -1,46 +1,46 @@
|
||||
class platform::etcd::params (
|
||||
$bind_address = '0.0.0.0',
|
||||
$port = 2379,
|
||||
$node = "controller",
|
||||
$node = 'controller',
|
||||
)
|
||||
{
|
||||
include ::platform::params
|
||||
|
||||
$sw_version = $::platform::params::software_version
|
||||
$etcd_basedir = "/opt/etcd"
|
||||
$etcd_basedir = '/opt/etcd'
|
||||
$etcd_versioned_dir = "${etcd_basedir}/${sw_version}"
|
||||
}
|
||||
|
||||
# Modify the systemd service file for etcd and
|
||||
# Modify the systemd service file for etcd and
|
||||
# create an init.d script for SM to manage the service
|
||||
class platform::etcd::setup {
|
||||
|
||||
file {'etcd_override_dir':
|
||||
path => '/etc/systemd/system/etcd.service.d',
|
||||
ensure => directory,
|
||||
path => '/etc/systemd/system/etcd.service.d',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file {'etcd_override':
|
||||
path => '/etc/systemd/system/etcd.service.d/etcd-override.conf',
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/etcd-override.conf"
|
||||
} ->
|
||||
file {'etcd_initd_script':
|
||||
path => '/etc/init.d/etcd',
|
||||
ensure => 'present',
|
||||
mode => '0755',
|
||||
source => "puppet:///modules/${module_name}/etcd"
|
||||
} ->
|
||||
exec { 'systemd-reload-daemon':
|
||||
}
|
||||
-> file {'etcd_override':
|
||||
ensure => present,
|
||||
path => '/etc/systemd/system/etcd.service.d/etcd-override.conf',
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/etcd-override.conf"
|
||||
}
|
||||
-> file {'etcd_initd_script':
|
||||
ensure => 'present',
|
||||
path => '/etc/init.d/etcd',
|
||||
mode => '0755',
|
||||
source => "puppet:///modules/${module_name}/etcd"
|
||||
}
|
||||
-> exec { 'systemd-reload-daemon':
|
||||
command => '/usr/bin/systemctl daemon-reload',
|
||||
} ->
|
||||
Service['etcd']
|
||||
}
|
||||
-> Service['etcd']
|
||||
}
|
||||
|
||||
class platform::etcd::init
|
||||
inherits ::platform::etcd::params {
|
||||
|
||||
|
||||
$client_url = "http://${bind_address}:${port}"
|
||||
|
||||
if str2bool($::is_initial_config_primary) {
|
||||
@ -51,16 +51,16 @@ class platform::etcd::init
|
||||
}
|
||||
|
||||
class { 'etcd':
|
||||
ensure => 'present',
|
||||
etcd_name => $node,
|
||||
service_enable => false,
|
||||
service_ensure => $service_ensure,
|
||||
cluster_enabled => false,
|
||||
listen_client_urls => $client_url,
|
||||
ensure => 'present',
|
||||
etcd_name => $node,
|
||||
service_enable => false,
|
||||
service_ensure => $service_ensure,
|
||||
cluster_enabled => false,
|
||||
listen_client_urls => $client_url,
|
||||
advertise_client_urls => $client_url,
|
||||
data_dir => "${etcd_versioned_dir}/${node}.etcd",
|
||||
proxy => "off",
|
||||
}
|
||||
data_dir => "${etcd_versioned_dir}/${node}.etcd",
|
||||
proxy => 'off',
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -70,30 +70,30 @@ class platform::etcd
|
||||
include ::platform::kubernetes::params
|
||||
|
||||
Class['::platform::drbd::etcd'] -> Class[$name]
|
||||
|
||||
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
include ::platform::etcd::datadir
|
||||
include ::platform::etcd::setup
|
||||
include ::platform::etcd::init
|
||||
|
||||
Class['::platform::etcd::datadir'] ->
|
||||
Class['::platform::etcd::setup'] ->
|
||||
Class['::platform::etcd::init']
|
||||
Class['::platform::etcd::datadir']
|
||||
-> Class['::platform::etcd::setup']
|
||||
-> Class['::platform::etcd::init']
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class platform::etcd::datadir
|
||||
class platform::etcd::datadir
|
||||
inherits ::platform::etcd::params {
|
||||
|
||||
Class['::platform::drbd::etcd'] -> Class[$name]
|
||||
|
||||
if $::platform::params::init_database {
|
||||
file { "${etcd_versioned_dir}":
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
file { $etcd_versioned_dir:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3,17 +3,17 @@ class platform::exports {
|
||||
include ::platform::params
|
||||
|
||||
file { '/etc/exports':
|
||||
ensure => present,
|
||||
mode => '0600',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
} ->
|
||||
file_line { '/etc/exports /etc/platform':
|
||||
path => '/etc/exports',
|
||||
line => "/etc/platform\t\t ${::platform::params::mate_ipaddress}(no_root_squash,no_subtree_check,rw)",
|
||||
match => '^/etc/platform\s',
|
||||
} ->
|
||||
exec { 'Re-export filesystems':
|
||||
ensure => present,
|
||||
mode => '0600',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
-> file_line { '/etc/exports /etc/platform':
|
||||
path => '/etc/exports',
|
||||
line => "/etc/platform\t\t ${::platform::params::mate_ipaddress}(no_root_squash,no_subtree_check,rw)",
|
||||
match => '^/etc/platform\s',
|
||||
}
|
||||
-> exec { 'Re-export filesystems':
|
||||
command => 'exportfs -r',
|
||||
}
|
||||
}
|
||||
|
@ -25,46 +25,46 @@ define platform::filesystem (
|
||||
# use all available space
|
||||
$size = undef
|
||||
$fs_size_is_minsize = false
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
# create logical volume
|
||||
logical_volume { $lv_name:
|
||||
ensure => present,
|
||||
volume_group => $vg_name,
|
||||
size => $size,
|
||||
size_is_minsize => $fs_size_is_minsize,
|
||||
} ->
|
||||
}
|
||||
|
||||
# create filesystem
|
||||
filesystem { $device:
|
||||
-> filesystem { $device:
|
||||
ensure => present,
|
||||
fs_type => $fs_type,
|
||||
options => $fs_options,
|
||||
} ->
|
||||
}
|
||||
|
||||
file { $mountpoint:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => $mode,
|
||||
} ->
|
||||
-> file { $mountpoint:
|
||||
ensure => 'directory',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => $mode,
|
||||
}
|
||||
|
||||
mount { $name:
|
||||
name => "$mountpoint",
|
||||
atboot => 'yes',
|
||||
ensure => 'mounted',
|
||||
device => "${device}",
|
||||
-> mount { $name:
|
||||
ensure => 'mounted',
|
||||
atboot => 'yes',
|
||||
name => $mountpoint,
|
||||
device => $device,
|
||||
options => 'defaults',
|
||||
fstype => $fs_type,
|
||||
} ->
|
||||
fstype => $fs_type,
|
||||
}
|
||||
|
||||
# The above mount resource doesn't actually remount devices that were already present in /etc/fstab, but were
|
||||
# unmounted during manifest application. To get around this, we attempt to mount them again, if they are not
|
||||
# already mounted.
|
||||
exec { "mount $device":
|
||||
unless => "mount | awk '{print \$3}' | grep -Fxq $mountpoint",
|
||||
command => "mount $mountpoint",
|
||||
path => "/usr/bin"
|
||||
-> exec { "mount ${device}":
|
||||
unless => "mount | awk '{print \$3}' | grep -Fxq ${mountpoint}",
|
||||
command => "mount ${mountpoint}",
|
||||
path => '/usr/bin'
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,24 +80,24 @@ define platform::filesystem::resize(
|
||||
$device = "/dev/${vg_name}/${lv_name}"
|
||||
|
||||
# TODO (rchurch): Fix this... Allowing return code 5 so that lvextends using the same size doesn't blow up
|
||||
exec { "lvextend $device":
|
||||
exec { "lvextend ${device}":
|
||||
command => "lvextend -L${lv_size}G ${device}",
|
||||
returns => [0, 5]
|
||||
} ->
|
||||
}
|
||||
# After a partition extend, make sure that there is no leftover drbd
|
||||
# type metadata from a previous install. Drbd writes its meta at the
|
||||
# very end of a block device causing confusion for blkid.
|
||||
exec { "wipe end of device $device":
|
||||
-> exec { "wipe end of device ${device}":
|
||||
command => "dd if=/dev/zero of=${device} bs=512 seek=$(($(blockdev --getsz ${device}) - 34)) count=34",
|
||||
onlyif => "blkid ${device} | grep TYPE=\\\"drbd\\\"",
|
||||
} ->
|
||||
exec { "resize2fs $devmapper":
|
||||
command => "resize2fs $devmapper",
|
||||
onlyif => "blkid -s TYPE -o value $devmapper | grep -v xfs",
|
||||
} ->
|
||||
exec { "xfs_growfs $devmapper":
|
||||
command => "xfs_growfs $devmapper",
|
||||
onlyif => "blkid -s TYPE -o value $devmapper | grep xfs",
|
||||
onlyif => "blkid ${device} | grep TYPE=\\\"drbd\\\"",
|
||||
}
|
||||
-> exec { "resize2fs ${devmapper}":
|
||||
command => "resize2fs ${devmapper}",
|
||||
onlyif => "blkid -s TYPE -o value ${devmapper} | grep -v xfs",
|
||||
}
|
||||
-> exec { "xfs_growfs ${devmapper}":
|
||||
command => "xfs_growfs ${devmapper}",
|
||||
onlyif => "blkid -s TYPE -o value ${devmapper} | grep xfs",
|
||||
}
|
||||
}
|
||||
|
||||
@ -115,10 +115,10 @@ class platform::filesystem::backup
|
||||
inherits ::platform::filesystem::backup::params {
|
||||
|
||||
platform::filesystem { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
mountpoint => $mountpoint,
|
||||
fs_type => $fs_type,
|
||||
fs_type => $fs_type,
|
||||
fs_options => $fs_options
|
||||
}
|
||||
}
|
||||
@ -136,10 +136,10 @@ class platform::filesystem::scratch
|
||||
inherits ::platform::filesystem::scratch::params {
|
||||
|
||||
platform::filesystem { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
mountpoint => $mountpoint,
|
||||
fs_type => $fs_type,
|
||||
fs_type => $fs_type,
|
||||
fs_options => $fs_options
|
||||
}
|
||||
}
|
||||
@ -157,10 +157,10 @@ class platform::filesystem::gnocchi
|
||||
inherits ::platform::filesystem::gnocchi::params {
|
||||
|
||||
platform::filesystem { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
mountpoint => $mountpoint,
|
||||
fs_type => $fs_type,
|
||||
fs_type => $fs_type,
|
||||
fs_options => $fs_options
|
||||
}
|
||||
}
|
||||
@ -182,13 +182,13 @@ class platform::filesystem::docker
|
||||
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
platform::filesystem { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
mountpoint => $mountpoint,
|
||||
fs_type => $fs_type,
|
||||
fs_type => $fs_type,
|
||||
fs_options => $fs_options,
|
||||
fs_use_all => $fs_use_all,
|
||||
mode => '0711',
|
||||
mode => '0711',
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -224,8 +224,8 @@ class platform::filesystem::storage {
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
class {'platform::filesystem::docker::params' :
|
||||
lv_size => 10
|
||||
} ->
|
||||
class {'platform::filesystem::docker' :
|
||||
}
|
||||
-> class {'platform::filesystem::docker' :
|
||||
}
|
||||
|
||||
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
|
||||
@ -240,8 +240,8 @@ class platform::filesystem::compute {
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
class {'platform::filesystem::docker::params' :
|
||||
fs_use_all => true
|
||||
} ->
|
||||
class {'platform::filesystem::docker' :
|
||||
}
|
||||
-> class {'platform::filesystem::docker' :
|
||||
}
|
||||
|
||||
Class['::platform::lvm::vg::cgts_vg'] -> Class['::platform::filesystem::docker']
|
||||
@ -265,8 +265,8 @@ class platform::filesystem::backup::runtime {
|
||||
$devmapper = $::platform::filesystem::backup::params::devmapper
|
||||
|
||||
platform::filesystem::resize { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
devmapper => $devmapper,
|
||||
}
|
||||
}
|
||||
@ -280,8 +280,8 @@ class platform::filesystem::scratch::runtime {
|
||||
$devmapper = $::platform::filesystem::scratch::params::devmapper
|
||||
|
||||
platform::filesystem::resize { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
devmapper => $devmapper,
|
||||
}
|
||||
}
|
||||
@ -295,8 +295,8 @@ class platform::filesystem::gnocchi::runtime {
|
||||
$devmapper = $::platform::filesystem::gnocchi::params::devmapper
|
||||
|
||||
platform::filesystem::resize { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
devmapper => $devmapper,
|
||||
}
|
||||
}
|
||||
@ -310,8 +310,8 @@ class platform::filesystem::docker::runtime {
|
||||
$devmapper = $::platform::filesystem::docker::params::devmapper
|
||||
|
||||
platform::filesystem::resize { $lv_name:
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
lv_name => $lv_name,
|
||||
lv_size => $lv_size,
|
||||
devmapper => $devmapper,
|
||||
}
|
||||
}
|
||||
|
@ -39,38 +39,38 @@ define platform::firewall::rule (
|
||||
# NAT rule
|
||||
if $jump == 'SNAT' or $jump == 'MASQUERADE' {
|
||||
firewall { "500 ${service_name} ${heading} ${title}":
|
||||
chain => $chain,
|
||||
table => $table,
|
||||
proto => $proto,
|
||||
outiface => $outiface,
|
||||
jump => $jump,
|
||||
tosource => $tosource,
|
||||
ensure => $ensure,
|
||||
table => $table,
|
||||
proto => $proto,
|
||||
outiface => $outiface,
|
||||
jump => $jump,
|
||||
tosource => $tosource,
|
||||
destination => $destination,
|
||||
source => $source,
|
||||
provider => $provider,
|
||||
ensure => $ensure,
|
||||
source => $source,
|
||||
provider => $provider,
|
||||
chain => $chain,
|
||||
}
|
||||
}
|
||||
else {
|
||||
if $ports == undef {
|
||||
firewall { "500 ${service_name} ${heading} ${title}":
|
||||
chain => $chain,
|
||||
ensure => $ensure,
|
||||
proto => $proto,
|
||||
action => 'accept',
|
||||
source => $source,
|
||||
provider => $provider,
|
||||
ensure => $ensure,
|
||||
chain => $chain,
|
||||
}
|
||||
}
|
||||
else {
|
||||
firewall { "500 ${service_name} ${heading} ${title}":
|
||||
chain => $chain,
|
||||
ensure => $ensure,
|
||||
proto => $proto,
|
||||
dport => $ports,
|
||||
action => 'accept',
|
||||
source => $source,
|
||||
provider => $provider,
|
||||
ensure => $ensure,
|
||||
chain => $chain,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -85,74 +85,74 @@ define platform::firewall::common (
|
||||
$provider = $version ? {'ipv4' => 'iptables', 'ipv6' => 'ip6tables'}
|
||||
|
||||
firewall { "000 platform accept non-oam ${version}":
|
||||
proto => 'all',
|
||||
iniface => "! ${$interface}",
|
||||
action => 'accept',
|
||||
proto => 'all',
|
||||
iniface => "! ${$interface}",
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "001 platform accept related ${version}":
|
||||
proto => 'all',
|
||||
state => ['RELATED', 'ESTABLISHED'],
|
||||
action => 'accept',
|
||||
proto => 'all',
|
||||
state => ['RELATED', 'ESTABLISHED'],
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
# explicitly drop some types of traffic without logging
|
||||
firewall { "800 platform drop tcf-agent udp ${version}":
|
||||
proto => 'udp',
|
||||
dport => 1534,
|
||||
action => 'drop',
|
||||
proto => 'udp',
|
||||
dport => 1534,
|
||||
action => 'drop',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "800 platform drop tcf-agent tcp ${version}":
|
||||
proto => 'tcp',
|
||||
dport => 1534,
|
||||
action => 'drop',
|
||||
proto => 'tcp',
|
||||
dport => 1534,
|
||||
action => 'drop',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "800 platform drop all avahi-daemon ${version}":
|
||||
proto => 'udp',
|
||||
dport => 5353,
|
||||
action => 'drop',
|
||||
proto => 'udp',
|
||||
dport => 5353,
|
||||
action => 'drop',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "999 platform log dropped ${version}":
|
||||
proto => 'all',
|
||||
limit => '2/min',
|
||||
jump => 'LOG',
|
||||
proto => 'all',
|
||||
limit => '2/min',
|
||||
jump => 'LOG',
|
||||
log_prefix => "${provider}-in-dropped: ",
|
||||
log_level => 4,
|
||||
provider => $provider,
|
||||
log_level => 4,
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "000 platform forward non-oam ${version}":
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
iniface => "! ${interface}",
|
||||
action => 'accept',
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
iniface => "! ${interface}",
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "001 platform forward related ${version}":
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
state => ['RELATED', 'ESTABLISHED'],
|
||||
action => 'accept',
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
state => ['RELATED', 'ESTABLISHED'],
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "999 platform log dropped ${version} forwarded":
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
limit => '2/min',
|
||||
jump => 'LOG',
|
||||
chain => 'FORWARD',
|
||||
proto => 'all',
|
||||
limit => '2/min',
|
||||
jump => 'LOG',
|
||||
log_prefix => "${provider}-fwd-dropped: ",
|
||||
log_level => 4,
|
||||
provider => $provider,
|
||||
log_level => 4,
|
||||
provider => $provider,
|
||||
}
|
||||
}
|
||||
|
||||
@ -171,58 +171,58 @@ define platform::firewall::services (
|
||||
|
||||
# Provider specific service rules
|
||||
firewall { "010 platform accept sm ${version}":
|
||||
proto => 'udp',
|
||||
dport => [2222, 2223],
|
||||
action => 'accept',
|
||||
proto => 'udp',
|
||||
dport => [2222, 2223],
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "011 platform accept ssh ${version}":
|
||||
proto => 'tcp',
|
||||
dport => 22,
|
||||
action => 'accept',
|
||||
proto => 'tcp',
|
||||
dport => 22,
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "200 platform accept icmp ${version}":
|
||||
proto => $proto_icmp,
|
||||
action => 'accept',
|
||||
proto => $proto_icmp,
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "201 platform accept ntp ${version}":
|
||||
proto => 'udp',
|
||||
dport => 123,
|
||||
action => 'accept',
|
||||
proto => 'udp',
|
||||
dport => 123,
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "202 platform accept snmp ${version}":
|
||||
proto => 'udp',
|
||||
dport => 161,
|
||||
action => 'accept',
|
||||
proto => 'udp',
|
||||
dport => 161,
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "202 platform accept snmp trap ${version}":
|
||||
proto => 'udp',
|
||||
dport => 162,
|
||||
action => 'accept',
|
||||
proto => 'udp',
|
||||
dport => 162,
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
firewall { "203 platform accept ptp ${version}":
|
||||
proto => 'udp',
|
||||
dport => [319, 320],
|
||||
action => 'accept',
|
||||
proto => 'udp',
|
||||
dport => [319, 320],
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
|
||||
# allow IGMP Query traffic if IGMP Snooping is
|
||||
# enabled on the TOR switch
|
||||
firewall { "204 platform accept igmp ${version}":
|
||||
proto => 'igmp',
|
||||
action => 'accept',
|
||||
proto => 'igmp',
|
||||
action => 'accept',
|
||||
provider => $provider,
|
||||
}
|
||||
}
|
||||
@ -236,21 +236,21 @@ define platform::firewall::hooks (
$input_pre_chain = 'INPUT-custom-pre'
$input_post_chain = 'INPUT-custom-post'

firewallchain { "$input_pre_chain:filter:$protocol":
firewallchain { "${input_pre_chain}:filter:${protocol}":
ensure => present,
}->
firewallchain { "$input_post_chain:filter:$protocol":
}
-> firewallchain { "${input_post_chain}:filter:${protocol}":
ensure => present,
}->
firewall { "100 $input_pre_chain $version":
}
-> firewall { "100 ${input_pre_chain} ${version}":
proto => 'all',
chain => 'INPUT',
jump => "$input_pre_chain"
}->
firewall { "900 $input_post_chain $version":
jump => $input_pre_chain
}
-> firewall { "900 ${input_post_chain} ${version}":
proto => 'all',
chain => 'INPUT',
jump => "$input_post_chain"
jump => $input_post_chain
}
}

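The hooks hunk above is mostly the arrow_on_right_operand_line fix: the chaining arrow moves from the tail of one resource to the head of the next. A minimal before/after sketch with hypothetical exec titles, not part of the change itself:

# Before: trailing chaining arrow (flagged by puppet-lint)
exec { 'first step':
  command => '/bin/true',
} ->
exec { 'second step':
  command => '/bin/true',
}

# After: arrow leads the right operand
exec { 'first step':
  command => '/bin/true',
}
-> exec { 'second step':
  command => '/bin/true',
}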
@ -266,16 +266,16 @@ class platform::firewall::custom (

platform::firewall::hooks { '::platform:firewall:hooks':
version => $version,
} ->
}

exec { 'Flush firewall custom pre rules':
command => "iptables --flush INPUT-custom-pre",
} ->
exec { 'Flush firewall custom post rules':
command => "iptables --flush INPUT-custom-post",
} ->
exec { 'Apply firewall custom rules':
command => "$restore --noflush $rules_file",
-> exec { 'Flush firewall custom pre rules':
command => 'iptables --flush INPUT-custom-pre',
}
-> exec { 'Flush firewall custom post rules':
command => 'iptables --flush INPUT-custom-post',
}
-> exec { 'Apply firewall custom rules':
command => "${restore} --noflush ${rules_file}",
}
}

@ -295,12 +295,12 @@ class platform::firewall::oam (
|
||||
|
||||
platform::firewall::common { 'platform:firewall:ipv4':
|
||||
interface => $interface_name,
|
||||
version => 'ipv4',
|
||||
version => 'ipv4',
|
||||
}
|
||||
|
||||
platform::firewall::common { 'platform:firewall:ipv6':
|
||||
interface => $interface_name,
|
||||
version => 'ipv6',
|
||||
version => 'ipv6',
|
||||
}
|
||||
|
||||
platform::firewall::services { 'platform:firewall:services':
|
||||
@ -312,34 +312,34 @@ class platform::firewall::oam (
|
||||
ensure => present,
|
||||
policy => drop,
|
||||
before => undef,
|
||||
purge => false,
|
||||
purge => false,
|
||||
}
|
||||
|
||||
firewallchain { 'INPUT:filter:IPv6':
|
||||
ensure => present,
|
||||
policy => drop,
|
||||
before => undef,
|
||||
purge => false,
|
||||
purge => false,
|
||||
}
|
||||
|
||||
firewallchain { 'FORWARD:filter:IPv4':
|
||||
ensure => present,
|
||||
policy => drop,
|
||||
before => undef,
|
||||
purge => false,
|
||||
purge => false,
|
||||
}
|
||||
|
||||
firewallchain { 'FORWARD:filter:IPv6':
|
||||
ensure => present,
|
||||
policy => drop,
|
||||
before => undef,
|
||||
purge => false,
|
||||
purge => false,
|
||||
}
|
||||
|
||||
if $rules_file {
|
||||
|
||||
class { '::platform::firewall::custom':
|
||||
version => $version,
|
||||
version => $version,
|
||||
rules_file => $rules_file,
|
||||
}
|
||||
}
@ -15,9 +15,9 @@ class platform::fm::config

$trap_dest_str = join($trap_destinations,',')
class { '::fm':
region_name => $region_name,
system_name => $system_name,
trap_destinations => $trap_dest_str,
region_name => $region_name,
system_name => $system_name,
trap_destinations => $trap_dest_str,
sysinv_catalog_info => $sysinv_catalog_info,
}
}
@ -50,17 +50,17 @@ class platform::fm::haproxy
|
||||
include ::platform::haproxy::params
|
||||
|
||||
platform::haproxy::proxy { 'fm-api-internal':
|
||||
server_name => 's-fm-api-internal',
|
||||
public_ip_address => $::platform::haproxy::params::private_ip_address,
|
||||
public_port => $api_port,
|
||||
server_name => 's-fm-api-internal',
|
||||
public_ip_address => $::platform::haproxy::params::private_ip_address,
|
||||
public_port => $api_port,
|
||||
private_ip_address => $api_host,
|
||||
private_port => $api_port,
|
||||
public_api => false,
|
||||
private_port => $api_port,
|
||||
public_api => false,
|
||||
}
|
||||
|
||||
platform::haproxy::proxy { 'fm-api-public':
|
||||
server_name => 's-fm-api-public',
|
||||
public_port => $api_port,
|
||||
server_name => 's-fm-api-public',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -79,9 +79,9 @@ class platform::fm::api
|
||||
include ::platform::params
|
||||
|
||||
class { '::fm::api':
|
||||
host => $api_host,
|
||||
workers => $::platform::params::eng_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
host => $api_host,
|
||||
workers => $::platform::params::eng_workers,
|
||||
sync_db => $::platform::params::init_database,
|
||||
}
|
||||
|
||||
include ::platform::fm::firewall
|
||||
@ -94,8 +94,8 @@ class platform::fm::runtime {
|
||||
require ::platform::fm::config
|
||||
|
||||
exec { 'notify-fm-mgr':
|
||||
command => "/usr/bin/pkill -HUP fmManager",
|
||||
onlyif => "pgrep fmManager"
|
||||
command => '/usr/bin/pkill -HUP fmManager',
|
||||
onlyif => 'pgrep fmManager'
|
||||
}
|
||||
}
@ -4,16 +4,16 @@ class platform::fstab {
if $::personality != 'controller' {
exec { 'Unmount NFS filesystems':
command => 'umount -a -t nfs ; sleep 5 ;',
} ->
mount { '/opt/platform':
device => 'controller-platform-nfs:/opt/platform',
fstype => 'nfs',
}
-> mount { '/opt/platform':
ensure => 'present',
fstype => 'nfs',
device => 'controller-platform-nfs:/opt/platform',
options => "${::platform::params::nfs_mount_options},_netdev",
atboot => 'yes',
remounts => true,
} ->
exec { 'Remount NFS filesystems':
}
-> exec { 'Remount NFS filesystems':
command => 'umount -a -t nfs ; sleep 1 ; mount -a -t nfs',
}
}

@ -1,24 +1,24 @@
class platform::grub
{
include ::platform::params
$managed_security_params = "nopti nospectre_v2"
$managed_security_params = 'nopti nospectre_v2'

# Run grubby to update params
# First, remove all the parameters we manage, then we add back in the ones
# we want to use
exec { 'removing managed security kernel params from command line':
command => "grubby --update-kernel=`grubby --default-kernel` --remove-args=\"$managed_security_params\"",
} ->
exec { 'removing managed security kernel params from command line for EFI':
command => "grubby --efi --update-kernel=`grubby --efi --default-kernel` --remove-args=\"$managed_security_params\"",
} ->
exec { 'adding requested security kernel params to command line ':
command => "grubby --update-kernel=`grubby --default-kernel` --remove-args=\"${managed_security_params}\"",
}
-> exec { 'removing managed security kernel params from command line for EFI':
command => "grubby --efi --update-kernel=`grubby --efi --default-kernel` --remove-args=\"${managed_security_params}\"",
}
-> exec { 'adding requested security kernel params to command line ':
command => "grubby --update-kernel=`grubby --default-kernel` --args=\"${::platform::params::security_feature}\"",
onlyif => "test -n \"${::platform::params::security_feature}\""
} ->
exec { 'adding requested security kernel params to command line for EFI':
onlyif => "test -n \"${::platform::params::security_feature}\""
}
-> exec { 'adding requested security kernel params to command line for EFI':
command => "grubby --efi --update-kernel=`grubby --efi --default-kernel` --args=\"${::platform::params::security_feature}\"",
onlyif => "test -n \"${::platform::params::security_feature}\""
onlyif => "test -n \"${::platform::params::security_feature}\""
}
}

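The grub hunk combines variables_not_enclosed and double_quoted_strings: interpolated variables gain braces, and strings without interpolation drop to single quotes. A small sketch of those two rules; the variable name and grubby arguments here are illustrative only, not from this change:

# Hypothetical illustration of enclosed interpolation plus single quotes elsewhere
$kernel_args = 'nopti nospectre_v2'

exec { 'apply kernel args':
  # "$kernel_args" would be flagged; "${kernel_args}" is the enclosed form
  command => "grubby --update-kernel=ALL --args=\"${kernel_args}\"",
  path    => '/usr/sbin:/usr/bin',
}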
@ -22,12 +22,12 @@ define platform::haproxy::proxy (
|
||||
$public_api = true,
|
||||
) {
|
||||
include ::platform::haproxy::params
|
||||
|
||||
|
||||
if $enable_https != undef {
|
||||
$https_enabled = $enable_https
|
||||
$https_enabled = $enable_https
|
||||
} else {
|
||||
$https_enabled = $::platform::haproxy::params::enable_https
|
||||
}
|
||||
}
|
||||
|
||||
if $x_forwarded_proto {
|
||||
if $https_enabled and $public_api {
|
||||
@ -67,15 +67,15 @@ define platform::haproxy::proxy (
|
||||
|
||||
haproxy::frontend { $name:
|
||||
collect_exported => false,
|
||||
name => "${name}",
|
||||
bind => {
|
||||
name => $name,
|
||||
bind => {
|
||||
"${public_ip}:${public_port}" => $ssl_option,
|
||||
},
|
||||
options => {
|
||||
options => {
|
||||
'default_backend' => "${name}-internal",
|
||||
'reqadd' => $proto,
|
||||
'timeout' => $real_client_timeout,
|
||||
'rspadd' => $hsts_option,
|
||||
'reqadd' => $proto,
|
||||
'timeout' => $real_client_timeout,
|
||||
'rspadd' => $hsts_option,
|
||||
},
|
||||
}
|
||||
|
||||
@ -87,9 +87,9 @@ define platform::haproxy::proxy (
|
||||
|
||||
haproxy::backend { $name:
|
||||
collect_exported => false,
|
||||
name => "${name}-internal",
|
||||
options => {
|
||||
'server' => "${server_name} ${private_ip}:${private_port}",
|
||||
name => "${name}-internal",
|
||||
options => {
|
||||
'server' => "${server_name} ${private_ip}:${private_port}",
|
||||
'timeout' => $timeout_option,
|
||||
}
|
||||
}
|
||||
@ -106,9 +106,9 @@ class platform::haproxy::server {
|
||||
$tpm_object = $::platform::haproxy::params::tpm_object
|
||||
$tpm_engine = $::platform::haproxy::params::tpm_engine
|
||||
if $tpm_object != undef {
|
||||
$tpm_options = {'tpm-object' => $tpm_object, 'tpm-engine' => $tpm_engine}
|
||||
$tpm_options = {'tpm-object' => $tpm_object, 'tpm-engine' => $tpm_engine}
|
||||
$global_options = merge($::platform::haproxy::params::global_options, $tpm_options)
|
||||
} else {
|
||||
} else {
|
||||
$global_options = $::platform::haproxy::params::global_options
|
||||
}
|
||||
|
||||
|
@ -6,69 +6,69 @@ class platform::helm
|
||||
if $::platform::kubernetes::params::enabled {
|
||||
if str2bool($::is_initial_config_primary) {
|
||||
|
||||
Class['::platform::kubernetes::master'] ->
|
||||
Class['::platform::kubernetes::master']
|
||||
|
||||
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
|
||||
exec { "load tiller docker image":
|
||||
command => "docker image pull gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b",
|
||||
-> exec { 'load tiller docker image':
|
||||
command => 'docker image pull gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# TODO(tngo): If and when tiller image is upversioned, please ensure armada compatibility as part of the test
|
||||
exec { "load armada docker image":
|
||||
command => "docker image pull quay.io/airshipit/armada:f807c3a1ec727c883c772ffc618f084d960ed5c9",
|
||||
-> exec { 'load armada docker image':
|
||||
command => 'docker image pull quay.io/airshipit/armada:f807c3a1ec727c883c772ffc618f084d960ed5c9',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "create service account for tiller":
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller",
|
||||
-> exec { 'create service account for tiller':
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create serviceaccount --namespace kube-system tiller',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "create cluster role binding for tiller service account":
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller",
|
||||
-> exec { 'create cluster role binding for tiller service account':
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# TODO(jrichard): Upversion tiller image to v2.11.1 once released.
|
||||
exec { 'initialize helm':
|
||||
environment => [ "KUBECONFIG=/etc/kubernetes/admin.conf", "HOME=/home/wrsroot" ],
|
||||
command => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b',
|
||||
logoutput => true,
|
||||
user => 'wrsroot',
|
||||
group => 'wrs',
|
||||
require => User['wrsroot']
|
||||
} ->
|
||||
-> exec { 'initialize helm':
|
||||
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf', 'HOME=/home/wrsroot' ],
|
||||
command => 'helm init --skip-refresh --service-account tiller --node-selectors "node-role.kubernetes.io/master"="" --tiller-image=gcr.io/kubernetes-helm/tiller@sha256:022ce9d4a99603be1d30a4ca96a7fa57a45e6f2ef11172f4333c18aaae407f5b',
|
||||
logoutput => true,
|
||||
user => 'wrsroot',
|
||||
group => 'wrs',
|
||||
require => User['wrsroot']
|
||||
}
|
||||
|
||||
file {"/www/pages/helm_charts":
|
||||
path => "/www/pages/helm_charts",
|
||||
ensure => directory,
|
||||
owner => "www",
|
||||
-> file {'/www/pages/helm_charts':
|
||||
ensure => directory,
|
||||
path => '/www/pages/helm_charts',
|
||||
owner => 'www',
|
||||
require => User['www']
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "restart lighttpd for helm":
|
||||
require => File["/etc/lighttpd/lighttpd.conf"],
|
||||
command => "systemctl restart lighttpd.service",
|
||||
-> exec { 'restart lighttpd for helm':
|
||||
require => File['/etc/lighttpd/lighttpd.conf'],
|
||||
command => 'systemctl restart lighttpd.service',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "generate helm repo index":
|
||||
command => "helm repo index /www/pages/helm_charts",
|
||||
-> exec { 'generate helm repo index':
|
||||
command => 'helm repo index /www/pages/helm_charts',
|
||||
logoutput => true,
|
||||
user => 'www',
|
||||
group => 'www',
|
||||
require => User['www']
|
||||
} ->
|
||||
user => 'www',
|
||||
group => 'www',
|
||||
require => User['www']
|
||||
}
|
||||
|
||||
exec { "add local starlingx helm repo":
|
||||
before => Exec['Stop lighttpd'],
|
||||
environment => [ "KUBECONFIG=/etc/kubernetes/admin.conf" , "HOME=/home/wrsroot"],
|
||||
command => "helm repo add starlingx http://127.0.0.1/helm_charts",
|
||||
logoutput => true,
|
||||
user => 'wrsroot',
|
||||
group => 'wrs',
|
||||
require => User['wrsroot']
|
||||
-> exec { 'add local starlingx helm repo':
|
||||
before => Exec['Stop lighttpd'],
|
||||
environment => [ 'KUBECONFIG=/etc/kubernetes/admin.conf' , 'HOME=/home/wrsroot'],
|
||||
command => 'helm repo add starlingx http://127.0.0.1/helm_charts',
|
||||
logoutput => true,
|
||||
user => 'wrsroot',
|
||||
group => 'wrs',
|
||||
require => User['wrsroot']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,60 +1,60 @@
|
||||
class platform::influxdb::params (
|
||||
$bind_address = undef,
|
||||
$database = undef,
|
||||
$typesdb = undef,
|
||||
$batch_size = undef,
|
||||
$batch_pending = undef,
|
||||
$batch_timeout = undef,
|
||||
$read_buffer = undef,
|
||||
$bind_address = undef,
|
||||
$database = undef,
|
||||
$typesdb = undef,
|
||||
$batch_size = undef,
|
||||
$batch_pending = undef,
|
||||
$batch_timeout = undef,
|
||||
$read_buffer = undef,
|
||||
) {}
|
||||
|
||||
class platform::influxdb
|
||||
inherits ::platform::influxdb::params {
|
||||
|
||||
user { 'influxdb': ensure => present, } ->
|
||||
group { 'influxdb': ensure => present, } ->
|
||||
user { 'influxdb': ensure => present, }
|
||||
-> group { 'influxdb': ensure => present, }
|
||||
|
||||
# make a pid dir for influxdb username and group
|
||||
file { "/var/run/influxdb":
|
||||
-> file { '/var/run/influxdb':
|
||||
ensure => 'directory',
|
||||
owner => 'influxdb',
|
||||
group => 'influxdb',
|
||||
mode => '0755',
|
||||
} ->
|
||||
}
|
||||
|
||||
# make a log dir for influxdb username and group
|
||||
file { "/var/log/influxdb":
|
||||
-> file { '/var/log/influxdb':
|
||||
ensure => 'directory',
|
||||
owner => 'influxdb',
|
||||
group => 'influxdb',
|
||||
mode => '0755',
|
||||
} ->
|
||||
}
|
||||
|
||||
# make a lib dir for influxdb username and group
|
||||
file { "/var/lib/influxdb":
|
||||
-> file { '/var/lib/influxdb':
|
||||
ensure => 'directory',
|
||||
owner => 'influxdb',
|
||||
group => 'influxdb',
|
||||
mode => '0755',
|
||||
} -> # now configure influxdb
|
||||
} # now configure influxdb
|
||||
|
||||
file { "/etc/influxdb/influxdb.conf":
|
||||
ensure => 'present',
|
||||
-> file { '/etc/influxdb/influxdb.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/influxdb.conf.erb'),
|
||||
} -> # now make sure that influxdb is started
|
||||
} # now make sure that influxdb is started
|
||||
|
||||
# ensure that influxdb is running
|
||||
service { 'influxdb':
|
||||
ensure => running,
|
||||
enable => true,
|
||||
provider => 'systemd'
|
||||
} -> # now ask pmon to monitor the process
|
||||
-> service { 'influxdb':
|
||||
ensure => running,
|
||||
enable => true,
|
||||
provider => 'systemd'
|
||||
} # now ask pmon to monitor the process
|
||||
|
||||
# ensure pmon soft link for process monitoring
|
||||
file { "/etc/pmon.d/influxdb.conf":
|
||||
-> file { '/etc/pmon.d/influxdb.conf':
|
||||
ensure => 'link',
|
||||
target => "/etc/influxdb/influxdb.conf.pmon",
|
||||
target => '/etc/influxdb/influxdb.conf.pmon',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
@ -68,17 +68,17 @@ class platform::influxdb::runtime {
|
||||
|
||||
|
||||
class platform::influxdb::logrotate::params (
|
||||
$log_file_name = undef,
|
||||
$log_file_size = undef,
|
||||
$log_file_rotate = undef,
|
||||
$log_file_name = undef,
|
||||
$log_file_size = undef,
|
||||
$log_file_rotate = undef,
|
||||
) {}
|
||||
|
||||
class platform::influxdb::logrotate
|
||||
inherits ::platform::influxdb::logrotate::params {
|
||||
|
||||
file { "/etc/logrotate.d/influxdb":
|
||||
ensure => 'present',
|
||||
file { '/etc/logrotate.d/influxdb':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/logrotate.erb'),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -25,46 +25,46 @@ class platform::kubernetes::kubeadm {
# repo.
file { '/etc/yum.repos.d/kubernetes.repo':
ensure => file,
content => "$repo_file",
content => $repo_file,
owner => 'root',
group => 'root',
mode => '0644',
} ->
}

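The content attribute above shows only_variable_string: a double-quoted string holding nothing but one variable becomes the bare variable. A minimal sketch with a hypothetical variable and file path, for illustration only:

# Hypothetical repo definition, not part of this change
$repo_file = "[kubernetes]\nname=Kubernetes\nenabled=1\n"

file { '/tmp/example.repo':
  ensure  => file,
  content => $repo_file,  # was: content => "$repo_file"
  mode    => '0644',
}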
# Update iptables config. This is required based on:
|
||||
# https://kubernetes.io/docs/tasks/tools/install-kubeadm
|
||||
# This probably belongs somewhere else - initscripts package?
|
||||
file { '/etc/sysctl.d/k8s.conf':
|
||||
-> file { '/etc/sysctl.d/k8s.conf':
|
||||
ensure => file,
|
||||
content => "$iptables_file",
|
||||
content => $iptables_file,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
} ->
|
||||
exec { "update kernel parameters for iptables":
|
||||
command => "sysctl --system",
|
||||
} ->
|
||||
}
|
||||
-> exec { 'update kernel parameters for iptables':
|
||||
command => 'sysctl --system',
|
||||
}
|
||||
|
||||
# TODO: Update /etc/resolv.conf.k8s to be controlled by sysinv, as is done
|
||||
# for /etc/resolv.conf. Is should contain all the user-specified DNS
|
||||
# servers, but not the coredns IP.
|
||||
# Create custom resolv.conf file for kubelet
|
||||
file { "/etc/resolv.conf.k8s":
|
||||
ensure => file,
|
||||
content => "nameserver 8.8.8.8",
|
||||
-> file { '/etc/resolv.conf.k8s':
|
||||
ensure => file,
|
||||
content => 'nameserver 8.8.8.8',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
} ->
|
||||
}
|
||||
|
||||
# Start kubelet.
service { 'kubelet':
-> service { 'kubelet':
ensure => 'running',
enable => true,
} ->
}
# A separate enable is required since we have modified the service resource
# to never enable services.
exec { 'enable-kubelet':
-> exec { 'enable-kubelet':
command => '/usr/bin/systemctl enable kubelet.service',
}
}
@ -83,35 +83,35 @@ class platform::kubernetes::master::init
|
||||
file_line { "${resolv_conf} nameserver 8.8.8.8":
|
||||
path => $resolv_conf,
|
||||
line => 'nameserver 8.8.8.8',
|
||||
} ->
|
||||
}
|
||||
|
||||
# Configure the master node.
|
||||
file { "/etc/kubernetes/kubeadm.yaml":
|
||||
ensure => file,
|
||||
-> file { '/etc/kubernetes/kubeadm.yaml':
|
||||
ensure => file,
|
||||
content => template('platform/kubeadm.yaml.erb'),
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "configure master node":
|
||||
command => "kubeadm init --config=/etc/kubernetes/kubeadm.yaml",
|
||||
-> exec { 'configure master node':
|
||||
command => 'kubeadm init --config=/etc/kubernetes/kubeadm.yaml',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# Update ownership/permissions for file created by "kubeadm init".
|
||||
# We want it readable by sysinv and wrsroot.
|
||||
file { "/etc/kubernetes/admin.conf":
|
||||
-> file { '/etc/kubernetes/admin.conf':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
group => $::platform::params::protected_group_name,
|
||||
mode => '0640',
|
||||
} ->
|
||||
}
|
||||
|
||||
# Add a bash profile script to set a k8s env variable
|
||||
file {'bash_profile_k8s':
|
||||
path => '/etc/profile.d/kubeconfig.sh',
|
||||
ensure => file,
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/kubeconfig.sh"
|
||||
} ->
|
||||
-> file {'bash_profile_k8s':
|
||||
ensure => file,
|
||||
path => '/etc/profile.d/kubeconfig.sh',
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/kubeconfig.sh"
|
||||
}
|
||||
|
||||
# Configure calico networking using the Kubernetes API datastore. This is
|
||||
# beta functionality and has this limitation:
|
||||
@ -120,35 +120,35 @@ class platform::kubernetes::master::init
|
||||
# with Kubernetes pod CIDR assignments instead.
|
||||
# See https://docs.projectcalico.org/v3.2/getting-started/kubernetes/
|
||||
# installation/calico for more info.
|
||||
file { "/etc/kubernetes/rbac-kdd.yaml":
|
||||
-> file { '/etc/kubernetes/rbac-kdd.yaml':
|
||||
ensure => file,
|
||||
content => template('platform/rbac-kdd.yaml.erb'),
|
||||
} ->
|
||||
exec { "configure calico RBAC":
|
||||
command =>
|
||||
"kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/rbac-kdd.yaml",
|
||||
}
|
||||
-> exec { 'configure calico RBAC':
|
||||
command =>
|
||||
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/rbac-kdd.yaml',
|
||||
logoutput => true,
|
||||
} ->
|
||||
file { "/etc/kubernetes/calico.yaml":
|
||||
}
|
||||
-> file { '/etc/kubernetes/calico.yaml':
|
||||
ensure => file,
|
||||
content => template('platform/calico.yaml.erb'),
|
||||
} ->
|
||||
exec { "install calico networking":
|
||||
command =>
|
||||
"kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml",
|
||||
}
|
||||
-> exec { 'install calico networking':
|
||||
command =>
|
||||
'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f /etc/kubernetes/calico.yaml',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# kubernetes 1.12 uses coredns rather than kube-dns.
|
||||
# Restrict the dns pod to master nodes
|
||||
exec { "restrict coredns to master nodes":
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
|
||||
-> exec { 'restrict coredns to master nodes':
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# Remove the taint from the master node
|
||||
exec { "remove taint from master node":
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
|
||||
-> exec { 'remove taint from master node':
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
|
||||
logoutput => true,
|
||||
}
|
||||
} else {
|
||||
@ -157,80 +157,80 @@ class platform::kubernetes::master::init
|
||||
# existing certificates.
|
||||
|
||||
# Create necessary certificate files
|
||||
file { "/etc/kubernetes/pki":
|
||||
file { '/etc/kubernetes/pki':
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { '/etc/kubernetes/pki/ca.crt':
|
||||
}
|
||||
-> file { '/etc/kubernetes/pki/ca.crt':
|
||||
ensure => file,
|
||||
content => "$ca_crt",
|
||||
content => $ca_crt,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
} ->
|
||||
file { '/etc/kubernetes/pki/ca.key':
|
||||
}
|
||||
-> file { '/etc/kubernetes/pki/ca.key':
|
||||
ensure => file,
|
||||
content => "$ca_key",
|
||||
content => $ca_key,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
} ->
|
||||
file { '/etc/kubernetes/pki/sa.key':
|
||||
}
|
||||
-> file { '/etc/kubernetes/pki/sa.key':
|
||||
ensure => file,
|
||||
content => "$sa_key",
|
||||
content => $sa_key,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
} ->
|
||||
file { '/etc/kubernetes/pki/sa.pub':
|
||||
}
|
||||
-> file { '/etc/kubernetes/pki/sa.pub':
|
||||
ensure => file,
|
||||
content => "$sa_pub",
|
||||
content => $sa_pub,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
} ->
|
||||
}
|
||||
|
||||
# Configure the master node.
|
||||
file { "/etc/kubernetes/kubeadm.yaml":
|
||||
-> file { '/etc/kubernetes/kubeadm.yaml':
|
||||
ensure => file,
|
||||
content => template('platform/kubeadm.yaml.erb'),
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { "configure master node":
|
||||
command => "kubeadm init --config=/etc/kubernetes/kubeadm.yaml",
|
||||
-> exec { 'configure master node':
|
||||
command => 'kubeadm init --config=/etc/kubernetes/kubeadm.yaml',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# Update ownership/permissions for file created by "kubeadm init".
|
||||
# We want it readable by sysinv and wrsroot.
|
||||
file { "/etc/kubernetes/admin.conf":
|
||||
-> file { '/etc/kubernetes/admin.conf':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
group => $::platform::params::protected_group_name,
|
||||
mode => '0640',
|
||||
} ->
|
||||
}
|
||||
|
||||
# Add a bash profile script to set a k8s env variable
|
||||
file {'bash_profile_k8s':
|
||||
path => '/etc/profile.d/kubeconfig.sh',
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/kubeconfig.sh"
|
||||
} ->
|
||||
-> file {'bash_profile_k8s':
|
||||
ensure => present,
|
||||
path => '/etc/profile.d/kubeconfig.sh',
|
||||
mode => '0644',
|
||||
source => "puppet:///modules/${module_name}/kubeconfig.sh"
|
||||
}
|
||||
|
||||
# kubernetes 1.12 uses coredns rather than kube-dns.
|
||||
# Restrict the dns pod to master nodes. It seems that each time
|
||||
# kubeadm init is run, it undoes any changes to the deployment.
|
||||
exec { "restrict coredns to master nodes":
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
|
||||
-> exec { 'restrict coredns to master nodes':
|
||||
command => 'kubectl --kubeconfig=/etc/kubernetes/admin.conf -n kube-system patch deployment coredns -p \'{"spec":{"template":{"spec":{"nodeSelector":{"node-role.kubernetes.io/master":""}}}}}\'',
|
||||
logoutput => true,
|
||||
} ->
|
||||
}
|
||||
|
||||
# Remove the taint from the master node
|
||||
exec { "remove taint from master node":
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
|
||||
-> exec { 'remove taint from master node':
|
||||
command => "kubectl --kubeconfig=/etc/kubernetes/admin.conf taint node ${::platform::params::hostname} node-role.kubernetes.io/master-",
|
||||
logoutput => true,
|
||||
}
|
||||
}
|
||||
@ -247,9 +247,9 @@ class platform::kubernetes::master
|
||||
|
||||
Class['::platform::etcd'] -> Class[$name]
|
||||
Class['::platform::docker::config'] -> Class[$name]
|
||||
Class['::platform::kubernetes::kubeadm'] ->
|
||||
Class['::platform::kubernetes::master::init'] ->
|
||||
Class['::platform::kubernetes::firewall']
|
||||
Class['::platform::kubernetes::kubeadm']
|
||||
-> Class['::platform::kubernetes::master::init']
|
||||
-> Class['::platform::kubernetes::firewall']
|
||||
}
|
||||
}
|
||||
|
||||
@ -264,10 +264,10 @@ class platform::kubernetes::worker::init
|
||||
|
||||
# Configure the worker node. Only do this once, so check whether the
|
||||
# kubelet.conf file has already been created (by the join).
|
||||
exec { "configure worker node":
|
||||
command => "$join_cmd",
|
||||
exec { 'configure worker node':
|
||||
command => $join_cmd,
|
||||
logoutput => true,
|
||||
unless => 'test -f /etc/kubernetes/kubelet.conf',
|
||||
unless => 'test -f /etc/kubernetes/kubelet.conf',
|
||||
}
|
||||
}
|
||||
|
||||
@ -280,13 +280,13 @@ class platform::kubernetes::worker
|
||||
contain ::platform::kubernetes::kubeadm
|
||||
contain ::platform::kubernetes::worker::init
|
||||
|
||||
Class['::platform::kubernetes::kubeadm'] ->
|
||||
Class['::platform::kubernetes::worker::init']
|
||||
Class['::platform::kubernetes::kubeadm']
|
||||
-> Class['::platform::kubernetes::worker::init']
|
||||
}
|
||||
|
||||
if $enabled {
|
||||
file { "/var/run/.disable_worker_services":
|
||||
ensure => file,
|
||||
file { '/var/run/.disable_worker_services':
|
||||
ensure => file,
|
||||
replace => no,
|
||||
}
|
||||
# TODO: The following exec is a workaround. Once kubernetes becomes the
|
||||
@ -294,7 +294,7 @@ class platform::kubernetes::worker
|
||||
# the load.
|
||||
exec { 'Update PMON libvirtd.conf':
|
||||
command => "/bin/sed -i 's#mode = passive#mode = ignore #' /etc/pmon.d/libvirtd.conf",
|
||||
onlyif => '/usr/bin/test -e /etc/pmon.d/libvirtd.conf'
|
||||
onlyif => '/usr/bin/test -e /etc/pmon.d/libvirtd.conf'
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -333,6 +333,6 @@ class platform::kubernetes::firewall
|
||||
destination => $d_mgmt_subnet,
|
||||
source => $s_mgmt_subnet,
|
||||
tosource => $oam_float_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ class platform::ldap::params (
|
||||
class platform::ldap::server
|
||||
inherits ::platform::ldap::params {
|
||||
if ! $ldapserver_remote {
|
||||
include ::platform::ldap::server::local
|
||||
include ::platform::ldap::server::local
|
||||
}
|
||||
}
|
||||
|
||||
@ -38,7 +38,7 @@ class platform::ldap::server::local
|
||||
service { 'openldap':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => "slapd",
|
||||
name => 'slapd',
|
||||
hasstatus => true,
|
||||
hasrestart => true,
|
||||
}
|
||||
@ -55,47 +55,47 @@ class platform::ldap::server::local
|
||||
-e 's:^rootpw .*:rootpw ${admin_hashed_pw}:' \\
|
||||
-e 's:modulepath .*:modulepath /usr/lib64/openldap:' \\
|
||||
/etc/openldap/slapd.conf",
|
||||
onlyif => '/usr/bin/test -e /etc/openldap/slapd.conf'
|
||||
onlyif => '/usr/bin/test -e /etc/openldap/slapd.conf'
|
||||
}
|
||||
|
||||
# don't populate the adminpw if binding anonymously
|
||||
if ! $bind_anonymous {
|
||||
file { "/usr/local/etc/ldapscripts/ldapscripts.passwd":
|
||||
file { '/usr/local/etc/ldapscripts/ldapscripts.passwd':
|
||||
content => $admin_pw,
|
||||
}
|
||||
}
|
||||
|
||||
file { "/usr/share/cracklib/cracklib-small":
|
||||
file { '/usr/share/cracklib/cracklib-small':
|
||||
ensure => link,
|
||||
target => "/usr/share/cracklib/cracklib-small.pwd",
|
||||
target => '/usr/share/cracklib/cracklib-small.pwd',
|
||||
}
|
||||
|
||||
# start openldap with updated config and updated nsswitch
|
||||
# then convert slapd config to db format. Note, slapd must have run and created the db prior to this.
|
||||
Exec['stop-openldap'] ->
|
||||
Exec['update-slapd-conf'] ->
|
||||
Service['nscd'] ->
|
||||
Service['nslcd'] ->
|
||||
Service['openldap'] ->
|
||||
Exec['slapd-convert-config'] ->
|
||||
Exec['slapd-conf-move-backup']
|
||||
Exec['stop-openldap']
|
||||
-> Exec['update-slapd-conf']
|
||||
-> Service['nscd']
|
||||
-> Service['nslcd']
|
||||
-> Service['openldap']
|
||||
-> Exec['slapd-convert-config']
|
||||
-> Exec['slapd-conf-move-backup']
|
||||
}
|
||||
|
||||
|
||||
class platform::ldap::client
|
||||
inherits ::platform::ldap::params {
|
||||
file { "/etc/openldap/ldap.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/openldap/ldap.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/ldap.conf.erb'),
|
||||
}
|
||||
|
||||
file { "/etc/nslcd.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/nslcd.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/nslcd.conf.erb'),
|
||||
} ->
|
||||
service { 'nslcd':
|
||||
}
|
||||
-> service { 'nslcd':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => 'nslcd',
|
||||
@ -104,7 +104,7 @@ class platform::ldap::client
|
||||
}
|
||||
|
||||
if $::personality == 'controller' {
|
||||
file { "/usr/local/etc/ldapscripts/ldapscripts.conf":
|
||||
file { '/usr/local/etc/ldapscripts/ldapscripts.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/ldapscripts.conf.erb'),
|
||||
@ -127,30 +127,30 @@ class platform::ldap::bootstrap
|
||||
|
||||
exec { 'populate initial ldap configuration':
|
||||
command => "ldapadd -D ${dn} -w ${admin_pw} -f /etc/openldap/initial_config.ldif"
|
||||
} ->
|
||||
exec { "create ldap admin user":
|
||||
command => "ldapadduser admin root"
|
||||
} ->
|
||||
exec { "create ldap operator user":
|
||||
command => "ldapadduser operator users"
|
||||
} ->
|
||||
exec { 'create ldap protected group':
|
||||
}
|
||||
-> exec { 'create ldap admin user':
|
||||
command => 'ldapadduser admin root'
|
||||
}
|
||||
-> exec { 'create ldap operator user':
|
||||
command => 'ldapadduser operator users'
|
||||
}
|
||||
-> exec { 'create ldap protected group':
|
||||
command => "ldapaddgroup ${::platform::params::protected_group_name} ${::platform::params::protected_group_id}"
|
||||
} ->
|
||||
exec { "add admin to wrs protected group" :
|
||||
}
|
||||
-> exec { 'add admin to wrs protected group' :
|
||||
command => "ldapaddusertogroup admin ${::platform::params::protected_group_name}",
|
||||
} ->
|
||||
exec { "add operator to wrs protected group" :
|
||||
}
|
||||
-> exec { 'add operator to wrs protected group' :
|
||||
command => "ldapaddusertogroup operator ${::platform::params::protected_group_name}",
|
||||
} ->
|
||||
}
|
||||
|
||||
# Change operator shell from default to /usr/local/bin/cgcs_cli
|
||||
file { "/tmp/ldap.cgcs-shell.ldif":
|
||||
-> file { '/tmp/ldap.cgcs-shell.ldif':
|
||||
ensure => present,
|
||||
replace => true,
|
||||
source => "puppet:///modules/${module_name}/ldap.cgcs-shell.ldif"
|
||||
} ->
|
||||
exec { 'ldap cgcs-cli shell update':
|
||||
source => "puppet:///modules/${module_name}/ldap.cgcs-shell.ldif"
|
||||
}
|
||||
-> exec { 'ldap cgcs-cli shell update':
|
||||
command =>
|
||||
"ldapmodify -D ${dn} -w ${admin_pw} -f /tmp/ldap.cgcs-shell.ldif"
|
||||
}
|
||||
|
@ -13,18 +13,18 @@ class platform::lldp
|
||||
$system = $::platform::params::system_name
|
||||
$version = $::platform::params::software_version
|
||||
|
||||
file { "/etc/lldpd.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/lldpd.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/lldp.conf.erb'),
|
||||
notify => Service['lldpd'],
|
||||
notify => Service['lldpd'],
|
||||
}
|
||||
|
||||
file { "/etc/default/lldpd":
|
||||
ensure => 'present',
|
||||
file { '/etc/default/lldpd':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/lldpd.default.erb'),
|
||||
notify => Service['lldpd'],
|
||||
notify => Service['lldpd'],
|
||||
}
|
||||
|
||||
service { 'lldpd':
|
||||
|
@ -10,20 +10,20 @@ class platform::lvm
# Mask socket unit as well to make sure
# systemd socket activation does not happen
service { 'lvm2-lvmetad.socket':
enable => mask,
ensure => 'stopped',
} ->
enable => mask,
}
# Masking service unit ensures that it is not started again
service { 'lvm2-lvmetad':
enable => mask,
-> service { 'lvm2-lvmetad':
ensure => 'stopped',
} ->
enable => mask,
}
# Since masking is changing unit symlinks to point to /dev/null,
# we need to reload systemd configuration
exec { 'lvmetad-systemd-daemon-reload':
command => "systemctl daemon-reload",
} ->
file_line { 'use_lvmetad':
-> exec { 'lvmetad-systemd-daemon-reload':
command => 'systemctl daemon-reload',
}
-> file_line { 'use_lvmetad':
path => '/etc/lvm/lvm.conf',
match => '^[^#]*use_lvmetad = 1',
line => ' use_lvmetad = 0',
@ -32,17 +32,17 @@ class platform::lvm
|
||||
|
||||
|
||||
define platform::lvm::global_filter($filter) {
|
||||
file_line { "$name: update lvm global_filter":
|
||||
file_line { "${name}: update lvm global_filter":
|
||||
path => '/etc/lvm/lvm.conf',
|
||||
line => " global_filter = $filter",
|
||||
line => " global_filter = ${filter}",
|
||||
match => '^[ ]*global_filter =',
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
define platform::lvm::umount {
|
||||
exec { "umount disk $name":
|
||||
command => "umount $name; true",
|
||||
exec { "umount disk ${name}":
|
||||
command => "umount ${name}; true",
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,12 +53,12 @@ class platform::lvm::vg::cgts_vg(
|
||||
) inherits platform::lvm::params {
|
||||
|
||||
::platform::lvm::umount { $physical_volumes:
|
||||
} ->
|
||||
physical_volume { $physical_volumes:
|
||||
ensure => present,
|
||||
} ->
|
||||
volume_group { $vg_name:
|
||||
}
|
||||
-> physical_volume { $physical_volumes:
|
||||
ensure => present,
|
||||
}
|
||||
-> volume_group { $vg_name:
|
||||
ensure => present,
|
||||
physical_volumes => $physical_volumes,
|
||||
}
|
||||
}
|
||||
@ -90,13 +90,13 @@ class platform::lvm::controller::vgs {
|
||||
class platform::lvm::controller
|
||||
inherits ::platform::lvm::params {
|
||||
|
||||
::platform::lvm::global_filter { "transition filter":
|
||||
::platform::lvm::global_filter { 'transition filter':
|
||||
filter => $transition_filter,
|
||||
before => Class['::platform::lvm::controller::vgs']
|
||||
}
|
||||
|
||||
::platform::lvm::global_filter { "final filter":
|
||||
filter => $final_filter,
|
||||
::platform::lvm::global_filter { 'final filter':
|
||||
filter => $final_filter,
|
||||
require => Class['::platform::lvm::controller::vgs']
|
||||
}
|
||||
|
||||
@ -125,13 +125,13 @@ class platform::lvm::compute::vgs {
|
||||
class platform::lvm::compute
|
||||
inherits ::platform::lvm::params {
|
||||
|
||||
::platform::lvm::global_filter { "transition filter":
|
||||
::platform::lvm::global_filter { 'transition filter':
|
||||
filter => $transition_filter,
|
||||
before => Class['::platform::lvm::compute::vgs']
|
||||
}
|
||||
|
||||
::platform::lvm::global_filter { "final filter":
|
||||
filter => $final_filter,
|
||||
::platform::lvm::global_filter { 'final filter':
|
||||
filter => $final_filter,
|
||||
require => Class['::platform::lvm::compute::vgs']
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ class platform::lvm::storage::vgs {
|
||||
class platform::lvm::storage
|
||||
inherits ::platform::lvm::params {
|
||||
|
||||
::platform::lvm::global_filter { "final filter":
|
||||
::platform::lvm::global_filter { 'final filter':
|
||||
filter => $final_filter,
|
||||
before => Class['::platform::lvm::storage::vgs']
|
||||
}
|
||||
|
@ -1,13 +1,13 @@
|
||||
class platform::memcached::params(
|
||||
$package_ensure = 'present',
|
||||
$logfile = '/var/log/memcached.log',
|
||||
# set CACHESIZE in /etc/sysconfig/memcached
|
||||
$max_memory = false,
|
||||
$tcp_port = 11211,
|
||||
$udp_port = 11211,
|
||||
# set MAXCONN in /etc/sysconfig/memcached
|
||||
$max_connections = 8192,
|
||||
$service_restart = true,
|
||||
$package_ensure = 'present',
|
||||
$logfile = '/var/log/memcached.log',
|
||||
# set CACHESIZE in /etc/sysconfig/memcached
|
||||
$max_memory = false,
|
||||
$tcp_port = 11211,
|
||||
$udp_port = 11211,
|
||||
# set MAXCONN in /etc/sysconfig/memcached
|
||||
$max_connections = 8192,
|
||||
$service_restart = true,
|
||||
) {
|
||||
include ::platform::params
|
||||
$controller_0_hostname = $::platform::params::controller_0_hostname
|
||||
@ -43,9 +43,9 @@ class platform::memcached
|
||||
max_connections => $max_connections,
|
||||
max_memory => $max_memory,
|
||||
service_restart => $service_restart,
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { 'systemctl enable memcached.service':
|
||||
command => "/usr/bin/systemctl enable memcached.service",
|
||||
-> exec { 'systemctl enable memcached.service':
|
||||
command => '/usr/bin/systemctl enable memcached.service',
|
||||
}
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ class platform::mtce
|
||||
include ::platform::client::credentials::params
|
||||
$keyring_directory = $::platform::client::credentials::params::keyring_directory
|
||||
|
||||
file { "/etc/mtc.ini":
|
||||
file { '/etc/mtc.ini':
|
||||
ensure => present,
|
||||
mode => '0755',
|
||||
content => template('mtce/mtc_ini.erb'),
|
||||
@ -39,10 +39,10 @@ class platform::mtce
|
||||
|
||||
$boot_device = $::boot_disk_device_path
|
||||
|
||||
file { "/etc/rmonfiles.d/static.conf":
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
content => template('mtce/static_conf.erb'),
|
||||
file { '/etc/rmonfiles.d/static.conf':
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
content => template('mtce/static_conf.erb'),
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,17 +53,17 @@ class platform::mtce::agent
|
||||
if $::platform::params::init_keystone {
|
||||
# configure a mtce keystone user
|
||||
keystone_user { $auth_username:
|
||||
password => $auth_pw,
|
||||
ensure => present,
|
||||
password => $auth_pw,
|
||||
enabled => true,
|
||||
}
|
||||
|
||||
# assign an admin role for this mtce user on the services tenant
|
||||
keystone_user_role { "${auth_username}@${auth_project}":
|
||||
ensure => present,
|
||||
user_domain => $auth_user_domain,
|
||||
project_domain => $auth_project_domain,
|
||||
roles => ['admin'],
|
||||
ensure => present,
|
||||
user_domain => $auth_user_domain,
|
||||
project_domain => $auth_project_domain,
|
||||
roles => ['admin'],
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -71,19 +71,19 @@ class platform::mtce::agent
|
||||
|
||||
class platform::mtce::reload {
|
||||
exec {'signal-mtc-agent':
|
||||
command => "pkill -HUP mtcAgent",
|
||||
command => 'pkill -HUP mtcAgent',
|
||||
}
|
||||
exec {'signal-hbs-agent':
|
||||
command => "pkill -HUP hbsAgent",
|
||||
command => 'pkill -HUP hbsAgent',
|
||||
}
|
||||
|
||||
# mtcClient and hbsClient don't currently reload all configuration,
|
||||
# therefore they must be restarted. Move to HUP if daemon updated.
|
||||
exec {'pmon-restart-hbs-client':
|
||||
command => "pmon-restart hbsClient",
|
||||
command => 'pmon-restart hbsClient',
|
||||
}
|
||||
exec {'pmon-restart-mtc-client':
|
||||
command => "pmon-restart mtcClient",
|
||||
command => 'pmon-restart mtcClient',
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -9,16 +9,16 @@ class platform::multipath
|
||||
file { '/etc/multipath.conf':
|
||||
ensure => 'present',
|
||||
mode => '0644',
|
||||
content => template("platform/multipath.conf.erb")
|
||||
} ->
|
||||
service { 'start-multipathd':
|
||||
content => template('platform/multipath.conf.erb')
|
||||
}
|
||||
-> service { 'start-multipathd':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => 'multipathd',
|
||||
hasstatus => true,
|
||||
hasrestart => true,
|
||||
} ->
|
||||
exec { 'systemctl-enable-multipathd':
|
||||
}
|
||||
-> exec { 'systemctl-enable-multipathd':
|
||||
command => '/usr/bin/systemctl enable multipathd.service',
|
||||
}
|
||||
} else {
|
||||
@ -28,11 +28,11 @@ class platform::multipath
|
||||
name => 'multipathd',
|
||||
hasstatus => true,
|
||||
hasrestart => true,
|
||||
} ->
|
||||
exec { 'systemctl-disable-multipathd':
|
||||
}
|
||||
-> exec { 'systemctl-disable-multipathd':
|
||||
command => '/usr/bin/systemctl disable multipathd.service',
|
||||
} ->
|
||||
file { '/etc/multipath.conf':
|
||||
}
|
||||
-> file { '/etc/multipath.conf':
|
||||
ensure => 'absent',
|
||||
}
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ define network_address (
|
||||
# will configure them on the active controller.
|
||||
exec { "Configuring ${name} IP address":
|
||||
command => "ip addr replace ${address} dev ${ifname} ${options}",
|
||||
onlyif => "test -f /etc/platform/simplex",
|
||||
onlyif => 'test -f /etc/platform/simplex',
|
||||
}
|
||||
}
|
||||
|
||||
@ -123,18 +123,18 @@ class platform::network::apply {
|
||||
include ::platform::interfaces
|
||||
include ::platform::addresses
|
||||
|
||||
Network_config <| |> ->
|
||||
Exec['apply-network-config'] ->
|
||||
Network_address <| |> ->
|
||||
Anchor['platform::networking']
|
||||
Network_config <| |>
|
||||
-> Exec['apply-network-config']
|
||||
-> Network_address <| |>
|
||||
-> Anchor['platform::networking']
|
||||
|
||||
# Adding Network_route dependency separately, in case it's empty,
|
||||
# as puppet bug will remove dependency altogether if
|
||||
# Network_route is empty. See below.
|
||||
# https://projects.puppetlabs.com/issues/18399
|
||||
Network_config <| |> ->
|
||||
Network_route <| |> ->
|
||||
Exec['apply-network-config']
|
||||
Network_config <| |>
|
||||
-> Network_route <| |>
|
||||
-> Exec['apply-network-config']
|
||||
|
||||
exec {'apply-network-config':
|
||||
command => 'apply_network_config.sh',
|
||||
@ -161,7 +161,7 @@ class platform::network (
|
||||
exec { 'connectivity-test-management':
|
||||
command => "${testcmd} -t 70 -i ${management_interface} controller-platform-nfs; /bin/true",
|
||||
require => Anchor['platform::networking'],
|
||||
onlyif => "test ! -f /etc/platform/simplex",
|
||||
onlyif => 'test ! -f /etc/platform/simplex',
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,7 +169,7 @@ class platform::network (
|
||||
exec { 'connectivity-test-infrastructure':
|
||||
command => "${testcmd} -t 120 -i ${infrastructure_interface} controller-nfs; /bin/true",
|
||||
require => Anchor['platform::networking'],
|
||||
onlyif => "test ! -f /etc/platform/simplex",
|
||||
onlyif => 'test ! -f /etc/platform/simplex',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -77,8 +77,8 @@ class platform::nfv::haproxy
|
||||
inherits ::platform::nfv::params {
|
||||
|
||||
platform::haproxy::proxy { 'vim-restapi':
|
||||
server_name => 's-vim-restapi',
|
||||
public_port => $api_port,
|
||||
server_name => 's-vim-restapi',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
|
@ -9,37 +9,37 @@ class platform::ntp (
|
||||
$pmon_ensure = 'absent'
|
||||
}
|
||||
|
||||
File['ntp_config'] ->
|
||||
File['ntp_config_initial'] ->
|
||||
file {'ntpdate_override_dir':
|
||||
File['ntp_config']
|
||||
-> File['ntp_config_initial']
|
||||
-> file {'ntpdate_override_dir':
|
||||
ensure => directory,
|
||||
path => '/etc/systemd/system/ntpdate.service.d',
|
||||
mode => '0755',
|
||||
} ->
|
||||
file { 'ntpdate_tis_override':
|
||||
}
|
||||
-> file { 'ntpdate_tis_override':
|
||||
ensure => file,
|
||||
path => '/etc/systemd/system/ntpdate.service.d/tis_override.conf',
|
||||
mode => '0644',
|
||||
content => template('platform/ntp.override.erb'),
|
||||
} ->
|
||||
file { 'ntp_pmon_config':
|
||||
}
|
||||
-> file { 'ntp_pmon_config':
|
||||
ensure => file,
|
||||
path => '/etc/ntp.pmon.conf',
|
||||
mode => '0644',
|
||||
content => template('platform/ntp.pmon.conf.erb'),
|
||||
} ->
|
||||
exec { 'systemd-daemon-reload':
|
||||
}
|
||||
-> exec { 'systemd-daemon-reload':
|
||||
command => '/usr/bin/systemctl daemon-reload',
|
||||
} ->
|
||||
exec { 'stop-ntpdate':
|
||||
}
|
||||
-> exec { 'stop-ntpdate':
|
||||
command => '/usr/bin/systemctl stop ntpdate.service',
|
||||
returns => [ 0, 1 ],
|
||||
} ->
|
||||
exec { 'stop-ntpd':
|
||||
}
|
||||
-> exec { 'stop-ntpd':
|
||||
command => '/usr/bin/systemctl stop ntpd.service',
|
||||
returns => [ 0, 1 ],
|
||||
} ->
|
||||
file { 'ntp_pmon_link':
|
||||
}
|
||||
-> file { 'ntp_pmon_link':
|
||||
ensure => $pmon_ensure,
|
||||
path => '/etc/pmon.d/ntpd.conf',
|
||||
target => '/etc/ntp.pmon.conf',
|
||||
@ -52,16 +52,16 @@ class platform::ntp (
|
||||
exec { 'enable-ntpdate':
|
||||
command => '/usr/bin/systemctl enable ntpdate.service',
|
||||
require => File['ntp_pmon_link'],
|
||||
} ->
|
||||
exec { 'enable-ntpd':
|
||||
}
|
||||
-> exec { 'enable-ntpd':
|
||||
command => '/usr/bin/systemctl enable ntpd.service',
|
||||
} ->
|
||||
exec { 'start-ntpdate':
|
||||
}
|
||||
-> exec { 'start-ntpdate':
|
||||
command => '/usr/bin/systemctl start ntpdate.service',
|
||||
returns => [ 0, 1 ],
|
||||
onlyif => "test ! -f /etc/platform/simplex || grep -q '^server' /etc/ntp.conf",
|
||||
} ->
|
||||
service { 'ntpd':
|
||||
}
|
||||
-> service { 'ntpd':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => 'ntpd',
|
||||
@ -72,8 +72,8 @@ class platform::ntp (
|
||||
exec { 'disable-ntpdate':
|
||||
command => '/usr/bin/systemctl disable ntpdate.service',
|
||||
require => File['ntp_pmon_link'],
|
||||
} ->
|
||||
exec { 'disable-ntpd':
|
||||
}
|
||||
-> exec { 'disable-ntpd':
|
||||
command => '/usr/bin/systemctl disable ntpd.service',
|
||||
}
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ class platform::params (
$ipv4 = 4
$ipv6 = 6

$nfs_mount_options = "timeo=30,proto=$nfs_proto,vers=3,rsize=$nfs_rw_size,wsize=$nfs_rw_size"
$nfs_mount_options = "timeo=30,proto=${nfs_proto},vers=3,rsize=${nfs_rw_size},wsize=${nfs_rw_size}"

$protected_group_name = 'wrs_protected'
$protected_group_id = '345'
@ -41,7 +41,7 @@ class platform::params (
# max number of workers
$eng_max_workers = 20
# min number of workers
$eng_min_workers = 1
$eng_min_workers = 1
# min platform core count
$platform_default_min_cpu_count = 2
# total system memory per worker
@ -49,7 +49,7 @@ class platform::params (
# memory headroom per worker (e.g., buffers, cached)
$eng_overhead_mb = 1000

notice("DEBUG: Platform cpu count obtained from sysinv DB is $platform_cpu_count.")
notice("DEBUG: Platform cpu count obtained from sysinv DB is ${platform_cpu_count}.")

# number of workers per service
if $system_type == 'All-in-one' {

@ -15,12 +15,12 @@ define platform_manage_partition(
|
||||
) {
|
||||
if $config {
|
||||
# For drbd partitions, modifications can only be done on standby
|
||||
# controller as we need to:
|
||||
# - stop DRBD [drbd is in-use on active, so it can't be stopped there]
|
||||
# - manage-partitions: backup meta, resize partition, restore meta
|
||||
# - start DRBD
|
||||
# For AIO SX we make an exception as all instances are down on host lock.
|
||||
# see https://docs.linbit.com/doc/users-guide-83/s-resizing/
|
||||
# controller as we need to:
|
||||
# - stop DRBD [drbd is in-use on active, so it can't be stopped there]
|
||||
# - manage-partitions: backup meta, resize partition, restore meta
|
||||
# - start DRBD
|
||||
# For AIO SX we make an exception as all instances are down on host lock.
|
||||
# see https://docs.linbit.com/doc/users-guide-83/s-resizing/
|
||||
exec { "manage-partitions-${action}":
|
||||
logoutput => true,
|
||||
command => template('platform/partitions.manage.erb')
|
||||
@ -42,16 +42,16 @@ class platform::partitions
|
||||
# NOTE: Currently we are executing partition changes serially, not in bulk.
|
||||
platform_manage_partition { 'check':
|
||||
config => $check_config,
|
||||
} ->
|
||||
platform_manage_partition { 'delete':
|
||||
}
|
||||
-> platform_manage_partition { 'delete':
|
||||
config => $delete_config,
|
||||
} ->
|
||||
platform_manage_partition { 'modify':
|
||||
}
|
||||
-> platform_manage_partition { 'modify':
|
||||
config => $modify_config,
|
||||
shutdown_drbd_resource => $shutdown_drbd_resource,
|
||||
system_mode => $::platform::params::system_mode,
|
||||
} ->
|
||||
platform_manage_partition { 'create':
|
||||
}
|
||||
-> platform_manage_partition { 'create':
|
||||
config => $create_config,
|
||||
}
|
||||
}
|
@ -1,29 +1,29 @@
class platform::password {

file { "/etc/pam.d/passwd":
file { '/etc/pam.d/passwd':
ensure => present,
content => template('platform/pam.passwd.erb'),
}

file_line { "/etc/nsswitch.conf add passwd ldap":
file_line { '/etc/nsswitch.conf add passwd ldap':
path => '/etc/nsswitch.conf',
line => 'passwd: files sss ldap',
match => '^passwd: *files sss',
}

file_line { "/etc/nsswitch.conf add shadow ldap":
file_line { '/etc/nsswitch.conf add shadow ldap':
path => '/etc/nsswitch.conf',
line => 'shadow: files sss ldap',
match => '^shadow: *files sss',
}

file_line { "/etc/nsswitch.conf add group ldap":
file_line { '/etc/nsswitch.conf add group ldap':
path => '/etc/nsswitch.conf',
line => 'group: files sss ldap',
match => '^group: *files sss',
}

file_line { "/etc/nsswitch.conf add sudoers ldap":
file_line { '/etc/nsswitch.conf add sudoers ldap':
path => '/etc/nsswitch.conf',
line => 'sudoers: files ldap',
match => '^sudoers: *files',
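The password hunk is the double_quoted_strings rule applied to resource titles: double quotes stay only where something is interpolated. A sketch of the single-quoted form on a hypothetical nsswitch entry, not part of this change:

# Hypothetical file_line resource showing the preferred single-quoted style
file_line { '/etc/nsswitch.conf add automount ldap':
  path  => '/etc/nsswitch.conf',
  line  => 'automount: files ldap',
  match => '^automount: *files',
}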
@ -14,8 +14,8 @@ class platform::patching
|
||||
|
||||
group { 'patching':
|
||||
ensure => 'present',
|
||||
} ->
|
||||
user { 'patching':
|
||||
}
|
||||
-> user { 'patching':
|
||||
ensure => 'present',
|
||||
comment => 'patching Daemons',
|
||||
groups => ['nobody', 'patching', $::platform::params::protected_group_name],
|
||||
@ -24,14 +24,14 @@ class platform::patching
|
||||
password_max_age => '-1',
|
||||
password_min_age => '-1',
|
||||
shell => '/sbin/nologin',
|
||||
} ->
|
||||
file { "/etc/patching":
|
||||
ensure => "directory",
|
||||
owner => 'patching',
|
||||
group => 'patching',
|
||||
mode => '0755',
|
||||
} ->
|
||||
class { '::patching': }
|
||||
}
|
||||
-> file { '/etc/patching':
|
||||
ensure => 'directory',
|
||||
owner => 'patching',
|
||||
group => 'patching',
|
||||
mode => '0755',
|
||||
}
|
||||
-> class { '::patching': }
|
||||
}
|
||||
|
||||
|
||||
@ -40,7 +40,7 @@ class platform::patching::firewall
|
||||
|
||||
platform::firewall::rule { 'patching-api':
|
||||
service_name => 'patching',
|
||||
ports => $public_port,
|
||||
ports => $public_port,
|
||||
}
|
||||
}
|
||||
|
||||
@ -49,9 +49,9 @@ class platform::patching::haproxy
|
||||
inherits ::platform::patching::params {
|
||||
|
||||
platform::haproxy::proxy { 'patching-restapi':
|
||||
server_name => 's-patching',
|
||||
public_port => $public_port,
|
||||
private_port => $private_port,
|
||||
server_name => 's-patching',
|
||||
public_port => $public_port,
|
||||
private_port => $private_port,
|
||||
server_timeout => $server_timeout,
|
||||
}
|
||||
}
|
||||
|
@ -104,14 +104,14 @@ class platform::postgresql::server (
|
||||
$service_ensure = 'stopped'
|
||||
}
|
||||
|
||||
class {"::postgresql::globals":
|
||||
class {'::postgresql::globals':
|
||||
datadir => $data_dir,
|
||||
confdir => $config_dir,
|
||||
} ->
|
||||
}
|
||||
|
||||
class {"::postgresql::server":
|
||||
-> class {'::postgresql::server':
|
||||
ip_mask_allow_all_users => $ipv4acl,
|
||||
service_ensure => $service_ensure,
|
||||
service_ensure => $service_ensure,
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ class platform::postgresql::post {
# To allow for the transition it must be explicitly stopped. Once puppet
# can directly handle SM managed services, then this can be removed.
exec { 'stop postgresql service':
command => "systemctl stop postgresql; systemctl disable postgresql",
command => 'systemctl stop postgresql; systemctl disable postgresql',
}
}

@ -134,36 +134,36 @@ class platform::postgresql::bootstrap
|
||||
|
||||
exec { 'Empty pg dir':
|
||||
command => "rm -fR ${root_dir}/*",
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { 'Create pg datadir':
|
||||
-> exec { 'Create pg datadir':
|
||||
command => "mkdir -p ${data_dir}",
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { 'Change pg dir permissions':
|
||||
-> exec { 'Change pg dir permissions':
|
||||
command => "chown -R postgres:postgres ${root_dir}",
|
||||
} ->
|
||||
}
|
||||
|
||||
file_line { 'allow sudo with no tty':
|
||||
-> file_line { 'allow sudo with no tty':
|
||||
path => '/etc/sudoers',
|
||||
match => '^Defaults *requiretty',
|
||||
line => '#Defaults requiretty',
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { 'Create pg database':
|
||||
-> exec { 'Create pg database':
|
||||
command => "sudo -u postgres initdb -D ${data_dir}",
|
||||
} ->
|
||||
}
|
||||
|
||||
exec { 'Move Config files':
|
||||
-> exec { 'Move Config files':
|
||||
command => "mkdir -p ${config_dir} && mv ${data_dir}/*.conf ${config_dir}/ && ln -s ${config_dir}/*.conf ${data_dir}/",
|
||||
} ->
|
||||
}
|
||||
|
||||
class {"::postgresql::globals":
|
||||
-> class {'::postgresql::globals':
|
||||
datadir => $data_dir,
|
||||
confdir => $config_dir,
|
||||
} ->
|
||||
}
|
||||
|
||||
class {"::postgresql::server":
|
||||
-> class {'::postgresql::server':
|
||||
}
|
||||
|
||||
# Allow local postgres user as trusted for simplex upgrade scripts
|
||||
@ -186,15 +186,15 @@ class platform::postgresql::upgrade
|
||||
|
||||
exec { 'Move Config files':
|
||||
command => "mkdir -p ${config_dir} && mv ${data_dir}/*.conf ${config_dir}/ && ln -s ${config_dir}/*.conf ${data_dir}/",
|
||||
} ->
|
||||
}
|
||||
|
||||
class {"::postgresql::globals":
|
||||
datadir => $data_dir,
|
||||
confdir => $config_dir,
|
||||
-> class {'::postgresql::globals':
|
||||
datadir => $data_dir,
|
||||
confdir => $config_dir,
|
||||
needs_initdb => false,
|
||||
} ->
|
||||
}
|
||||
|
||||
class {"::postgresql::server":
|
||||
-> class {'::postgresql::server':
|
||||
}
|
||||
|
||||
include ::aodh::db::postgresql
|
||||
|
@ -37,60 +37,60 @@ class platform::ptp (
|
||||
path => '/etc/ptp4l.conf',
|
||||
mode => '0644',
|
||||
content => template('platform/ptp4l.conf.erb'),
|
||||
} ->
|
||||
file { 'ptp4l_service':
|
||||
}
|
||||
-> file { 'ptp4l_service':
|
||||
ensure => file,
|
||||
path => '/usr/lib/systemd/system/ptp4l.service',
|
||||
mode => '0644',
|
||||
content => template('platform/ptp4l.service.erb'),
|
||||
} ->
|
||||
file { 'ptp4l_sysconfig':
|
||||
}
|
||||
-> file { 'ptp4l_sysconfig':
|
||||
ensure => file,
|
||||
path => '/etc/sysconfig/ptp4l',
|
||||
mode => '0644',
|
||||
content => template('platform/ptp4l.erb'),
|
||||
} ->
|
||||
file { 'phc2sys_service':
|
||||
}
|
||||
-> file { 'phc2sys_service':
|
||||
ensure => file,
|
||||
path => '/usr/lib/systemd/system/phc2sys.service',
|
||||
mode => '0644',
|
||||
content => template('platform/phc2sys.service.erb'),
|
||||
} ->
|
||||
file { 'phc2sys_sysconfig':
|
||||
}
|
||||
-> file { 'phc2sys_sysconfig':
|
||||
ensure => file,
|
||||
path => '/etc/sysconfig/phc2sys',
|
||||
mode => '0644',
|
||||
content => template('platform/phc2sys.erb'),
|
||||
} ->
|
||||
file { 'ptp4l_pmon':
|
||||
}
|
||||
-> file { 'ptp4l_pmon':
|
||||
ensure => file,
|
||||
path => '/etc/ptp4l.pmon.conf',
|
||||
mode => '0644',
|
||||
content => template('platform/ptp4l.pmon.conf.erb'),
|
||||
} ->
|
||||
file { 'phc2sys_pmon':
|
||||
}
|
||||
-> file { 'phc2sys_pmon':
|
||||
ensure => file,
|
||||
path => '/etc/phc2sys.pmon.conf',
|
||||
mode => '0644',
|
||||
content => template('platform/phc2sys.pmon.conf.erb'),
|
||||
} ->
|
||||
file { 'ptp4l_pmon_link':
|
||||
}
|
||||
-> file { 'ptp4l_pmon_link':
|
||||
ensure => $pmon_ensure,
|
||||
path => '/etc/pmon.d/ptp4l.conf',
|
||||
target => '/etc/ptp4l.pmon.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
} ->
|
||||
file { 'phc2sys_pmon_link':
|
||||
}
|
||||
-> file { 'phc2sys_pmon_link':
|
||||
ensure => $pmon_ensure,
|
||||
path => '/etc/pmon.d/phc2sys.conf',
|
||||
target => '/etc/phc2sys.pmon.conf',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
} ->
|
||||
exec { 'systemctl-daemon-reload':
|
||||
}
|
||||
-> exec { 'systemctl-daemon-reload':
|
||||
command => '/usr/bin/systemctl daemon-reload',
|
||||
}
|
||||
|
||||
@ -98,18 +98,18 @@ class platform::ptp (
|
||||
exec { 'enable-ptp4l':
|
||||
command => '/usr/bin/systemctl enable ptp4l.service',
|
||||
require => Exec['systemctl-daemon-reload'],
|
||||
} ->
|
||||
exec { 'enable-phc2sys':
|
||||
}
|
||||
-> exec { 'enable-phc2sys':
|
||||
command => '/usr/bin/systemctl enable phc2sys.service',
|
||||
} ->
|
||||
service { 'ptp4l':
|
||||
}
|
||||
-> service { 'ptp4l':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => 'ptp4l',
|
||||
hasstatus => true,
|
||||
hasrestart => true,
|
||||
} ->
|
||||
service { 'phc2sys':
|
||||
}
|
||||
-> service { 'phc2sys':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
name => 'phc2sys',
|
||||
@ -120,14 +120,14 @@ class platform::ptp (
|
||||
exec { 'disable-ptp4l':
|
||||
command => '/usr/bin/systemctl disable ptp4l.service',
|
||||
require => Exec['systemctl-daemon-reload'],
|
||||
} ->
|
||||
exec { 'disable-phc2sys':
|
||||
}
|
||||
-> exec { 'disable-phc2sys':
|
||||
command => '/usr/bin/systemctl disable phc2sys.service',
|
||||
}
|
||||
exec { 'stop-ptp4l':
|
||||
command => '/usr/bin/systemctl stop ptp4l.service',
|
||||
} ->
|
||||
exec { 'stop-phc2sys':
|
||||
}
|
||||
-> exec { 'stop-phc2sys':
|
||||
command => '/usr/bin/systemctl stop phc2sys.service',
|
||||
}
|
||||
}
|
||||
|
@ -16,35 +16,35 @@ class platform::remotelogging
|
||||
$hostname = $::hostname
|
||||
|
||||
if($transport == 'tls') {
|
||||
$server = "{tcp(\"$ip_address\" port($port) tls(peer-verify(\"required-untrusted\")));};"
|
||||
$server = "{tcp(\"${ip_address}\" port(${port}) tls(peer-verify(\"required-untrusted\")));};"
|
||||
} else {
|
||||
$server = "{$transport(\"$ip_address\" port($port));};"
|
||||
$server = "{${transport}(\"${ip_address}\" port(${port}));};"
|
||||
}
|
||||
|
||||
$destination = "destination remote_log_server "
|
||||
$destination_line = "$destination $server"
|
||||
$destination = 'destination remote_log_server '
|
||||
$destination_line = "${destination} ${server}"
|
||||
|
||||
file_line { 'conf-add-log-server':
|
||||
path => '/etc/syslog-ng/syslog-ng.conf',
|
||||
line => $destination_line,
|
||||
match => $destination,
|
||||
} ->
|
||||
file_line { 'conf-add-remote':
|
||||
}
|
||||
-> file_line { 'conf-add-remote':
|
||||
path => '/etc/syslog-ng/syslog-ng.conf',
|
||||
line => '@include "remotelogging.conf"',
|
||||
match => '#@include \"remotelogging.conf\"',
|
||||
} ->
|
||||
file { "/etc/syslog-ng/remotelogging.conf":
|
||||
}
|
||||
-> file { '/etc/syslog-ng/remotelogging.conf':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
content => template('platform/remotelogging.conf.erb'),
|
||||
} ->
|
||||
exec { "remotelogging-update-tc":
|
||||
}
|
||||
-> exec { 'remotelogging-update-tc':
|
||||
command => "/usr/local/bin/remotelogging_tc_setup.sh ${port}"
|
||||
} ->
|
||||
Exec['syslog-ng-reload']
|
||||
}
|
||||
-> Exec['syslog-ng-reload']
|
||||
|
||||
} else {
|
||||
# remove remote logging configuration from syslog-ng
|
||||
@ -52,11 +52,11 @@ class platform::remotelogging
|
||||
path => '/etc/syslog-ng/syslog-ng.conf',
|
||||
line => '#@include "remotelogging.conf"',
|
||||
match => '@include \"remotelogging.conf\"',
|
||||
} ->
|
||||
Exec["syslog-ng-reload"]
|
||||
}
|
||||
-> Exec['syslog-ng-reload']
|
||||
}
|
||||
|
||||
exec { "syslog-ng-reload":
|
||||
exec { 'syslog-ng-reload':
|
||||
command => '/usr/bin/systemctl reload syslog-ng'
|
||||
}
|
||||
}
|
||||
@ -82,21 +82,21 @@ class platform::remotelogging::proxy(
|
||||
|
||||
platform::firewall::rule { 'remotelogging-nat':
|
||||
service_name => $service_name,
|
||||
table => $table,
|
||||
chain => $chain,
|
||||
proto => $firewall_proto_transport,
|
||||
outiface => $oam_interface,
|
||||
jump => $jump,
|
||||
table => $table,
|
||||
chain => $chain,
|
||||
proto => $firewall_proto_transport,
|
||||
outiface => $oam_interface,
|
||||
jump => $jump,
|
||||
}
|
||||
|
||||
} else {
|
||||
platform::firewall::rule { 'remotelogging-nat':
|
||||
service_name => $service_name,
|
||||
table => $table,
|
||||
chain => $chain,
|
||||
outiface => $oam_interface,
|
||||
jump => $jump,
|
||||
ensure => absent
|
||||
table => $table,
|
||||
chain => $chain,
|
||||
outiface => $oam_interface,
|
||||
jump => $jump,
|
||||
ensure => absent
|
||||
}
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -1,13 +1,13 @@
|
||||
class platform::smapi::params (
|
||||
$auth_username = undef,
|
||||
$keystone_auth_url = undef,
|
||||
$keystone_username = undef,
|
||||
$keystone_password = undef,
|
||||
$public_url = undef,
|
||||
$admin_url = undef,
|
||||
$bind_ip = undef,
|
||||
$port = undef,
|
||||
$region = undef,
|
||||
$auth_username = undef,
|
||||
$keystone_auth_url = undef,
|
||||
$keystone_username = undef,
|
||||
$keystone_password = undef,
|
||||
$public_url = undef,
|
||||
$admin_url = undef,
|
||||
$bind_ip = undef,
|
||||
$port = undef,
|
||||
$region = undef,
|
||||
) {}
|
||||
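Many of the parameter reflows in this change come from the arrow_alignment rule: within a block, every => lines up one space past the longest parameter name. A small hypothetical example of the expected layout, not taken from these manifests:

platform::haproxy::proxy { 'demo-restapi':
  server_name  => 's-demo',
  public_port  => $port,
  private_port => $port,
}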
|
||||
class platform::smap::firewall
|
||||
@ -26,12 +26,12 @@ class platform::smapi::haproxy
|
||||
include ::platform::haproxy::params
|
||||
|
||||
platform::haproxy::proxy { 'sm-api-internal':
|
||||
server_name => 's-smapi-internal',
|
||||
public_ip_address => $::platform::haproxy::params::private_ip_address,
|
||||
public_port => $port,
|
||||
public_api => false,
|
||||
server_name => 's-smapi-internal',
|
||||
public_ip_address => $::platform::haproxy::params::private_ip_address,
|
||||
public_port => $port,
|
||||
public_api => false,
|
||||
private_ip_address => $bind_ip,
|
||||
private_port => $port,
|
||||
private_port => $port,
|
||||
}
|
||||
platform::haproxy::proxy { 'sm-api-public':
|
||||
server_name => 's-smapi-public',
|
||||
@ -50,8 +50,8 @@ class platform::smapi
|
||||
include ::platform::smap::firewall
|
||||
include ::platform::smapi::haproxy
|
||||
$bind_host_name = $::platform::params::hostname
|
||||
file { "/etc/sm-api/sm-api.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/sm-api/sm-api.conf':
|
||||
ensure => 'present',
|
||||
content => template('platform/sm-api.conf.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
|
@ -14,15 +14,15 @@ class platform::snmp::runtime
|
||||
$software_version = $::platform::params::software_version
|
||||
$system_info = $::system_info
|
||||
|
||||
file { "/etc/snmp/snmpd.conf":
|
||||
ensure => 'present',
|
||||
file { '/etc/snmp/snmpd.conf':
|
||||
ensure => 'present',
|
||||
replace => true,
|
||||
content => template('platform/snmpd.conf.erb')
|
||||
} ->
|
||||
}
|
||||
|
||||
# send HUP signal to snmpd if it is running
|
||||
exec { 'notify-snmp':
|
||||
command => "/usr/bin/pkill -HUP snmpd",
|
||||
onlyif => "ps -ef | pgrep snmpd"
|
||||
-> exec { 'notify-snmp':
|
||||
command => '/usr/bin/pkill -HUP snmpd',
|
||||
onlyif => 'ps -ef | pgrep snmpd'
|
||||
}
|
||||
}
|
||||
|
@ -22,9 +22,9 @@ class platform::sysinv
|
||||
group { 'sysinv':
|
||||
ensure => 'present',
|
||||
gid => '168',
|
||||
} ->
|
||||
}
|
||||
|
||||
user { 'sysinv':
|
||||
-> user { 'sysinv':
|
||||
ensure => 'present',
|
||||
comment => 'sysinv Daemons',
|
||||
gid => '168',
|
||||
@ -35,22 +35,22 @@ class platform::sysinv
|
||||
password_min_age => '-1',
|
||||
shell => '/sbin/nologin',
|
||||
uid => '168',
|
||||
} ->
|
||||
}
|
||||
|
||||
file { "/etc/sysinv":
|
||||
ensure => "directory",
|
||||
owner => 'sysinv',
|
||||
group => 'sysinv',
|
||||
mode => '0750',
|
||||
} ->
|
||||
-> file { '/etc/sysinv':
|
||||
ensure => 'directory',
|
||||
owner => 'sysinv',
|
||||
group => 'sysinv',
|
||||
mode => '0750',
|
||||
}
|
||||
|
||||
class { '::sysinv':
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_password => $::platform::amqp::params::auth_password,
|
||||
fm_catalog_info => $fm_catalog_info,
|
||||
fernet_key_repository => "$keystone_key_repo_path/fernet-keys",
|
||||
-> class { '::sysinv':
|
||||
rabbit_host => $::platform::amqp::params::host_url,
|
||||
rabbit_port => $::platform::amqp::params::port,
|
||||
rabbit_userid => $::platform::amqp::params::auth_user,
|
||||
rabbit_password => $::platform::amqp::params::auth_password,
|
||||
fm_catalog_info => $fm_catalog_info,
|
||||
fernet_key_repository => "${keystone_key_repo_path}/fernet-keys",
|
||||
}
|
||||
|
||||
# Note: The log format strings are prefixed with "sysinv" because it is
|
||||
@ -60,30 +60,30 @@ class platform::sysinv
|
||||
# TODO(mpeters): update puppet-sysinv to permit configuration of log formats
|
||||
# once the log configuration has been moved to oslo::log
|
||||
sysinv_config {
|
||||
"DEFAULT/logging_context_format_string": value =>
|
||||
'DEFAULT/logging_context_format_string': value =>
|
||||
'sysinv %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s';
|
||||
"DEFAULT/logging_default_format_string": value =>
|
||||
'DEFAULT/logging_default_format_string': value =>
|
||||
'sysinv %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s';
|
||||
}
|
||||
|
||||
if str2bool($::is_initial_config_primary) {
|
||||
$software_version = $::platform::params::software_version
|
||||
|
||||
Class['::sysinv'] ->
|
||||
Class['::sysinv']
|
||||
|
||||
file { '/opt/platform/sysinv':
|
||||
-> file { '/opt/platform/sysinv':
|
||||
ensure => directory,
|
||||
owner => 'sysinv',
|
||||
mode => '0755',
|
||||
} ->
|
||||
}
|
||||
|
||||
file { "/opt/platform/sysinv/${software_version}":
|
||||
-> file { "/opt/platform/sysinv/${software_version}":
|
||||
ensure => directory,
|
||||
owner => 'sysinv',
|
||||
mode => '0755',
|
||||
} ->
|
||||
}
|
||||
|
||||
file { "/opt/platform/sysinv/${software_version}/sysinv.conf.default":
|
||||
-> file { "/opt/platform/sysinv/${software_version}/sysinv.conf.default":
|
||||
source => '/etc/sysinv/sysinv.conf',
|
||||
}
|
||||
}
|
||||
@ -112,8 +112,8 @@ class platform::sysinv::haproxy
|
||||
inherits ::platform::sysinv::params {
|
||||
|
||||
platform::haproxy::proxy { 'sysinv-restapi':
|
||||
server_name => 's-sysinv',
|
||||
public_port => $api_port,
|
||||
server_name => 's-sysinv',
|
||||
public_port => $api_port,
|
||||
private_port => $api_port,
|
||||
}
|
||||
}
|
||||
@ -133,22 +133,22 @@ class platform::sysinv::api
|
||||
# the subcloud region.
|
||||
if ($::platform::params::distributed_cloud_role == 'subcloud' and
|
||||
$::platform::params::region_2_name != 'RegionOne') {
|
||||
Keystone_endpoint["${platform::params::region_2_name}/sysinv::platform"] -> Keystone_endpoint["RegionOne/sysinv::platform"]
|
||||
keystone_endpoint { "RegionOne/sysinv::platform":
|
||||
ensure => "absent",
|
||||
name => "sysinv",
|
||||
type => "platform",
|
||||
region => "RegionOne",
|
||||
public_url => "http://127.0.0.1:6385/v1",
|
||||
admin_url => "http://127.0.0.1:6385/v1",
|
||||
internal_url => "http://127.0.0.1:6385/v1"
|
||||
Keystone_endpoint["${platform::params::region_2_name}/sysinv::platform"] -> Keystone_endpoint['RegionOne/sysinv::platform']
|
||||
keystone_endpoint { 'RegionOne/sysinv::platform':
|
||||
ensure => 'absent',
|
||||
name => 'sysinv',
|
||||
type => 'platform',
|
||||
region => 'RegionOne',
|
||||
public_url => 'http://127.0.0.1:6385/v1',
|
||||
admin_url => 'http://127.0.0.1:6385/v1',
|
||||
internal_url => 'http://127.0.0.1:6385/v1'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# TODO(mpeters): move to sysinv puppet module parameters
|
||||
sysinv_config {
|
||||
"DEFAULT/sysinv_api_workers": value => $::platform::params::eng_workers_by_5;
|
||||
'DEFAULT/sysinv_api_workers': value => $::platform::params::eng_workers_by_5;
|
||||
}
|
||||
|
||||
include ::platform::sysinv::firewall
|
||||
|
@ -11,26 +11,26 @@ class platform::users
|
||||
|
||||
group { 'wrs':
|
||||
ensure => 'present',
|
||||
} ->
|
||||
}
|
||||
|
||||
# WRS: Create a 'wrs_protected' group for wrsroot and all openstack services
|
||||
# (including TiS services: sysinv, etc.).
|
||||
group { $::platform::params::protected_group_name:
|
||||
-> group { $::platform::params::protected_group_name:
|
||||
ensure => 'present',
|
||||
gid => $::platform::params::protected_group_id,
|
||||
} ->
|
||||
}
|
||||
|
||||
user { 'wrsroot':
|
||||
ensure => 'present',
|
||||
groups => ['wrs', 'root', $::platform::params::protected_group_name],
|
||||
home => '/home/wrsroot',
|
||||
password => $wrsroot_password,
|
||||
-> user { 'wrsroot':
|
||||
ensure => 'present',
|
||||
groups => ['wrs', 'root', $::platform::params::protected_group_name],
|
||||
home => '/home/wrsroot',
|
||||
password => $wrsroot_password,
|
||||
password_max_age => $wrsroot_password_max_age,
|
||||
shell => '/bin/sh',
|
||||
} ->
|
||||
shell => '/bin/sh',
|
||||
}
|
||||
|
||||
# WRS: Keyring should only be executable by 'wrs_protected'.
|
||||
file { '/usr/bin/keyring':
|
||||
-> file { '/usr/bin/keyring':
|
||||
owner => 'root',
|
||||
group => $::platform::params::protected_group_name,
|
||||
mode => '0750',
|
||||
@ -45,19 +45,19 @@ class platform::users::bootstrap
|
||||
|
||||
group { 'wrs':
|
||||
ensure => 'present',
|
||||
} ->
|
||||
}
|
||||
|
||||
group { $::platform::params::protected_group_name:
|
||||
-> group { $::platform::params::protected_group_name:
|
||||
ensure => 'present',
|
||||
gid => $::platform::params::protected_group_id,
|
||||
} ->
|
||||
}
|
||||
|
||||
user { 'wrsroot':
|
||||
ensure => 'present',
|
||||
groups => ['wrs', 'root', $::platform::params::protected_group_name],
|
||||
home => '/home/wrsroot',
|
||||
-> user { 'wrsroot':
|
||||
ensure => 'present',
|
||||
groups => ['wrs', 'root', $::platform::params::protected_group_name],
|
||||
home => '/home/wrsroot',
|
||||
password_max_age => $wrsroot_password_max_age,
|
||||
shell => '/bin/sh',
|
||||
shell => '/bin/sh',
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -26,8 +26,8 @@ define platform::vswitch::ovs::device(
|
||||
$pci_addr,
|
||||
$driver_type,
|
||||
) {
|
||||
exec { "ovs-bind-device: $title":
|
||||
path => ["/usr/bin", "/usr/sbin", "/usr/share/openvswitch/scripts"],
|
||||
exec { "ovs-bind-device: ${title}":
|
||||
path => ['/usr/bin', '/usr/sbin', '/usr/share/openvswitch/scripts'],
|
||||
command => "dpdk-devbind.py --bind=${driver_type} ${pci_addr}"
|
||||
}
|
||||
}
|
||||
@ -38,9 +38,9 @@ define platform::vswitch::ovs::bridge(
|
||||
$attributes = [],
|
||||
) {
|
||||
exec { "ovs-add-br: ${title}":
|
||||
command => template("platform/ovs.add-bridge.erb")
|
||||
} ->
|
||||
exec { "ovs-link-up: ${title}":
|
||||
command => template('platform/ovs.add-bridge.erb')
|
||||
}
|
||||
-> exec { "ovs-link-up: ${title}":
|
||||
command => "ip link set ${name} up",
|
||||
}
|
||||
}
|
||||
@ -53,7 +53,7 @@ define platform::vswitch::ovs::port(
|
||||
$interfaces,
|
||||
) {
|
||||
exec { "ovs-add-port: ${title}":
|
||||
command => template("platform/ovs.add-port.erb"),
|
||||
command => template('platform/ovs.add-port.erb'),
|
||||
logoutput => true
|
||||
}
|
||||
}
|
||||
@ -76,7 +76,7 @@ define platform::vswitch::ovs::flow(
|
||||
$actions,
|
||||
) {
|
||||
exec { "ovs-add-flow: ${title}":
|
||||
command => template("platform/ovs.add-flow.erb"),
|
||||
command => template('platform/ovs.add-flow.erb'),
|
||||
logoutput => true
|
||||
}
|
||||
}
|
||||
@ -95,9 +95,9 @@ class platform::vswitch::ovs(
|
||||
} elsif $::platform::params::vswitch_type == 'ovs-dpdk' {
|
||||
include ::vswitch::dpdk
|
||||
|
||||
Exec['vfio-iommu-mode'] ->
|
||||
Platform::Vswitch::Ovs::Device<||> ->
|
||||
Platform::Vswitch::Ovs::Bridge<||>
|
||||
Exec['vfio-iommu-mode']
|
||||
-> Platform::Vswitch::Ovs::Device<||>
|
||||
-> Platform::Vswitch::Ovs::Bridge<||>
|
||||
|
||||
create_resources('platform::vswitch::ovs::device', $devices, {
|
||||
driver_type => $driver_type,
|
||||
@ -124,13 +124,13 @@ class platform::vswitch::ovs(
|
||||
if $::platform::params::vswitch_type =~ '^ovs' {
|
||||
|
||||
# clean bridges and ports before applying current configuration
|
||||
exec { "ovs-clean":
|
||||
command => template("platform/ovs.clean.erb"),
|
||||
exec { 'ovs-clean':
|
||||
command => template('platform/ovs.clean.erb'),
|
||||
provider => shell,
|
||||
require => Service['openvswitch']
|
||||
} ->
|
||||
}
|
||||
|
||||
Platform::Vswitch::Ovs::Bridge<||> -> Platform::Vswitch::Ovs::Port<||>
|
||||
-> Platform::Vswitch::Ovs::Bridge<||> -> Platform::Vswitch::Ovs::Port<||>
|
||||
Platform::Vswitch::Ovs::Bridge<||> -> Platform::Vswitch::Ovs::Address<||>
|
||||
Platform::Vswitch::Ovs::Port<||> -> Platform::Vswitch::Ovs::Flow<||>
|
||||
}
|
||||
|
@ -104,7 +104,7 @@ LOGFILE="/var/log/ldapscripts.log"
|
||||
TMPDIR="/tmp"
|
||||
|
||||
# Various binaries used within the scripts
|
||||
# Warning : they also use uuencode, date, grep, sed, cut, which...
|
||||
# Warning : they also use uuencode, date, grep, sed, cut, which...
|
||||
# Please check they are installed before using these scripts
|
||||
# Note that many of them should come with your OS
|
||||
|
||||
|
@ -5,7 +5,7 @@ if [ -f /var/run/goenabled ]; then
|
||||
sm-unmanage service <%= @shutdown_drbd_resource %>
|
||||
fi
|
||||
|
||||
<% if @shutdown_drbd_resource == 'drbd-cinder' and @system_mode == 'simplex' -%>
|
||||
<% if @shutdown_drbd_resource == 'drbd-cinder' and @system_mode == 'simplex' -%>
|
||||
if [ -f /var/run/goenabled ]; then
|
||||
sm-unmanage service cinder-lvm
|
||||
fi
|
||||
@ -20,7 +20,7 @@ DRBD_UNCONFIGURED_DELAY=0
|
||||
while [[ $DRBD_UNCONFIGURED_DELAY -lt $DRBD_UNCONFIGURED_TIMEOUT ]]; do
|
||||
drbdadm down <%= @shutdown_drbd_resource %>
|
||||
drbd_info=$(drbd-overview | grep <%= @shutdown_drbd_resource %> | awk '{print $2}')
|
||||
|
||||
|
||||
if [[ ${drbd_info} == "Unconfigured" ]]; then
|
||||
break
|
||||
else
|
||||
@ -39,7 +39,7 @@ manage-partitions <%= @action %> '<%= @config %>'
|
||||
<% if @shutdown_drbd_resource and (@is_controller_active.to_s == 'false' or @system_mode == 'simplex') -%>
|
||||
drbdadm up <%= @shutdown_drbd_resource %> || exit 30
|
||||
|
||||
<% if @shutdown_drbd_resource == 'drbd-cinder' and @system_mode == 'simplex' -%>
|
||||
<% if @shutdown_drbd_resource == 'drbd-cinder' and @system_mode == 'simplex' -%>
|
||||
drbdadm primary drbd-cinder || exit 50
|
||||
vgchange -ay cinder-volumes || exit 60
|
||||
lvchange -ay cinder-volumes || exit 70
|
||||
|
@ -26,24 +26,15 @@ setenv =
|
||||
GEM_PATH = {envdir}
|
||||
skip_tests = \
|
||||
--no-140chars \
|
||||
--no-2sp_soft_tabs \
|
||||
--no-arrow_alignment \
|
||||
--no-arrow_on_right_operand_line-check \
|
||||
--no-autoloader_layout-check \
|
||||
--no-case_without_default \
|
||||
--no-documentation-check \
|
||||
--no-double_quoted_strings-check \
|
||||
--no-ensure_first_param \
|
||||
--no-hard_tabs \
|
||||
--no-inherits_across_namespaces \
|
||||
--no-only_variable_string \
|
||||
--no-parameter_order \
|
||||
--no-quoted_booleans \
|
||||
--no-single_quote_string_with_variables \
|
||||
--no-trailing_whitespace \
|
||||
--no-variable_is_lowercase-check \
|
||||
--no-variables_not_enclosed
|
||||
--no-variable_is_lowercase-check
|
||||
commands =
|
||||
gem install --no-document puppet-lint
|
||||
bash -c "find {toxinidir} -name \*.pp -print0 | xargs -0 puppet-lint {[testenv:puppetlint]skip_tests}"
|
||||
bash -c "find {toxinidir} -name \*.pp -print0 | xargs -0 puppet-lint --fail-on-warnings {[testenv:puppetlint]skip_tests}"
|
||||
|
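With the flags removed from skip_tests above, checks such as only_variable_string and quoted_booleans now gate the tree, and --fail-on-warnings makes any remaining warning fatal. A hypothetical before/after for those two rules, not taken from this change:

# flagged: string that is only a variable; boolean written as a string
$api_port = "${port}"
$enabled  = 'true'
# clean
$api_port = $port
$enabled  = true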
||||
|
@ -111,13 +111,13 @@ class dcmanager::api (
|
||||
Package['dcmanager'] -> Dcmanager_config<||>
|
||||
Package['dcmanager'] -> Service['dcmanager-api']
|
||||
package { 'dcmanager':
|
||||
ensure => $package_ensure,
|
||||
name => $::dcmanager::params::api_package,
|
||||
ensure => $package_ensure,
|
||||
name => $::dcmanager::params::api_package,
|
||||
}
|
||||
}
|
||||
|
||||
dcmanager_config {
|
||||
"DEFAULT/bind_host": value => $bind_host;
|
||||
'DEFAULT/bind_host': value => $bind_host;
|
||||
}
|
||||
|
||||
|
||||
@ -154,7 +154,7 @@ class dcmanager::api (
|
||||
'keystone_authtoken/user_domain_name': value => $keystone_user_domain;
|
||||
'keystone_authtoken/project_domain_name': value => $keystone_project_domain;
|
||||
}
|
||||
dcmanager_config {
|
||||
dcmanager_config {
|
||||
'cache/admin_tenant': value => $keystone_admin_tenant;
|
||||
'cache/admin_username': value => $keystone_admin_user;
|
||||
'cache/admin_password': value => $keystone_admin_password, secret=> true;
|
||||
@ -202,7 +202,7 @@ class dcmanager::api (
|
||||
require => Package['dcmanager'],
|
||||
# Only do the db sync if both controllers are running the same software
|
||||
# version. Avoids impacting mate controller during an upgrade.
|
||||
onlyif => "test $::controller_sw_versions_match = true",
|
||||
onlyif => "test ${::controller_sw_versions_match} = true",
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ class dcmanager (
|
||||
'DEFAULT/verbose': value => $verbose;
|
||||
'DEFAULT/debug': value => $debug;
|
||||
}
|
||||
|
||||
|
||||
# Automatically add psycopg2 driver to postgresql (only does this if it is missing)
|
||||
$real_connection = regsubst($database_connection,'^postgresql:','postgresql+psycopg2:')
|
||||
|
||||
@ -101,7 +101,7 @@ class dcmanager (
|
||||
'keystone_authtoken/region_name': value => $region_name;
|
||||
}
|
||||
|
||||
file {"/etc/bash_completion.d/dcmanager.bash_completion":
|
||||
file {'/etc/bash_completion.d/dcmanager.bash_completion':
|
||||
ensure => present,
|
||||
mode => '0644',
|
||||
content => generate('/bin/dcmanager', 'complete'),
|
||||
|
@ -49,9 +49,9 @@ class dcmanager::keystone::auth (
|
||||
public_url => $public_url,
|
||||
admin_url => $admin_url,
|
||||
internal_url => $internal_url,
|
||||
} ->
|
||||
}
|
||||
|
||||
keystone_user_role { "${auth_name}@${admin_project_name}":
|
||||
-> keystone_user_role { "${auth_name}@${admin_project_name}":
|
||||
ensure => present,
|
||||
user_domain => $auth_domain,
|
||||
project_domain => $admin_project_domain,
|
||||
|
@ -113,13 +113,13 @@ class dcorch::api_proxy (
|
||||
Package['dcorch'] -> Dcorch_api_paste_ini<||>
|
||||
Package['dcorch'] -> Service['dcorch-api-proxy']
|
||||
package { 'dcorch':
|
||||
ensure => $package_ensure,
|
||||
name => $::dcorch::params::api_proxy_package,
|
||||
ensure => $package_ensure,
|
||||
name => $::dcorch::params::api_proxy_package,
|
||||
}
|
||||
}
|
||||
|
||||
dcorch_config {
|
||||
"DEFAULT/bind_host": value => $bind_host;
|
||||
'DEFAULT/bind_host': value => $bind_host;
|
||||
}
|
||||
|
||||
|
||||
@ -156,7 +156,7 @@ class dcorch::api_proxy (
|
||||
'keystone_authtoken/user_domain_name': value => $keystone_user_domain;
|
||||
'keystone_authtoken/project_domain_name': value => $keystone_project_domain;
|
||||
}
|
||||
dcorch_config {
|
||||
dcorch_config {
|
||||
'cache/admin_tenant': value => $keystone_admin_tenant;
|
||||
'cache/admin_username': value => $keystone_admin_user;
|
||||
'cache/admin_password': value => $keystone_admin_password, secret=> true;
|
||||
@ -204,7 +204,7 @@ class dcorch::api_proxy (
|
||||
require => Package['dcorch'],
|
||||
# Only do the db sync if both controllers are running the same software
|
||||
# version. Avoids impacting mate controller during an upgrade.
|
||||
onlyif => "test $::controller_sw_versions_match = true",
|
||||
onlyif => "test ${::controller_sw_versions_match} = true",
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -99,10 +99,10 @@ class dcorch (
|
||||
'DEFAULT/debug': value => $debug;
|
||||
'DEFAULT/api_paste_config': value => $api_paste_config;
|
||||
}
|
||||
|
||||
|
||||
# Automatically add psycopg2 driver to postgresql (only does this if it is missing)
|
||||
$real_connection = regsubst($database_connection,'^postgresql:','postgresql+psycopg2:')
|
||||
|
||||
|
||||
dcorch_config {
|
||||
'database/connection': value => $real_connection, secret => true;
|
||||
'database/idle_timeout': value => $database_idle_timeout;
|
||||
|
@ -27,7 +27,7 @@ class dcorch::keystone::auth (
|
||||
$public_url = 'http://127.0.0.1:8118/v1.0',
|
||||
$admin_url = 'http://127.0.0.1:8118/v1.0',
|
||||
$internal_url = 'http://127.0.0.1:8118/v1.0',
|
||||
|
||||
|
||||
$neutron_proxy_internal_url = 'http://127.0.0.1:29696',
|
||||
$nova_proxy_internal_url = 'http://127.0.0.1:28774/v2.1',
|
||||
$sysinv_proxy_internal_url = 'http://127.0.0.1:26385/v1',
|
||||
@ -63,27 +63,27 @@ class dcorch::keystone::auth (
|
||||
}
|
||||
|
||||
keystone_endpoint { "${region}/nova::compute" :
|
||||
ensure => "present",
|
||||
name => "nova",
|
||||
type => "compute",
|
||||
ensure => 'present',
|
||||
name => 'nova',
|
||||
type => 'compute',
|
||||
region => $region,
|
||||
public_url => $nova_proxy_public_url,
|
||||
admin_url => $nova_proxy_internal_url,
|
||||
internal_url => $nova_proxy_internal_url
|
||||
}
|
||||
keystone_endpoint { "${region}/sysinv::platform" :
|
||||
ensure => "present",
|
||||
name => "sysinv",
|
||||
type => "platform",
|
||||
ensure => 'present',
|
||||
name => 'sysinv',
|
||||
type => 'platform',
|
||||
region => $region,
|
||||
public_url => $sysinv_proxy_public_url,
|
||||
admin_url => $sysinv_proxy_internal_url,
|
||||
internal_url => $sysinv_proxy_internal_url
|
||||
}
|
||||
keystone_endpoint { "${region}/neutron::network" :
|
||||
ensure => "present",
|
||||
name => "neutron",
|
||||
type => "network",
|
||||
ensure => 'present',
|
||||
name => 'neutron',
|
||||
type => 'network',
|
||||
region => $region,
|
||||
public_url => $neutron_proxy_public_url,
|
||||
admin_url => $neutron_proxy_internal_url,
|
||||
@ -92,18 +92,18 @@ class dcorch::keystone::auth (
|
||||
|
||||
if $::openstack::cinder::params::service_enabled {
|
||||
keystone_endpoint { "${region}/cinderv2::volumev2" :
|
||||
ensure => "present",
|
||||
name => "cinderv2",
|
||||
type => "volumev2",
|
||||
ensure => 'present',
|
||||
name => 'cinderv2',
|
||||
type => 'volumev2',
|
||||
region => $region,
|
||||
public_url => $cinder_proxy_public_url_v2,
|
||||
admin_url => $cinder_proxy_internal_url_v2,
|
||||
internal_url => $cinder_proxy_internal_url_v2
|
||||
}
|
||||
keystone_endpoint { "${region}/cinderv3::volumev3" :
|
||||
ensure => "present",
|
||||
name => "cinderv3",
|
||||
type => "volumev3",
|
||||
ensure => 'present',
|
||||
name => 'cinderv3',
|
||||
type => 'volumev3',
|
||||
region => $region,
|
||||
public_url => $cinder_proxy_public_url_v3,
|
||||
admin_url => $cinder_proxy_internal_url_v3,
|
||||
@ -111,18 +111,18 @@ class dcorch::keystone::auth (
|
||||
}
|
||||
}
|
||||
keystone_endpoint { "${region}/patching::patching" :
|
||||
ensure => "present",
|
||||
name => "patching",
|
||||
type => "patching",
|
||||
ensure => 'present',
|
||||
name => 'patching',
|
||||
type => 'patching',
|
||||
region => $region,
|
||||
public_url => $patching_proxy_public_url,
|
||||
admin_url => $patching_proxy_internal_url,
|
||||
internal_url => $patching_proxy_internal_url
|
||||
}
|
||||
keystone_endpoint { "${region}/keystone::identity" :
|
||||
ensure => "present",
|
||||
name => "keystone",
|
||||
type => "identity",
|
||||
ensure => 'present',
|
||||
name => 'keystone',
|
||||
type => 'identity',
|
||||
region => $region,
|
||||
public_url => $identity_proxy_public_url,
|
||||
admin_url => $identity_proxy_internal_url,
|
||||
|
@ -39,7 +39,7 @@ class dcorch::params {
|
||||
$snmp_service = 'dcorch-snmp'
|
||||
$api_proxy_package = false
|
||||
$api_proxy_service = 'dcorch-api-proxy'
|
||||
|
||||
|
||||
$db_sync_command = 'dcorch-manage db_sync'
|
||||
|
||||
} elsif($::osfamily == 'WRLinux') {
|
||||
|
@ -23,5 +23,5 @@ Puppet::Type.type(:fm_api_paste_ini).provide(
|
||||
def file_path
|
||||
self.class.file_path
|
||||
end
|
||||
|
||||
|
||||
end
|
||||
|
@ -93,7 +93,7 @@ class fm::api (
|
||||
tag => 'fm-service',
|
||||
}
|
||||
} else {
|
||||
fail("Invalid service_name. fm-api for running as a standalone service")
|
||||
fail('Invalid service_name. fm-api for running as a standalone service')
|
||||
}
|
||||
|
||||
fm_config {
|
||||
|
@ -230,14 +230,14 @@ class fm::keystone::authtoken(
|
||||
}
|
||||
|
||||
keystone::resource::authtoken { 'fm_config':
|
||||
username => $username,
|
||||
password => $password,
|
||||
project_name => $project_name,
|
||||
auth_url => $auth_url,
|
||||
auth_uri => $auth_uri,
|
||||
auth_type => $auth_type,
|
||||
user_domain_name => $user_domain_name,
|
||||
project_domain_name => $project_domain_name,
|
||||
region_name => $region_name,
|
||||
username => $username,
|
||||
password => $password,
|
||||
project_name => $project_name,
|
||||
auth_url => $auth_url,
|
||||
auth_uri => $auth_uri,
|
||||
auth_type => $auth_type,
|
||||
user_domain_name => $user_domain_name,
|
||||
project_domain_name => $project_domain_name,
|
||||
region_name => $region_name,
|
||||
}
|
||||
}
|
||||
|
@ -4,5 +4,4 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
class mtce () {
|
||||
}
|
||||
class mtce () { }
|
||||
|
@ -12,7 +12,7 @@ class nfv::alarm (
|
||||
include nfv::params
|
||||
|
||||
nfv_plugin_alarm_config {
|
||||
/* File-Storage Information */
|
||||
# File-Storage Information
|
||||
'File-Storage/file': value => $storage_file;
|
||||
}
|
||||
|
||||
|
@ -12,7 +12,7 @@ class nfv::event_log (
|
||||
include nfv::params
|
||||
|
||||
nfv_plugin_event_log_config {
|
||||
/* File-Storage Information */
|
||||
# File-Storage Information
|
||||
'File-Storage/file': value => $storage_file;
|
||||
}
|
||||
|
||||
|
@ -48,7 +48,7 @@ class nfv::nfvi (
|
||||
$nova_service_name = 'nova',
|
||||
$nova_service_type = 'compute',
|
||||
$nova_endpoint_type = 'admin',
|
||||
$nova_endpoint_override = "http://localhost:18774",
|
||||
$nova_endpoint_override = 'http://localhost:18774',
|
||||
$nova_endpoint_disabled = false,
|
||||
$sysinv_region_name = 'RegionOne',
|
||||
$sysinv_service_name = 'sysinv',
|
||||
@ -88,7 +88,7 @@ class nfv::nfvi (
|
||||
|
||||
nfv_plugin_nfvi_config {
|
||||
|
||||
/* Platform Authentication Information */
|
||||
# Platform Authentication Information
|
||||
'platform/username': value => $platform_username;
|
||||
'platform/tenant': value => $platform_tenant;
|
||||
'platform/user_domain_name': value => $platform_user_domain;
|
||||
@ -98,7 +98,7 @@ class nfv::nfvi (
|
||||
'platform/authorization_port': value => $platform_auth_port;
|
||||
'platform/keyring_service': value => $platform_keyring_service;
|
||||
|
||||
/* OpenStack Authentication Information */
|
||||
# OpenStack Authentication Information
|
||||
'openstack/username': value => $openstack_username;
|
||||
'openstack/tenant': value => $openstack_tenant;
|
||||
'openstack/user_domain_name': value => $openstack_user_domain;
|
||||
@ -163,31 +163,31 @@ class nfv::nfvi (
|
||||
'fm/service_type': value => $fm_service_type;
|
||||
'fm/endpoint_type': value => $fm_endpoint_type;
|
||||
|
||||
/* AMQP */
|
||||
# AMQP
|
||||
'amqp/host': value => $rabbit_host;
|
||||
'amqp/port': value => $rabbit_port;
|
||||
'amqp/user_id': value => $rabbit_userid;
|
||||
'amqp/password': value => $rabbit_password, secret => true;
|
||||
'amqp/virt_host': value => $rabbit_virtual_host;
|
||||
|
||||
/* Infrastructure Rest-API */
|
||||
# Infrastructure Rest-API
|
||||
'infrastructure-rest-api/host': value => $infrastructure_rest_api_host;
|
||||
'infrastructure-rest-api/port': value => $infrastructure_rest_api_port;
|
||||
'infrastructure-rest-api/data_port_fault_handling_enabled': value => $infrastructure_rest_api_data_port_fault_handling_enabled;
|
||||
|
||||
/* Guest-Services Rest-API */
|
||||
# Guest-Services Rest-API
|
||||
'guest-rest-api/host': value => $guest_rest_api_host;
|
||||
'guest-rest-api/port': value => $guest_rest_api_port;
|
||||
|
||||
/* Compute Rest-API */
|
||||
# Compute Rest-API
|
||||
'compute-rest-api/host': value => $compute_rest_api_host;
|
||||
'compute-rest-api/port': value => $compute_rest_api_port;
|
||||
'compute-rest-api/max_concurrent_requests': value => $compute_rest_api_max_concurrent_requests;
|
||||
'compute-rest-api/max_request_wait_in_secs': value => $compute_rest_api_max_request_wait_in_secs;
|
||||
|
||||
/* Host Listener */
|
||||
# Host Listener
|
||||
'host-listener/host': value => $host_listener_host;
|
||||
'host-listener/port': value => $host_listener_port;
|
||||
'host-listener/port': value => $host_listener_port;
|
||||
}
|
||||
|
||||
if $identity_uri {
|
||||
|
@ -42,27 +42,27 @@ class nfv::vim (
|
||||
include nfv::params
|
||||
|
||||
nfv_vim_config {
|
||||
/* Debug Information */
|
||||
# Debug Information
|
||||
'debug/config_file': value => $debug_config_file;
|
||||
'debug/handlers': value => $debug_handlers;
|
||||
'debug/syslog_address': value => $debug_syslog_address;
|
||||
'debug/syslog_facility': value => $debug_syslog_facility;
|
||||
|
||||
/* Database */
|
||||
# Database
|
||||
'database/database_dir': value => $database_dir;
|
||||
|
||||
/* Alarm */
|
||||
# Alarm
|
||||
'alarm/namespace': value => $alarm_namespace;
|
||||
'alarm/handlers': value => $alarm_handlers;
|
||||
'alarm/audit_interval': value => $alarm_audit_interval;
|
||||
'alarm/config_file': value => $alarm_config_file;
|
||||
|
||||
/* Event Log */
|
||||
# Event Log
|
||||
'event-log/namespace': value => $event_log_namespace;
|
||||
'event-log/handlers': value => $event_log_handlers;
|
||||
'event-log/config_file': value => $event_log_config_file;
|
||||
|
||||
/* NFVI */
|
||||
# NFVI
|
||||
'nfvi/namespace': value => $nfvi_namespace;
|
||||
'nfvi/config_file': value => $nfvi_config_file;
|
||||
'nfvi/image_plugin_disabled': value => $image_plugin_disabled;
|
||||
@ -71,26 +71,26 @@ class nfv::vim (
|
||||
'nfvi/network_plugin_disabled': value => $network_plugin_disabled;
|
||||
'nfvi/guest_plugin_disabled': value => $guest_plugin_disabled;
|
||||
|
||||
/* INSTANCE CONFIGURATION */
|
||||
# INSTANCE CONFIGURATION
|
||||
'instance-configuration/max_live_migrate_wait_in_secs': value => $instance_max_live_migrate_wait_in_secs;
|
||||
'instance-configuration/single_hypervisor': value => $instance_single_hypervisor;
|
||||
|
||||
/* VIM */
|
||||
# VIM
|
||||
'vim/rpc_host': value => $vim_rpc_ip;
|
||||
'vim/rpc_port': value => $vim_rpc_port;
|
||||
|
||||
/* VIM-API */
|
||||
# VIM-API
|
||||
'vim-api/host': value => $vim_api_ip;
|
||||
'vim-api/port': value => $vim_api_port;
|
||||
'vim-api/rpc_host': value => $vim_api_rpc_ip;
|
||||
'vim-api/rpc_port': value => $vim_api_rpc_port;
|
||||
|
||||
/* VIM-Webserver */
|
||||
# VIM-Webserver
|
||||
'vim-webserver/host': value => $vim_webserver_ip;
|
||||
'vim-webserver/port': value => $vim_webserver_port;
|
||||
'vim-webserver/source_dir': value => $vim_webserver_source_dir;
|
||||
|
||||
/* SW-MGMT CONFIGURATION */
|
||||
# SW-MGMT CONFIGURATION
|
||||
'sw-mgmt-configuration/single_controller': value => $sw_mgmt_single_controller;
|
||||
}
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
The license source is:
|
||||
|
||||
https://github.com/openstack/puppet-nova/blob/stable/juno/LICENSE.
|
||||
https://github.com/openstack/puppet-nova/blob/stable/juno/LICENSE.
|
||||
|
||||
Similarly, the sources for puppet-nova_api_proxy come from that external
|
||||
project.
|
||||
|
@ -43,21 +43,21 @@ class nova_api_proxy::config (
|
||||
|
||||
# SSL Options
|
||||
if $use_ssl {
|
||||
if !$cert_file {
|
||||
fail('The cert_file parameter is required when use_ssl is set to true')
|
||||
}
|
||||
if !$key_file {
|
||||
fail('The key_file parameter is required when use_ssl is set to true')
|
||||
}
|
||||
if !$cert_file {
|
||||
fail('The cert_file parameter is required when use_ssl is set to true')
|
||||
}
|
||||
if !$key_file {
|
||||
fail('The key_file parameter is required when use_ssl is set to true')
|
||||
}
|
||||
}
|
||||
|
||||
proxy_config {
|
||||
'DEFAULT/auth_strategy': value => $auth_strategy;
|
||||
'DEFAULT/osapi_proxy_listen': value => $osapi_proxy_listen;
|
||||
'DEFAULT/osapi_compute_listen': value => $osapi_compute_listen;
|
||||
'DEFAULT/nfvi_compute_listen': value => $nfvi_compute_listen;
|
||||
'DEFAULT/nfvi_compute_listen_port': value => $nfvi_compute_listen_port;
|
||||
'DEFAULT/pool_size': value => $eventlet_pool_size;
|
||||
'DEFAULT/auth_strategy': value => $auth_strategy;
|
||||
'DEFAULT/osapi_proxy_listen': value => $osapi_proxy_listen;
|
||||
'DEFAULT/osapi_compute_listen': value => $osapi_compute_listen;
|
||||
'DEFAULT/nfvi_compute_listen': value => $nfvi_compute_listen;
|
||||
'DEFAULT/nfvi_compute_listen_port': value => $nfvi_compute_listen_port;
|
||||
'DEFAULT/pool_size': value => $eventlet_pool_size;
|
||||
}
|
||||
|
||||
if $use_ssl {
|
||||
|
@ -14,10 +14,10 @@ class patching (
|
||||
include patching::params
|
||||
|
||||
file { $::patching::params::patching_conf:
|
||||
ensure => present,
|
||||
owner => 'patching',
|
||||
group => 'patching',
|
||||
mode => '0600',
|
||||
ensure => present,
|
||||
owner => 'patching',
|
||||
group => 'patching',
|
||||
mode => '0600',
|
||||
}
|
||||
|
||||
patching_config {
|
||||
@ -27,17 +27,17 @@ class patching (
|
||||
'runtime/controller_port': value => $controller_port;
|
||||
'runtime/agent_port': value => $agent_port;
|
||||
}
|
||||
~>
|
||||
service { 'sw-patch-agent.service':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
|
||||
~> service { 'sw-patch-agent.service':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
subscribe => File[$::patching::params::patching_conf],
|
||||
}
|
||||
|
||||
if $::personality == "controller" {
|
||||
if $::personality == 'controller' {
|
||||
service { 'sw-patch-controller-daemon.service':
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
ensure => 'running',
|
||||
enable => true,
|
||||
subscribe => Service['sw-patch-agent.service'],
|
||||
}
|
||||
}
|
||||
|
@ -20,11 +20,9 @@ class patching::keystone::auth (
|
||||
$admin_url = 'http://127.0.0.1:5491/v1',
|
||||
$internal_url = 'http://127.0.0.1:5491/v1',
|
||||
) {
|
||||
$real_service_name = pick($service_name, $auth_name)
|
||||
|
||||
$real_service_name = pick($service_name, $auth_name)
|
||||
|
||||
|
||||
keystone::resource::service_identity { 'patching':
|
||||
keystone::resource::service_identity { 'patching':
|
||||
configure_user => $configure_user,
|
||||
configure_user_role => $configure_user_role,
|
||||
configure_endpoint => $configure_endpoint,
|
||||
|
@ -4,5 +4,4 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
class sshd () {
|
||||
}
|
||||
class sshd () { }
|
||||
|
@ -34,7 +34,7 @@ class sysinv::agent (
|
||||
}
|
||||
|
||||
sysinv_config {
|
||||
'lldp/drivers': value => join($lldp_drivers,",");
|
||||
'lldp/drivers': value => join($lldp_drivers,',');
|
||||
}
|
||||
|
||||
if $::sysinv::params::agent_package {
|
||||
|
@ -5,7 +5,7 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
#
|
||||
#
|
||||
# Nov 2017: rebase pike
|
||||
# Aug 2016: rebase mitaka
|
||||
# Jun 2016: rebase centos
|
||||
@ -175,10 +175,8 @@ class sysinv::api (
|
||||
$pxeboot_host = undef,
|
||||
$enabled = true
|
||||
) {
|
||||
|
||||
include sysinv::params
|
||||
|
||||
|
||||
Sysinv_config<||> ~> Service['sysinv-api']
|
||||
Sysinv_config<||> ~> Exec['sysinv-dbsync']
|
||||
Sysinv_api_paste_ini<||> ~> Service['sysinv-api']
|
||||
@ -188,18 +186,18 @@ class sysinv::api (
|
||||
Package['sysinv'] -> Sysinv_api_paste_ini<||>
|
||||
Package['sysinv'] -> Service['sysinv-api']
|
||||
package { 'sysinv':
|
||||
ensure => $package_ensure,
|
||||
name => $::sysinv::params::api_package,
|
||||
ensure => $package_ensure,
|
||||
name => $::sysinv::params::api_package,
|
||||
}
|
||||
}
|
||||
|
||||
sysinv_config {
|
||||
"DEFAULT/sysinv_api_bind_ip": value => $bind_host;
|
||||
'DEFAULT/sysinv_api_bind_ip': value => $bind_host;
|
||||
}
|
||||
|
||||
if $pxeboot_host {
|
||||
sysinv_config {
|
||||
"DEFAULT/sysinv_api_pxeboot_ip": value => $pxeboot_host;
|
||||
'DEFAULT/sysinv_api_pxeboot_ip': value => $pxeboot_host;
|
||||
}
|
||||
}
|
||||
|
||||
@ -342,9 +340,9 @@ class sysinv::api (
|
||||
# Only do the db sync if both controllers are running the same software
|
||||
# version. Avoids impacting mate controller during an upgrade.
|
||||
onlyif => [
|
||||
"test $::controller_sw_versions_match = true",
|
||||
"systemctl status postgresql"
|
||||
]
|
||||
"test ${::controller_sw_versions_match} = true",
|
||||
'systemctl status postgresql'
|
||||
]
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -30,16 +30,16 @@ class sysinv::base (
|
||||
warning('The sysinv::base class is deprecated. Use sysinv instead.')
|
||||
|
||||
class { '::sysinv':
|
||||
rabbit_password => $rabbit_password,
|
||||
sql_connection => $sql_connection,
|
||||
rabbit_host => $rabbit_host,
|
||||
rabbit_port => $rabbit_port,
|
||||
rabbit_hosts => $rabbit_hosts,
|
||||
rabbit_virtual_host => $rabbit_virtual_host,
|
||||
rabbit_userid => $rabbit_userid,
|
||||
package_ensure => $package_ensure,
|
||||
api_paste_config => $api_paste_config,
|
||||
verbose => $verbose,
|
||||
rabbit_password => $rabbit_password,
|
||||
sql_connection => $sql_connection,
|
||||
rabbit_host => $rabbit_host,
|
||||
rabbit_port => $rabbit_port,
|
||||
rabbit_hosts => $rabbit_hosts,
|
||||
rabbit_virtual_host => $rabbit_virtual_host,
|
||||
rabbit_userid => $rabbit_userid,
|
||||
package_ensure => $package_ensure,
|
||||
api_paste_config => $api_paste_config,
|
||||
verbose => $verbose,
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -28,11 +28,11 @@ class sysinv::db::mysql (
|
||||
Database[$dbname] ~> Exec<| title == 'sysinv-dbsync' |>
|
||||
|
||||
mysql::db { $dbname:
|
||||
user => $user,
|
||||
password => $password,
|
||||
host => $host,
|
||||
charset => $charset,
|
||||
require => Class['mysql::config'],
|
||||
user => $user,
|
||||
password => $password,
|
||||
host => $host,
|
||||
charset => $charset,
|
||||
require => Class['mysql::config'],
|
||||
}
|
||||
|
||||
# Check allowed_hosts to avoid duplicate resource declarations
|
||||
@ -45,9 +45,9 @@ class sysinv::db::mysql (
|
||||
if $real_allowed_hosts {
|
||||
# TODO this class should be in the mysql namespace
|
||||
sysinv::db::mysql::host_access { $real_allowed_hosts:
|
||||
user => $user,
|
||||
password => $password,
|
||||
database => $dbname,
|
||||
user => $user,
|
||||
password => $password,
|
||||
database => $dbname,
|
||||
}
|
||||
}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff