Cleanup config_template repo
To upstream config_template as a separate plugin, we need to clean up the openstack-ansible-plugins repo so that config_template can be included as a standalone project.
parent 8effc3168a
commit 708d867a70
@@ -1,5 +1,5 @@
 [gerrit]
 host=review.openstack.org
 port=29418
-project=openstack/openstack-ansible-plugins.git
+project=openstack/ansible-config_template.git
@@ -2,7 +2,7 @@
 Team and repository tags
 ========================

-.. image:: http://governance.openstack.org/badges/openstack-ansible-plugins.svg
+.. image:: http://governance.openstack.org/badges/ansible-config_template.svg
     :target: http://governance.openstack.org/reference/tags/index.html

 .. Change things from this point on
@@ -1,243 +0,0 @@
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Kevin Carter <kevin.carter@rackspace.com>

import imp
import os

# NOTICE(cloudnull): The connection plugin is imported using the full path to
#                    the file because the ssh connection plugin is not
#                    importable.
import ansible.plugins.connection as conn
SSH = imp.load_source(
    'ssh',
    os.path.join(os.path.dirname(conn.__file__), 'ssh.py')
)

if not hasattr(SSH, 'shlex_quote'):
    # NOTE(cloudnull): Later versions of ansible have this attribute already,
    #                  however it is not set in all versions. Because we use
    #                  this method the attribute will be set within the plugin
    #                  if it's not found.
    from ansible.compat.six.moves import shlex_quote
    setattr(SSH, 'shlex_quote', shlex_quote)


class Connection(SSH.Connection):
    """Transport options for containers.

    This transport option makes the assumption that the playbook context has
    vars within it that contain "physical_host" which is the machine running a
    given container and "container_name" which is the actual name of the
    container. These options can be added into the playbook via vars set as
    attributes or through the modification of a given execution strategy to
    set the attributes accordingly.

    This plugin operates exactly the same way as the standard SSH plugin but
    will pad pathing or add command syntax for containers when a container
    is detected at runtime.
    """

    transport = 'ssh'

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        if hasattr(self._play_context, 'chroot_path'):
            self.chroot_path = self._play_context.chroot_path
        else:
            self.chroot_path = None
        if hasattr(self._play_context, 'container_name'):
            self.container_name = self._play_context.container_name
        else:
            self.container_name = None
        if hasattr(self._play_context, 'physical_host'):
            self.physical_host = self._play_context.physical_host
        else:
            self.physical_host = None
        if hasattr(self._play_context, 'container_tech'):
            self.container_tech = self._play_context.container_tech
        else:
            # NOTE(cloudnull): For now the default is "lxc" if undefined;
            #                  revise this in the future.
            self.container_tech = 'lxc'
        # Remote user is normally set, but if it isn't, then default to 'root'
        self.container_user = 'root'
        if self._play_context.remote_user:
            self.container_user = self._play_context.remote_user

    def set_host_overrides(self, host, hostvars=None, templar=None):
        if self._container_check() or self._chroot_check():
            physical_host_addrs = host.get_vars().get('physical_host_addrs', {})
            physical_host_addr = physical_host_addrs.get(self.physical_host,
                                                         self.physical_host)
            self.host = self._play_context.remote_addr = physical_host_addr

    def exec_command(self, cmd, in_data=None, sudoable=True):
        """Run a command on the remote host."""

        if self._container_check():
            # Remote user is normally set, but if it isn't, default to 'root'
            self.container_user = 'root'
            if self._play_context.remote_user:
                self.container_user = self._play_context.remote_user
            # NOTE(hwoarang): It is important to connect to the container
            # without inheriting the host environment as that would interfere
            # with running commands and services inside the container. However,
            # it is also important to create a sensible environment within the
            # container because certain commands and services expect some
            # environmental variables to be set properly. The best way to do
            # that would be to execute the commands in a login shell.

            # NOTE(hwoarang): the shlex_quote method is necessary here because
            # we need to properly quote the cmd as it's being passed as argument
            # to the -c su option. The Ansible ssh class has already
            # quoted the command of the _executable_ (ie /bin/bash -c "$cmd").
            # However, we also need to quote the executable itself because the
            # entire command is being passed to the su process. This produces
            # a somewhat ugly output with too many quotes in a row but we can't
            # do much since we are effectively passing a command to a command
            # to a command etc... It's somewhat ugly but maybe it can be
            # improved somehow...
            _pad = None
            if self.container_tech == 'lxc':
                _pad = 'lxc-attach --clear-env --name {}'.format(
                    self.container_name
                )

            elif self.container_tech == 'nspawn':
                _, pid_path = self._pid_lookup(subdir='ns')
                _pad = ('nsenter'
                        ' --mount={path}/mnt'
                        ' --net={path}/net'
                        ' --pid={path}/pid'
                        ' --uts={path}/uts'
                        ' --ipc={path}/ipc').format(path=pid_path)
            if _pad:
                cmd = '%s -- su - %s -c %s' % (
                    _pad,
                    self.container_user,
                    SSH.shlex_quote(cmd)
                )

        if self._chroot_check():
            chroot_command = 'chroot %s' % self.chroot_path
            cmd = '%s %s' % (chroot_command, cmd)

        return super(Connection, self).exec_command(cmd, in_data, sudoable)

    def _chroot_check(self):
        if self.chroot_path is not None:
            SSH.display.vvv(u'chroot_path: "%s"' % self.chroot_path)
            if self.physical_host is not None:
                SSH.display.vvv(
                    u'physical_host: "%s"' % self.physical_host
                )
                SSH.display.vvv(u'chroot confirmed')
                return True

        return False

    def _container_check(self):
        if self.container_name is not None:
            SSH.display.vvv(u'container_name: "%s"' % self.container_name)
            if self.physical_host is not None:
                SSH.display.vvv(
                    u'physical_host: "%s"' % self.physical_host
                )
                if self.container_name != self.physical_host:
                    SSH.display.vvv(u'Container confirmed')
                    SSH.display.vvv(u'Container type "{}"'.format(
                        self.container_tech)
                    )
                    return True

        # If the container check fails set the container_tech to None.
        self.container_tech = None
        return False

    def _pid_lookup(self, subdir=None):
        if self.container_tech == 'nspawn':
            lookup_command = (
                u"machinectl show %s | awk -F'=' '/Leader/ {print $2}'"
                % self.container_name
            )
            pid_path = """/proc/%s"""
            if not subdir:
                subdir = 'cwd'
        elif self.container_tech == 'lxc':
            lookup_command = (
                u"lxc-info --name %s --pid | awk '/PID:/ {print $2}'"
                % self.container_name
            )
            pid_path = """/proc/%s"""
            if not subdir:
                subdir = 'root'
        else:
            return 1, ''

        args = ('ssh', self.host, lookup_command)
        returncode, stdout, _ = self._run(
            self._build_command(*args),
            in_data=None,
            sudoable=False
        )
        pid_path = os.path.join(
            pid_path % SSH.to_text(stdout.strip()),
            subdir
        )
        return returncode, pid_path

    def _container_path_pad(self, path):
        returncode, pid_path = self._pid_lookup()
        if returncode == 0:
            pad = os.path.join(
                pid_path,
                path.lstrip(os.sep)
            )
            SSH.display.vvv(
                u'The path has been padded with the following to support a'
                u' container rootfs: [ %s ]' % pad
            )
            return pad
        else:
            return path

    def fetch_file(self, in_path, out_path):
        """Fetch a file from remote to local."""
        if self._container_check():
            in_path = self._container_path_pad(path=in_path)

        return super(Connection, self).fetch_file(in_path, out_path)

    def put_file(self, in_path, out_path):
        """Transfer a file from local to remote."""
        if self._container_check():
            out_path = self._container_path_pad(path=out_path)

        return super(Connection, self).put_file(in_path, out_path)

    def close(self):
        # If we have a persistent ssh connection (ControlPersist), we can ask
        # it to stop listening. Otherwise, there's nothing to do here.
        if self._connected and self._persistent:
            cmd = self._build_command('ssh', '-O', 'stop', self.host)
            cmd = map(SSH.to_bytes, cmd)
            p = SSH.subprocess.Popen(cmd,
                                     stdin=SSH.subprocess.PIPE,
                                     stdout=SSH.subprocess.PIPE,
                                     stderr=SSH.subprocess.PIPE)
            p.communicate()
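The core of the plugin deleted above is the command padding in ``exec_command``. The following is a minimal standalone sketch of that wrapping for the ``lxc`` case, using only the Python standard library; ``shlex.quote`` stands in for ``SSH.shlex_quote``, and the container name is an illustrative value, not anything from the commit:

.. code-block:: python

    # A minimal sketch of the command wrapping performed by exec_command
    # above, assuming only the standard library.
    from shlex import quote  # stands in for SSH.shlex_quote

    def wrap_for_lxc(cmd, container_name, container_user='root'):
        """Wrap an SSH command so it runs inside an LXC container."""
        # Same padding string the plugin builds for the 'lxc' container_tech.
        pad = 'lxc-attach --clear-env --name {}'.format(container_name)
        # The inner command is quoted because it becomes the argument of
        # `su ... -c`, on top of the quoting Ansible already applied.
        return '%s -- su - %s -c %s' % (pad, container_user, quote(cmd))

    print(wrap_for_lxc('/bin/sh -c "echo hello"', 'aio1_utility_container'))
    # => lxc-attach --clear-env --name aio1_utility_container -- su - root -c '/bin/sh -c "echo hello"'

This is why the plugin's comments warn about "too many quotes in a row": the command is quoted once for SSH and once more for ``su -c``.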
@@ -1,8 +0,0 @@
=======
Actions
=======

.. toctree::
   :glob:

   actions/*
@@ -1,22 +0,0 @@
config_template
~~~~~~~~~~~~~~~

Synopsis
--------
Renders template files providing a create/update override interface

- The module contains the template functionality with the ability to override
  items in config, in transit, through the use of a simple dictionary without
  having to write out various temp files on target machines. The module renders
  all of the potential jinja a user could provide in both the template file and
  in the override dictionary, which is ideal for deployers who may have lots of
  different configs using a similar code base.
- The module is an extension of the **copy** module and all of the attributes
  that can be set there are available to be set here.

Examples
--------
.. literalinclude:: ../../../library/config_template
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
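For intuition, the "override in transit" behaviour described above can be approximated for ini-style content with ``configparser``. This is a rough sketch only and not the module's actual implementation; the real config_template also handles yaml and json and deeply nested overrides:

.. code-block:: python

    # Illustrative only: apply a dict of overrides to already-rendered
    # ini-style content without writing intermediate temp files.
    import configparser
    import io

    def apply_overrides(rendered_ini, overrides):
        parser = configparser.ConfigParser()
        parser.read_string(rendered_ini)
        for section, options in overrides.items():
            if section != 'DEFAULT' and not parser.has_section(section):
                parser.add_section(section)
            for key, value in options.items():
                parser.set(section, key, str(value))
        out = io.StringIO()
        parser.write(out)
        return out.getvalue()

    print(apply_overrides('[DEFAULT]\ndebug = False\n',
                          {'DEFAULT': {'debug': True}}))
    # => [DEFAULT]
    #    debug = True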
@@ -1,20 +0,0 @@
dist_sort
~~~~~~~~~

Synopsis
--------
Deterministically sort a list to distribute the elements in the list evenly.
Based on external values such as host or static modifier. Returns a string as
named key ``sorted_list``.

- This module returns a list of servers uniquely sorted based on an index from
  a lookup value's location within a group. The group should be an existing
  Ansible inventory group. The module returns the sorted list as a delimited
  string.

Examples
--------
.. literalinclude:: ../../../library/dist_sort
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,13 +0,0 @@
keystone
~~~~~~~~

Synopsis
--------
Manage OpenStack Identity (keystone) users, projects, roles, and endpoints.

Examples
--------
.. literalinclude:: ../../../library/keystone
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,13 +0,0 @@
magnum
~~~~~~

Synopsis
--------
Manage OpenStack magnum cluster templates.

Examples
--------
.. literalinclude:: ../../../library/magnum
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,13 +0,0 @@
memcached
~~~~~~~~~

Synopsis
--------
Add, remove, and get items from memcached.

Examples
--------
.. literalinclude:: ../../../library/memcached
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,13 +0,0 @@
name2int
~~~~~~~~

Synopsis
--------
Hash a host name and return an integer.

Examples
--------
.. literalinclude:: ../../../library/name2int
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,13 +0,0 @@
provider_networks
~~~~~~~~~~~~~~~~~

Synopsis
--------
Parse a list of networks and return data that Ansible can use.

Examples
--------
.. literalinclude:: ../../../library/provider_networks
   :language: yaml
   :start-after: EXAMPLES = """
   :end-before: """
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 #
-# openstack-ansible-plugins documentation build configuration file, created by
+# Config Template documentation build configuration file, created by
 # sphinx-quickstart on Mon Apr 13 20:42:26 2015.
 #
 # This file is execfile()d with the current directory set to its
@@ -51,12 +51,11 @@ master_doc = 'index'
 # General information about the project.
 author = 'OpenStack-Ansible Contributors'
 category = 'Miscellaneous'
-copyright = '2014-2016, OpenStack-Ansible Contributors'
-description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
-project = 'OpenStack-Ansible'
-role_name = 'plugins'
-target_name = 'openstack-ansible-' + role_name
-title = 'OpenStack-Ansible Documentation: ' + role_name + 'role'
+copyright = '2014-2018, OpenStack-Ansible Contributors'
+description = 'Config Template allows flexible .yaml, .ini, .json modifications.'
+project = 'Config Template'
+target_name = 'ansible-config_template'
+title = 'Config Template Documentation'

 # The link to the browsable source code (for the left hand menu)
 oslosphinx_cgit_link = (
@@ -220,7 +219,7 @@ html_last_updated_fmt = '%Y-%m-%d %H:%M'
 # html_search_scorer = 'scorer.js'

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'openstack-ansible-pluginsdoc'
+htmlhelp_basename = 'ansible-config_templatedoc'

 # -- Options for LaTeX output ---------------------------------------------

@@ -242,9 +241,9 @@ latex_elements = {
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (master_doc, 'openstack-ansible-plugins.tex',
-     'openstack-ansible-plugins Documentation',
-     'openstack-ansible-plugins contributors', 'manual'),
+    (master_doc, 'ansible-config_template.tex',
+     'ansible-config_template Documentation',
+     'ansible-config_template contributors', 'manual'),
 ]

 # The name of an image file (relative to this directory) to place at the top of
@@ -273,8 +272,8 @@ latex_documents = [
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (master_doc, 'openstack-ansible-plugins',
-     'openstack-ansible-plugins Documentation',
+    (master_doc, 'ansible-config_template',
+     'ansible-config_template Documentation',
      [author], 1)
 ]

||||
@ -288,9 +287,9 @@ man_pages = [
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'openstack-ansible-plugins',
|
||||
'openstack-ansible-plugins Documentation',
|
||||
author, 'openstack-ansible-plugins', 'One line description of project.',
|
||||
(master_doc, 'ansible-config_template',
|
||||
'ansible-config_template Documentation',
|
||||
author, 'ansible-config_template', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
|
@@ -1,166 +0,0 @@
=======
Filters
=======

bit_length_power_of_2
~~~~~~~~~~~~~~~~~~~~~
This filter will return the smallest power of 2 greater than a given numeric
value.

.. code-block:: yaml

    {{ 1000 | bit_length_power_of_2 }}
    # => 1024

deprecated
~~~~~~~~~~
This filter will return the old_var value, if defined, along with a
deprecation warning that will inform the user that the old variable
should no longer be used.

In order to use this filter the old and new variable names must be provided
to the filter as a string which is used to render the warning message. The
removed_in option is used to give a date or release name where the old
option will be removed. Optionally, if fatal is set to True, the filter
will raise an exception if the old variable is used.

.. code-block:: yaml

    old_var: "old value"
    old_var_name: "old_var"
    new_var_name: "new_var"
    removed_in: "Next release"
    fatal_deprecations: false

    {{ new_var | deprecated(old_var,
                            old_var_name,
                            new_var_name,
                            removed_in,
                            fatal_deprecations) }}
    # WARNING => Deprecated Option provided: Deprecated variable:
    #            "old_var", Removal timeframe: "Next release", Future usage:
    #            "new_var"
    # => "old value"

git_link_parse
~~~~~~~~~~~~~~
This filter will return a dict containing the parts of a given git repo URL.

.. code-block:: yaml

    {{ 'https://git.openstack.org/openstack/openstack-ansible@master' |
       git_link_parse }}
    # =>
    # {
    #   "url": "https://git.openstack.org/openstack/openstack-ansible",
    #   "plugin_path": null,
    #   "version": "master",
    #   "name": "openstack-ansible",
    #   "original":
    #     "https://git.openstack.org/openstack/openstack-ansible@master"
    # }

git_link_parse_name
~~~~~~~~~~~~~~~~~~~
This filter will return the name of a given git repo URL.

.. code-block:: yaml

    {{ 'https://git.openstack.org/openstack/openstack-ansible@master' |
       git_link_parse_name }}
    # => "openstack-ansible"

filtered_list
~~~~~~~~~~~~~
This filter takes two lists as inputs. The first list will be returned to the
user after removing any duplicates found within the second list.

.. code-block:: yaml

    {{ ['a', 'b'] | filtered_list(['b', 'c']) }}
    # => [ "a" ]

netloc
~~~~~~
This filter will return the netloc from a given URL.

.. code-block:: yaml

    {{ 'https://172.29.236.100:5000/v3/auth/tokens' | netloc }}
    # => "172.29.236.100:5000"

netloc_no_port
~~~~~~~~~~~~~~
This filter will return the netloc, without a port, from a given URL.

.. code-block:: yaml

    {{ 'https://172.29.236.100:5000/v3/auth/tokens' | netloc_no_port }}
    # => "172.29.236.100"

netorigin
~~~~~~~~~
This filter will return the scheme and netloc from a given URL.

.. code-block:: yaml

    {{ 'https://172.29.236.100:5000/v3/auth/tokens' | netorigin }}
    # => "https://172.29.236.100:5000"

pip_constraint_update
~~~~~~~~~~~~~~~~~~~~~
This filter will return a merged list from a given list of pip packages and a
list of pip package constraints to apply to that list.

.. code-block:: yaml

    pip_package_list:
      - pip==8.1.2
      - setuptools==25.1.0
      - wheel==0.29.0
    pip_package_constraint_list:
      - babel==2.3.4
      - pip==8.1.0

    {{ pip_package_list | pip_constraint_update(pip_package_constraint_list) }}
    # => [ "babel==2.3.4", "pip==8.1.0", "setuptools==25.1.0", "wheel==0.29.0" ]

pip_requirement_names
~~~~~~~~~~~~~~~~~~~~~
This filter will return a list of package names from a given list of pip
packages.

.. code-block:: yaml

    pip_package_list:
      - pip==8.1.2
      - setuptools==25.1.0
      - wheel==0.29.0

    {{ pip_package_list | pip_requirement_names }}
    # => [ "pip", "setuptools", "wheel" ]

splitlines
~~~~~~~~~~
This filter will return a list from a string with line breaks.

.. code-block:: yaml

    string_with_line_breaks: |
      a string
      with
      line
      breaks

    {{ string_with_line_breaks | splitlines }}
    # => [ "a string", "with", "line", "breaks" ]

string_2_int
~~~~~~~~~~~~
This filter will hash a given string, convert it to a base36 int, and return
the modulo of 10240.

.. code-block:: yaml

    {{ 'openstack-ansible' | string_2_int }}
    # => 3587
@@ -1,28 +1,48 @@
-=========================
-OpenStack-Ansible plugins
-=========================
+======================
+Config Template plugin
+======================

-.. toctree::
-   :maxdepth: 2
+Synopsis
+--------
+Renders template files providing a create/update override interface

-   actions.rst
-   filters.rst
-   lookups.rst
+- The module contains the template functionality with the ability to override
+  items in config, in transit, through the use of a simple dictionary without
+  having to write out various temp files on target machines. The module renders
+  all of the potential jinja a user could provide in both the template file and
+  in the override dictionary, which is ideal for deployers who may have lots of
+  different configs using a similar code base.
+- The module is an extension of the **copy** module and all of the attributes
+  that can be set there are available to be set here.

-Example ansible.cfg file
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Loading
+-------

-.. literalinclude:: ../../examples/example.ini
-   :language: yaml
+To use the plugin, include this role in your meta/main.yml dependencies.

+.. code-block:: yaml
+
+   dependencies:
+     - role: ansible-config_template
+
+Alternatively, move the role to the appropriate plugin folder location
+of your ansible configuration.

 Example role requirement overload for automatic plugin download
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------------------

 The Ansible role requirement file can be used to overload the
 ``ansible-galaxy`` command to automatically fetch the plugins for
 you in a given project. To do this add the following lines to your
 ``ansible-role-requirements.yml`` file.

-.. literalinclude:: ../../examples/playbook.yml
+.. literalinclude:: ../../examples/ansible-role-requirements.yml
    :language: yaml
+
+Examples
+--------
+
+.. literalinclude:: ../../library/config_template
+   :language: yaml
+   :start-after: EXAMPLES = """
+   :end-before: """
@@ -1,167 +0,0 @@
=======
Lookups
=======

py_pkgs
~~~~~~~
The ``py_pkgs`` lookup crawls a given list of directories to parse variables
and generate lists of Python packages, git repo information and Ansible group
memberships, which are used within OpenStack-Ansible's repo_build role to
build wheels and virtual environments.

Files and paths containing the following strings are evaluated:

- test-requirements.txt
- dev-requirements.txt
- requirements.txt
- global-requirements.txt
- global-requirement-pins.txt
- /defaults/
- /vars/
- /user_*

Variables parsed within any evaluated files include:

- service_pip_dependencies
- pip_common_packages
- pip_container_packages
- pip_packages

Example
-------

.. code-block:: yaml

    - name: Load local packages
      debug:
        msg: "Loading Packages"
      with_py_pkgs: "{{ pkg_locations }}"
      register: local_packages
      vars:
        pkg_locations:
          - "/etc/ansible/roles/os_nova"
    # => {
    #      "packages": [
    #        "httplib2",
    #        "keystonemiddleware",
    #        "libvirt-python",
    #        "nova",
    #        "nova-lxd",
    #        "nova-powervm",
    #        "pyasn1-modules",
    #        "pycrypto",
    #        "pylxd",
    #        "pymysql",
    #        "python-ironicclient",
    #        "python-keystoneclient",
    #        "python-memcached",
    #        "python-novaclient",
    #        "virtualenv",
    #        "websockify"
    #      ],
    #      "remote_package_parts": [
    #        {
    #          "egg_name": "nova",
    #          "fragment": null,
    #          "name": "nova",
    #          "original": "git+https://git.openstack.org/openstack/nova@stable/newton#egg=nova&gitname=nova&projectgroup=all",
    #          "project_group": "all",
    #          "url": "https://git.openstack.org/openstack/nova",
    #          "version": "stable/newton"
    #        },
    #        {
    #          "egg_name": "nova_lxd",
    #          "fragment": null,
    #          "name": "nova-lxd",
    #          "original": "git+https://git.openstack.org/openstack/nova-lxd@stable/newton#egg=nova_lxd&gitname=nova-lxd&projectgroup=all",
    #          "project_group": "all",
    #          "url": "https://git.openstack.org/openstack/nova-lxd",
    #          "version": "stable/newton"
    #        },
    #        {
    #          "egg_name": "novnc",
    #          "fragment": null,
    #          "name": "novnc",
    #          "original": "git+https://github.com/kanaka/novnc@master#egg=novnc&gitname=novnc&projectgroup=all",
    #          "project_group": "all",
    #          "url": "https://github.com/kanaka/novnc",
    #          "version": "master"
    #        },
    #        {
    #          "egg_name": "spice_html5",
    #          "fragment": null,
    #          "name": "spice-html5",
    #          "original": "git+https://github.com/SPICE/spice-html5@master#egg=spice_html5&gitname=spice-html5&projectgroup=all",
    #          "project_group": "all",
    #          "url": "https://github.com/SPICE/spice-html5",
    #          "version": "master"
    #        }
    #      ],
    #      "remote_packages": [
    #        "git+https://git.openstack.org/openstack/nova-lxd@stable/newton#egg=nova_lxd&gitname=nova-lxd&projectgroup=all",
    #        "git+https://git.openstack.org/openstack/nova@stable/newton#egg=nova&gitname=nova&projectgroup=all",
    #        "git+https://github.com/SPICE/spice-html5@master#egg=spice_html5&gitname=spice-html5&projectgroup=all",
    #        "git+https://github.com/kanaka/novnc@master#egg=novnc&gitname=novnc&projectgroup=all"
    #      ],
    #      "role_packages": {
    #        "os_nova": [
    #          "httplib2",
    #          "keystonemiddleware",
    #          "libvirt-python",
    #          "nova",
    #          "nova-lxd",
    #          "nova-powervm",
    #          "pyasn1-modules",
    #          "pycrypto",
    #          "pylxd",
    #          "pymysql",
    #          "python-ironicclient",
    #          "python-keystoneclient",
    #          "python-memcached",
    #          "python-novaclient",
    #          "virtualenv",
    #          "websockify"
    #        ]
    #      },
    #      "role_project_groups": {
    #        "os_nova": "nova_all"
    #      },
    #      "role_requirement_files": {},
    #      "role_requirements": {
    #        "os_nova": {
    #          "nova_compute_ironic_pip_packages": [
    #            "python-ironicclient"
    #          ],
    #          "nova_compute_lxd_pip_packages": [
    #            "nova-lxd",
    #            "pylxd"
    #          ],
    #          "nova_compute_pip_packages": [
    #            "libvirt-python"
    #          ],
    #          "nova_compute_powervm_pip_packages": [
    #            "nova-powervm",
    #            "pyasn1-modules"
    #          ],
    #          "nova_novnc_pip_packages": [
    #            "websockify"
    #          ],
    #          "nova_pip_packages": [
    #            "keystonemiddleware",
    #            "nova",
    #            "pycrypto",
    #            "pymysql",
    #            "python-keystoneclient",
    #            "python-memcached",
    #            "python-novaclient"
    #          ],
    #          "nova_requires_pip_packages": [
    #            "httplib2",
    #            "python-keystoneclient",
    #            "virtualenv"
    #          ],
    #          "project_group": "nova_all"
    #        }
    #      }
    #    }
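The discovery step described above can be pictured as a directory walk that keeps only the listed file names and path fragments. This is an illustrative sketch, not the lookup's actual implementation, and the role path is just an example:

.. code-block:: python

    # Sketch of the file-discovery behaviour documented for py_pkgs:
    # walk a set of locations and collect the files the lookup would
    # evaluate for requirements and variables.
    import os

    REQUIREMENTS_FILES = (
        'test-requirements.txt', 'dev-requirements.txt', 'requirements.txt',
        'global-requirements.txt', 'global-requirement-pins.txt',
    )
    PATH_FRAGMENTS = ('/defaults/', '/vars/', '/user_')

    def candidate_files(locations):
        for location in locations:
            for root, _, files in os.walk(location):
                for name in files:
                    path = os.path.join(root, name)
                    if name in REQUIREMENTS_FILES or any(
                            frag in path for frag in PATH_FRAGMENTS):
                        yield path

    for path in candidate_files(['/etc/ansible/roles/os_nova']):
        print(path)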
examples/ansible-role-requirements.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
- name: ansible-config_template
  src: https://git.openstack.org/openstack/ansible-config_template
  scm: git
  version: master
@@ -1,5 +0,0 @@
[defaults]
lookup_plugins = /etc/ansible/plugins/lookups
filter_plugins = /etc/ansible/plugins/filters
action_plugins = /etc/ansible/plugins/actions
library = /etc/ansible/plugins/library
@@ -1,4 +0,0 @@
- name: plugins
  src: https://git.openstack.org/openstack/openstack-ansible-plugins
  scm: git
  version: master
@@ -1,368 +0,0 @@
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>

import hashlib
import logging
import os
import re

from ansible import errors
from jinja2.runtime import Undefined
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

"""Filter usage:

Simple filters that may be useful from within the stack
"""


def _deprecated(new_var, old_var=None, old_var_name=None,
                new_var_name=None, removed_in=None, fatal=False):
    """Provide a deprecation warning on deprecated variables.

    This filter will return the old_var value, if defined, along with a
    deprecation warning that will inform the user that the old variable
    should no longer be used.

    In order to use this filter the old and new variable names must be
    provided to the filter as a string which is used to render the warning
    message. The removed_in option is used to give a date or release name
    where the old option will be removed. Optionally, if fatal is set to
    True, the filter will raise an exception if the old variable is used.

    USAGE: {{ new_var | deprecated(old_var,
                                   "old_var_name",
                                   "new_var_name",
                                   "removed_in",
                                   false) }}

    :param new_var: ``object``
    :param old_var: ``object``
    :param old_var_name: ``str``
    :param new_var_name: ``str``
    :param removed_in: ``str``
    :param fatal: ``bool``
    """
    _usage = (
        'USAGE: '
        '{{ new_var | deprecated(old_var=old_var, old_var_name="old_var_name",'
        ' new_var_name="new_var_name", removed_in="removed_in",'
        ' fatal=false) }}'
    )

    if not old_var_name:
        raise errors.AnsibleUndefinedVariable(
            'To use this filter you must provide the "old_var_name" option'
            ' with the string name of the old variable that will be'
            ' replaced. ' + _usage
        )
    if not new_var_name:
        raise errors.AnsibleUndefinedVariable(
            'To use this filter you must provide the "new_var_name" option'
            ' with the string name of the new variable that will replace the'
            ' deprecated one. ' + _usage
        )
    if not removed_in:
        raise errors.AnsibleUndefinedVariable(
            'To use this filter you must provide the "removed_in" option with'
            ' the string name of the release where the old_var will be'
            ' removed. ' + _usage
        )

    # If old_var is undefined or has a None value return the new_var value
    if isinstance(old_var, Undefined) or not old_var:
        return new_var

    name = 'Ansible-Warning| '
    log = logging.getLogger(name)
    for handler in log.handlers:
        if name == handler.name:
            break
    else:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.name = name
        stream_format = logging.Formatter(
            '%(asctime)s - %(name)s%(levelname)s => %(message)s'
        )
        stream_handler.setFormatter(stream_format)

        log.setLevel(logging.DEBUG)
        log.addHandler(stream_handler)

    message = (
        'Deprecated Option provided: Deprecated variable: "%(old)s", Removal'
        ' timeframe: "%(removed_in)s", Future usage: "%(new)s"'
        % {'old': old_var_name, 'new': new_var_name, 'removed_in': removed_in}
    )

    if str(fatal).lower() in ['yes', 'true']:
        message = 'Fatally %s' % message
        log.fatal(message)
        raise RuntimeError(message)
    else:
        log.warn(message)
        return old_var


def _pip_requirement_split(requirement):
    version_descriptors = "(>=|<=|>|<|==|~=|!=)"
    requirement = requirement.split(';')
    requirement_info = re.split(r'%s\s*' % version_descriptors, requirement[0])
    name = requirement_info[0]
    marker = None
    if len(requirement) > 1:
        marker = requirement[1]
    versions = None
    if len(requirement_info) > 1:
        versions = requirement_info[1]

    return name, versions, marker


def _lower_set_lists(list_one, list_two):
    _list_one = set([i.lower() for i in list_one])
    _list_two = set([i.lower() for i in list_two])
    return _list_one, _list_two


def bit_length_power_of_2(value):
    """Return the smallest power of 2 greater than a numeric value.

    :param value: Number to find the smallest power of 2
    :type value: ``int``
    :returns: ``int``
    """
    return 2**(int(value)-1).bit_length()


def get_netloc(url):
    """Return the netloc from a URL.

    If the input value is not a valid URL the method will raise an Ansible
    filter exception.

    :param url: the URL to parse
    :type url: ``str``
    :returns: ``str``
    """
    try:
        netloc = urlparse(url).netloc
    except Exception as exp:
        raise errors.AnsibleFilterError(
            'Failed to return the netloc of: "%s"' % str(exp)
        )
    else:
        return netloc


def get_netloc_no_port(url):
    """Return the netloc without a port from a URL.

    If the input value is not a valid URL the method will raise an Ansible
    filter exception.

    :param url: the URL to parse
    :type url: ``str``
    :returns: ``str``
    """
    return get_netloc(url=url).split(':')[0]


def get_netorigin(url):
    """Return the scheme and netloc from a URL.

    If the input value is not a valid URL the method will raise an Ansible
    filter exception.

    :param url: the URL to parse
    :type url: ``str``
    :returns: ``str``
    """
    try:
        parsed_url = urlparse(url)
        netloc = parsed_url.netloc
        scheme = parsed_url.scheme
    except Exception as exp:
        raise errors.AnsibleFilterError(
            'Failed to return the netorigin of: "%s"' % str(exp)
        )
    else:
        return '%s://%s' % (scheme, netloc)


def string_2_int(string):
    """Return an integer from a string.

    The string is hashed, converted to a base36 int, and the modulo of 10240
    is returned.

    :param string: string to retrieve an int from
    :type string: ``str``
    :returns: ``int``
    """
    # Try to encode utf-8 else pass
    try:
        string = string.encode('utf-8')
    except AttributeError:
        pass
    hashed_name = hashlib.sha256(string).hexdigest()
    return int(hashed_name, 36) % 10240


def pip_requirement_names(requirements):
    """Return a sorted list of unique requirement names.

    :param requirements: list of requirement strings, each of which may have
                         versions within it.
    :type requirements: ``list``
    :return: ``list``
    """
    named_requirements = list()
    for requirement in requirements:
        name = _pip_requirement_split(requirement)[0]
        if name and not name.startswith('#'):
            named_requirements.append(name.lower())

    return sorted(set(named_requirements))


def pip_constraint_update(list_one, list_two):
    """Return list_one with any constrained entries from list_two applied.

    A package in list_one whose name matches a versioned entry in list_two
    is replaced by that entry; unmatched constraints are appended.
    """
    _list_one, _list_two = _lower_set_lists(list_one, list_two)
    _list_one, _list_two = list(_list_one), list(_list_two)
    for item2 in _list_two:
        item2_name, item2_versions, _ = _pip_requirement_split(item2)
        if item2_versions:
            for item1 in _list_one:
                if item2_name == _pip_requirement_split(item1)[0]:
                    item1_index = _list_one.index(item1)
                    _list_one[item1_index] = item2
                    break
            else:
                _list_one.append(item2)

    return sorted(_list_one)


def splitlines(string_with_lines):
    """Return a ``list`` from a string with lines."""

    return string_with_lines.splitlines()


def filtered_list(list_one, list_two):
    """Return list_one with any items also found in list_two removed."""
    _list_one, _list_two = _lower_set_lists(list_one, list_two)
    return list(_list_one - _list_two)


def git_link_parse(repo):
    """Return a dict containing the parts of a git repository.

    :param repo: git repo string to parse.
    :type repo: ``str``
    :returns: ``dict``
    """

    if 'git+' in repo:
        _git_url = repo.split('git+', 1)[-1]
    else:
        _git_url = repo

    if '@' in _git_url:
        url, branch = _git_url.split('@', 1)
    else:
        url = _git_url
        branch = 'master'

    name = os.path.basename(url.rstrip('/'))
    _branch = branch.split('#')
    branch = _branch[0]

    plugin_path = None
    # Determine if the package is a plugin type
    if len(_branch) > 1 and 'subdirectory=' in _branch[-1]:
        plugin_path = _branch[-1].split('subdirectory=')[-1].split('&')[0]

    return {
        'name': name.split('.git')[0].lower(),
        'version': branch,
        'plugin_path': plugin_path,
        'url': url,
        'original': repo
    }


def git_link_parse_name(repo):
    """Return the name of a git repo."""

    return git_link_parse(repo)['name']


def get_nested(target_dict, keys):
    """Retrieve values through a nested dictionary.

    If any key on the path is missing, return None.

    This helps solve convoluted guards in roles/plays such as the following:

        ('openstack_ansible' not in ansible_local or
         'swift' not in ansible_local['openstack_ansible'] or
         'venv_tag' not in ansible_local['openstack_ansible']['swift'] or
         ansible_local['openstack_ansible']['swift']['venv_tag'] == swift_venv_tag)

    With this filter, it could instead be written:

        ansible_local|get_nested('openstack_ansible.swift.venv_tag') == swift_venv_tag
    """

    try:
        key, next_keys = keys.split('.', 1)
    except ValueError:
        return target_dict.get(keys, None)

    try:
        next_dict = target_dict[key]
    except KeyError:
        return None
    return get_nested(next_dict, next_keys)


class FilterModule(object):
    """Ansible jinja2 filters."""

    @staticmethod
    def filters():
        return {
            'bit_length_power_of_2': bit_length_power_of_2,
            'netloc': get_netloc,
            'netloc_no_port': get_netloc_no_port,
            'netorigin': get_netorigin,
            'string_2_int': string_2_int,
            'pip_requirement_names': pip_requirement_names,
            'pip_constraint_update': pip_constraint_update,
            'splitlines': splitlines,
            'filtered_list': filtered_list,
            'git_link_parse': git_link_parse,
            'git_link_parse_name': git_link_parse_name,
            'deprecated': _deprecated,
            'get_nested': get_nested
        }
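Two of the filters above are easy to sanity-check against the outputs documented in the deleted filters.rst. A standalone sketch, assuming a ``str`` input for ``string_2_int`` (the original also tolerates bytes):

.. code-block:: python

    # Standalone copies of two filters defined above, checked against the
    # documented outputs.
    import hashlib

    def bit_length_power_of_2(value):
        # Smallest power of 2 greater than or equal to the input.
        return 2 ** (int(value) - 1).bit_length()

    def string_2_int(string):
        # sha256 hexdigest chars (0-9a-f) are all valid base36 digits.
        hashed_name = hashlib.sha256(string.encode('utf-8')).hexdigest()
        return int(hashed_name, 36) % 10240

    assert bit_length_power_of_2(1000) == 1024
    print(string_2_int('openstack-ansible'))  # filters.rst documents 3587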
@@ -1,173 +0,0 @@
#!/usr/bin/env python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# import module snippets
from ansible.module_utils.basic import *


DOCUMENTATION = """
---
module: dist_sort
version_added: "1.6.6"
short_description:
  - Deterministically sort a list to distribute the elements in the list
    evenly. Based on external values such as host or static modifier. Returns
    a string as named key ``sorted_list``.
description:
  - This module returns a list of servers uniquely sorted based on an index
    from a lookup value's location within a group. The group should be an
    existing ansible inventory group. The module returns the sorted list
    as a delimited string.
options:
  src_list:
    description:
      - list in the form of a string separated by a delimiter.
    required: True
  ref_list:
    description:
      - list to look up value_to_lookup against to return an index number.
        This should be a pre-determined ansible group containing the
        ``value_to_lookup``.
    required: False
  value_to_lookup:
    description:
      - value is looked up against ref_list to get an index number.
    required: False
  sort_modifier:
    description:
      - add a static int into the sort equation to weight the output.
    type: int
    default: 0
  delimiter:
    description:
      - delimiter used to parse ``src_list`` with.
    default: ','
author:
  - Kevin Carter
  - Sam Yaple
"""


EXAMPLES = """
- dist_sort:
    value_to_lookup: "Hostname-in-ansible-group_name"
    ref_list: "{{ groups['group_name'] }}"
    src_list: "Server1,Server2,Server3"
  register: test_var

# With a pre-set delimiter
- dist_sort:
    value_to_lookup: "Hostname-in-ansible-group_name"
    ref_list: "{{ groups['group_name'] }}"
    src_list: "Server1|Server2|Server3"
    delimiter: '|'
  register: test_var

# With a set modifier
- dist_sort:
    value_to_lookup: "Hostname-in-ansible-group_name"
    ref_list: "{{ groups['group_name'] }}"
    src_list: "Server1#Server2#Server3"
    delimiter: '#'
    sort_modifier: 5
  register: test_var
"""


class DistSort(object):
    def __init__(self, module):
        """Deterministically sort a list of servers.

        :param module: The active ansible module.
        :type module: ``class``
        """
        self.module = module
        self.params = self.module.params
        self.return_data = self._runner()

    def _runner(self):
        """Return the sorted list of servers.

        Based on the modulo of the index of a *value_to_lookup* within an
        ansible group, this function returns a "delimiter" separated list
        of items.

        :returns: ``str``
        """
        try:
            index = self.params['ref_list'].index(
                self.params['value_to_lookup']
            )
        except ValueError:
            index = 0

        src_list = self.params['src_list'].split(self.params['delimiter'])
        index += self.params['sort_modifier']
        for _ in range(index % len(src_list)):
            src_list.append(src_list.pop(0))

        return self.params['delimiter'].join(src_list)


def main():
    """Run the main app."""
    module = AnsibleModule(
        argument_spec=dict(
            value_to_lookup=dict(
                required=True,
                type='str'
            ),
            ref_list=dict(
                required=True,
                type='list'
            ),
            src_list=dict(
                required=True,
                type='str'
            ),
            delimiter=dict(
                required=False,
                type='str',
                default=','
            ),
            sort_modifier=dict(
                required=False,
                type='str',
                default='0'
            )
        ),
        supports_check_mode=False
    )
    try:
        # This is done so that the failure can be parsed and does not cause
        # ansible to fail if a non-int is passed.
        module.params['sort_modifier'] = int(module.params['sort_modifier'])
        _ds = DistSort(module=module)
        if _ds.return_data == module.params['src_list']:
            _changed = False
        else:
            _changed = True
    except Exception as exp:
        resp = {'stderr': str(exp)}
        resp.update(module.params)
        module.fail_json(msg='Failed Process', **resp)
    else:
        module.exit_json(changed=_changed, **{'sorted_list': _ds.return_data})


if __name__ == '__main__':
    main()
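The sorting above reduces to a list rotation: rotate the source list by ``(index + sort_modifier) % len`` positions. A minimal sketch of ``DistSort._runner`` as a plain function, with illustrative host and server names:

.. code-block:: python

    # The core of DistSort._runner above, reduced to a function.
    def dist_sort(value_to_lookup, ref_list, src_list,
                  delimiter=',', sort_modifier=0):
        try:
            index = ref_list.index(value_to_lookup)
        except ValueError:
            index = 0
        items = src_list.split(delimiter)
        index += sort_modifier
        # Rotate the list by (index % len) positions.
        for _ in range(index % len(items)):
            items.append(items.pop(0))
        return delimiter.join(items)

    print(dist_sort('host2', ['host1', 'host2', 'host3'],
                    'Server1,Server2,Server3'))
    # => Server2,Server3,Server1

Each host in the group gets a different rotation of the same server list, which is what distributes the elements evenly.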
library/keystone (1469 lines): diff suppressed because it is too large.
library/magnum (267 lines)
@@ -1,267 +0,0 @@
#!/usr/bin/env python
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from magnumclient import client as magnum_client
from ansible.module_utils.basic import *


DOCUMENTATION = """
---
module: magnum
version_added: "1.9.4"
short_description:
  - Manage OpenStack Magnum Cluster Templates
description:
  - Manage OpenStack Magnum Cluster Templates
options:
  action:
    description:
      - Indicate the desired state of the resource
    choices: ['template-present', 'template-absent']
    required: true
  name:
    description:
      - Name of the resource
    required: true
  openrc_path:
    description:
      - Path to OpenRC file
    required: true
  insecure:
    description:
      - Explicitly allow client to perform "insecure" TLS
    choices:
      - false
      - true
    default: false
  keypair:
    description:
      - Keypair for Cluster Template
    required: false
  image:
    description:
      - Image for Cluster Template
    required: false
  external_network:
    description:
      - External Network for Cluster Template
    required: false
  dns_nameserver:
    description:
      - DNS Nameserver for Cluster Template
    required: false
  flavor:
    description:
      - Flavor for Cluster Template
    required: false
  docker_volume_size:
    description:
      - Volume size (in GB) for Cluster Template
    required: false
  coe:
    description:
      - Container Orchestration Engine for Cluster Template
    required: false
  public:
    description:
      - Whether or not Cluster Template should be visible to all users
    required: false
requirements: [ python-magnumclient ]
author: Christopher Hultin
"""

EXAMPLES = """
# Create Swarm Template
- magnum:
    action: 'template-present'
    name: 'swarm-dev-test'
    openrc_path: '/root/openrc'
    insecure: True
    keypair: 'testkey'
    image: 'fedora-atomic-latest'
    external_network: 'public'
    dns_nameserver: '8.8.8.8'
    flavor: 'm1.small'
    docker_volume_size: 5
    coe: 'swarm'
    public: 'false'

# Change the DNS Nameserver
- magnum:
    action: 'template-present'
    name: 'swarm-dev-test'
    openrc_path: '/root/openrc'
    insecure: True
    keypair: 'testkey'
    image: 'fedora-atomic-latest'
    external_network: 'public'
    dns_nameserver: '8.8.4.4'
    flavor: 'm1.small'
    docker_volume_size: 5
    coe: 'swarm'
    public: 'false'

# Delete Template
- magnum:
    action: 'template-absent'
    name: 'swarm-dev-test'
    openrc_path: '/root/openrc'
    insecure: True
"""


COMMAND_MAP = {
    'template-present': 'template_present',
    'template-absent': 'template_absent',
}


class ManageMagnum(object):
    def __init__(self, module):
        self.state_change = False
        self.magnum = None
        self.module = module
        try:
            self._init_magnum()
        except Exception as e:
            self.module.fail_json(
                err="Initialisation Error: %s" % e,
                rc=2, msg=str(e))

    def _parse_openrc(self):
        """Get credentials from an openrc file."""
        openrc_path = self.module.params['openrc_path']
        line_re = re.compile('^export (?P<key>OS_\w*)=\'?(?P<value>[^\n\']*)')
        with open(openrc_path) as openrc:
            matches = [line_re.match(l) for l in openrc]
            return dict(
                (g.groupdict()['key'], g.groupdict()['value'])
                for g in matches if g
            )

    def _init_magnum(self):
        openrc = self._parse_openrc()
        self.magnum = magnum_client.Client(
            username=openrc['OS_USERNAME'],
            password=openrc['OS_PASSWORD'],
            project_name=openrc['OS_PROJECT_NAME'],
            auth_url=openrc['OS_AUTH_URL'],
            service_type='container-infra',
            insecure=self.module.params['insecure'],
            user_domain_name=openrc['OS_USER_DOMAIN_NAME'],
            project_domain_name=openrc['OS_PROJECT_DOMAIN_NAME']
        )

    def route(self):
        """Run the command specified by the command parameter."""
        getattr(self, COMMAND_MAP[self.module.params['action']])()

    def template_present(self):
        p = self.module.params
        template_name = p['name']
        template_opts = dict(
            name=template_name,
            image_id=p['image'],
            keypair_id=p['keypair'],
            external_network_id=p['external_network'],
            dns_nameserver=p['dns_nameserver'],
            flavor_id=p['flavor'],
            docker_volume_size=p['docker_volume_size'],
            coe=p['coe'],
            public=p['public']
        )
        template_found = False
        updates = []
        for template in self.magnum.cluster_templates.list(detail=True):
            if template.name == template_name:
                template_found = True
        if not template_found:
            try:
                self.magnum.cluster_templates.create(**template_opts)
            except Exception as e:
                self.module.fail_json(
                    msg="Failed to create template: {}".format(e)
                )
            self.state_change = True
            self.module.exit_json(
                changed=self.state_change
            )
        else:
            try:
                for opt, val in template_opts.items():
                    if (val != getattr(template, opt) and
                            val is not None):
                        updates.append({opt: val})
            except Exception as e:
                self.module.fail_json(
                    msg="Error: Parameters are undefined: {}".format(e)
                )
            if len(updates) >= 1:
                try:
                    self.magnum.cluster_templates.update(template_name, updates)
                except Exception as e:
                    self.module.fail_json(
                        msg="Failed to update template: {}".format(e)
                    )
                self.state_change = True
            else:
                self.state_change = False
            self.module.exit_json(
                changed=self.state_change
            )

    def template_absent(self):
        p = self.module.params
        template_name = p['name']
        for template in self.magnum.cluster_templates.list(detail=True):
            if template.name == template_name:
                try:
                    self.magnum.cluster_templates.delete(template_name)
                except Exception as e:
                    self.module.fail_json(
                        msg="Failed to delete template: {}".format(e)
                    )
                self.state_change = True
        self.module.exit_json(
            changed=self.state_change
        )


def main():
    module = AnsibleModule(
        argument_spec={
            "name": {"required": True, "type": "str"},
            "image": {"required": False, "type": "str"},
            "keypair": {"required": False, "type": "str"},
            "external_network": {"required": False, "type": "str"},
            "dns_nameserver": {"required": False, "type": "str"},
            "flavor": {"required": False, "type": "str"},
            "docker_volume_size": {"required": False, "type": "int"},
            "coe": {"required": False, "type": "str"},
            "public": {"required": False, "type": "bool"},
            "openrc_path": {"required": True, "type": "str"},
            "insecure": {"required": False, "default": False, "type": "bool"},
            "action": {
                "choices": ['template-present', 'template-absent',
                            'cluster-present', 'cluster-absent'],
                "type": "str"
            },
        },
        supports_check_mode=False
    )
    mg = ManageMagnum(module)
    mg.route()


if __name__ == '__main__':
    main()
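The ``_parse_openrc`` helper above is driven by a single regular expression. A standalone sketch with illustrative export lines; the raw-string prefix is added here for clarity, the deleted module used a plain string:

.. code-block:: python

    # Demonstration of the openrc parsing used by _parse_openrc above.
    import re

    line_re = re.compile(r"^export (?P<key>OS_\w*)='?(?P<value>[^\n']*)")
    openrc_lines = [
        "export OS_USERNAME='admin'\n",
        "export OS_AUTH_URL=http://10.0.0.1:5000/v3\n",
        "# a comment, ignored\n",
    ]
    matches = [line_re.match(l) for l in openrc_lines]
    creds = dict(
        (g.groupdict()['key'], g.groupdict()['value'])
        for g in matches if g
    )
    print(creds)
    # => {'OS_USERNAME': 'admin', 'OS_AUTH_URL': 'http://10.0.0.1:5000/v3'}

Note how the optional quote in ``'?`` lets the same pattern handle both quoted and unquoted export values.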
@ -1,598 +0,0 @@
|
||||
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import base64
import errno
import os
import stat
import sys

import memcache
try:
    from Crypto.Cipher import AES
    from Crypto import Random

    ENCRYPT_IMPORT = True
except ImportError:
    ENCRYPT_IMPORT = False

# import module snippets
from ansible.module_utils.basic import *

DOCUMENTATION = """
---
module: memcached
version_added: "1.6.6"
short_description:
    - Add, remove, and get items from memcached
description:
    - Add, remove, and get items from memcached
options:
    name:
        description:
            - Memcached key name
        required: true
    content:
        description:
            - Add content to memcached. Only used when state is 'present'.
        required: false
    file_path:
        description:
            - This can be used with state 'present' and 'retrieve'. When set
              with state 'present' the contents of a file will be used, when
              set with state 'retrieve' the contents of the memcached key will
              be written to a file.
        required: false
    state:
        description:
            - ['absent', 'present', 'retrieve']
        required: true
    server:
        description:
            - server IP address and port. This can be a comma separated list of
              servers to connect to.
        required: true
    encrypt_string:
        description:
            - Encrypt/Decrypt a memcached object using a provided value.
        required: false
    dir_mode:
        description:
            - If a directory is created when using the ``file_path`` argument
              the directory will be created with a set mode.
        default: '0755'
        required: false
    file_mode:
        description:
            - If a file is created when using the ``file_path`` argument
              the file will be created with a set mode.
        default: '0644'
        required: false
    expires:
        description:
            - Seconds until an item is expired from memcached.
        default: 300
        required: false
notes:
    - The "absent" state will remove an item from memcached.
    - The "present" state will place an item from a string or a file into
      memcached.
    - The "retrieve" state will get an item from memcached and return it as a
      string. If a ``file_path`` is set this module will also write the value
      to a file.
    - All items added into memcached are base64 encoded.
    - All items retrieved will be base64 decoded when possible; if decoding
      does not apply, the raw string value is returned.
    - Items retrieved from memcached are returned within a "value" key unless
      a ``file_path`` is specified, which would then write the contents of the
      memcached key to a file.
    - The ``file_path`` and ``content`` fields are mutually exclusive.
    - If you'd like to encrypt items in memcached, PyCrypto is required.
requirements:
    - "python-memcached"
optional_requirements:
    - "pycrypto"
author: Kevin Carter
"""

EXAMPLES = """
# Add an item into memcached.
- memcached:
    name: "key_name"
    content: "Super awesome value"
    state: "present"
    server: "localhost:11211"

# Read the contents of a memcached key, returned as "memcached_phrase.value".
- memcached:
    name: "key_name"
    state: "retrieve"
    server: "localhost:11211"
  register: memcached_key

# Add the contents of a file into memcached.
- memcached:
    name: "key_name"
    file_path: "/home/user_name/file.txt"
    state: "present"
    server: "localhost:11211"

# Write the contents of a memcached key to a file; the value is also
# returned as "memcached_phrase.value".
- memcached:
    name: "key_name"
    file_path: "/home/user_name/file.txt"
    state: "retrieve"
    server: "localhost:11211"
  register: memcached_key

# Delete an item from memcached.
- memcached:
    name: "key_name"
    state: "absent"
    server: "localhost:11211"
"""

SERVER_MAX_VALUE_LENGTH = 1024 * 256

MAX_MEMCACHED_CHUNKS = 256


class AESCipher(object):
    """Encrypt a string using AES.

    Solution derived from "http://stackoverflow.com/a/21928790"
    """
    def __init__(self, key):
        if ENCRYPT_IMPORT is False:
            raise ImportError(
                'PyCrypto failed to be imported. Encryption is not supported'
                ' on this system until PyCrypto is installed.'
            )

        self.bs = 32
        if len(key) >= 32:
            self.key = key[:32]
        else:
            self.key = self._pad(key)

    def encrypt(self, raw):
        """Encrypt raw message.

        :param raw: ``str``
        :returns: ``str`` Base64 encoded string.
        """
        raw = self._pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        """Decrypt an encrypted message.

        :param enc: ``str``
        :returns: ``str``
        """
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:]))

    def _pad(self, string):
        """Pad a string out to the AES block size.

        :param string: ``str``
        """
        base = (self.bs - len(string) % self.bs)
        back = chr(self.bs - len(string) % self.bs)
        return string + base * back

    @staticmethod
    def _unpad(string):
        """Strip the padding from a decrypted string.

        :param string: ``str``
        """
        ordinal_range = ord(string[len(string) - 1:])
        return string[:-ordinal_range]
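
# A minimal round-trip sketch of the cipher above (assuming PyCrypto is
# installed; the key and plaintext are illustrative values):
#
#     cipher = AESCipher(key='SuperSecretCacheKey')
#     token = cipher.encrypt('some plaintext')
#     assert cipher.decrypt(token) == 'some plaintext'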


class Memcached(object):
    """Manage objects within memcached."""
    def __init__(self, module):
        self.module = module
        self.state_change = False
        self.mc = None

    def router(self):
        """Route all commands to their respective functions.

        If an exception happens a failure will be raised.
        """

        try:
            action = getattr(self, self.module.params['state'])
            self.mc = memcache.Client(
                self.module.params['server'].split(','),
                server_max_value_length=SERVER_MAX_VALUE_LENGTH,
                debug=0
            )
            facts = action()
        except Exception as exp:
            self._failure(error=str(exp), rc=1, msg='general exception')
        else:
            self.mc.disconnect_all()
            self.module.exit_json(
                changed=self.state_change, **facts
            )

    def _failure(self, error, rc, msg):
        """Return a Failure when running an Ansible command.

        :param error: ``str`` Error that occurred.
        :param rc: ``int`` Return code while executing an Ansible command.
        :param msg: ``str`` Message to report.
        """

        self.module.fail_json(msg=msg, rc=rc, err=error)

    def absent(self):
        """Remove a key from memcached.

        If the value is not deleted when instructed to do so an exception will
        be raised.

        :return: ``dict``
        """

        key_name = self.module.params['name']
        get_keys = [
            '%s.%s' % (key_name, i) for i in range(MAX_MEMCACHED_CHUNKS)
        ]
        self.mc.delete_multi(get_keys)
        value = self.mc.get_multi(get_keys)
        if not value:
            self.state_change = True
            return {'absent': True, 'key': self.module.params['name']}
        else:
            self._failure(
                error='Memcache key not deleted',
                rc=1,
                msg='Failed to remove an item from memcached; please check'
                    ' your memcached server for issues. If you are load'
                    ' balancing memcached, attempt to connect to a single'
                    ' node.'
            )

    @staticmethod
    def _decode_value(value):
        """Return a ``str`` from a base64 decoded value.

        If the content is not a base64 ``str`` the raw value will be returned.

        :param value: ``str``
        :return:
        """

        try:
            b64_value = base64.decodestring(value)
        except Exception:
            return value
        else:
            return b64_value
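
    # Illustrative behaviour: _decode_value('Zm9v\n') yields 'foo', while a
    # value that is not valid base64 is expected to come back unchanged.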

    def _encode_value(self, value):
        """Return a base64 encoded value.

        If the value can't be base64 encoded an exception will be raised.

        :param value: ``str``
        :return: ``str``
        """

        try:
            b64_value = base64.encodestring(value)
        except Exception as exp:
            self._failure(
                error=str(exp),
                rc=1,
                msg='The value provided can not be Base64 encoded.'
            )
        else:
            return b64_value

    def _file_read(self, full_path, pass_on_error=False):
        """Read the contents of a file.

        This will read the contents of a file. If the ``full_path`` does not
        exist an exception will be raised.

        :param full_path: ``str``
        :return: ``str``
        """

        try:
            with open(full_path, 'rb') as f:
                o_value = f.read()
        except IOError as exp:
            if pass_on_error is False:
                self._failure(
                    error=str(exp),
                    rc=1,
                    msg="The file you've specified does not exist. Please"
                        " check your full path @ [ %s ]." % full_path
                )
            else:
                return None
        else:
            return o_value

    def _chown(self, path, mode_type):
        """Chmod a file or directory based on a given mode type.

        If the file is modified the state will be changed.

        :param path: ``str``
        :param mode_type: ``str``
        """
        mode = self.module.params.get(mode_type)
        # Ensure that the mode type is a string.
        mode = str(mode)
        _mode = oct(stat.S_IMODE(os.stat(path).st_mode))
        # Only chmod when neither the full octal string nor its unprefixed
        # form already matches the requested mode.
        if _mode != mode and _mode[1:] != mode:
            os.chmod(path, int(mode, 8))
            self.state_change = True
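
    # For example (illustrative values): with dir_mode='0755', a directory
    # currently at 0700 would be re-moded via os.chmod(path, int('0755', 8)).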

    def _file_write(self, full_path, value):
        """Write the contents of ``value`` to the ``full_path``.

        This will return True upon success and will raise an exception upon
        failure.

        :param full_path: ``str``
        :param value: ``str``
        :return: ``bool``
        """

        try:
            # Ensure that the directory exists
            dir_path = os.path.dirname(full_path)
            try:
                os.makedirs(dir_path)
            except OSError as exp:
                if exp.errno == errno.EEXIST and os.path.isdir(dir_path):
                    pass
                else:
                    self._failure(
                        error=str(exp),
                        rc=1,
                        msg="The directory [ %s ] does not exist and couldn't"
                            " be created. Please check the path and that you"
                            " have permission to write the file." % dir_path
                    )

            # Ensure proper directory permissions
            self._chown(path=dir_path, mode_type='dir_mode')

            # Write contents of a cached key to a file.
            with open(full_path, 'wb') as f:
                if isinstance(value, list):
                    f.writelines(value)
                else:
                    f.write(value)

            # Ensure proper file permissions
            self._chown(path=full_path, mode_type='file_mode')

        except IOError as exp:
            self._failure(
                error=str(exp),
                rc=1,
                msg="There was an issue while attempting to write to the"
                    " file [ %s ]. Please check your full path and"
                    " permissions." % full_path
            )
        else:
            return True

    def retrieve(self):
        """Return a value from memcached.

        If ``file_path`` is specified the value of the memcached key will be
        written to a file at the ``file_path`` location. If the value of a key
        is None, an exception will be raised.

        :returns: ``dict``
        """

        key_name = self.module.params['name']
        get_keys = [
            '%s.%s' % (key_name, i) for i in range(MAX_MEMCACHED_CHUNKS)
        ]
        multi_value = self.mc.get_multi(get_keys)
        if multi_value:
            # Join the chunks in numeric key order so that multi-chunk
            # values are reassembled correctly.
            value = ''.join(
                multi_value[k] for k in sorted(
                    multi_value, key=lambda x: int(x.rsplit('.', 1)[-1])
                ) if multi_value[k] is not None
            )
            # Get the file path if specified.
            file_path = self.module.params.get('file_path')
            if file_path is not None:
                full_path = os.path.abspath(os.path.expanduser(file_path))

                # Decode cached value
                encrypt_string = self.module.params.get('encrypt_string')
                if encrypt_string:
                    _d_value = AESCipher(key=encrypt_string)
                    d_value = _d_value.decrypt(enc=value)
                    if not d_value:
                        d_value = self._decode_value(value=value)
                else:
                    d_value = self._decode_value(value=value)

                o_value = self._file_read(
                    full_path=full_path, pass_on_error=True
                )

                # Compare old value to new value and write if different
                if o_value != d_value:
                    self.state_change = True
                    self._file_write(full_path=full_path, value=d_value)

                return {
                    'present': True,
                    'key': self.module.params['name'],
                    'value': value,
                    'file_path': full_path
                }
            else:
                return {
                    'present': True,
                    'key': self.module.params['name'],
                    'value': value
                }
        else:
            self._failure(
                error='Memcache key not found',
                rc=1,
                msg='The key you specified was not found within memcached. '
                    'If you are load balancing memcached, attempt to connect'
                    ' to a single node.'
            )

    def present(self):
        """Create and or update a key within Memcached.

        The state processed here is present. This state will ensure that
        content is written to a memcached server. When ``file_path`` is
        specified the content will be read in from a file.
        """

        file_path = self.module.params.get('file_path')
        if file_path is not None:
            full_path = os.path.abspath(os.path.expanduser(file_path))
            # Read the contents of a file into memcached.
            o_value = self._file_read(full_path=full_path)
        else:
            o_value = self.module.params['content']

        # Encode cached value
        encrypt_string = self.module.params.get('encrypt_string')
        if encrypt_string:
            _d_value = AESCipher(key=encrypt_string)
            d_value = _d_value.encrypt(raw=o_value)
        else:
            d_value = self._encode_value(value=o_value)

        compare = 1024 * 128
        chunks = sys.getsizeof(d_value) / compare
        if chunks == 0:
            chunks = 1
        elif chunks > MAX_MEMCACHED_CHUNKS:
            self._failure(
                error='Memcache content too large',
                rc=1,
                msg='The content that you are attempting to cache is larger'
                    ' than [ %s ] megabytes.'
                    % ((compare * MAX_MEMCACHED_CHUNKS / 1024 / 1024))
            )

        step = len(d_value) / chunks
        if step == 0:
            step = 1

        key_name = self.module.params['name']
        split_d_value = {}
        count = 0
        for i in range(0, len(d_value), step):
            split_d_value['%s.%s' % (key_name, count)] = d_value[i:i + step]
            count += 1
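
        # As a sketch of the resulting layout: a value split into three
        # chunks under the key "key_name" is stored as "key_name.0",
        # "key_name.1" and "key_name.2", mirroring the key list rebuilt in
        # retrieve() and absent().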

        value = self.mc.set_multi(
            mapping=split_d_value,
            time=self.module.params['expires'],
            min_compress_len=2048
        )

        if not value:
            self.state_change = True
            return {
                'present': True,
                'key': self.module.params['name']
            }
        else:
            self._failure(
                error='Memcache content not created',
                rc=1,
                msg='The content you attempted to place within memcached'
                    ' was not created. If you are load balancing'
                    ' memcached, attempt to connect to a single node.'
                    ' Returned a value of unstored keys [ %s ] - Original'
                    ' Connection [ %s ]'
                    % (value, [i.__dict__ for i in self.mc.servers])
            )


def main():
    """Main ansible run method."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                type='str',
                required=True
            ),
            content=dict(
                type='str',
                required=False
            ),
            file_path=dict(
                type='str',
                required=False
            ),
            state=dict(
                type='str',
                required=True
            ),
            server=dict(
                type='str',
                required=True
            ),
            expires=dict(
                type='int',
                default=300,
                required=False
            ),
            file_mode=dict(
                type='str',
                default='0644',
                required=False
            ),
            dir_mode=dict(
                type='str',
                default='0755',
                required=False
            ),
            encrypt_string=dict(
                type='str',
                required=False
            )
        ),
        supports_check_mode=False,
        mutually_exclusive=[
            ['content', 'file_path']
        ]
    )
    ms = Memcached(module=module)
    ms.router()

if __name__ == '__main__':
    main()
@ -1,79 +0,0 @@
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import hashlib
import platform

# import module snippets
from ansible.module_utils.basic import *

DOCUMENTATION = """
---
module: name2int
version_added: "1.6.6"
short_description:
    - Hash a host name and return an integer
description:
    - Hash a host name and return an integer
options:
    name:
        description:
            - Name of the host to hash
        required: true
author: Kevin Carter
"""

EXAMPLES = """
# Hash a host name into a deterministic integer.
- name2int:
    name: "Some-hostname.com"
"""


class HashHostname(object):
    def __init__(self, module):
        """Generate an integer from a name."""
        self.module = module

    def return_hashed_host(self, name):
        hashed_name = hashlib.md5(name).hexdigest()
        hash_int = int(hashed_name, 32)
        real_int = int(hash_int % 300)
        return real_int
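
    # Illustrative behaviour: any given name maps deterministically to an
    # integer in the range [0, 300), e.g. (hypothetical host name):
    #
    #     HashHostname(module=None).return_hashed_host('host.example.com')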


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                required=True
            )
        ),
        supports_check_mode=False
    )
    try:
        sm = HashHostname(module=module)
        int_value = sm.return_hashed_host(platform.node())
        resp = {'int_value': int_value}
        module.exit_json(changed=True, **resp)
    except Exception as exp:
        resp = {'stderr': exp}
        module.fail_json(msg='Failed Process', **resp)

if __name__ == '__main__':
    main()
@ -1,290 +0,0 @@
#!/usr/bin/python
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
#
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# import module snippets
from ansible.module_utils.basic import *


DOCUMENTATION = """
---
module: provider_networks
version_added: "1.8.5"
short_description:
    - Parse a list of networks and return data that Ansible can use
description:
    - Parse a list of networks and return data that Ansible can use
options:
    provider_networks:
        description:
            - List of networks to parse
        required: true
    is_metal:
        description:
            - Enable handling of on metal hosts
        required: false
    bind_prefix:
        description:
            - Add a prefix to all network interfaces.
        required: false
author: Kevin Carter
"""

EXAMPLES = """
## This is what the provider_networks list should look like.
# provider_networks:
# - network:
#     container_bridge: "br-mgmt"
#     container_type: "veth"
#     container_interface: "eth1"
#     ip_from_q: "container"
#     type: "raw"
#     group_binds:
#       - all_containers
#       - hosts
#     is_container_address: true
#     is_ssh_address: true
# - network:
#     container_bridge: "br-vxlan"
#     container_type: "veth"
#     container_interface: "eth10"
#     ip_from_q: "tunnel"
#     type: "vxlan"
#     range: "1:1000"
#     net_name: "vxlan"
#     group_binds:
#       - neutron_linuxbridge_agent
# - network:
#     container_bridge: "br-vlan"
#     container_type: "veth"
#     container_interface: "eth12"
#     host_bind_override: "eth12"
#     type: "flat"
#     net_name: "flat"
#     group_binds:
#       - neutron_linuxbridge_agent
# - network:
#     container_bridge: "br-vlan"
#     container_type: "veth"
#     container_interface: "eth11"
#     host_bind_override: "eth11"
#     sriov_host_interfaces: "p1p1,p1p2"
#     type: "vlan"
#     range: "1:1, 101:101"
#     net_name: "vlan"
#     group_binds:
#       - neutron_linuxbridge_agent
# - network:
#     container_bridge: "br-storage"
#     container_type: "veth"
#     container_interface: "eth2"
#     ip_from_q: "storage"
#     type: "raw"
#     group_binds:
#       - glance_api
#       - cinder_api
#       - cinder_volume
#       - nova_compute
#       - swift_proxy

- name: Test provider networks
  provider_networks:
    provider_networks: "{{ provider_networks }}"
  register: pndata1

- name: Test provider networks is metal
  provider_networks:
    provider_networks: "{{ provider_networks }}"
    is_metal: true
  register: pndata2

- name: Test provider networks with prefix
  provider_networks:
    provider_networks: "{{ provider_networks }}"
    bind_prefix: "brx"
    is_metal: true
  register: pndata3

## Module output:
# {
#     "network_flat_networks": "flat",
#     "network_flat_networks_list": [
#         "flat"
#     ],
#     "network_mappings": "flat:brx-eth12,vlan:brx-eth11",
#     "network_mappings_list": [
#         "flat:brx-eth12",
#         "vlan:brx-eth11"
#     ],
#     "network_sriov_mappings": "physnet1:p1p1,physnet1:p1p2",
#     "network_sriov_mappings_list": [
#         "physnet1:p1p1",
#         "physnet1:p1p2"
#     ],
#     "network_types": "vxlan,flat,vlan",
#     "network_types_list": [
#         "vxlan",
#         "flat",
#         "vlan"
#     ],
#     "network_vlan_ranges": "vlan:1:1,vlan:1024:1025",
#     "network_vlan_ranges_list": [
#         "vlan:1:1",
#         "vlan:1024:1025"
#     ],
#     "network_vxlan_ranges": "1:1000",
#     "network_vxlan_ranges_list": [
#         "1:1000"
#     ]
# }
"""


class ProviderNetworksParsing(object):
    def __init__(self, module):
        """Parse and index the provider network definitions.

        :param module: Load the ansible module
        :type module: ``object``
        """
        self.module = module
        self.network_vlan_ranges = list()
        self.network_vxlan_ranges = list()
        self.network_flat_networks = list()
        self.network_mappings = list()
        self.network_types = list()
        self.network_sriov_mappings = list()

    def load_networks(self, provider_networks, is_metal=False,
                      bind_prefix=None):
        """Load the lists of network and network data types.

        :param provider_networks: list of networks defined in user_config
        :type provider_networks: ``list``
        :param is_metal: Enable or disable handling of on metal nodes
        :type is_metal: ``bool``
        :param bind_prefix: Pre-interface prefix forced within the network map
        :type bind_prefix: ``str``
        """

        for net in provider_networks:
            if net['network']['type'] == "vlan":
                if "vlan" not in self.network_types:
                    self.network_types.append('vlan')
                for vlan_range in net['network']['range'].split(','):
                    self.network_vlan_ranges.append(
                        '%s:%s' % (
                            net['network']['net_name'], vlan_range.strip()
                        )
                    )
            elif net['network']['type'] == "vxlan":
                if "vxlan" not in self.network_types:
                    self.network_types.append('vxlan')
                self.network_vxlan_ranges.append(net['network']['range'])
            elif net['network']['type'] == "flat":
                if "flat" not in self.network_types:
                    self.network_types.append('flat')
                self.network_flat_networks.append(
                    net['network']['net_name']
                )

            # Create the network mappings
            if net['network']['type'] not in ['raw', 'vxlan']:
                if 'net_name' in net['network']:
                    if is_metal:
                        if 'host_bind_override' in net['network']:
                            bind_device = net['network']['host_bind_override']
                        else:
                            bind_device = net['network']['container_bridge']
                    else:
                        bind_device = net['network']['container_interface']

                    if bind_prefix:
                        bind_device = '%s-%s' % (bind_prefix, bind_device)
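
                    # e.g. bind_prefix='brx' with host_bind_override='eth12'
                    # yields the mapping device 'brx-eth12', matching the
                    # sample output shown in EXAMPLES above.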

                    self.network_mappings.append(
                        '%s:%s' % (
                            net['network']['net_name'],
                            bind_device
                        )
                    )

                    if 'sriov_host_interfaces' in net['network']:
                        host_interfaces = \
                            net['network']['sriov_host_interfaces']
                        for interface in host_interfaces.split(','):
                            self.network_sriov_mappings.append(
                                '%s:%s' % (
                                    net['network']['net_name'],
                                    interface
                                )
                            )


def main():

    module = AnsibleModule(
        argument_spec=dict(
            provider_networks=dict(
                type='list',
                required=True
            ),
            is_metal=dict(
                type='bool',
                default='false'
            ),
            bind_prefix=dict(
                type='str',
                required=False,
                default=None
            )
        ),
        supports_check_mode=False
    )

    try:
        pnp = ProviderNetworksParsing(module=module)
        pnp.load_networks(
            provider_networks=module.params.get('provider_networks'),
            is_metal=module.params.get('is_metal'),
            bind_prefix=module.params.get('bind_prefix')
        )

        # Response dictionary; this provides the comma-joined string form of
        # each list as well as preserving the list form for future data
        # processing.
        resp = {
            'network_vlan_ranges': ','.join(pnp.network_vlan_ranges),
            'network_vlan_ranges_list': pnp.network_vlan_ranges,
            'network_vxlan_ranges': ','.join(pnp.network_vxlan_ranges),
            'network_vxlan_ranges_list': pnp.network_vxlan_ranges,
            'network_flat_networks': ','.join(pnp.network_flat_networks),
            'network_flat_networks_list': pnp.network_flat_networks,
            'network_mappings': ','.join(pnp.network_mappings),
            'network_mappings_list': pnp.network_mappings,
            'network_types': ','.join(pnp.network_types),
            'network_types_list': pnp.network_types,
            'network_sriov_mappings': ','.join(pnp.network_sriov_mappings),
            'network_sriov_mappings_list': pnp.network_sriov_mappings
        }

        module.exit_json(changed=True, **resp)
    except Exception as exp:
        resp = {'stderr': exp}
        module.fail_json(msg='Failed Process', **resp)

if __name__ == '__main__':
    main()
@ -1,79 +0,0 @@
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>

# Takes a path to a debian mirror release file and outputs a dict of
# package names, each of the packages holding the following:
# - package version
# - checksums
# - relative location to the pool folder
#
# example:
# get_url:
#   url: http://rpc-repo.rackspace.com/apt-mirror/integrated/dists/r14.0.0rc1-trusty/main/binary-amd64/Packages
#   dest: /tmp/trusty-amd64-Packages
# debug:
#   var: item
# with_packages_file:
#   - /tmp/trusty-amd64-Packages

import os

from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleLookupError

IMPORTANT_FIELDS = ['Version', 'Filename', 'MD5sum', 'SHA1', 'SHA256']

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

def parse_fields(line):
    for field in IMPORTANT_FIELDS:
        if line.startswith(field + ":"):
            # Split on the first colon only so values that themselves contain
            # a colon (e.g. epoch versions such as "1:1.2.3") stay intact.
            return (field, line.split(":", 1)[1].strip())
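
# A quick sketch of the expected behaviour:
#
#     parse_fields("Version: 1:1.2.3-0ubuntu1") == ('Version', '1:1.2.3-0ubuntu1')
#     parse_fields("Description: a package") is None  # field not tracked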


class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        ret = []
        for term in terms:
            pkg_details = {}
            with open(term, 'r') as f:
                for line in f:
                    # A non-empty line carries package data.
                    if line.strip():
                        if line.startswith('Package:'):
                            currentpkg = line.split(":")[1].strip()
                            pkg_details[currentpkg] = {}
                        elif line.startswith('Provides:'):
                            pkg_details[line.split(":")[1].strip()] = \
                                pkg_details[currentpkg]
                        else:
                            # Now doing package data
                            parsed = parse_fields(line)
                            if parsed:
                                pkg_details[currentpkg][parsed[0]] = parsed[1]
                    else:
                        currentpkg = ""
            ret.append(pkg_details)
        return ret

# For debug purposes
if __name__ == '__main__':
    import sys
    import json
    print(json.dumps(LookupModule().run(terms=sys.argv[1:]), indent=4, sort_keys=True))
@ -1,763 +0,0 @@
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>

import os
import re
import traceback
import yaml

from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase


try:
    basestring
except NameError:
    basestring = str


# Used to keep track of git package parts as various files are processed
GIT_PACKAGE_DEFAULT_PARTS = dict()


# Role based package indexes
ROLE_DISTRO_BREAKOUT_PACKAGES = dict()
ROLE_BREAKOUT_REQUIREMENTS = dict()
ROLE_PACKAGES = dict()
ROLE_REQUIREMENTS = dict()


REQUIREMENTS_FILE_TYPES = [
    'test-requirements.txt',
    'dev-requirements.txt',
    'requirements.txt',
    'global-requirements.txt',
    'global-requirement-pins.txt'
]


# List of variable names that could be used within the yaml files that
# represent lists of python packages.
BUILT_IN_PIP_PACKAGE_VARS = [
    'service_pip_dependencies',
    'pip_common_packages',
    'pip_container_packages',
    'pip_packages'
]

BUILT_IN_DISTRO_PACKAGE_VARS = [
    'distro_packages',
    'apt_packages',
    'yum_packages'
]


PACKAGE_MAPPING = {
    'packages': set(),
    'remote_packages': set(),
    'remote_package_parts': list(),
    'role_packages': dict(),
    'role_project_groups': dict(),
    'distro_packages': set()
}


def map_base_and_remote_packages(package, package_map):
    """Determine whether a package is a base package or a remote package
    and add to the appropriate set.

    :type package: ``str``
    :type package_map: ``dict``
    """
    def check_for_ignore(p):
        p_parts = GIT_PACKAGE_DEFAULT_PARTS.get(p)
        if p_parts:
            fragments = p_parts.get('fragments', '') or ''
            if 'ignorerequirements=True' not in fragments:
                package_map['packages'].add(p)
        else:
            package_map['packages'].add(p)

    if package.startswith(('http:', 'https:', 'git+')):
        if '@' not in package:
            check_for_ignore(p=package)
        else:
            git_parts = git_pip_link_parse(package)
            package_name = git_parts[-2]
            if not package_name:
                package_name = git_pip_link_parse(package)[0]

            for rpkg in list(package_map['remote_packages']):
                rpkg_name = git_pip_link_parse(rpkg)[-2]
                if not rpkg_name:
                    rpkg_name = git_pip_link_parse(package)[0]

                if rpkg_name == package_name:
                    package_map['remote_packages'].remove(rpkg)
                    package_map['remote_packages'].add(package)
                    break
            else:
                package_map['remote_packages'].add(package)
    else:
        check_for_ignore(p=package)


def parse_remote_package_parts(package_map):
    """Parse parts of each remote package and add them to
    the remote_package_parts list.

    :type package_map: ``dict``
    """
    keys = [
        'name',
        'version',
        'fragment',
        'url',
        'original',
        'egg_name',
        'project_group'
    ]
    remote_pkg_parts = [
        dict(
            zip(
                keys, git_pip_link_parse(i)
            )
        ) for i in package_map['remote_packages']
    ]
    package_map['remote_package_parts'].extend(remote_pkg_parts)
    package_map['remote_package_parts'] = list(
        dict(
            (i['name'], i)
            for i in package_map['remote_package_parts']
        ).values()
    )


def map_role_packages(package_map):
    """Add and sort packages belonging to a role to the role_packages dict.

    :type package_map: ``dict``
    """
    for k, v in ROLE_PACKAGES.items():
        role_pkgs = package_map['role_packages'][k] = list()
        package_map['role_project_groups'][k] = v.pop('project_group', 'all')
        for pkg_list in v.values():
            role_pkgs.extend(pkg_list)
        else:
            package_map['role_packages'][k] = sorted(set(role_pkgs))


def map_base_package_details(package_map):
    """Parse package version and marker requirements and add to the
    base packages set.

    :type package_map: ``dict``
    """
    check_pkgs = dict()
    base_packages = sorted(list(package_map['packages']))
    for pkg in base_packages:
        name, versions, markers = _pip_requirement_split(pkg)
        if versions and markers:
            versions = '%s;%s' % (versions, markers)
        elif not versions and markers:
            versions = ';%s' % markers

        if name in check_pkgs:
            if versions and not check_pkgs[name]:
                check_pkgs[name] = versions
        else:
            check_pkgs[name] = versions
    else:
        return_pkgs = list()
        for k, v in check_pkgs.items():
            if v:
                return_pkgs.append('%s%s' % (k, v))
            else:
                return_pkgs.append(k)
        package_map['packages'] = set(return_pkgs)


def git_pip_link_parse(repo):
    """Return a tuple containing the parts of a git repository.

    Example parsing a standard git repo:
    >>> git_pip_link_parse('git+https://github.com/username/repo-name@tag')
    ('repo-name',
     'tag',
     None,
     'https://github.com/username/repo',
     'git+https://github.com/username/repo@tag',
     'repo_name')

    Example parsing a git repo that uses an installable from a subdirectory:
    >>> git_pip_link_parse(
    ...     'git+https://github.com/username/repo@tag#egg=plugin.name'
    ...     '&subdirectory=remote_path/plugin.name'
    ... )
    ('plugin.name',
     'tag',
     'remote_path/plugin.name',
     'https://github.com/username/repo',
     'git+https://github.com/username/repo@tag#egg=plugin.name&'
     'subdirectory=remote_path/plugin.name',
     'plugin.name')

    :param repo: git repo string to parse.
    :type repo: ``str``
    :returns: ``tuple``
    """

    def _meta_return(meta_data, item):
        """Return the value of an item in meta data."""

        return meta_data.lstrip('#').split('%s=' % item)[-1].split('&')[0]

    _git_url = repo.split('+')
    if len(_git_url) >= 2:
        _git_url = _git_url[1]
    else:
        _git_url = _git_url[0]

    git_branch_sha = _git_url.split('@')
    if len(git_branch_sha) > 2:
        branch = git_branch_sha.pop()
        url = '@'.join(git_branch_sha)
    elif len(git_branch_sha) > 1:
        url, branch = git_branch_sha
    else:
        url = git_branch_sha[0]
        branch = 'master'

    egg_name = name = os.path.basename(url.rstrip('/'))
    egg_name = egg_name.replace('-', '_')

    _branch = branch.split('#')
    branch = _branch[0]

    plugin_path = None
    # Determine if the package is a plugin type
    if len(_branch) > 1:
        if 'subdirectory=' in _branch[-1]:
            plugin_path = _meta_return(_branch[-1], 'subdirectory')
            name = os.path.basename(plugin_path)

        if 'egg=' in _branch[-1]:
            egg_name = _meta_return(_branch[-1], 'egg')
            egg_name = egg_name.replace('-', '_')

        if 'gitname=' in _branch[-1]:
            name = _meta_return(_branch[-1], 'gitname')

    project_group = 'all'
    if 'projectgroup=' in _branch[-1]:
        project_group = _meta_return(_branch[-1], 'projectgroup')

    return name.lower(), branch, plugin_path, url, repo, egg_name, project_group


def _pip_requirement_split(requirement):
    """Split pip versions from a given requirement.

    The method will return the package name, versions, and any markers.

    :type requirement: ``str``
    :returns: ``tuple``
    """
    version_descriptors = "(>=|<=|>|<|==|~=|!=)"
    requirement = requirement.split(';')
    requirement_info = re.split(r'%s\s*' % version_descriptors, requirement[0])
    name = requirement_info[0]
    marker = None
    if len(requirement) > 1:
        marker = requirement[-1]
    versions = None
    if len(requirement_info) > 1:
        versions = ''.join(requirement_info[1:])

    return name, versions, marker
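
# Illustrative splits (hypothetical requirement strings):
#
#     _pip_requirement_split("pip>=8.1.2") == ('pip', '>=8.1.2', None)
#     _pip_requirement_split("six;python_version<'3'") == \
#         ('six', None, "python_version<'3'")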


class DependencyFileProcessor(object):
    def __init__(self, local_path):
        """Find required files.

        :type local_path: ``str``
        :return:
        """
        self.pip = dict()
        self.pip['git_package'] = list()
        self.pip['py_package'] = list()
        self.pip['git_data'] = list()
        self.git_pip_install = 'git+%s@%s'
        self.file_names = self._get_files(path=local_path)

        # Process everything simply by calling the method
        self._process_files()

    def _py_pkg_extend(self, packages, py_package=None):
        if py_package is None:
            py_package = self.pip['py_package']

        for pkg in packages:
            pkg_name = _pip_requirement_split(pkg)[0]
            for py_pkg in py_package:
                py_pkg_name = _pip_requirement_split(py_pkg)[0]
                if pkg_name == py_pkg_name:
                    py_package.remove(py_pkg)
        else:
            norm_pkgs = [i.lower() for i in packages if not i.startswith('{{')]
            py_package.extend(norm_pkgs)
        return py_package

    @staticmethod
    def _filter_files(file_names, ext):
        """Filter the files and return a sorted list.

        :type file_names:
        :type ext: ``str`` or ``tuple``
        :returns: ``list``
        """
        _file_names = list()
        file_name_words = ['/defaults/', '/vars/', '/user_']
        file_name_words.extend(REQUIREMENTS_FILE_TYPES)
        for file_name in file_names:
            if file_name.endswith(ext):
                if any(i in file_name for i in file_name_words):
                    _file_names.append(file_name)
        else:
            return _file_names

    @staticmethod
    def _get_files(path):
        """Return a list of all files in the defaults/repo_packages directory.

        :type path: ``str``
        :returns: ``list``
        """
        paths = os.walk(os.path.abspath(path), followlinks=True)
        files = list()
        for fpath, _, afiles in paths:
            for afile in afiles:
                files.append(os.path.join(fpath, afile))
        else:
            return files

    def _check_plugins(self, git_repo_plugins, git_data):
        """Check if the git url is a plugin type.

        :type git_repo_plugins: ``dict``
        :type git_data: ``dict``
        """
        for repo_plugin in git_repo_plugins:
            strip_plugin_path = repo_plugin['package'].lstrip('/')
            plugin = '%s/%s' % (
                repo_plugin['path'].strip('/'),
                strip_plugin_path
            )

            name = git_data['name'] = os.path.basename(strip_plugin_path)
            git_data['egg_name'] = name.replace('-', '_')
            package = self.git_pip_install % (
                git_data['repo'], git_data['branch']
            )
            package += '#egg=%s' % git_data['egg_name']
            package += '&subdirectory=%s' % plugin
            package += '&gitname=%s' % name
            if git_data['fragments']:
                package += '&%s' % git_data['fragments']

            self.pip['git_data'].append(git_data)
            self.pip['git_package'].append(package)

            if name not in GIT_PACKAGE_DEFAULT_PARTS:
                GIT_PACKAGE_DEFAULT_PARTS[name] = git_data.copy()
            else:
                GIT_PACKAGE_DEFAULT_PARTS[name].update(git_data.copy())

    @staticmethod
    def _check_defaults(git_data, name, item):
        """Check if a default exists and use it if an item is undefined.

        :type git_data: ``dict``
        :type name: ``str``
        :type item: ``str``
        """
        if not git_data[item] and name in GIT_PACKAGE_DEFAULT_PARTS:
            check_item = GIT_PACKAGE_DEFAULT_PARTS[name].get(item)
            if check_item:
                git_data[item] = check_item

    def _process_git(self, loaded_yaml, git_item, yaml_file_name):
        """Process git repos.

        :type loaded_yaml: ``dict``
        :type git_item: ``str``
        """
        git_data = dict()
        if git_item.split('_')[0] == 'git':
            prefix = ''
        else:
            prefix = '%s_' % git_item.split('_git_repo')[0].replace('.', '_')

        # Set the various variable definitions
        repo_var = prefix + 'git_repo'
        name_var = prefix + 'git_package_name'
        branch_var = prefix + 'git_install_branch'
        fragment_var = prefix + 'git_install_fragments'
        plugins_var = prefix + 'repo_plugins'
        group_var = prefix + 'git_project_group'

        # get the repo definition
        git_data['repo'] = loaded_yaml.get(repo_var)
        group = git_data['project_group'] = loaded_yaml.get(group_var, 'all')

        # get the repo name definition
        name = git_data['name'] = loaded_yaml.get(name_var)
        if not name:
            # NOTE: strip off trailing /, .git, or .git/
            name = git_data['name'] = os.path.basename(
                re.sub(r'(\/$|\.git(\/)?$)', '', git_data['repo'])
            )
        git_data['egg_name'] = name.replace('-', '_')

        # This conditional is set to ensure we're only processing git
        # repos from the defaults file when those same repos are not
        # being set in the repo_packages files.
        if '/defaults/main' in yaml_file_name:
            if name in GIT_PACKAGE_DEFAULT_PARTS:
                return

        # get the repo branch definition
        git_data['branch'] = loaded_yaml.get(branch_var)
        self._check_defaults(git_data, name, 'branch')
        if not git_data['branch']:
            git_data['branch'] = 'master'

        package = self.git_pip_install % (git_data['repo'], git_data['branch'])

        # get the repo fragment definitions, if any
        git_data['fragments'] = loaded_yaml.get(fragment_var)
        self._check_defaults(git_data, name, 'fragments')

        package += '#egg=%s' % git_data['egg_name']
        package += '&gitname=%s' % name
        package += '&projectgroup=%s' % group
        if git_data['fragments']:
            package += '&%s' % git_data['fragments']

        self.pip['git_package'].append(package)
        self.pip['git_data'].append(git_data.copy())

        # Set the default package parts to track data during the run
        if name not in GIT_PACKAGE_DEFAULT_PARTS:
            GIT_PACKAGE_DEFAULT_PARTS[name] = git_data.copy()
        else:
            GIT_PACKAGE_DEFAULT_PARTS[name].update(git_data)

        # get the repo plugin definitions, if any
        git_data['plugins'] = loaded_yaml.get(plugins_var)
        self._check_defaults(git_data, name, 'plugins')
        if git_data['plugins']:
            self._check_plugins(
                git_repo_plugins=git_data['plugins'],
                git_data=git_data
            )

    def _package_build_index(self, packages, role_name, var_name, pkg_index,
                             project_group='all', var_file_name=None,
                             pip_packages=True):
        if pip_packages:
            self._py_pkg_extend(packages)

        if role_name:
            if role_name in pkg_index:
                role_pkgs = pkg_index[role_name]
            else:
                role_pkgs = pkg_index[role_name] = dict()

            role_pkgs['project_group'] = project_group

            if var_file_name:
                _name = os.path.splitext(os.path.basename(var_file_name))[0]
                if _name in pkg_index[role_name]:
                    file_name_index = pkg_index[role_name][_name]
                else:
                    file_name_index = pkg_index[role_name][_name] = dict()
                pkgs = file_name_index.get(var_name, list())
                pkgs = self._py_pkg_extend(packages, pkgs)
                file_name_index[var_name] = sorted(set(pkgs))
            else:
                pkgs = role_pkgs.get(var_name, list())
                pkgs.extend(packages)
                if 'pip' in var_name:
                    pkgs = [i.lower() for i in pkgs if not i.startswith('{{')]
                else:
                    pkgs = [i for i in pkgs if not i.startswith('{{')]
                if pkgs:
                    pkg_index[role_name][var_name] = sorted(set(pkgs))
        else:
            for k, v in pkg_index.items():
                for item_name in v.keys():
                    if var_name == item_name:
                        pkg_index[k][item_name] = self._py_pkg_extend(
                            packages,
                            pkg_index[k][item_name]
                        )

    def _process_files(self):
        """Process all of the requirement files."""
        self._process_files_defaults()
        self._process_files_requirements()

    def _process_files_defaults(self):
        """Process files."""
        for file_name in self._filter_files(self.file_names, ('yaml', 'yml')):
            with open(file_name, 'r') as f:
                # If there is an exception loading the file continue, and if
                # the loaded_config is None continue. This ensures that no
                # bad config gets passed to the rest of the process.
                try:
                    loaded_config = yaml.safe_load(f.read())
                except Exception:  # Broad exception so everything is caught
                    continue
                else:
                    if not loaded_config or not isinstance(loaded_config, dict):
                        continue

                if 'roles' in file_name:
                    _role_name = file_name.split('roles%s' % os.sep)[-1]
                    role_name = _role_name.split(os.sep)[0]
                else:
                    role_name = None

            for key, value in loaded_config.items():
                if key.endswith('role_project_group'):
                    project_group = value
                    break
            else:
                project_group = 'all'

            if role_name is not None:
                PACKAGE_MAPPING['role_project_groups'][role_name] = project_group
            for key, values in loaded_config.items():
                key = key.lower()
                if key.endswith('git_repo'):
                    self._process_git(
                        loaded_yaml=loaded_config,
                        git_item=key,
                        yaml_file_name=file_name
                    )
                # Process pip packages
                self._process_packages(
                    pkg_constant=BUILT_IN_PIP_PACKAGE_VARS,
                    pkg_breakout_index=ROLE_BREAKOUT_REQUIREMENTS,
                    pkg_role_index=ROLE_PACKAGES,
                    pkg_var_name=key,
                    packages=values,
                    role_name=role_name,
                    project_group=project_group
                )
                # Process distro packages
                self._process_packages(
                    pkg_constant=BUILT_IN_DISTRO_PACKAGE_VARS,
                    pkg_breakout_index=ROLE_DISTRO_BREAKOUT_PACKAGES,
                    pkg_role_index=dict(),  # this is not used here
                    pkg_var_name=key,
                    packages=values,
                    role_name=role_name,
                    project_group=project_group,
                    role_index=False,
                    var_file_name=file_name,
                    pip_packages=False
                )

    def _process_packages(self, pkg_constant, pkg_breakout_index,
                          pkg_role_index, pkg_var_name, packages, role_name,
                          project_group, role_index=True, var_file_name=None,
                          pip_packages=True):
        """Process variables to build the package data structures.

        :param pkg_constant: CONSTANT used to validate package names
        :type pkg_constant: ``list``
        :param pkg_breakout_index: CONSTANT used to store indexed packages
        :type pkg_breakout_index: ``dict``
        :param pkg_role_index: CONSTANT used to store role indexed packages
        :type pkg_role_index: ``dict``
        :param pkg_var_name: package variable name
        :type pkg_var_name: ``str``
        :param packages: list of packages to index
        :type packages: ``list``
        :param role_name: Name of the role where the packages came from
        :type role_name: ``str``
        :param project_group: Name of the group being indexed
        :type project_group: ``str``
        :param role_index: Enable or disable the use of the role index
        :type role_index: ``bool``
        :param var_file_name: Variable file name used to index packages
        :type var_file_name: ``str``
        :param pip_packages: Enable or disable pip index types
        :type pip_packages: ``bool``
        """
        if [i for i in pkg_constant if i in pkg_var_name]:
            if 'proprietary' in pkg_var_name:
                return

            self._package_build_index(
                packages=packages,
                role_name=role_name,
                var_name=pkg_var_name,
                pkg_index=pkg_breakout_index,
                project_group=project_group,
                var_file_name=var_file_name,
                pip_packages=pip_packages
            )

            if not role_index:
                return
            elif 'optional' in pkg_var_name:
                return
            else:
                self._package_build_index(
                    packages=packages,
                    role_name=role_name,
                    var_name=pkg_var_name,
                    pkg_index=pkg_role_index,
                    project_group=project_group,
                    var_file_name=var_file_name,
                    pip_packages=pip_packages
                )

    def _process_files_requirements(self):
        """Process requirements files."""
        return_list = self._filter_files(self.file_names, 'txt')
        for file_name in return_list:
            base_name = os.path.basename(file_name)
            if base_name in REQUIREMENTS_FILE_TYPES:
                index = REQUIREMENTS_FILE_TYPES.index(base_name)
                return_list.remove(file_name)
                return_list.insert(index, file_name)
        else:
            for file_name in return_list:
                if file_name.endswith('other-requirements.txt'):
                    continue
                elif file_name.endswith('bindep.txt'):
                    continue
                elif 'roles' in file_name:
                    _role_name = file_name.split('roles%s' % os.sep)[-1]
                    role_name = _role_name.split(os.sep)[0]
                else:
                    role_name = 'default'
                with open(file_name, 'r') as f:
                    packages = [
                        i.split()[0].lower() for i in f.read().splitlines()
                        if i
                        if not i.startswith('#')
                    ]
                base_file_name = os.path.basename(file_name)
                if base_file_name.endswith('test-requirements.txt'):
                    continue
                if base_file_name.endswith('global-requirement-pins.txt'):
                    self._package_build_index(
                        packages=packages,
                        role_name='global_pins',
                        var_name='pinned_packages',
                        pkg_index=ROLE_REQUIREMENTS,
                        project_group='all'
                    )
                self._package_build_index(
                    packages=packages,
                    role_name=role_name,
                    var_name='txt_file_packages',
                    pkg_index=ROLE_REQUIREMENTS,
                    project_group='all'
                )


def _abs_path(path):
    return os.path.abspath(
        os.path.expanduser(
            path
        )
    )


class LookupModule(LookupBase):
    def __init__(self, basedir=None, **kwargs):
        """Initialize the lookup module.

        :type basedir:
        :type kwargs:
        """

    def run(self, terms, variables=None, **kwargs):
        """Run the main application.

        :type terms: ``str``
        :type variables: ``str``
        :type kwargs: ``dict``
        :returns: ``list``
        """
        if isinstance(terms, str):
            terms = [terms]

        return_data = PACKAGE_MAPPING

        for term in terms:
            return_list = list()
            try:
                dfp = DependencyFileProcessor(
                    local_path=_abs_path(str(term))
                )
                return_list.extend(dfp.pip['py_package'])
                return_list.extend(dfp.pip['git_package'])
            except Exception as exp:
                raise AnsibleError(
                    'lookup_plugin.py_pkgs(%s) returned "%s" error "%s"' % (
                        term,
                        str(exp),
                        traceback.format_exc()
                    )
                )

            for item in return_list:
                map_base_and_remote_packages(item, return_data)
            else:
                parse_remote_package_parts(return_data)
        else:
            map_role_packages(return_data)
            map_base_package_details(return_data)
            # Sort everything within the returned data
            for key, value in return_data.items():
                if isinstance(value, (list, set)):
                    try:
                        if all(isinstance(item, dict) for item in value):
                            return_data[key] = sorted(
                                value, key=lambda k: k['name']
                            )
                        else:
                            return_data[key] = sorted(value)
                    except TypeError:
                        return_data[key] = value
            return_data['role_requirement_files'] = ROLE_REQUIREMENTS
            return_data['role_requirements'] = ROLE_BREAKOUT_REQUIREMENTS
            _dp = return_data['role_distro_packages'] = ROLE_DISTRO_BREAKOUT_PACKAGES
            for k, v in PACKAGE_MAPPING['role_project_groups'].items():
                if k in _dp:
                    _dp[k]['project_group'] = v
            return [return_data]

# Used for testing and debugging; usage: `python plugins/lookups/py_pkgs.py ../`
if __name__ == '__main__':
    import sys
    import json
    print(json.dumps(LookupModule().run(terms=sys.argv[1:]), indent=4, sort_keys=True))
@ -1,5 +1,5 @@
---
|
||||
# Copyright 2014, Rackspace US, Inc.
|
||||
# Copyright 2018, Rackspace US, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -18,7 +18,7 @@ galaxy_info:
   description: Plugin collection
   company: Rackspace
   license: Apache2
-  min_ansible_version: 1.6.6
+  min_ansible_version: 2.3.0
   platforms:
     - name: Ubuntu
       versions:
@@ -1,20 +0,0 @@
---
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>

maturity_info:
  status: complete
  created_during: mitaka
@@ -1,11 +1,11 @@
 [metadata]
-name = openstack-ansible-plugins
-summary = plugins for OpenStack Ansible
+name = ansible-config_template
+summary = Config Template plugin for Ansible
 description-file =
     README.rst
 author = OpenStack
 author-email = openstack-dev@lists.openstack.org
-home-page = https://docs.openstack.org/openstack-ansible-plugins/latest/
+home-page = https://docs.openstack.org/ansible-config_template/latest/
 classifier =
     Intended Audience :: Developers
     Intended Audience :: System Administrators
@@ -1,166 +0,0 @@
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Kevin Carter <kevin.carter@rackspace.com>

import copy
import imp
import os

# NOTICE(cloudnull): The linear strategy plugin is imported using the full
#                    path to the file because it is not directly importable.
import ansible.plugins.strategy as strategy
LINEAR = imp.load_source(
    'ssh',
    os.path.join(os.path.dirname(strategy.__file__), 'linear.py')
)

# NOTICE(jmccrory): The play_context is imported so that additional container
#                   specific variables can be made available to connection
#                   plugins.
import ansible.playbook.play_context
ansible.playbook.play_context.MAGIC_VARIABLE_MAPPING.update(
    {'physical_host': ('physical_host',)})
ansible.playbook.play_context.MAGIC_VARIABLE_MAPPING.update(
    {'container_name': ('inventory_hostname',)})
ansible.playbook.play_context.MAGIC_VARIABLE_MAPPING.update(
    {'chroot_path': ('chroot_path',)})
ansible.playbook.play_context.MAGIC_VARIABLE_MAPPING.update(
    {'container_tech': ('container_tech',)})


class StrategyModule(LINEAR.StrategyModule):
    """Notes about this strategy.

    When this strategy encounters a task with a "when" or "register" stanza
    it will collect results immediately, essentially forming a block. If the
    task does not have a "when" or "register" stanza the results will be
    collected after all tasks have been queued.

    To improve execution speed, if a task has a "when" conditional attached
    to it the conditional will be rendered before queuing the task, and
    should the conditional evaluate to True the task will be queued. To
    ensure the correct execution of playbooks this optimisation is only used
    when the task uses no lookups, which guarantees proper task execution.

    To optimize transport reliability, if a task is using a "delegate_to"
    stanza the connection method will change to paramiko if the connection
    option has been set to "smart", the Ansible 2.x default. Regardless of
    the connection method, if "delegate_to" is used the task will have
    pipelining disabled for the duration of that specific task.

    Container context will be added to the ``play_context`` which is used
    to further optimise connectivity by only ever SSH'ing into a given host
    machine instead of attempting an SSH connection into a container.
    """

    @staticmethod
    def _check_when(host, task, templar, task_vars):
        """Evaluate if conditionals are to be run.

        This will error on the side of caution:
            * If a conditional is detected to be valid the method will
              return True.
            * If there's ever an issue with the templated conditional the
              method will also return True.
            * If the task has a detected "with" the method will return True.

        :param host: object
        :param task: object
        :param templar: object
        :param task_vars: dict
        """
        try:
            if not task.when or (task.when and task.register):
                return True

            _ds = getattr(task, '_ds', dict())
            if any([i for i in _ds.keys() if i.startswith('with')]):
                return True

            conditional = task.evaluate_conditional(templar, task_vars)
            if not conditional:
                LINEAR.display.verbose(
                    u'Task "%s" has been omitted from the job because the'
                    u' conditional "%s" was evaluated as "%s"'
                    % (task.name, task.when, conditional),
                    host=host,
                    caplevel=0
                )
                return False
        except Exception:
            return True
        else:
            return True

    def _queue_task(self, host, task, task_vars, play_context):
        """Queue a task to be sent to the worker.

        Set a host variable, 'physical_host_addrs', containing a dictionary
        of each physical host and its 'ansible_host' variable.

        Modify the play_context to disable pipelining and use the paramiko
        transport method when a task is being delegated.
        """
        templar = LINEAR.Templar(loader=self._loader, variables=task_vars)
        if not self._check_when(host, task, templar, task_vars):
            return

        _play_context = copy.deepcopy(play_context)

        try:
            groups = self._inventory.get_groups_dict()
        except AttributeError:
            groups = self._inventory.get_group_dict()
        physical_hosts = groups.get('hosts', groups.get('all', {}))
        physical_host_addrs = {}
        for physical_host in physical_hosts:
            physical_host_vars = self._inventory.get_host(physical_host).vars
            physical_host_addr = physical_host_vars.get('ansible_host',
                                                        physical_host)
            physical_host_addrs[physical_host] = physical_host_addr
        host.set_variable('physical_host_addrs', physical_host_addrs)

        if task.delegate_to:
            # If a task uses delegation change the play_context
            # to use paramiko with pipelining disabled for this
            # one task on its collection of hosts.
            if _play_context.pipelining:
                _play_context.pipelining = False
                LINEAR.display.verbose(
                    u'Because this is a task using "delegate_to",'
                    u' pipelining has been disabled, but will be'
                    u' restored upon completion of this task.',
                    host=host,
                    caplevel=0
                )

            if _play_context.connection == 'smart':
                _play_context.connection = 'paramiko'
                LINEAR.display.verbose(
                    u'Delegated task transport changing from'
                    u' "%s" to "%s". The context will be restored'
                    u' once the task has completed.' % (
                        'smart',
                        _play_context.connection
                    ),
                    host=host,
                    caplevel=0
                )

        return super(StrategyModule, self)._queue_task(
            host,
            task,
            task_vars,
            _play_context
        )
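The behaviour described in the strategy docstring is easiest to see in isolation. A self-contained sketch of the _check_when() control flow; StubTask and the variable handling below are stand-ins of our own, the real objects are Ansible's Task and Templar:

class StubTask(object):
    def __init__(self, when=None, register=None, ds=None):
        self.when = when or []
        self.register = register
        self._ds = ds or {}

    def evaluate_conditional(self, task_vars):
        # Stand-in for Task.evaluate_conditional(templar, task_vars).
        return all(bool(task_vars.get(c)) for c in self.when)


def check_when(task, task_vars):
    try:
        if not task.when or (task.when and task.register):
            return True  # no conditional, or results are needed: queue it
        if any(k.startswith('with') for k in task._ds):
            return True  # loops may change the vars: err on the side of running
        return task.evaluate_conditional(task_vars)
    except Exception:
        return True  # any templating failure means "let Ansible decide later"


print(check_when(StubTask(when=['flag']), {'flag': False}))                  # False
print(check_when(StubTask(when=['flag'], register='out'), {'flag': False}))  # True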
@@ -1,43 +0,0 @@
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import itertools
import os

import linear


class StrategyModule(linear.StrategyModule):
    def _queue_task(self, host, task, task_vars, play_context):
        """Wipe the notification system and return for config tasks."""
        skip_handlers = task_vars.get('skip_handlers', True)
        if skip_handlers:
            task.notify = None
        skip_tags = task_vars.get('skip_tags')
        if skip_tags:
            if not hasattr(skip_tags, '__iter__'):
                skip_tags = (skip_tags,)
        else:
            skip_tags = ()
        if any([True for (i, j) in itertools.product(skip_tags, task.tags)
                if i in j]):
            return
        else:
            return super(StrategyModule, self)._queue_task(
                host,
                task,
                task_vars,
                play_context
            )
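A quick illustration of the tag matching above: every (skip_tag, task_tag) pair is compared, and a substring hit anywhere drops the task. Handler skipping is simpler still: task.notify is wiped unless skip_handlers is False. The example values mirror the tagfilter test playbook later in this diff:

import itertools

def should_skip(skip_tags, task_tags):
    # Normalise a bare string into a one-element tuple, as the strategy does.
    if isinstance(skip_tags, str):
        skip_tags = (skip_tags,)
    return any(i in j for i, j in itertools.product(skip_tags or (), task_tags))

print(should_skip('skipit', ['test-tag1', 'test-skipit']))  # True
print(should_skip(['tagskip'], ['test-runit']))             # False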
@@ -1,20 +1,4 @@
- name: apt_package_pinning
  src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
  scm: git
  version: master
- name: pip_install
  src: https://git.openstack.org/openstack/openstack-ansible-pip_install
  scm: git
  version: master
- name: openstack_hosts
  src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
  scm: git
  version: master
- name: lxc_hosts
  src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
  scm: git
  version: master
- name: lxc_container_create
  src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
  scm: git
  version: master
@@ -1,216 +0,0 @@
---
# Copyright 2016, Comcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Test config_template
  hosts: localhost
  connection: local
  gather_facts: yes
  tasks:
    # Test basic function of config_template
    - name: Template test INI template
      config_template:
        src: "{{ playbook_dir }}/templates/test.ini"
        dest: "/tmp/test.ini"
        config_overrides: "{{ test_config_ini_overrides }}"
        config_type: "ini"

    - name: Read test.ini
      slurp:
        src: /tmp/test.ini
      register: ini_file
    - debug:
        msg: "ini - {{ ini_file.content | b64decode }}"
    - name: Validate output
      assert:
        that:
          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test.ini')) == 'new_value'"
          - "(lookup('ini', 'baz section=foo file=/tmp/test.ini')) == 'bar'"

    # Test basic function of config_template with content instead of src
    - name: Template test INI template
      config_template:
        content: "{{ lookup('file', playbook_dir + '/templates/test.ini') }}"
        dest: "/tmp/test_with_content.ini"
        config_overrides: "{{ test_config_ini_overrides }}"
        config_type: "ini"

    - name: Read test.ini
      slurp:
        src: /tmp/test_with_content.ini
      register: ini_file_with_content
    - debug:
        msg: "ini - {{ ini_file_with_content.content | b64decode }}"
    - name: Validate output
      assert:
        that:
          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test_with_content.ini')) == 'new_value'"
          - "(lookup('ini', 'baz section=foo file=/tmp/test_with_content.ini')) == 'bar'"

    # Test list additions in config_template
    - name: Template test YML template
      config_template:
        src: "{{ playbook_dir }}/templates/test.yml"
        dest: "/tmp/test_extend.yml"
        config_overrides: "{{ test_config_yml_overrides }}"
        config_type: "yaml"
        list_extend: True

    - name: Read test_extend.yml
      slurp:
        src: /tmp/test_extend.yml
      register: extend_file
    - name: Read expected test_extend.yml
      slurp:
        src: "{{ playbook_dir }}/files/test_extend.yml.expected"
      register: extend_file_expected
    - debug:
        msg: "extend - {{ extend_file.content | b64decode }}"
    - debug:
        msg: "extend.expected - {{ extend_file_expected.content | b64decode }}"
    - name: Compare files
      assert:
        that:
          - "(extend_file.content | b64decode) == (extend_file_expected.content | b64decode)"

    # Test list replacement in config_template
    - name: Template test YML template
      config_template:
        src: "{{ playbook_dir }}/templates/test.yml"
        dest: "/tmp/test_no_extend.yml"
        config_overrides: "{{ test_config_yml_overrides }}"
        config_type: "yaml"
        list_extend: False
    - name: Read test_no_extend.yml
      slurp:
        src: /tmp/test_no_extend.yml
      register: no_extend_file
    - name: Read expected test_no_extend.yml
      slurp:
        src: "{{ playbook_dir }}/files/test_no_extend.yml.expected"
      register: no_extend_file_expected
    - debug:
        msg: "no_extend - {{ no_extend_file.content | b64decode }}"
    - debug:
        msg: "no_extend.expected - {{ no_extend_file_expected.content | b64decode }}"
    - name: Compare files
      assert:
        that:
          - "(no_extend_file.content | b64decode) == (no_extend_file_expected.content | b64decode)"

    # Test dumping hostvars using config overrides
    - name: Template test YML template with hostvars override
      config_template:
        src: "{{ playbook_dir }}/templates/test.yml"
        dest: "/tmp/test_hostvars.yml"
        config_overrides: "{{ test_config_yml_hostvars_overrides }}"
        config_type: "yaml"
    - name: Read test_hostvars.yml
      slurp:
        src: /tmp/test_hostvars.yml
      register: hostvars_file
    - debug:
        msg: "hostvars - {{ (hostvars_file.content | b64decode | from_yaml).test_hostvar }}"
    - debug:
        msg: "hostvars.expected - {{ test_config_yml_hostvars_overrides.test_hostvar }}"
    - name: Compare files
      assert:
        that:
          - "((hostvars_file.content | b64decode | from_yaml).test_hostvar) == (test_config_yml_hostvars_overrides.test_hostvar)"

    # Test content attribute with a dictionary input and config_type equal to 'json'
    - name: Template test JSON template with content attribute
      config_template:
        dest: "/tmp/test_content_no_overrides.json"
        config_overrides: {}
        config_type: "json"
        content: "{{ lookup('file', playbook_dir ~ '/templates/test.json') | from_json }}"
    - name: Read test_content_no_overrides.json
      slurp:
        src: /tmp/test_content_no_overrides.json
      register: content_no_overrides_file
    - name: Read expected test_content_no_overrides.json
      slurp:
        src: "{{ playbook_dir }}/files/test_content_no_overrides.json.expected"
      register: content_no_overrides_file_expected
    - debug:
        msg: "content_no_overrides.json - {{ content_no_overrides_file.content | b64decode | from_json }}"
    - debug:
        msg: "content_no_overrides.json.expected - {{ content_no_overrides_file_expected.content | b64decode | from_json }}"
    # NOTE (alextricity25): The config_template module doesn't use ordered dicts when reading and writing json
    # data, so we can't guarantee that the string literal of both files' content will be the same. Instead, we
    # compare the content after transforming it into a dictionary.
    - name: Compare file content
      assert:
        that:
          - "(content_no_overrides_file.content | b64decode | from_json) == (content_no_overrides_file_expected.content | b64decode | from_json)"

    # Test the ignore_none_type attribute when set to False
    - name: Template test with ignore_none_type set to false
      config_template:
        src: "{{ playbook_dir }}/templates/test_ignore_none_type.ini"
        dest: "/tmp/test_ignore_none_type.ini"
        config_overrides: "{{ test_config_ini_overrides }}"
        config_type: "ini"
        ignore_none_type: False
    - name: Read test_ignore_none_type.ini
      slurp:
        src: /tmp/test_ignore_none_type.ini
      register: test_ignore_none_type
    - debug:
        msg: "test_ignore_none_type.ini - {{ test_ignore_none_type.content | b64decode }}"
    - name: Validate output has valueless options printed out
      assert:
        that:
          - "{{ test_ignore_none_type.content | b64decode | search('(?m)^india$') }}"
          - "{{ test_ignore_none_type.content | b64decode | search('(?m)^juliett kilo$') }}"

    # Test basic function of config_template
    - name: Template test INI comments
      config_template:
        src: "{{ playbook_dir }}/templates/test_with_comments.ini"
        dest: "/tmp/test_with_comments.ini"
        config_overrides: "{{ test_config_ini_overrides }}"
        config_type: "ini"
      tags: test

    - name: Read test.ini
      slurp:
        src: /tmp/test_with_comments.ini
      register: ini_file
      tags: test

    - debug:
        msg: "ini - {{ ini_file.content | b64decode }}"
    - name: Validate output
      tags: test
      assert:
        that:
          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test_with_comments.ini')) == 'new_value'"
          - "(lookup('ini', 'baz section=foo file=/tmp/test_with_comments.ini')) == 'bar'"
          - "{{ ini_file.content | b64decode | search('#This is a comment')}}"
          - "{{ ini_file.content | b64decode | search('# A default section comment\n# broken into multiple lines\n\\[DEFAULT\\]')}}"

  vars:
    test_config_ini_overrides:
      DEFAULT:
        new_key: "new_value"
      foo:
        baz: "bar"
    test_config_yml_overrides:
      list_one:
        - four
    test_config_yml_hostvars_overrides:
      test_hostvar: "{{ ansible_default_ipv4.address }}"
@@ -1,69 +0,0 @@
---
# Copyright 2017, Logan Vig <logan2211@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Fail if sshd is running in the containers
  hosts: all_containers:alt_containers
  gather_facts: no
  user: root
  become: True
  tasks:
    - name: Ensure sshd is not running
      command: pgrep sshd
      register: sshd_pgrep
      failed_when: "sshd_pgrep.rc == 0"
      changed_when: false

# The container3 ping validates I75f9d0f55ecd875caa1bf608a77c92f950b679a1
- name: Test the connection plugin container awareness functions
  hosts: all_containers:alt_containers
  gather_facts: no
  user: root
  become: True
  tasks:
    - name: Test container ping
      action:
        module: ping

# Test for I56d8afddbccf01f2944d2fdd505b601a4b048374
- name: Test delegation in the container aware connection plugin
  hosts: localhost
  gather_facts: no
  user: root
  become: True
  tasks:
    - name: Test container delegation without templating
      command: cat /etc/hostname
      delegate_to: container1
      register: delegated
      failed_when: delegated.stdout != 'container1'
      changed_when: false
    - name: Test container delegation using templating
      command: cat /etc/hostname
      delegate_to: "{{ groups['all_containers'][1] }}"
      register: delegated
      failed_when: delegated.stdout != 'container2'
      changed_when: false

# Test for If594914df53efacc6d5bba148f4f46280f5a117d
- name: Test delegation between container physical_hosts
  hosts: fakecontainer
  gather_facts: no
  user: root
  become: True
  tasks:
    - name: Test delegation between containers on different hosts
      action:
        module: ping
      delegate_to: "{{ groups['all_containers'][0] }}"
@@ -1,137 +0,0 @@
---
# Copyright 2016, @WalmartLabs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Test filters
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Validate bit_length_power_of_2 filter
      assert:
        that:
          - "{{ 1024 | bit_length_power_of_2 }} == 1024"
          - "{{ 9600 | bit_length_power_of_2 }} == 16384"

    - name: Set net filter facts
      set_fact:
        url_netloc: "{{ 'http://review.openstack.org:29418/something' | netloc }}"
        url_netloc_no_port: "{{ 'http://review.openstack.org:29418/something' | netloc_no_port }}"
        url_netorigin: "{{ 'http://review.openstack.org:29418/something' | netorigin }}"
    - name: Validate net filters
      assert:
        that:
          - "url_netloc == 'review.openstack.org:29418'"
          - "url_netloc_no_port == 'review.openstack.org'"
          - "url_netorigin == 'http://review.openstack.org:29418'"

    - name: Validate string_2_int filter
      assert:
        that:
          - "{{ 'test' | string_2_int }} == 3752"

    - name: Set pip package list facts
      set_fact:
        pip_package_list_1:
          - pip==8.1.2
          - setuptools==25.1.0
          - wheel==0.29.0
        pip_package_list_1_names:
          - pip
          - setuptools
          - wheel
        pip_package_list_2:
          - babel==2.3.4
          - pip==8.1.0
        pip_package_list_merged:
          - babel==2.3.4
          - pip==8.1.0
          - setuptools==25.1.0
          - wheel==0.29.0
    - name: Set pip package filter facts
      set_fact:
        pip_package_list_1_names_filtered: "{{ pip_package_list_1 | pip_requirement_names }}"
        pip_package_list_constraint_filtered: "{{ pip_package_list_1 | pip_constraint_update(pip_package_list_2) }}"
    - name: Validate pip requirement filters
      assert:
        that:
          - "pip_package_list_1_names_filtered == pip_package_list_1_names"
          - "pip_package_list_constraint_filtered == pip_package_list_merged"

    - name: Set splitlines string facts
      set_fact:
        string_with_lines: |
          this
          is
          a
          test
        string_split_lines:
          - this
          - is
          - a
          - test
    - name: Set splitlines filter fact
      set_fact:
        string_split_lines_filtered: "{{ string_with_lines | splitlines }}"
    - name: Validate splitlines filter
      assert:
        that: "string_split_lines_filtered == string_split_lines"

    - name: Set git repo facts
      set_fact:
        git_repo: "git+https://git.openstack.org/openstack/nova@2bc8128d7793cc72ca2e146de3a092e1fef5033b#egg=nova&gitname=nova"
        git_repo_name: nova
        git_repo_link_parts:
          name: nova
          version: 2bc8128d7793cc72ca2e146de3a092e1fef5033b
          plugin_path: null
          url: "https://git.openstack.org/openstack/nova"
          original: "git+https://git.openstack.org/openstack/nova@2bc8128d7793cc72ca2e146de3a092e1fef5033b#egg=nova&gitname=nova"
    - name: Set git link parse filter facts
      set_fact:
        git_repo_link_parse_filtered: "{{ git_repo | git_link_parse }}"
        git_repo_link_parse_name_filtered: "{{ git_repo | git_link_parse_name }}"
    - name: Validate git link parse filters
      assert:
        that:
          - "git_repo_link_parse_filtered == git_repo_link_parts"
          - "git_repo_link_parse_name_filtered == git_repo_name"

    - name: Set deprecation variable facts
      set_fact:
        new_var: new
        old_var: old
    - name: Set deprecated filter fact
      set_fact:
        deprecated_value: "{{ new_var | deprecated(old_var, 'old_var', 'new_var', 'Next Release', false) }}"
    - name: Validate deprecated filter
      assert:
        that: "deprecated_value == old_var"

    - name: Set test_dict fact
      set_fact:
        test_dict:
          a:
            b:
              c: d

    - name: Validate get_nested returns value
      assert:
        that: "{{ test_dict|get_nested('a.b.c') == 'd' }}"

    - name: Validate get_nested returns None on missing key
      assert:
        that: "{{ test_dict|get_nested('a.c') == None }}"
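Two of the filters exercised above are small enough to sketch inline. These bodies are reconstructions matching the asserted values, not the plugin's own code:

def bit_length_power_of_2(value):
    # Round an integer up to the nearest power of two via bit_length().
    return 2 ** (int(value) - 1).bit_length()

def get_nested(data, path):
    # Walk a dotted key path; return None when any segment is missing.
    for key in path.split('.'):
        try:
            data = data[key]
        except (KeyError, TypeError):
            return None
    return data

print(bit_length_power_of_2(1024))  # 1024
print(bit_length_power_of_2(9600))  # 16384
print(get_nested({'a': {'b': {'c': 'd'}}}, 'a.b.c'))  # d
print(get_nested({'a': {'b': {'c': 'd'}}}, 'a.c'))    # None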
@@ -1,40 +0,0 @@
---
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>

- name: Test lookups
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Ensure Package list contains the files listed in Package
      set_fact:
        packages_dict: "{{ lookup('packages_file', playbook_dir ~ '/files/Packages') }}"
    - name: Validate that Filename, MD5sum, SHA1, SHA256 and Version are properly generated
      assert:
        that:
          - packages_dict['0ad']['Filename'] == "pool/main/0/0ad/0ad_0.0.16-2~ubuntu14.04.1_amd64.deb"
          - packages_dict['0ad']['MD5sum'] == "7e5f2ba5e1a95e47753eeb962af32e26"
          - packages_dict['0ad']['SHA1'] == "3cf898d4595092daa274e6cd8d9afd0332b0afbe"
          - packages_dict['0ad']['SHA256'] == "f4602a90a305abeacb4a48bbfd7d609aa7cbb3ed2ab9127ae30ef64a4be88378"
          - packages_dict['0ad']['Version'] == "0.0.16-2~ubuntu14.04.1"
    - name: Validate that a provides package is listed as a package too
      assert:
        that:
          - packages_dict['aide'] is defined
          - packages_dict['aide-binary'] is defined
          - packages_dict['aide'] == packages_dict['aide-binary']
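The packages_file lookup above is fed a Debian "Packages" index, a series of RFC 822-style stanzas separated by blank lines. A minimal sketch of that shape of parsing, with field names taken from the assertions; the parsing body is illustrative, not the plugin's, and it omits the Provides aliasing that the aide/aide-binary assertion exercises:

def parse_packages(text):
    packages = {}
    for stanza in text.strip().split('\n\n'):
        # One "Field: value" pair per line; continuation lines are skipped.
        fields = dict(
            line.split(': ', 1) for line in stanza.splitlines()
            if ': ' in line and not line.startswith(' ')
        )
        packages[fields['Package']] = fields
    return packages

sample = """Package: 0ad
Version: 0.0.16-2~ubuntu14.04.1
Filename: pool/main/0/0ad/0ad_0.0.16-2~ubuntu14.04.1_amd64.deb
MD5sum: 7e5f2ba5e1a95e47753eeb962af32e26
"""
print(parse_packages(sample)['0ad']['Version'])  # 0.0.16-2~ubuntu14.04.1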
@@ -1,129 +0,0 @@
---
# Copyright 2017, Logan Vig <logan2211@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Test the tagfilter execution strategy with a list of skip tags
  hosts: localhost
  strategy: tagfilter
  gather_facts: no
  vars:
    skip_tags:
      - skipit
      - tagskip
  tasks:
    - name: Test skipped task
      debug:
        msg: "This task is skipped"
      register: skipped_task
      tags:
        # Multiple tags specified before the skip tag to make sure the
        # .product() loop in the strategy is working properly. (i.e. ensure
        # each element in the skip_tags list is being checked against each
        # element in this tag list.)
        - test-tag1
        - test-tag2
        - test-tag3
        - test-tag4
        - test-skipit
    - name: Test unskipped task
      command: /bin/true
      register: run_task
      notify: Skipped Handler
      tags:
        - skip_ansible_lint
        - test-runit
  handlers:
    - name: Skipped Handler
      debug:
        msg: "This handler is always skipped"
      register: skipped_handler
  post_tasks:
    - name: Check task run states
      assert:
        that:
          - "skipped_task is not defined"
          - "run_task | changed"
          - "skipped_handler is not defined"

- name: Test the tagfilter execution strategy with a string skip tag
  hosts: localhost
  strategy: tagfilter
  gather_facts: no
  vars:
    skip_tags: skipit
  tasks:
    - name: Test skipped task
      debug:
        msg: "This task is skipped"
      register: skipped_task
      tags:
        - test-skipit
    - name: Test unskipped task
      command: /bin/true
      register: run_task
      notify: Skipped Handler
      tags:
        - skip_ansible_lint
        - test-runit
  handlers:
    - name: Skipped Handler
      debug:
        msg: "This handler is always skipped"
      register: skipped_handler
  post_tasks:
    - name: Check task run states
      assert:
        that:
          - "skipped_task is not defined"
          - "run_task | changed"
          - "skipped_handler is not defined"

- name: Test the tagfilter execution strategy with handlers enabled
  hosts: localhost
  strategy: tagfilter
  gather_facts: no
  vars:
    skip_handlers: False
    skip_tags: skipit
  tasks:
    - name: Test skipped task
      command: /bin/true
      register: skipped_task
      notify: Skipped Handler
      tags:
        - skip_ansible_lint
        - test-skipit
    - name: Test unskipped task
      command: /bin/true
      register: run_task
      notify: Run Handler
      tags:
        - skip_ansible_lint
        - test-runit
  handlers:
    - name: Skipped Handler
      debug:
        msg: "This handler is always skipped"
      register: skipped_handler
    - name: Run Handler
      command: /bin/true
      register: run_handler
  post_tasks:
    - name: Check task run states
      assert:
        that:
          - "skipped_task is not defined"
          - "run_task | changed"
          - "skipped_handler is not defined"
          - "run_handler | changed"
tests/test.yml
@@ -1,5 +1,5 @@
 ---
-# Copyright 2016, @WalmartLabs
+# Copyright 2018, Rackspace US
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,18 +13,204 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-- name: Collect facts for all hosts
-  hosts: hosts:!fake_hosts
+- name: Test config_template
+  hosts: localhost
+  connection: local
   gather_facts: yes
+  tasks:
+    # Test basic function of config_template
+    - name: Template test INI template
+      config_template:
+        src: "{{ playbook_dir }}/templates/test.ini"
+        dest: "/tmp/test.ini"
+        config_overrides: "{{ test_config_ini_overrides }}"
+        config_type: "ini"

-- include: common/test-setup-host.yml
+    - name: Read test.ini
+      slurp:
+        src: /tmp/test.ini
+      register: ini_file
+    - debug:
+        msg: "ini - {{ ini_file.content | b64decode }}"
+    - name: Validate output
+      assert:
+        that:
+          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test.ini')) == 'new_value'"
+          - "(lookup('ini', 'baz section=foo file=/tmp/test.ini')) == 'bar'"

-- include: test-config_template.yml
+    # Test basic function of config_template with content instead of src
+    - name: Template test INI template
+      config_template:
+        content: "{{ lookup('file', playbook_dir + '/templates/test.ini') }}"
+        dest: "/tmp/test_with_content.ini"
+        config_overrides: "{{ test_config_ini_overrides }}"
+        config_type: "ini"

-- include: test-filters.yml
+    - name: Read test.ini
+      slurp:
+        src: /tmp/test_with_content.ini
+      register: ini_file_with_content
+    - debug:
+        msg: "ini - {{ ini_file_with_content.content | b64decode }}"
+    - name: Validate output
+      assert:
+        that:
+          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test_with_content.ini')) == 'new_value'"
+          - "(lookup('ini', 'baz section=foo file=/tmp/test_with_content.ini')) == 'bar'"

-- include: test-lookups.yml
+    # Test list additions in config_template
+    - name: Template test YML template
+      config_template:
+        src: "{{ playbook_dir }}/templates/test.yml"
+        dest: "/tmp/test_extend.yml"
+        config_overrides: "{{ test_config_yml_overrides }}"
+        config_type: "yaml"
+        list_extend: True

-- include: test-strategy-tagfilter.yml
+    - name: Read test_extend.yml
+      slurp:
+        src: /tmp/test_extend.yml
+      register: extend_file
+    - name: Read expected test_extend.yml
+      slurp:
+        src: "{{ playbook_dir }}/files/test_extend.yml.expected"
+      register: extend_file_expected
+    - debug:
+        msg: "extend - {{ extend_file.content | b64decode }}"
+    - debug:
+        msg: "extend.expected - {{ extend_file_expected.content | b64decode }}"
+    - name: Compare files
+      assert:
+        that:
+          - "(extend_file.content | b64decode) == (extend_file_expected.content | b64decode)"

-- include: test-connection-plugin.yml
+    # Test list replacement in config_template
+    - name: Template test YML template
+      config_template:
+        src: "{{ playbook_dir }}/templates/test.yml"
+        dest: "/tmp/test_no_extend.yml"
+        config_overrides: "{{ test_config_yml_overrides }}"
+        config_type: "yaml"
+        list_extend: False
+    - name: Read test_no_extend.yml
+      slurp:
+        src: /tmp/test_no_extend.yml
+      register: no_extend_file
+    - name: Read expected test_no_extend.yml
+      slurp:
+        src: "{{ playbook_dir }}/files/test_no_extend.yml.expected"
+      register: no_extend_file_expected
+    - debug:
+        msg: "no_extend - {{ no_extend_file.content | b64decode }}"
+    - debug:
+        msg: "no_extend.expected - {{ no_extend_file_expected.content | b64decode }}"
+    - name: Compare files
+      assert:
+        that:
+          - "(no_extend_file.content | b64decode) == (no_extend_file_expected.content | b64decode)"
+
+    # Test dumping hostvars using config overrides
+    - name: Template test YML template with hostvars override
+      config_template:
+        src: "{{ playbook_dir }}/templates/test.yml"
+        dest: "/tmp/test_hostvars.yml"
+        config_overrides: "{{ test_config_yml_hostvars_overrides }}"
+        config_type: "yaml"
+    - name: Read test_hostvars.yml
+      slurp:
+        src: /tmp/test_hostvars.yml
+      register: hostvars_file
+    - debug:
+        msg: "hostvars - {{ (hostvars_file.content | b64decode | from_yaml).test_hostvar }}"
+    - debug:
+        msg: "hostvars.expected - {{ test_config_yml_hostvars_overrides.test_hostvar }}"
+    - name: Compare files
+      assert:
+        that:
+          - "((hostvars_file.content | b64decode | from_yaml).test_hostvar) == (test_config_yml_hostvars_overrides.test_hostvar)"
+
+    # Test content attribute with a dictionary input and config_type equal to 'json'
+    - name: Template test JSON template with content attribute
+      config_template:
+        dest: "/tmp/test_content_no_overrides.json"
+        config_overrides: {}
+        config_type: "json"
+        content: "{{ lookup('file', playbook_dir ~ '/templates/test.json') | from_json }}"
+    - name: Read test_content_no_overrides.json
+      slurp:
+        src: /tmp/test_content_no_overrides.json
+      register: content_no_overrides_file
+    - name: Read expected test_content_no_overrides.json
+      slurp:
+        src: "{{ playbook_dir }}/files/test_content_no_overrides.json.expected"
+      register: content_no_overrides_file_expected
+    - debug:
+        msg: "content_no_overrides.json - {{ content_no_overrides_file.content | b64decode | from_json }}"
+    - debug:
+        msg: "content_no_overrides.json.expected - {{ content_no_overrides_file_expected.content | b64decode | from_json }}"
+    # NOTE (alextricity25): The config_template module doesn't use ordered dicts when reading and writing json
+    # data, so we can't guarantee that the string literal of both files' content will be the same. Instead, we
+    # compare the content after transforming it into a dictionary.
+    - name: Compare file content
+      assert:
+        that:
+          - "(content_no_overrides_file.content | b64decode | from_json) == (content_no_overrides_file_expected.content | b64decode | from_json)"
+
+    # Test the ignore_none_type attribute when set to False
+    - name: Template test with ignore_none_type set to false
+      config_template:
+        src: "{{ playbook_dir }}/templates/test_ignore_none_type.ini"
+        dest: "/tmp/test_ignore_none_type.ini"
+        config_overrides: "{{ test_config_ini_overrides }}"
+        config_type: "ini"
+        ignore_none_type: False
+    - name: Read test_ignore_none_type.ini
+      slurp:
+        src: /tmp/test_ignore_none_type.ini
+      register: test_ignore_none_type
+    - debug:
+        msg: "test_ignore_none_type.ini - {{ test_ignore_none_type.content | b64decode }}"
+    - name: Validate output has valueless options printed out
+      assert:
+        that:
+          - "{{ test_ignore_none_type.content | b64decode | search('(?m)^india$') }}"
+          - "{{ test_ignore_none_type.content | b64decode | search('(?m)^juliett kilo$') }}"
+
+    # Test basic function of config_template
+    - name: Template test INI comments
+      config_template:
+        src: "{{ playbook_dir }}/templates/test_with_comments.ini"
+        dest: "/tmp/test_with_comments.ini"
+        config_overrides: "{{ test_config_ini_overrides }}"
+        config_type: "ini"
+      tags: test
+
+    - name: Read test.ini
+      slurp:
+        src: /tmp/test_with_comments.ini
+      register: ini_file
+      tags: test
+
+    - debug:
+        msg: "ini - {{ ini_file.content | b64decode }}"
+    - name: Validate output
+      tags: test
+      assert:
+        that:
+          - "(lookup('ini', 'new_key section=DEFAULT file=/tmp/test_with_comments.ini')) == 'new_value'"
+          - "(lookup('ini', 'baz section=foo file=/tmp/test_with_comments.ini')) == 'bar'"
+          - "{{ ini_file.content | b64decode | search('#This is a comment')}}"
+          - "{{ ini_file.content | b64decode | search('# A default section comment\n# broken into multiple lines\n\\[DEFAULT\\]')}}"
+
+  vars:
+    test_config_ini_overrides:
+      DEFAULT:
+        new_key: "new_value"
+      foo:
+        baz: "bar"
+    test_config_yml_overrides:
+      list_one:
+        - four
+    test_config_yml_hostvars_overrides:
+      test_hostvar: "{{ ansible_default_ipv4.address }}"
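The NOTE in the play above compares the rendered JSON as parsed dictionaries rather than as strings. A short illustration of why: equal content can serialize with different key order.

import json

a = '{"alpha": 1, "beta": 2}'
b = '{"beta": 2, "alpha": 1}'
print(a == b)                          # False: the string literals differ
print(json.loads(a) == json.loads(b))  # True: the parsed content is equal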
@@ -1,21 +0,0 @@
---
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- job:
    name: openstack-ansible-python3-ubuntu-xenial-nv
    parent: openstack-ansible-functional-ubuntu-xenial
    voting: false
    vars:
      tox_env: func_py3
@@ -1,31 +0,0 @@
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- project:
    check:
      jobs:
        - openstack-ansible-linters
        - openstack-ansible-functional-centos-7
        - openstack-ansible-functional-opensuse-423
        - openstack-ansible-functional-ubuntu-xenial
        - openstack-ansible-python3-ubuntu-xenial-nv
    experimental:
      jobs:
        - openstack-ansible-integrated-deploy-aio
    gate:
      jobs:
        - openstack-ansible-linters
        - openstack-ansible-functional-centos-7
        - openstack-ansible-functional-opensuse-423
        - openstack-ansible-functional-ubuntu-xenial