From 92ff63f3e087e9f6c768a4bbcf553db464675d74 Mon Sep 17 00:00:00 2001
From: Jeffrey Zhang
Date: Wed, 30 Nov 2016 00:27:16 +0800
Subject: [PATCH] clean up kolla related files

* rename package name from kolla to kolla-ansible
* remove docker for data_files
* remove kolla docker
* remove kolla-build console_scripts

Change-Id: I53abbf79dffb54eb785a39ba04d375bc4e4f27b0
---
 kolla/cmd/__init__.py | 0
 kolla/cmd/build.py | 39 -
 kolla/common/__init__.py | 0
 kolla/common/config.py | 379 ------
 kolla/common/task.py | 42 -
 kolla/hacking/__init__.py | 0
 kolla/hacking/checks.py | 40 -
 kolla/image/__init__.py | 0
 kolla/image/build.py | 1024 -----------------
 kolla/opts.py | 17 -
 kolla/template/__init__.py | 0
 kolla/template/filters.py | 29 -
 kolla/template/methods.py | 63 -
 kolla/tests/__init__.py | 0
 kolla/tests/base.py | 44 -
 kolla/tests/common/__init__.py | 0
 kolla/tests/common/test_config.py | 20 -
 kolla/tests/docker/base/Dockerfile.j2 | 1 -
 .../tests/docker/neutron-server/Dockerfile.j2 | 1 -
 kolla/tests/etc/default.conf | 16 -
 kolla/tests/test_build.py | 287 -----
 setup.cfg | 6 +-
 tests/test_build.py | 188 ---
 tools/build.py | 1 -
 tools/validate-all-dockerfiles.sh | 11 -
 tox.ini | 4 -
 26 files changed, 2 insertions(+), 2210 deletions(-)
 delete mode 100644 kolla/cmd/__init__.py
 delete mode 100755 kolla/cmd/build.py
 delete mode 100644 kolla/common/__init__.py
 delete mode 100644 kolla/common/config.py
 delete mode 100644 kolla/common/task.py
 delete mode 100644 kolla/hacking/__init__.py
 delete mode 100644 kolla/hacking/checks.py
 delete mode 100644 kolla/image/__init__.py
 delete mode 100644 kolla/image/build.py
 delete mode 100644 kolla/opts.py
 delete mode 100644 kolla/template/__init__.py
 delete mode 100644 kolla/template/filters.py
 delete mode 100644 kolla/template/methods.py
 delete mode 100644 kolla/tests/__init__.py
 delete mode 100644 kolla/tests/base.py
 delete mode 100644 kolla/tests/common/__init__.py
 delete mode 100644 kolla/tests/common/test_config.py
 delete mode 100644 kolla/tests/docker/base/Dockerfile.j2
 delete mode 100644 kolla/tests/docker/neutron-server/Dockerfile.j2
 delete mode 100644 kolla/tests/etc/default.conf
 delete mode 100644 kolla/tests/test_build.py
 delete mode 100644 tests/test_build.py
 delete mode 120000 tools/build.py
 delete mode 100755 tools/validate-all-dockerfiles.sh

diff --git a/kolla/cmd/__init__.py b/kolla/cmd/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/kolla/cmd/build.py b/kolla/cmd/build.py
deleted file mode 100755
index 4674de7622..0000000000
--- a/kolla/cmd/build.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import os -import sys - -# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source -# of packages to import if we are using local tools instead of -# pip installed kolla tools -PROJECT_ROOT = os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), '../..')) -if PROJECT_ROOT not in sys.path: - sys.path.insert(0, PROJECT_ROOT) - -from kolla.image import build - - -def main(): - statuses = build.run_build() - if statuses: - bad_results, good_results, unmatched_results = statuses - if bad_results: - return 1 - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/kolla/common/__init__.py b/kolla/common/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/kolla/common/config.py b/kolla/common/config.py deleted file mode 100644 index 72c87e4978..0000000000 --- a/kolla/common/config.py +++ /dev/null @@ -1,379 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -from oslo_config import cfg -from oslo_config import types - -from kolla.version import version_info as version - - -BASE_OS_DISTRO = ['centos', 'rhel', 'ubuntu', 'oraclelinux', 'debian'] -DISTRO_RELEASE = { - 'centos': '7', - 'rhel': '7', - 'oraclelinux': '7', - 'debian': '8', - 'ubuntu': '16.04', -} -DELOREAN = ("http://buildlogs.centos.org/centos/7/cloud/x86_64/" - "rdo-trunk-master-tested/delorean.repo") -# TODO(pbourke): update to buildlogs.centos.org once this moves -DELOREAN_DEPS = "http://trunk.rdoproject.org/centos7/delorean-deps.repo" -INSTALL_TYPE_CHOICES = ['binary', 'source', 'rdo', 'rhos'] - -_PROFILE_OPTS = [ - cfg.ListOpt('infra', - default=['ceph', 'cron', 'elasticsearch', 'etcd', 'haproxy', - 'heka', 'keepalived', 'kibana', 'kolla-toolbox', - 'mariadb', 'memcached', 'mongodb', 'openvswitch', - 'rabbitmq', 'tgtd'], - help='Infra images'), - cfg.ListOpt('main', - default=['cinder', 'ceilometer', 'glance', 'heat', - 'horizon', 'iscsi', 'keystone', 'neutron', 'nova', - 'swift'], - help='Main images'), - cfg.ListOpt('aux', - default=['aodh', 'cloudkitty', 'congress', 'designate', - 'freezer', 'gnocchi', 'influxdb', 'ironic', 'kuryr', - 'magnum', 'manila', 'mistral', 'murano', 'panko', - 'rally', 'sahara', 'searchlight', 'senlin', 'solum', - 'telegraf', 'trove', 'zaqar'], - help='Aux Images'), - cfg.ListOpt('default', - default=['chrony', 'cron', 'kolla-toolbox', 'glance', - 'haproxy', 'heat', 'horizon', 'keepalived', - 'keystone', 'memcached', 'mariadb', 'neutron', 'nova', - 'openvswitch', 'rabbitmq', 'heka'], - help='Default images'), - cfg.ListOpt('gate', - default=['chrony', 'cron', 'glance', 'haproxy', 'keepalived', - 'keystone', 'kolla-toolbox', 'mariadb', 'memcached', - 'neutron', 'nova', 'openvswitch', 'rabbitmq', 'heka'], - help='Gate images') -] - -_CLI_OPTS = [ - cfg.StrOpt('base', short='b', default='centos', - choices=BASE_OS_DISTRO, - help='The distro type of the base image'), - cfg.StrOpt('base-tag', default='latest', - help='The base distro image tag'), - cfg.StrOpt('base-image', - help='The base image 
name. Default is the same with base'), - cfg.BoolOpt('debug', short='d', default=False, - help='Turn on debugging log level'), - cfg.DictOpt('build-args', - help='Set docker build time variables'), - cfg.StrOpt('include-header', short='i', - deprecated_for_removal=True, - deprecated_reason=('Use a header block within a template' - ' overrides file instead'), - help=('Path to custom file to be added at ' - 'beginning of base Dockerfile')), - cfg.StrOpt('include-footer', short='I', - deprecated_for_removal=True, - deprecated_reason=('Use a footer block within a template' - ' overrides file instead'), - help=('Path to custom file to be added at ' - 'end of Dockerfiles for final images')), - cfg.BoolOpt('keep', default=False, - help='Keep failed intermediate containers'), - cfg.BoolOpt('list-dependencies', short='l', - help='Show image dependencies (filtering supported)'), - cfg.BoolOpt('list-images', - help='Show all available images'), - cfg.StrOpt('namespace', short='n', default='kolla', - help='The Docker namespace name'), - cfg.BoolOpt('cache', default=True, - help='Use the Docker cache when building', - ), - cfg.MultiOpt('profile', types.String(), short='p', - help=('Build a pre-defined set of images, see [profiles]' - ' section in config. The default profiles are:' - ' {}'.format(', '.join( - [opt.name for opt in _PROFILE_OPTS]) - ))), - cfg.BoolOpt('push', default=False, - help='Push images after building'), - cfg.IntOpt('push-threads', default=1, min=1, - help=('The number of threads to user while pushing' - ' Images. Note: Docker can not handle threading' - ' push properly.')), - cfg.IntOpt('retries', short='r', default=3, min=0, - help='The number of times to retry while building'), - cfg.MultiOpt('regex', types.String(), positional=True, - help=('Build only images matching regex and its' - ' dependencies')), - cfg.StrOpt('registry', - help=('The docker registry host. The default registry host' - ' is Docker Hub')), - cfg.StrOpt('save-dependency', - help=('Path to the file to store the docker image' - ' dependency in Graphviz dot format')), - cfg.StrOpt('type', short='t', default='binary', - choices=INSTALL_TYPE_CHOICES, - dest='install_type', - help=('The method of the OpenStack install')), - cfg.IntOpt('threads', short='T', default=8, min=1, - help=('The number of threads to use while building.' - ' (Note: setting to one will allow real time' - ' logging.)')), - cfg.StrOpt('tag', default=version.cached_version_string(), - help='The Docker tag'), - cfg.BoolOpt('template-only', default=False, - help=("Don't build images. 
Generate Dockerfile only")), - cfg.IntOpt('timeout', default=120, - help='Time in seconds after which any operation times out'), - cfg.StrOpt('template-override', - help='Path to template override file'), - cfg.StrOpt('logs-dir', help='Path to logs directory'), -] - -_BASE_OPTS = [ - cfg.StrOpt('maintainer', - default='Kolla Project (https://launchpad.net/kolla)', - help='The MAINTAINER field'), - cfg.ListOpt('rpm_setup_config', default=[DELOREAN, DELOREAN_DEPS], - help=('Comma separated list of .rpm or .repo file(s) ' - 'or URL(s) to install before building containers')), - cfg.StrOpt('apt_sources_list', help=('Path to custom sources.list')), - cfg.StrOpt('apt_preferences', help=('Path to custom apt/preferences')) -] - - -SOURCES = { - 'openstack-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/requirements/' - 'requirements-master.tar.gz')}, - 'aodh-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/aodh/' - 'aodh-master.tar.gz')}, - 'barbican-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/barbican/' - 'barbican-master.tar.gz')}, - 'bifrost-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/bifrost/' - 'bifrost-master.tar.gz')}, - 'ceilometer-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/ceilometer/' - 'ceilometer-master.tar.gz')}, - 'cinder-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/cinder/' - 'cinder-master.tar.gz')}, - 'congress-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/congress/' - 'congress-master.tar.gz')}, - 'cloudkitty-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/cloudkitty/' - 'cloudkitty-master.tar.gz')}, - 'designate-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/designate/' - 'designate-master.tar.gz')}, - 'freezer-api': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/freezer-api/' - 'freezer-api-master.tar.gz')}, - 'freezer-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/freezer/' - 'freezer-master.tar.gz')}, - 'glance-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/glance/' - 'glance-master.tar.gz')}, - 'gnocchi-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/gnocchi/' - 'gnocchi-master.tar.gz')}, - 'heat-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/heat/' - 'heat-master.tar.gz')}, - 'horizon': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/horizon/' - 'horizon-master.tar.gz')}, - 'horizon-plugin-neutron-lbaas-dashboard': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/neutron-lbaas-dashboard/' - 'neutron-lbaas-dashboard-master.tar.gz')}, - 'ironic-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/ironic/' - 'ironic-master.tar.gz')}, - 'ironic-inspector': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/ironic-inspector/' - 'ironic-inspector-master.tar.gz')}, - 'keystone-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/keystone/' - 'keystone-master.tar.gz')}, - 'kuryr-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/kuryr/' - 'kuryr-master.tar.gz')}, - 'kuryr-libnetwork': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/kuryr-libnetwork/' - 'kuryr-libnetwork-master.tar.gz')}, - 'magnum-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/magnum/' - 'magnum-master.tar.gz')}, - 
'manila-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/manila/' - 'manila-master.tar.gz')}, - 'mistral-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/mistral/' - 'mistral-master.tar.gz')}, - 'murano-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/murano/' - 'murano-master.tar.gz')}, - 'neutron-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/neutron/' - 'neutron-master.tar.gz')}, - 'neutron-lbaas-agent': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/neutron-lbaas/' - 'neutron-lbaas-master.tar.gz')}, - 'neutron-sfc-agent': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/networking-sfc/' - 'networking-sfc-master.tar.gz')}, - 'neutron-vpnaas-agent': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/neutron-vpnaas/' - 'neutron-vpnaas-master.tar.gz')}, - 'nova-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/nova/' - 'nova-master.tar.gz')}, - 'nova-spicehtml5proxy': { - 'type': 'url', - 'location': ('http://github.com/SPICE/spice-html5/tarball/' - 'spice-html5-0.1.6')}, - 'nova-novncproxy': { - 'type': 'url', - 'location': ('http://github.com/kanaka/noVNC/tarball/' - 'v0.5.1')}, - 'panko-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/panko/' - 'panko-master.tar.gz')}, - 'rally': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/rally/' - 'rally-master.tar.gz')}, - 'sahara-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/sahara/' - 'sahara-master.tar.gz')}, - 'searchlight-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/searchlight/' - 'searchlight-1.0.0.tar.gz')}, - 'senlin-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/senlin/' - 'senlin-master.tar.gz')}, - 'solum-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/solum/' - 'solum-master.tar.gz')}, - 'swift-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/swift/' - 'swift-master.tar.gz')}, - 'tempest': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/tempest/' - 'tempest-master.tar.gz')}, - 'trove-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/trove/' - 'trove-master.tar.gz')}, - 'watcher-base': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/watcher/' - 'watcher-master.tar.gz')}, - 'zaqar': { - 'type': 'url', - 'location': ('http://tarballs.openstack.org/zaqar/' - 'zaqar-master.tar.gz')} -} - - -def get_source_opts(type_=None, location=None, reference=None): - return [cfg.StrOpt('type', choices=['local', 'git', 'url'], - default=type_, - help='Source location type'), - cfg.StrOpt('location', default=location, - help='The location for source install'), - cfg.StrOpt('reference', default=reference, - help=('Git reference to pull, commit sha, tag ' - 'or branch name'))] - - -def gen_all_source_opts(): - for name, params in SOURCES.items(): - type_ = params['type'] - location = params['location'] - reference = params.get('reference') - yield name, get_source_opts(type_, location, reference) - - -def list_opts(): - return itertools.chain([(None, _CLI_OPTS), - (None, _BASE_OPTS), - ('profiles', _PROFILE_OPTS)], - gen_all_source_opts(), - ) - - -def parse(conf, args, usage=None, prog=None, - default_config_files=None): - conf.register_cli_opts(_CLI_OPTS) - conf.register_opts(_BASE_OPTS) - conf.register_opts(_PROFILE_OPTS, group='profiles') - for name, opts in 
gen_all_source_opts(): - conf.register_opts(opts, name) - - conf(args=args, - project='kolla', - usage=usage, - prog=prog, - version=version.cached_version_string(), - default_config_files=default_config_files) - - # NOTE(jeffrey4l): set the default base tag based on the - # base option - conf.set_default('base_tag', DISTRO_RELEASE.get(conf.base)) - - if not conf.base_image: - conf.base_image = conf.base diff --git a/kolla/common/task.py b/kolla/common/task.py deleted file mode 100644 index 255abf85c4..0000000000 --- a/kolla/common/task.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Task(object): - - def __init__(self): - self.success = False - - @abc.abstractproperty - def name(self): - pass - - def reset(self): - self.success = False - - @property - def followups(self): - return [] - - @abc.abstractmethod - def run(self): - pass - - @staticmethod - def set_status(status): - # TODO(harlowja): remove this. - pass diff --git a/kolla/hacking/__init__.py b/kolla/hacking/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/kolla/hacking/checks.py b/kolla/hacking/checks.py deleted file mode 100644 index 9eba06c014..0000000000 --- a/kolla/hacking/checks.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - - -mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") - - -def no_log_warn(logical_line): - """Disallow 'LOG.warn(' - - Deprecated LOG.warn(), instead use LOG.warning - https://bugs.launchpad.net/senlin/+bug/1508442 - N352 - """ - - msg = ("N352: LOG.warn is deprecated, please use LOG.warning!") - if "LOG.warn(" in logical_line: - yield (0, msg) - - -def no_mutable_default_args(logical_line): - msg = "N301: Method's default argument shouldn't be mutable!" - if mutable_default_args.match(logical_line): - yield (0, msg) - - -def factory(register): - register(no_mutable_default_args) - register(no_log_warn) diff --git a/kolla/image/__init__.py b/kolla/image/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/kolla/image/build.py b/kolla/image/build.py deleted file mode 100644 index 4c4c9f643b..0000000000 --- a/kolla/image/build.py +++ /dev/null @@ -1,1024 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import contextlib -import datetime -import errno -import json -import logging -import os -import pprint -import re -import requests -import shutil -import sys -import tarfile -import tempfile -import threading -import time - -import docker -import git -import jinja2 -from oslo_config import cfg -from requests import exceptions as requests_exc -import six - -# NOTE(SamYaple): Update the search path to prefer PROJECT_ROOT as the source -# of packages to import if we are using local tools instead of -# pip installed kolla tools -PROJECT_ROOT = os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), '../..')) -if PROJECT_ROOT not in sys.path: - sys.path.insert(0, PROJECT_ROOT) - -from kolla.common import config as common_config -from kolla.common import task -from kolla.template import filters as jinja_filters -from kolla.template import methods as jinja_methods -from kolla import version - - -def make_a_logger(conf=None, image_name=None): - if image_name: - log = logging.getLogger(".".join([__name__, image_name])) - else: - log = logging.getLogger(__name__) - if not log.handlers: - if conf is None or not conf.logs_dir or not image_name: - handler = logging.StreamHandler(sys.stdout) - log.propagate = False - else: - filename = os.path.join(conf.logs_dir, "%s.log" % image_name) - handler = logging.FileHandler(filename, delay=True) - handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) - log.addHandler(handler) - if conf is not None and conf.debug: - log.setLevel(logging.DEBUG) - else: - log.setLevel(logging.INFO) - return log - - -LOG = make_a_logger() - - -class KollaDirNotFoundException(Exception): - pass - - -class KollaUnknownBuildTypeException(Exception): - pass - - -class KollaMismatchBaseTypeException(Exception): - pass - - -class KollaRpmSetupUnknownConfig(Exception): - pass - - -# Image status constants. -# -# TODO(harlowja): use enum lib in the future?? -STATUS_CONNECTION_ERROR = 'connection_error' -STATUS_PUSH_ERROR = 'push_error' -STATUS_ERROR = 'error' -STATUS_PARENT_ERROR = 'parent_error' -STATUS_BUILT = 'built' -STATUS_BUILDING = 'building' -STATUS_UNMATCHED = 'unmatched' -STATUS_MATCHED = 'matched' -STATUS_UNPROCESSED = 'unprocessed' - -# All error status constants. -STATUS_ERRORS = (STATUS_CONNECTION_ERROR, STATUS_PUSH_ERROR, - STATUS_ERROR, STATUS_PARENT_ERROR) - - -@contextlib.contextmanager -def join_many(threads): - try: - yield - for t in threads: - t.join() - except KeyboardInterrupt: - try: - LOG.info('Waiting for daemon threads exit. Push Ctrl + c again to' - ' force exit') - for t in threads: - if t.is_alive(): - LOG.debug('Waiting thread %s to exit', t.name) - # NOTE(Jeffrey4l): Python Bug: When join without timeout, - # KeyboardInterrupt is never sent. 
- t.join(0xffff) - LOG.debug('Thread %s exits', t.name) - except KeyboardInterrupt: - LOG.warning('Force exits') - - -class DockerTask(task.Task): - - docker_kwargs = docker.utils.kwargs_from_env() - - def __init__(self): - super(DockerTask, self).__init__() - self._dc = None - - @property - def dc(self): - if self._dc is not None: - return self._dc - docker_kwargs = self.docker_kwargs.copy() - self._dc = docker.Client(version='auto', **docker_kwargs) - return self._dc - - -class Image(object): - def __init__(self, name, canonical_name, path, parent_name='', - status=STATUS_UNPROCESSED, parent=None, - source=None, logger=None): - self.name = name - self.canonical_name = canonical_name - self.path = path - self.status = status - self.parent = parent - self.source = source - self.parent_name = parent_name - if logger is None: - logger = make_a_logger(image_name=name) - self.logger = logger - self.children = [] - self.plugins = [] - - def copy(self): - c = Image(self.name, self.canonical_name, self.path, - logger=self.logger, parent_name=self.parent_name, - status=self.status, parent=self.parent) - if self.source: - c.source = self.source.copy() - if self.children: - c.children = list(self.children) - if self.plugins: - c.plugins = list(self.plugins) - return c - - def __repr__(self): - return ("Image(%s, %s, %s, parent_name=%s," - " status=%s, parent=%s, source=%s)") % ( - self.name, self.canonical_name, self.path, - self.parent_name, self.status, self.parent, self.source) - - -class PushIntoQueueTask(task.Task): - """Task that pushes some other task into a queue.""" - - def __init__(self, push_task, push_queue): - super(PushIntoQueueTask, self).__init__() - self.push_task = push_task - self.push_queue = push_queue - - @property - def name(self): - return 'PushIntoQueueTask(%s=>%s)' % (self.push_task.name, - self.push_queue) - - def run(self): - self.push_queue.put(self.push_task) - self.success = True - - -class PushTask(DockerTask): - """Task that pushes an image to a docker repository.""" - - def __init__(self, conf, image): - super(PushTask, self).__init__() - self.conf = conf - self.image = image - self.logger = image.logger - - @property - def name(self): - return 'PushTask(%s)' % self.image.name - - def run(self): - image = self.image - self.logger.info('Trying to push the image') - try: - self.push_image(image) - except requests_exc.ConnectionError: - self.logger.exception('Make sure Docker is running and that you' - ' have the correct privileges to run Docker' - ' (root)') - image.status = STATUS_CONNECTION_ERROR - except Exception: - self.logger.exception('Unknown error when pushing') - image.status = STATUS_PUSH_ERROR - finally: - if (image.status not in STATUS_ERRORS - and image.status != STATUS_UNPROCESSED): - self.logger.info('Pushed successfully') - self.success = True - else: - self.success = False - - def push_image(self, image): - for response in self.dc.push(image.canonical_name, - stream=True, - insecure_registry=True): - stream = json.loads(response) - if 'stream' in stream: - self.logger.info(stream['stream']) - elif 'errorDetail' in stream: - image.status = STATUS_ERROR - self.logger.error(stream['errorDetail']['message']) - - -class BuildTask(DockerTask): - """Task that builds out an image.""" - - def __init__(self, conf, image, push_queue): - super(BuildTask, self).__init__() - self.conf = conf - self.image = image - self.push_queue = push_queue - self.nocache = not conf.cache - self.forcerm = not conf.keep - self.logger = image.logger - - @property - def name(self): 
- return 'BuildTask(%s)' % self.image.name - - def run(self): - self.builder(self.image) - if self.image.status == STATUS_BUILT: - self.success = True - - @property - def followups(self): - followups = [] - if self.conf.push and self.success: - followups.extend([ - # If we are supposed to push the image into a docker - # repository, then make sure we do that... - PushIntoQueueTask( - PushTask(self.conf, self.image), - self.push_queue), - ]) - if self.image.children and self.success: - for image in self.image.children: - followups.append(BuildTask(self.conf, image, self.push_queue)) - return followups - - def process_source(self, image, source): - dest_archive = os.path.join(image.path, source['name'] + '-archive') - - if source.get('type') == 'url': - self.logger.debug("Getting archive from %s", source['source']) - try: - r = requests.get(source['source'], timeout=self.conf.timeout) - except requests_exc.Timeout: - self.logger.exception( - 'Request timed out while getting archive from %s', - source['source']) - image.status = STATUS_ERROR - return - - if r.status_code == 200: - with open(dest_archive, 'wb') as f: - f.write(r.content) - else: - self.logger.error( - 'Failed to download archive: status_code %s', - r.status_code) - image.status = STATUS_ERROR - return - - elif source.get('type') == 'git': - clone_dir = '{}-{}'.format(dest_archive, - source['reference'].replace('/', '-')) - try: - self.logger.debug("Cloning from %s", source['source']) - git.Git().clone(source['source'], clone_dir) - git.Git(clone_dir).checkout(source['reference']) - reference_sha = git.Git(clone_dir).rev_parse('HEAD') - self.logger.debug("Git checkout by reference %s (%s)", - source['reference'], reference_sha) - except Exception as e: - self.logger.error("Failed to get source from git", image.name) - self.logger.error("Error: %s", e) - # clean-up clone folder to retry - shutil.rmtree(clone_dir) - image.status = STATUS_ERROR - return - - with tarfile.open(dest_archive, 'w') as tar: - tar.add(clone_dir, arcname=os.path.basename(clone_dir)) - - elif source.get('type') == 'local': - self.logger.debug("Getting local archive from %s", - source['source']) - if os.path.isdir(source['source']): - with tarfile.open(dest_archive, 'w') as tar: - tar.add(source['source'], - arcname=os.path.basename(source['source'])) - else: - shutil.copyfile(source['source'], dest_archive) - - else: - self.logger.error("Wrong source type '%s'", source.get('type')) - image.status = STATUS_ERROR - return - - # Set time on destination archive to epoch 0 - os.utime(dest_archive, (0, 0)) - - return dest_archive - - def update_buildargs(self): - buildargs = dict() - if self.conf.build_args: - buildargs = dict(self.conf.build_args) - - proxy_vars = ('HTTP_PROXY', 'http_proxy', 'HTTPS_PROXY', - 'https_proxy', 'FTP_PROXY', 'ftp_proxy', - 'NO_PROXY', 'no_proxy') - - for proxy_var in proxy_vars: - if proxy_var in os.environ and proxy_var not in buildargs: - buildargs[proxy_var] = os.environ.get(proxy_var) - - if not buildargs: - return None - return buildargs - - def builder(self, image): - self.logger.debug('Processing') - if image.status == STATUS_UNMATCHED: - return - - if (image.parent is not None and - image.parent.status in STATUS_ERRORS): - self.logger.error('Parent image error\'d with message "%s"', - image.parent.status) - image.status = STATUS_PARENT_ERROR - return - - image.status = STATUS_BUILDING - self.logger.info('Building') - - if image.source and 'source' in image.source: - self.process_source(image, image.source) - if image.status 
in STATUS_ERRORS: - return - - plugin_archives = list() - plugins_path = os.path.join(image.path, 'plugins') - for plugin in image.plugins: - archive_path = self.process_source(image, plugin) - if image.status in STATUS_ERRORS: - return - plugin_archives.append(archive_path) - if plugin_archives: - for plugin_archive in plugin_archives: - with tarfile.open(plugin_archive, 'r') as plugin_archive_tar: - plugin_archive_tar.extractall(path=plugins_path) - else: - try: - os.mkdir(plugins_path) - except OSError as e: - if e.errno == errno.EEXIST: - self.logger.info('Directory %s already exist. Skipping.', - plugins_path) - else: - self.logger.error('Failed to create directory %s: %s', - plugins_path, e) - image.status = STATUS_CONNECTION_ERROR - return - with tarfile.open(os.path.join(image.path, 'plugins-archive'), - 'w') as tar: - tar.add(plugins_path, arcname='plugins') - - # Pull the latest image for the base distro only - pull = True if image.parent is None else False - - buildargs = self.update_buildargs() - try: - for response in self.dc.build(path=image.path, - tag=image.canonical_name, - nocache=not self.conf.cache, - rm=True, - pull=pull, - forcerm=self.forcerm, - buildargs=buildargs): - stream = json.loads(response.decode('utf-8')) - if 'stream' in stream: - for line in stream['stream'].split('\n'): - if line: - self.logger.info('%s', line) - if 'errorDetail' in stream: - image.status = STATUS_ERROR - self.logger.error('Error\'d with the following message') - for line in stream['errorDetail']['message'].split('\n'): - if line: - self.logger.error('%s', line) - return - except docker.errors.DockerException: - image.status = STATUS_ERROR - self.logger.exception('Unknown docker error when building') - except Exception: - image.status = STATUS_ERROR - self.logger.exception('Unknown error when building') - else: - image.status = STATUS_BUILT - self.logger.info('Built') - - -class WorkerThread(threading.Thread): - """Thread that executes tasks until the queue provides a tombstone.""" - - #: Object to be put on worker queues to get them to die. - tombstone = object() - - def __init__(self, conf, queue): - super(WorkerThread, self).__init__() - self.queue = queue - self.conf = conf - self.should_stop = False - - def run(self): - while not self.should_stop: - task = self.queue.get() - if task is self.tombstone: - # Ensure any other threads also get the tombstone. - self.queue.put(task) - break - try: - for attempt in six.moves.range(self.conf.retries + 1): - if self.should_stop: - break - if attempt > 0: - LOG.info("Attempting to run task %s for the %s time", - task.name, attempt + 1) - else: - LOG.info("Attempting to run task %s for the first" - " time", task.name) - try: - task.run() - if task.success: - break - except Exception: - LOG.exception('Unhandled error when running %s', - task.name) - # try again... 
- task.reset() - if task.success and not self.should_stop: - for next_task in task.followups: - LOG.info('Added next task %s to queue', - next_task.name) - self.queue.put(next_task) - finally: - self.queue.task_done() - - -class KollaWorker(object): - - def __init__(self, conf): - self.conf = conf - self.images_dir = self._get_images_dir() - self.registry = conf.registry - if self.registry: - self.namespace = self.registry + '/' + conf.namespace - else: - self.namespace = conf.namespace - self.base = conf.base - self.base_tag = conf.base_tag - self.install_type = conf.install_type - self.tag = conf.tag - self.images = list() - rpm_setup_config = filter(None, conf.rpm_setup_config) - self.rpm_setup = self.build_rpm_setup(rpm_setup_config) - - rh_base = ['centos', 'oraclelinux', 'rhel'] - rh_type = ['source', 'binary', 'rdo', 'rhos'] - deb_base = ['ubuntu', 'debian'] - deb_type = ['source', 'binary'] - - if not ((self.base in rh_base and self.install_type in rh_type) or - (self.base in deb_base and self.install_type in deb_type)): - raise KollaMismatchBaseTypeException( - '{} is unavailable for {}'.format(self.install_type, self.base) - ) - - if self.install_type == 'binary': - self.install_metatype = 'rdo' - elif self.install_type == 'source': - self.install_metatype = 'mixed' - elif self.install_type == 'rdo': - self.install_type = 'binary' - self.install_metatype = 'rdo' - elif self.install_type == 'rhos': - self.install_type = 'binary' - self.install_metatype = 'rhos' - else: - raise KollaUnknownBuildTypeException( - 'Unknown install type' - ) - - self.image_prefix = self.base + '-' + self.install_type + '-' - - self.include_header = conf.include_header - self.include_footer = conf.include_footer - self.regex = conf.regex - self.image_statuses_bad = dict() - self.image_statuses_good = dict() - self.image_statuses_unmatched = dict() - self.maintainer = conf.maintainer - - def _get_images_dir(self): - possible_paths = ( - PROJECT_ROOT, - os.path.join(sys.prefix, 'share/kolla'), - os.path.join(sys.prefix, 'local/share/kolla')) - - for path in possible_paths: - image_path = os.path.join(path, 'docker') - # NOTE(SamYaple): We explicty check for the base folder to ensure - # this is the correct path - # TODO(SamYaple): Improve this to make this safer - if os.path.exists(os.path.join(image_path, 'base')): - LOG.info('Found the docker image folder at %s', image_path) - return image_path - else: - raise KollaDirNotFoundException('Image dir can not be found') - - def build_rpm_setup(self, rpm_setup_config): - """Generates a list of docker commands based on provided configuration. - - :param rpm_setup_config: A list of .rpm or .repo paths or URLs - :return: A list of docker commands - """ - rpm_setup = list() - - for config in rpm_setup_config: - if config.endswith('.rpm'): - # RPM files can be installed with yum from file path or url - cmd = "RUN yum -y install {}".format(config) - elif config.endswith('.repo'): - if config.startswith('http'): - # Curl http://url/etc.repo to /etc/yum.repos.d/etc.repo - name = config.split('/')[-1] - cmd = "RUN curl -L {} -o /etc/yum.repos.d/{}".format( - config, name) - else: - # Copy .repo file from filesystem - cmd = "COPY {} /etc/yum.repos.d/".format(config) - else: - raise KollaRpmSetupUnknownConfig( - 'RPM setup must be provided as .rpm or .repo files.' 
- ' Attempted configuration was {}'.format(config) - ) - - rpm_setup.append(cmd) - - return rpm_setup - - def copy_apt_files(self): - if self.conf.apt_sources_list: - shutil.copyfile( - self.conf.apt_sources_list, - os.path.join(self.working_dir, "base", "sources.list") - ) - - if self.conf.apt_preferences: - shutil.copyfile( - self.conf.apt_preferences, - os.path.join(self.working_dir, "base", "apt_preferences") - ) - - def setup_working_dir(self): - """Creates a working directory for use while building""" - ts = time.time() - ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S_') - self.temp_dir = tempfile.mkdtemp(prefix='kolla-' + ts) - self.working_dir = os.path.join(self.temp_dir, 'docker') - shutil.copytree(self.images_dir, self.working_dir) - self.copy_apt_files() - LOG.debug('Created working dir: %s', self.working_dir) - - def set_time(self): - for root, dirs, files in os.walk(self.working_dir): - for file_ in files: - os.utime(os.path.join(root, file_), (0, 0)) - for dir_ in dirs: - os.utime(os.path.join(root, dir_), (0, 0)) - LOG.debug('Set atime and mtime to 0 for all content in working dir') - - def _get_filters(self): - filters = { - 'customizable': jinja_filters.customizable, - } - return filters - - def _get_methods(self): - """Mapping of available Jinja methods - - return a dictionary that maps available function names and their - corresponding python methods to make them available in jinja templates - """ - - return { - 'debian_package_install': jinja_methods.debian_package_install, - } - - def create_dockerfiles(self): - kolla_version = version.version_info.cached_version_string() - supported_distro_release = common_config.DISTRO_RELEASE.get( - self.base) - for path in self.docker_build_paths: - template_name = "Dockerfile.j2" - image_name = path.split("/")[-1] - values = {'base_distro': self.base, - 'base_image': self.conf.base_image, - 'base_distro_tag': self.base_tag, - 'supported_distro_release': supported_distro_release, - 'install_metatype': self.install_metatype, - 'image_prefix': self.image_prefix, - 'install_type': self.install_type, - 'namespace': self.namespace, - 'tag': self.tag, - 'maintainer': self.maintainer, - 'kolla_version': kolla_version, - 'image_name': image_name, - 'rpm_setup': self.rpm_setup} - env = jinja2.Environment( # nosec: not used to render HTML - loader=jinja2.FileSystemLoader(self.working_dir)) - env.filters.update(self._get_filters()) - env.globals.update(self._get_methods()) - tpl_path = os.path.join( - os.path.relpath(path, self.working_dir), - template_name) - - template = env.get_template(tpl_path) - if self.conf.template_override: - template_path = os.path.dirname(self.conf.template_override) - template_name = os.path.basename(self.conf.template_override) - values['parent_template'] = template - env = jinja2.Environment( # nosec: not used to render HTML - loader=jinja2.FileSystemLoader(template_path)) - env.filters.update(self._get_filters()) - env.globals.update(self._get_methods()) - template = env.get_template(template_name) - if self.include_header: - with open(self.include_header, 'r') as f: - values['include_header'] = f.read() - if self.include_footer: - with open(self.include_footer, 'r') as f: - values['include_footer'] = f.read() - content = template.render(values) - content_path = os.path.join(path, 'Dockerfile') - with open(content_path, 'w') as f: - LOG.debug("Rendered %s into:", tpl_path) - LOG.debug(content) - f.write(content) - LOG.debug("Wrote it to %s", content_path) - - def find_dockerfiles(self): - 
"""Recursive search for Dockerfiles in the working directory""" - self.docker_build_paths = list() - path = self.working_dir - filename = 'Dockerfile.j2' - - for root, dirs, names in os.walk(path): - if filename in names: - self.docker_build_paths.append(root) - LOG.debug('Found %s', root.split(self.working_dir)[1]) - - LOG.debug('Found %d Dockerfiles', len(self.docker_build_paths)) - - def cleanup(self): - """Remove temp files""" - shutil.rmtree(self.temp_dir) - - def filter_images(self): - """Filter which images to build""" - filter_ = list() - - if self.regex: - filter_ += self.regex - elif self.conf.profile: - for profile in self.conf.profile: - if profile not in self.conf.profiles: - self.conf.register_opt(cfg.ListOpt(profile, - default=[]), - 'profiles') - if len(self.conf.profiles[profile]) == 0: - msg = 'Profile: {} does not exist'.format(profile) - raise ValueError(msg) - else: - filter_ += self.conf.profiles[profile] - - if filter_: - patterns = re.compile(r"|".join(filter_).join('()')) - for image in self.images: - if image.status == STATUS_MATCHED: - continue - if re.search(patterns, image.name): - image.status = STATUS_MATCHED - while (image.parent is not None and - image.parent.status != STATUS_MATCHED): - image = image.parent - image.status = STATUS_MATCHED - LOG.debug('Image %s matched regex', image.name) - else: - image.status = STATUS_UNMATCHED - else: - for image in self.images: - image.status = STATUS_MATCHED - - def summary(self): - """Walk the dictionary of images statuses and print results""" - # For debug we print the logs again if the image error'd. This is to - # help us debug and it will be extra helpful in the gate. - for image in self.images: - if image.status in STATUS_ERRORS: - LOG.debug("Image %s failed", image.name) - - self.get_image_statuses() - - if self.image_statuses_good: - LOG.info("=========================") - LOG.info("Successfully built images") - LOG.info("=========================") - for name in self.image_statuses_good.keys(): - LOG.info(name) - - if self.image_statuses_bad: - LOG.info("===========================") - LOG.info("Images that failed to build") - LOG.info("===========================") - for name, status in self.image_statuses_bad.items(): - LOG.error('%s Failed with status: %s', name, status) - - if self.image_statuses_unmatched: - LOG.debug("=====================================") - LOG.debug("Images not matched for build by regex") - LOG.debug("=====================================") - for name in self.image_statuses_unmatched.keys(): - LOG.debug(name) - - def get_image_statuses(self): - if any([self.image_statuses_bad, - self.image_statuses_good, - self.image_statuses_unmatched]): - return (self.image_statuses_bad, - self.image_statuses_good, - self.image_statuses_unmatched) - for image in self.images: - if image.status == STATUS_BUILT: - self.image_statuses_good[image.name] = image.status - elif image.status == STATUS_UNMATCHED: - self.image_statuses_unmatched[image.name] = image.status - else: - self.image_statuses_bad[image.name] = image.status - return (self.image_statuses_bad, - self.image_statuses_good, - self.image_statuses_unmatched) - - def build_image_list(self): - def process_source_installation(image, section): - installation = dict() - # NOTE(jeffrey4l): source is not needed when the type is None - if self.conf._get('type', self.conf._get_group(section)) is None: - if image.parent_name is None: - LOG.debug('No source location found in section %s', - section) - else: - installation['type'] = 
self.conf[section]['type'] - installation['source'] = self.conf[section]['location'] - installation['name'] = section - if installation['type'] == 'git': - installation['reference'] = self.conf[section]['reference'] - return installation - - all_sections = (set(six.iterkeys(self.conf._groups)) | - set(self.conf.list_all_sections())) - - for path in self.docker_build_paths: - # Reading parent image name - with open(os.path.join(path, 'Dockerfile')) as f: - content = f.read() - - image_name = os.path.basename(path) - canonical_name = (self.namespace + '/' + self.image_prefix + - image_name + ':' + self.tag) - image = Image(image_name, canonical_name, path, - parent_name=content.split(' ')[1].split('\n')[0], - logger=make_a_logger(self.conf, image_name)) - - if self.install_type == 'source': - # NOTE(jeffrey4l): register the opts if the section didn't - # register in the kolla/common/config.py file - if image.name not in self.conf._groups: - self.conf.register_opts(common_config.get_source_opts(), - image.name) - image.source = process_source_installation(image, image.name) - for plugin in [match.group(0) for match in - (re.search('^{}-plugin-.+'.format(image.name), - section) for section in - all_sections) if match]: - try: - self.conf.register_opts( - common_config.get_source_opts(), - plugin - ) - except cfg.DuplicateOptError: - LOG.debug('Plugin %s already registered in config', - plugin) - image.plugins.append( - process_source_installation(image, plugin)) - - self.images.append(image) - - def save_dependency(self, to_file): - try: - import graphviz - except ImportError: - LOG.error('"graphviz" is required for save dependency') - raise - dot = graphviz.Digraph(comment='Docker Images Dependency') - dot.body.extend(['rankdir=LR']) - for image in self.images: - if image.status not in [STATUS_MATCHED]: - continue - dot.node(image.name) - if image.parent is not None: - dot.edge(image.parent.name, image.name) - - with open(to_file, 'w') as f: - f.write(dot.source) - - def list_images(self): - for count, image in enumerate(self.images): - print(count + 1, ':', image.name) - - def list_dependencies(self): - match = False - for image in self.images: - if image.status in [STATUS_MATCHED]: - match = True - if image.parent is None: - base = image - if not match: - print('Nothing matched!') - return - - def list_children(images, ancestry): - children = six.next(iter(ancestry.values())) - for image in images: - if image.status not in [STATUS_MATCHED]: - continue - if not image.children: - children.append(image.name) - else: - newparent = {image.name: []} - children.append(newparent) - list_children(image.children, newparent) - - ancestry = {base.name: []} - list_children(base.children, ancestry) - pprint.pprint(ancestry) - - def find_parents(self): - """Associate all images with parents and children""" - sort_images = dict() - - for image in self.images: - sort_images[image.canonical_name] = image - - for parent_name, parent in sort_images.items(): - for image in sort_images.values(): - if image.parent_name == parent_name: - parent.children.append(image) - image.parent = parent - - def build_queue(self, push_queue): - """Organizes Queue list - - Return a list of Queues that have been organized into a hierarchy - based on dependencies - """ - self.build_image_list() - self.find_parents() - self.filter_images() - - queue = six.moves.queue.Queue() - - for image in self.images: - if image.status == STATUS_UNMATCHED: - # Don't bother queuing up build tasks for things that - # were not matched in the first 
place... (not worth the - # effort to run them, if they won't be used anyway). - continue - if image.parent is None: - queue.put(BuildTask(self.conf, image, push_queue)) - LOG.info('Added image %s to queue', image.name) - - return queue - - -def run_build(): - """Build container images. - - :return: A 3-tuple containing bad, good, and unmatched container image - status dicts, or None if no images were built. - """ - conf = cfg.ConfigOpts() - common_config.parse(conf, sys.argv[1:], prog='kolla-build') - - if conf.debug: - LOG.setLevel(logging.DEBUG) - - kolla = KollaWorker(conf) - kolla.setup_working_dir() - kolla.find_dockerfiles() - kolla.create_dockerfiles() - - if conf.template_only: - LOG.info('Dockerfiles are generated in %s', kolla.working_dir) - return - - # We set the atime and mtime to 0 epoch to preserve allow the Docker cache - # to work like we want. A different size or hash will still force a rebuild - kolla.set_time() - - if conf.save_dependency: - kolla.build_image_list() - kolla.find_parents() - kolla.filter_images() - kolla.save_dependency(conf.save_dependency) - LOG.info('Docker images dependency are saved in %s', - conf.save_dependency) - return - if conf.list_images: - kolla.build_image_list() - kolla.list_images() - return - if conf.list_dependencies: - kolla.build_image_list() - kolla.find_parents() - kolla.filter_images() - kolla.list_dependencies() - return - - push_queue = six.moves.queue.Queue() - queue = kolla.build_queue(push_queue) - workers = [] - - with join_many(workers): - try: - for x in six.moves.range(conf.threads): - worker = WorkerThread(conf, queue) - worker.setDaemon(True) - worker.start() - workers.append(worker) - - for x in six.moves.range(conf.push_threads): - worker = WorkerThread(conf, push_queue) - worker.setDaemon(True) - worker.start() - workers.append(worker) - - # sleep until queue is empty - while queue.unfinished_tasks or push_queue.unfinished_tasks: - time.sleep(3) - - # ensure all threads exited happily - push_queue.put(WorkerThread.tombstone) - queue.put(WorkerThread.tombstone) - except KeyboardInterrupt: - for w in workers: - w.should_stop = True - push_queue.put(WorkerThread.tombstone) - queue.put(WorkerThread.tombstone) - raise - - kolla.summary() - kolla.cleanup() - - return kolla.get_image_statuses() diff --git a/kolla/opts.py b/kolla/opts.py deleted file mode 100644 index 0571d7fbe2..0000000000 --- a/kolla/opts.py +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from kolla.common import config - - -def list_opts(): - return config.list_opts() diff --git a/kolla/template/__init__.py b/kolla/template/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/kolla/template/filters.py b/kolla/template/filters.py deleted file mode 100644 index 1322ef4d1c..0000000000 --- a/kolla/template/filters.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from jinja2 import contextfilter - - -@contextfilter -def customizable(context, val_list, call_type): - name = context['image_name'].replace("-", "_") + "_" + call_type + "_" - if name + "override" in context: - return context[name + "override"] - if name + "append" in context: - val_list.extend(context[name + "append"]) - if name + "remove" in context: - for removal in context[name + "remove"]: - if removal in val_list: - val_list.remove(removal) - return val_list diff --git a/kolla/template/methods.py b/kolla/template/methods.py deleted file mode 100644 index 8e655f4c8a..0000000000 --- a/kolla/template/methods.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def debian_package_install(packages): - """Jinja utility method for building debian-based package install command - - apt-get is not capable of installing .deb files from a URL and the - template logic to construct a series of steps to install regular packages - from apt repos as well as .deb files that need to be downloaded, manually - installed, and cleaned up is complicated. 
This method will contruct the
-    proper string required to install all packages in a way that's a bit
-    easier to follow
-
-    :param packages: a list of strings that are either packages to install
-    from an apt repo, or URLs to .deb files
-    :type packages: list
-
-    :returns: string suitable to provide to RUN command in a Dockerfile that
-    will install the given packages
-    :rtype: string
-    """
-    cmds = []
-
-    # divide the list into two groups, one for regular packages and one for
-    # URL packages
-    reg_packages, url_packages = [], []
-    for package in packages:
-        if package.startswith('http'):
-            url_packages.append(package)
-        else:
-            reg_packages.append(package)
-
-    # handle the apt-get install
-    if reg_packages:
-        cmds.append('apt-get -y install --no-install-recommends {}'.format(
-            ' '.join(reg_packages)
-        ))
-        cmds.append('apt-get clean')
-
-    # handle URL packages
-    for url in url_packages:
-        # the path portion should be the file name
-        name = url[url.rfind('/') + 1:]
-        cmds.extend([
-            'curl --location {} -o {}'.format(url, name),
-            'dpkg -i {}'.format(name),
-            'rm -rf {}'.format(name),
-        ])
-
-    # return the list of commands
-    return ' && '.join(cmds)
diff --git a/kolla/tests/__init__.py b/kolla/tests/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/kolla/tests/base.py b/kolla/tests/base.py
deleted file mode 100644
index 1c43b76196..0000000000
--- a/kolla/tests/base.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import fixtures
-import mock
-from oslo_config import cfg
-from oslotest import base as oslotest_base
-
-from kolla.common import config as common_config
-
-
-TESTS_ROOT = os.path.dirname(os.path.abspath(__file__))
-
-
-class TestCase(oslotest_base.BaseTestCase):
-    '''All unit test should inherit from this class'''
-    config_file = None
-
-    def setUp(self):
-        super(TestCase, self).setUp()
-        self.conf = cfg.ConfigOpts()
-        default_config_files = self.get_default_config_files()
-        common_config.parse(self.conf, [],
-                            default_config_files=default_config_files)
-        # NOTE(jeffrey4l): mock the _get_image_dir method to return a fake
-        # docker images dir
-        self.useFixture(fixtures.MockPatch(
-            'kolla.image.build.KollaWorker._get_images_dir',
-            mock.Mock(return_value=os.path.join(TESTS_ROOT, 'docker'))))
-
-    def get_default_config_files(self):
-        if self.config_file:
-            return [os.path.join(TESTS_ROOT, 'etc', self.config_file)]
diff --git a/kolla/tests/common/__init__.py b/kolla/tests/common/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/kolla/tests/common/test_config.py b/kolla/tests/common/test_config.py
deleted file mode 100644
index 28a9c8d8de..0000000000
--- a/kolla/tests/common/test_config.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from kolla.tests import base
-
-
-class ConfigTest(base.TestCase):
-    config_file = 'default.conf'
-
-    def test_debug_opt(self):
-        self.assertTrue(self.conf.debug)
diff --git a/kolla/tests/docker/base/Dockerfile.j2 b/kolla/tests/docker/base/Dockerfile.j2
deleted file mode 100644
index ef5d2105a1..0000000000
--- a/kolla/tests/docker/base/Dockerfile.j2
+++ /dev/null
@@ -1 +0,0 @@
-FROM {{ base_distro }}:{{ base_distro_tag }}
diff --git a/kolla/tests/docker/neutron-server/Dockerfile.j2 b/kolla/tests/docker/neutron-server/Dockerfile.j2
deleted file mode 100644
index ae637eec48..0000000000
--- a/kolla/tests/docker/neutron-server/Dockerfile.j2
+++ /dev/null
@@ -1 +0,0 @@
-FROM {{ namespace }}/{{ image_prefix }}base:{{ tag }}
diff --git a/kolla/tests/etc/default.conf b/kolla/tests/etc/default.conf
deleted file mode 100644
index 545953e34b..0000000000
--- a/kolla/tests/etc/default.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULT]
-debug=True
-
-[neutron-server-plugin-networking-arista]
-reference = master
-location = https://git.openstack.org/openstack/networking-arista
-type = git
-
-[neutron-base-plugin-neutron-fwaas]
-reference = master
-location = https://git.openstack.org/openstack/neutron-fwaas
-type = git
-
-[profiles]
-default = image-base
-all = image-base,image-child
diff --git a/kolla/tests/test_build.py b/kolla/tests/test_build.py
deleted file mode 100644
index 980413468b..0000000000
--- a/kolla/tests/test_build.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import fixtures
-import itertools
-import mock
-import os
-import requests
-
-from kolla.cmd import build as build_cmd
-from kolla.image import build
-from kolla.tests import base
-
-
-FAKE_IMAGE = build.Image(
-    'image-base', 'image-base:latest',
-    '/fake/path', parent_name=None,
-    parent=None, status=build.STATUS_MATCHED)
-FAKE_IMAGE_CHILD = build.Image(
-    'image-child', 'image-child:latest',
-    '/fake/path2', parent_name='image-base',
-    parent=FAKE_IMAGE, status=build.STATUS_MATCHED)
-
-
-class TasksTest(base.TestCase):
-
-    def setUp(self):
-        super(TasksTest, self).setUp()
-        self.image = FAKE_IMAGE.copy()
-        # NOTE(jeffrey4l): use a real, temporary dir
-        self.image.path = self.useFixture(fixtures.TempDir()).path
-
-    @mock.patch.dict(os.environ, clear=True)
-    @mock.patch('docker.Client')
-    def test_push_image(self, mock_client):
-        pusher = build.PushTask(self.conf, self.image)
-        pusher.run()
-        mock_client().push.assert_called_once_with(
-            self.image.canonical_name, stream=True, insecure_registry=True)
-
-    @mock.patch.dict(os.environ, clear=True)
-    @mock.patch('docker.Client')
-    def test_build_image(self, mock_client):
-        push_queue = mock.Mock()
-        builder = build.BuildTask(self.conf, self.image, push_queue)
-        builder.run()
-
-        mock_client().build.assert_called_once_with(
-            path=self.image.path, tag=self.image.canonical_name,
-            nocache=False, rm=True, pull=True, forcerm=True,
-            buildargs=None)
-
-        self.assertTrue(builder.success)
-
-    @mock.patch.dict(os.environ, clear=True)
-    @mock.patch('docker.Client')
-    def test_build_image_with_build_arg(self, mock_client):
-        build_args = {
-            'HTTP_PROXY': 'http://localhost:8080',
-            'NO_PROXY': '127.0.0.1'
-        }
-        self.conf.set_override('build_args', build_args)
-        push_queue = mock.Mock()
-        builder = build.BuildTask(self.conf, self.image, push_queue)
-        builder.run()
-
-        mock_client().build.assert_called_once_with(
-            path=self.image.path, tag=self.image.canonical_name,
-            nocache=False, rm=True, pull=True, forcerm=True,
-            buildargs=build_args)
-
-        self.assertTrue(builder.success)
-
-    @mock.patch.dict(os.environ, {'http_proxy': 'http://FROM_ENV:8080'},
-                     clear=True)
-    @mock.patch('docker.Client')
-    def test_build_arg_from_env(self, mock_client):
-        push_queue = mock.Mock()
-        build_args = {
-            'http_proxy': 'http://FROM_ENV:8080',
-        }
-        builder = build.BuildTask(self.conf, self.image, push_queue)
-        builder.run()
-
-        mock_client().build.assert_called_once_with(
-            path=self.image.path, tag=self.image.canonical_name,
-            nocache=False, rm=True, pull=True, forcerm=True,
-            buildargs=build_args)
-
-        self.assertTrue(builder.success)
-
-    @mock.patch.dict(os.environ, {'http_proxy': 'http://FROM_ENV:8080'},
-                     clear=True)
-    @mock.patch('docker.Client')
-    def test_build_arg_precedence(self, mock_client):
-        build_args = {
-            'http_proxy': 'http://localhost:8080',
-        }
-        self.conf.set_override('build_args', build_args)
-
-        push_queue = mock.Mock()
-        builder = build.BuildTask(self.conf, self.image, push_queue)
-        builder.run()
-
-        mock_client().build.assert_called_once_with(
-            path=self.image.path, tag=self.image.canonical_name,
-            nocache=False, rm=True, pull=True, forcerm=True,
-            buildargs=build_args)
-
-        self.assertTrue(builder.success)
-
-    @mock.patch('docker.Client')
-    @mock.patch('requests.get')
-    def test_requests_get_timeout(self, mock_get, mock_client):
-        self.image.source = {
-            'source': 'http://fake/source',
-            'type': 'url',
-            'name': 'fake-image-base'
-        }
-        push_queue = mock.Mock()
-        builder = build.BuildTask(self.conf, self.image, push_queue)
-        mock_get.side_effect = requests.exceptions.Timeout
-        get_result = builder.process_source(self.image, self.image.source)
-
-        self.assertIsNone(get_result)
-        self.assertEqual(self.image.status, build.STATUS_ERROR)
-        mock_get.assert_called_once_with(self.image.source['source'],
-                                         timeout=120)
-
-        self.assertFalse(builder.success)
-
-
-class KollaWorkerTest(base.TestCase):
-
-    config_file = 'default.conf'
-
-    def setUp(self):
-        super(KollaWorkerTest, self).setUp()
-        image = FAKE_IMAGE.copy()
-        image.status = None
-        image_child = FAKE_IMAGE_CHILD.copy()
-        image_child.status = None
-        self.images = [image, image_child]
-
-    def test_supported_base_type(self):
-        rh_base = ['centos', 'oraclelinux', 'rhel']
-        rh_type = ['source', 'binary', 'rdo', 'rhos']
-        deb_base = ['ubuntu', 'debian']
-        deb_type = ['source', 'binary']
-
-        for base_distro, install_type in itertools.chain(
-                itertools.product(rh_base, rh_type),
-                itertools.product(deb_base, deb_type)):
-            self.conf.set_override('base', base_distro)
-            self.conf.set_override('install_type', install_type)
-            # should no exception raised
-            build.KollaWorker(self.conf)
-
-    def test_unsupported_base_type(self):
-        for base_distro, install_type in itertools.product(
-                ['ubuntu', 'debian'], ['rdo', 'rhos']):
-            self.conf.set_override('base', base_distro)
-            self.conf.set_override('install_type', install_type)
-            self.assertRaises(build.KollaMismatchBaseTypeException,
-                              build.KollaWorker, self.conf)
-
-    def test_build_image_list_adds_plugins(self):
-
-        self.conf.set_override('install_type', 'source')
-
-        kolla = build.KollaWorker(self.conf)
-        kolla.setup_working_dir()
-        kolla.find_dockerfiles()
-        kolla.create_dockerfiles()
-        kolla.build_image_list()
-        expected_plugin = {
-            'name': 'neutron-server-plugin-networking-arista',
-            'reference': 'master',
-            'source': 'https://git.openstack.org/openstack/networking-arista',
-            'type': 'git'
-        }
-        for image in kolla.images:
-            if image.name == 'neutron-server':
-                self.assertEqual(image.plugins[0], expected_plugin)
-                break
-        else:
-            self.fail('Can not find the expected neutron arista plugin')
-
-    def test_build_image_list_plugin_parsing(self):
-        """Ensure regex used to parse plugins adds them to the correct image"""
-        self.conf.set_override('install_type', 'source')
-
-        kolla = build.KollaWorker(self.conf)
-        kolla.setup_working_dir()
-        kolla.find_dockerfiles()
-        kolla.create_dockerfiles()
-        kolla.build_image_list()
-        for image in kolla.images:
-            if image.name == 'base':
-                self.assertEqual(len(image.plugins), 0,
-                                 'base image should not have any plugins '
-                                 'registered')
-                break
-        else:
-            self.fail('Expected to find the base image in this test')
-
-    def _get_matched_images(self, images):
-        return [image for image in images
-                if image.status == build.STATUS_MATCHED]
-
-    def test_without_profile(self):
-        kolla = build.KollaWorker(self.conf)
-        kolla.images = self.images
-        kolla.filter_images()
-
-        self.assertEqual(2, len(self._get_matched_images(kolla.images)))
-
-    def test_pre_defined_exist_profile(self):
-        # default profile include the fake image: image-base
-        self.conf.set_override('profile', ['default'])
-        kolla = build.KollaWorker(self.conf)
-        kolla.images = self.images
-        kolla.filter_images()
-
-        self.assertEqual(1, len(self._get_matched_images(kolla.images)))
-
-    def test_pre_defined_exist_profile_not_include(self):
-        # infra profile do not include the fake image: image-base
-        self.conf.set_override('profile', ['infra'])
-        kolla = build.KollaWorker(self.conf)
-        kolla.images = self.images
-        kolla.filter_images()
-
-        self.assertEqual(0, len(self._get_matched_images(kolla.images)))
-
-    def test_pre_defined_not_exist_profile(self):
-        # NOTE(jeffrey4l): not exist profile will raise ValueError
-        self.conf.set_override('profile', ['not_exist'])
-        kolla = build.KollaWorker(self.conf)
-        kolla.images = self.images
-        self.assertRaises(ValueError,
-                          kolla.filter_images)
-
-    @mock.patch('pprint.pprint')
-    def test_list_dependencies(self, pprint_mock):
-        self.conf.set_override('profile', ['all'])
-        kolla = build.KollaWorker(self.conf)
-        kolla.images = self.images
-        kolla.filter_images()
-        kolla.list_dependencies()
-        pprint_mock.assert_called_once_with(mock.ANY)
-
-
-@mock.patch.object(build, 'run_build')
-class MainTest(base.TestCase):
-
-    def test_images_built(self, mock_run_build):
-        image_statuses = ({}, {'img': 'built'}, {})
-        mock_run_build.return_value = image_statuses
-        result = build_cmd.main()
-        self.assertEqual(0, result)
-
-    def test_images_unmatched(self, mock_run_build):
-        image_statuses = ({}, {}, {'img': 'unmatched'})
-        mock_run_build.return_value = image_statuses
-        result = build_cmd.main()
-        self.assertEqual(0, result)
-
-    def test_no_images_built(self, mock_run_build):
-        mock_run_build.return_value = None
-        result = build_cmd.main()
-        self.assertEqual(0, result)
-
-    def test_bad_images(self, mock_run_build):
-        image_statuses = ({'img': 'error'}, {}, {})
-        mock_run_build.return_value = image_statuses
-        result = build_cmd.main()
-        self.assertEqual(1, result)
diff --git a/setup.cfg b/setup.cfg
index 8f78de6597..c5d5b1257f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
-name = kolla
-summary = Kolla OpenStack Deployment
+name = kolla-ansible
+summary = Ansible Deployment of Kolla containers
 description-file =
     README.rst
 author = OpenStack
@@ -25,7 +25,6 @@ packages =
     kolla
 data_files =
     share/kolla/ansible = ansible/*
-    share/kolla/docker = docker/*
    share/kolla/tools = tools/validate-docker-execute.sh
    share/kolla/tools = tools/cleanup-containers
    share/kolla/tools = tools/cleanup-host
@@ -41,7 +40,6 @@ scripts =

 [entry_points]
 console_scripts =
-    kolla-build = kolla.cmd.build:main
     kolla-genpwd = kolla.cmd.genpwd:main
     kolla-mergepwd = kolla.cmd.mergepwd:main
 oslo.config.opts =
diff --git a/tests/test_build.py b/tests/test_build.py
deleted file mode 100644
index 8b240c017b..0000000000
--- a/tests/test_build.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import os
-import sys
-
-from mock import patch
-from oslo_log import fixture as log_fixture
-from oslo_log import log as logging
-from oslotest import base
-import testtools
-
-sys.path.append(
-    os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools')))
-from kolla.image import build
-
-LOG = logging.getLogger(__name__)
-
-
-class BuildTest(object):
-    excluded_images = abc.abstractproperty()
-
-    def setUp(self):
-        super(BuildTest, self).setUp()
-        self.useFixture(log_fixture.SetLogLevel([__name__],
-                                                logging.logging.INFO))
-        self.build_args = [__name__, "--debug", '--threads', '4']
-
-    @testtools.skipUnless(os.environ.get('DOCKER_BUILD_TEST'),
-                          'Skip the docker build test')
-    def runTest(self):
-        with patch.object(sys, 'argv', self.build_args):
-            LOG.info("Running with args %s", self.build_args)
-            bad_results, good_results, unmatched_results = build.run_build()
-
-        failures = 0
-        for image, result in bad_results.items():
-            if image in self.excluded_images:
-                if result is 'error':
-                    continue
-                failures = failures + 1
-                LOG.warning(">>> Expected image '%s' to fail, please update"
-                            " the excluded_images in source file above if the"
-                            " image build has been fixed.", image)
-            else:
-                if result is not 'error':
-                    continue
-                failures = failures + 1
-                LOG.critical(">>> Expected image '%s' to succeed!", image)
-
-        for image in unmatched_results.keys():
-            LOG.warning(">>> Image '%s' was not matched", image)
-
-        self.assertEqual(failures, 0, "%d failure(s) occurred" % failures)
-
-
-class BuildTestCentosBinary(BuildTest, base.BaseTestCase):
-    excluded_images = ["kuryr-base",
-                       "neutron-sfc-agent",
-                       "searchlight-base",
-                       "senlin-base",
-                       "solum-base",
-                       "vmtp",
-                       "manila-data",
-                       "watcher-base",
-                       "congress-base",
-                       "bifrost-base",
-                       "cloudkitty-base",
-                       "freezer-base"]
-
-    def setUp(self):
-        super(BuildTestCentosBinary, self).setUp()
-        self.build_args.extend(["--base", "centos",
-                                "--type", "binary"])
-
-
-class BuildTestCentosSource(BuildTest, base.BaseTestCase):
-    excluded_images = ["mistral-base"]
-
-    def setUp(self):
-        super(BuildTestCentosSource, self).setUp()
-        self.build_args.extend(["--base", "centos",
-                                "--type", "source"])
-
-
-class BuildTestUbuntuBinary(BuildTest, base.BaseTestCase):
-    excluded_images = ["kuryr-base",
-                       "neutron-sfc-agent",
-                       "searchlight-base",
-                       "senlin-base",
-                       "solum-base",
-                       "vmtp",
-                       "zaqar",
-                       "watcher-base",
-                       "congress-base",
-                       "bifrost-base",
-                       "cloudkitty-base",
-                       "freezer-base",
-                       "panko-base"]
-
-    def setUp(self):
-        super(BuildTestUbuntuBinary, self).setUp()
-        self.build_args.extend(["--base", "ubuntu",
-                                "--type", "binary"])
-
-
-class BuildTestUbuntuSource(BuildTest, base.BaseTestCase):
-    excluded_images = []
-
-    def setUp(self):
-        super(BuildTestUbuntuSource, self).setUp()
-        self.build_args.extend(["--base", "ubuntu",
-                                "--type", "source"])
-
-
-class BuildTestOracleLinuxBinary(BuildTest, base.BaseTestCase):
-    excluded_images = ["kuryr-base",
-                       "neutron-sfc-agent",
-                       "searchlight-base",
-                       "senlin-base",
-                       "solum-base",
-                       "vmtp",
-                       "manila-data",
-                       "watcher-base",
-                       "congress-base",
-                       "bifrost-base",
-                       "cloudkitty-base",
-                       "freezer-base"]
-
-    def setUp(self):
-        super(BuildTestOracleLinuxBinary, self).setUp()
-        self.build_args.extend(["--base", "oraclelinux",
-                                "--type", "binary"])
-
-
-class BuildTestOracleLinuxSource(BuildTest, base.BaseTestCase):
-    excluded_images = []
-
-    def setUp(self):
-        super(BuildTestOracleLinuxSource, self).setUp()
-        self.build_args.extend(["--base", "oraclelinux",
-                                "--type", "source"])
-
-
-class DeployTestCentosBinary(BuildTestCentosBinary):
-    def setUp(self):
-        super(DeployTestCentosBinary, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
-
-
-class DeployTestCentosSource(BuildTestCentosSource):
-    def setUp(self):
-        super(DeployTestCentosSource, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
-
-
-class DeployTestOracleLinuxBinary(BuildTestOracleLinuxBinary):
-    def setUp(self):
-        super(DeployTestOracleLinuxBinary, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
-
-
-class DeployTestOracleLinuxSource(BuildTestOracleLinuxSource):
-    def setUp(self):
-        super(DeployTestOracleLinuxSource, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
-
-
-class DeployTestUbuntuBinary(BuildTestUbuntuBinary):
-    def setUp(self):
-        super(DeployTestUbuntuBinary, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
-
-
-class DeployTestUbuntuSource(BuildTestUbuntuSource):
-    def setUp(self):
-        super(DeployTestUbuntuSource, self).setUp()
-        self.build_args.extend(["--profile", "gate"])
diff --git a/tools/build.py b/tools/build.py
deleted file mode 120000
index 285d9d6602..0000000000
--- a/tools/build.py
+++ /dev/null
@@ -1 +0,0 @@
-../kolla/cmd/build.py
\ No newline at end of file
diff --git a/tools/validate-all-dockerfiles.sh b/tools/validate-all-dockerfiles.sh
deleted file mode 100755
index 5a39d24c04..0000000000
--- a/tools/validate-all-dockerfiles.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-REAL_PATH=$(python -c "import os,sys;print os.path.realpath('$0')")
-cd "$(dirname "$REAL_PATH")/.."
-
-find docker -name Dockerfile.j2 -print0 |
-    xargs -0 tools/validate-maintainer.sh || exit 1
-
-find docker -name Dockerfile.j2 -print0 |
-    xargs -0 tools/validate-install-command.sh || exit 1
-
diff --git a/tox.ini b/tox.ini
index 18230d74bf..8c19b4a377 100644
--- a/tox.ini
+++ b/tox.ini
@@ -25,7 +25,6 @@ commands = oslo_debug_helper {posargs}
 commands =
   {toxinidir}/tools/run-bashate.sh
   flake8 {posargs}
-  {toxinidir}/tools/validate-all-dockerfiles.sh
  python {toxinidir}/tools/validate-all-file.py
  bandit -r ansible/library kolla tests tools

@@ -58,6 +57,3 @@ commands =
 [flake8]
 show-source = True
 exclude=.eggs,.git,.tox,doc
-
-[hacking]
-local-check-factory = kolla.hacking.checks.factory
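
For reference, the debian_package_install template helper deleted above turns a list of Debian package names and .deb URLs into a single '&&'-chained string suitable for a Dockerfile RUN instruction: plain names become one apt-get install call, URLs become curl/dpkg/cleanup steps. Below is a minimal standalone sketch of that behaviour; the example package list at the bottom is hypothetical and only for illustration.

# Standalone sketch of the removed kolla/template/methods.py helper,
# kept here only to document what the deleted Jinja2 filter produced.
def debian_package_install(packages):
    cmds = []
    # split plain apt packages from direct .deb URLs
    reg_packages = [p for p in packages if not p.startswith('http')]
    url_packages = [p for p in packages if p.startswith('http')]

    if reg_packages:
        cmds.append('apt-get -y install --no-install-recommends {}'.format(
            ' '.join(reg_packages)))
        cmds.append('apt-get clean')

    for url in url_packages:
        name = url[url.rfind('/') + 1:]  # file name portion of the URL
        cmds.extend([
            'curl --location {} -o {}'.format(url, name),
            'dpkg -i {}'.format(name),
            'rm -rf {}'.format(name),
        ])

    return ' && '.join(cmds)


if __name__ == '__main__':
    # hypothetical input, for illustration only
    print(debian_package_install(
        ['cron', 'curl', 'https://example.org/pool/foo_1.0_amd64.deb']))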