Unit tests for K8s-coredump handler

Add unit tests for the k8s-coredump handler and fix warnings
raised by the test suite.

Test Plan:
PASS: Run tox with the full test suite (pytest, flake8,
      pep8, bandit, cover, pylint)

Story: 2010261
Task: 47117

Depends-On: https://review.opendev.org/c/starlingx/utilities/+/869392

Signed-off-by: Heron Vieira <heron.vieira@windriver.com>
Change-Id: Ie0735e32f37f99197dba15d3f1f2b39dbf1c222c
Heron Vieira 2023-01-05 16:20:42 -03:00
parent db7144f1cf
commit d03a0fdccc
13 changed files with 1309 additions and 15 deletions


@ -11,6 +11,7 @@
- stx-utilities-pci-irq-affinity-agent-tox-py27
- stx-utilities-pci-irq-affinity-agent-tox-py39
- stx-utilities-pci-irq-affinity-agent-tox-pep8
- stx-utilities-k8s-coredump-tox-py39
gate:
jobs:
- stx-utilities-tox-linters
@ -22,6 +23,7 @@
- stx-utilities-pci-irq-affinity-agent-tox-py27
- stx-utilities-pci-irq-affinity-agent-tox-py39
- stx-utilities-pci-irq-affinity-agent-tox-pep8
- stx-utilities-k8s-coredump-tox-py39
post:
jobs:
- stx-utilities-upload-git-mirror
@ -136,6 +138,17 @@
tox_envlist: pep8
tox_extra_args: -c ./utilities/pci-irq-affinity-agent/pci_irq_affinity/tox.ini
- job:
name: stx-utilities-k8s-coredump-tox-py39
parent: tox-py39
description: Run py39 tests for k8s-coredump
nodeset: debian-bullseye
files:
- utilities/k8s-coredump/k8s-coredump/*
vars:
tox_envlist: py39
tox_extra_args: -c utilities/k8s-coredump/k8s-coredump/tox.ini
- secret:
name: stx-utilities-github-secret
data:


@ -0,0 +1,3 @@
[DEFAULT]
test_path=./k8s_coredump/tests
top_dir=./k8s_coredump
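# Read by 'stestr run' (invoked from tox.ini) to locate the unit tests under k8s_coredump/tests.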


@ -161,7 +161,8 @@ def check_available_space(path):
"free_space": free,
}
LOG.info(
f'Space info for {path}: Total - {total} bytes | Used - {used} bytes ({"%.2f" % ((used * 100) / total)} %) | '
f'Space info for {path}: Total - {total} bytes | '
f'Used - {used} bytes ({"%.2f" % ((used * 100) / total)} %) | '
f'Free - {free} bytes ({"%.2f" % ((free * 100) / total)} %)')
return space_info
except FileNotFoundError as e:
@ -237,7 +238,7 @@ def write_coredump_file(pid, corefile, annotations_config):
f'Size limit? {size_limit_in_bytes if size_limit_in_bytes > 0 else "No size limit"} |'
f'Buffer read size = {buffer_read_size}')
with nsenter.Namespace(pid, 'mnt') as ns:
with nsenter.Namespace(pid, 'mnt'):
LOG.info(f'Entered namespace for pid = {pid}')
try:
with io.open(corefile, "wb") as f:
@ -298,7 +299,7 @@ def get_file_size_limit(corefile, annotations_config, pid):
Value of the calculated size limit or 0 if no limit is set.
"""
# Enter namespace to check size inside the container
with nsenter.Namespace(pid, 'mnt') as ns:
with nsenter.Namespace(pid, 'mnt'):
# Set starting information
core_path = os.path.dirname(corefile)
space_info = check_available_space(core_path)
@ -316,21 +317,24 @@ def get_file_size_limit(corefile, annotations_config, pid):
if annotations_config['file_size_config']:
file_size_config = parse_size_config(annotations_config['file_size_config'])
if not file_size_config:
LOG.error("Invalid starlingx.io/core_max_size configuration: {}".format(annotations_config['file_size_config']))
LOG.error("Invalid starlingx.io/core_max_size configuration: {}".format(
annotations_config['file_size_config']))
sys.exit(-1)
has_max_file_config = True
if annotations_config['max_use_config']:
max_use_config = parse_size_config(annotations_config['max_use_config'])
if not max_use_config:
LOG.error("Invalid starlingx.io/core_max_used configuration: {}".format(annotations_config['max_use_config']))
LOG.error("Invalid starlingx.io/core_max_used configuration: {}".format(
annotations_config['max_use_config']))
sys.exit(-1)
has_max_use_config = True
if annotations_config['keep_free_config']:
keep_free_config = parse_size_config(annotations_config['keep_free_config'])
if not keep_free_config:
LOG.error("Invalid starlingx.io/core_min_free configuration: {}".format(annotations_config['keep_free_config']))
LOG.error("Invalid starlingx.io/core_min_free configuration: {}".format(
annotations_config['keep_free_config']))
sys.exit(-1)
has_keep_free_config = True


@ -45,14 +45,14 @@ def _getPodUID(pid):
# extract pod UID from pod cgroup path
return match.group(1)
except IOError as e:
LOG.error("Failed to read process cgroups: {}".format(e))
LOG.error("Failed to read process cgroups: %s" % e)
sys.exit(-1)
return None # normal for processes not running in a container
def _lookupPod(pid):
podUID = _getPodUID(pid)
LOG.debug("lookupPod: podUID={}".format(podUID))
LOG.debug("lookupPod: podUID=%s" % podUID)
# retrieve pod details from kubelet
if podUID:
url = LOCALHOST_URL
@ -73,11 +73,11 @@ def _systemCoreFile():
# delegate handling to systemd coredump handler
try:
cmd = [SYSTEMD_COREDUMP] + sys.argv[1:]
LOG.info(f"No pod information was found, using default system coredump. Command: {cmd}")
LOG.info("No pod information was found, using default system coredump. Command: %s" % cmd)
subprocess.run(cmd)
LOG.info("Dumped through default core process")
except subprocess.CalledProcessError as e:
LOG.error("Failed to call systemd-coredump: {}".format(e))
LOG.error("Failed to call systemd-coredump: %s" % e)
sys.exit(-1)
@ -89,10 +89,10 @@ def _podCoreFile(pid, corefile, annotations_config):
cwd = os.path.realpath(path)
corefile = os.path.join(cwd, corefile)
except os.OSError as e:
LOG.error("Failed to get current working directory: {}".format(e))
LOG.error("Failed to get current working directory: %s" % e)
sys.exit(-1)
LOG.debug("podCoreFile: corefile={}".format(corefile))
LOG.debug("podCoreFile: corefile=%s" % corefile)
write_coredump_file(pid, corefile, annotations_config)
@ -110,7 +110,7 @@ def CoreDumpHandler(**kwargs):
metadata = pod['metadata']
annotations_config = get_annotations_config(pod)
if annotations_config['core_pattern'] is not None:
LOG.info("Pod %s/%s handling core dump for %s" % \
LOG.info("Pod %s/%s handling core dump for %s" %
(metadata['namespace'], metadata['name'], pid))
if not annotations_config['core_pattern']:
# default core pattern
@ -122,10 +122,10 @@ def CoreDumpHandler(**kwargs):
_podCoreFile(pid, corefile, annotations_config)
return # core dump handled by Pod
else:
LOG.debug("Pod %s/%s does not define annotation core_pattern" % \
LOG.debug("Pod %s/%s does not define annotation core_pattern" %
(metadata['namespace'], metadata['name']))
except ValueError as e:
LOG.error("Pod defined an invalid core dump annotation: {}".format(e))
LOG.error("Pod defined an invalid core dump annotation: %s" % e)
sys.exit(-1)
except KeyError:
LOG.debug("Pod does have annotations defined")


@ -0,0 +1,24 @@
################################################################################
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
Running the unit tests (stestr) through tox:
tox [--recreate] -e py39
Running flake8 through tox:
tox [--recreate] -e flake8
Running full suite through tox:
tox [--recreate] -e py39,flake8,pep8,bandit,pylint,cover
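Running a subset of the tests (tox forwards extra arguments to stestr as a
test filter, so any regex matching a test id should work; the module below is
just an example):
tox [--recreate] -e py39 -- k8s_coredump.tests.test_coredump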
Observations:
- Some tests use base.MockedOpen and base.MockedFile to mock file operations.
  If one of those tests fails, an extra exception is reported alongside the
  actual test error, like this:
  NotImplementedError("If you're seeing this, something went wrong with the tests, check the logs.")
  This is expected: while building the traceback, the test framework performs
  file operations that end up going through base.MockedOpen and base.MockedFile,
  which do not implement those operations.


@ -0,0 +1,7 @@
"""
:mod:`k8s_coredump.tests` -- k8s_coredump Unittests
=====================================================
.. automodule:: k8s_coredump.tests
:platform: Unix
"""


@ -0,0 +1,136 @@
################################################################################
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
import io
from io import StringIO
import fixtures
from k8s_coredump.tests.test_data import CGROUP_FILE_MOCK
from k8s_coredump.tests.test_data import EXPECTED_TOKEN
from testtools import TestCase
class FakeLog(object):
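# Minimal stand-in for the module-level LOG objects: info/debug messages are
# collected under 'info' and error/critical messages under 'error', so the
# tests can inspect what was logged.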
def __init__(self):
self.logs = {"info": [], "error": []}
def clear_logs(self):
self.logs = {"info": [], "error": []}
def info(self, string_log):
self.logs['info'].append(string_log)
def debug(self, string_log):
self.logs['info'].append(string_log)
def error(self, string_log):
self.logs['error'].append(string_log)
def critical(self, string_log):
self.logs['error'].append(string_log)
def get_info(self):
return f"Info:\n{[a for a in self.logs['info']]}"
def get_error(self):
return f"Error:\n{[a for a in self.logs['error']]}"
def get_all(self):
return f"{self.get_info()}\n{self.get_error()}"
class MockedFile(object):
def __init__(self, path, mode):
self.path = path
self.mode = mode
self.content = []
self.bytes_amount = 0
def write(self, content):
self.bytes_amount += len(content)
self.content.append(content)
def tell(self):
return self.bytes_amount
def flush(self):
pass
def get_full_content(self):
content = ""
for item in self.content:
content += item
return content
def read(self):
# This method is used in the test for the coredump._getToken method.
return f"""
{{
"k8s_coredump_token": "{EXPECTED_TOKEN}.{self.path}.{self.mode}"
}}
"""
def __iter__(self):
# This method is used in the test for the coredump._getPodUID method.
self.string_file = io.StringIO(CGROUP_FILE_MOCK)
return self
def __next__(self):
# This method is used in the test for the coredump._getPodUID method.
line = self.string_file.readline()
if line != '':
return line
else:
raise StopIteration
class MockedOpen(object):
def __init__(self, path, mode):
self.path = path
self.mode = mode
def __enter__(self):
self.opened_file = MockedFile(self.path, self.mode)
return self.opened_file
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def readline(self):
raise NotImplementedError(
"If you're seeing this, something went wrong with the tests, check the logs.")
class MockedStdin(object):
def __init__(self, coredump_content):
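# Stands in for sys.stdin in the tests; 'buffer' plays the role of
# sys.stdin.buffer and holds the mocked core dump content.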
self.buffer = StringIO(coredump_content)
class BaseTestCase(TestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(BaseTestCase, self).setUp()
self.fake_log = FakeLog()
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.config_functions.LOG', self.fake_log))
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.coredump.LOG', self.fake_log))
# Values that would come from the invocation of k8s-coredump-handler
self.input_kwargs = {
'pid': "999999", # %P
'uid': "8", # %u
'gid': "7", # %g
'signal': "6", # %s
'timestamp': "1671181200", # %t
'comm': "process_name_for_systemd_handler", # %e
'hostname': "test_host", # %h
'comm2': "process_name_for_k8s_handler", # %e
}


@ -0,0 +1,217 @@
################################################################################
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
import collections
import fixtures
from k8s_coredump.tests.base import BaseTestCase
from k8s_coredump.tests.base import MockedOpen
from k8s_coredump.tests.base import MockedStdin
from k8s_coredump.tests.test_data import ANNOTATIONS_EXAMPLES
from k8s_coredump.tests.test_data import DISK_USAGE
import mock
from testtools import matchers
# Mocking logging.basicConfig to avoid a "path not found" error in constants.py, which is imported by config_functions.
with mock.patch('logging.basicConfig') as mock_method:
from k8s_coredump import config_functions
class TestConfigFunctions(BaseTestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestConfigFunctions, self).setUp()
self.space_info = DISK_USAGE
# Mock the disk usage object to be as close to shutil.disk_usage as possible,
# building the same named tuple "usage" that shutil.disk_usage returns.
def mocked_disk_usage(path):
test_usage = collections.namedtuple('usage', 'total used free')
test_usage.total.__doc__ = 'Total space in bytes'
test_usage.used.__doc__ = 'Used space in bytes'
test_usage.free.__doc__ = 'Free space in bytes'
return test_usage(total=self.space_info['total_space'],
used=self.space_info['used_space'],
free=self.space_info['free_space'])
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.config_functions.shutil.disk_usage', mocked_disk_usage))
self.annotations_examples = ANNOTATIONS_EXAMPLES
def test_parse_core_pattern(self):
"""Test for config_functions.parse_core_pattern
Using the annotations_examples from test_data, the method config_functions.parse_core_pattern
is tested to verify if the parsing of core_pattern is working as intended.
"""
for annotations in self.annotations_examples:
parsed_core_name = config_functions.parse_core_pattern(annotations["starlingx.io/core_pattern"],
**self.input_kwargs)
self.assertEqual(parsed_core_name, annotations["expected_core_pattern"])
def test_parse_size_config(self):
"""Test for config_functions.parse_size_config
Using the annotations_examples from test_data, the method config_functions.parse_size_config
is tested to verify if the parsing of the size config is working as intended.
"""
for annotations in self.annotations_examples:
if "starlingx.io/core_max_size" in annotations:
parsed_size_config = config_functions.parse_size_config(annotations["starlingx.io/core_max_size"])
self.assertEqual(parsed_size_config, annotations["expected_core_max_size"])
def test_get_annotations_config(self):
"""Test for config_functions.get_annotations_config
Using the annotations_examples from test_data, the method config_functions.get_annotations_config
is tested to verify if the parsing of the annotations is working as intended.
"""
for annotations in self.annotations_examples:
# Set up the data as expected by the method get_annotations_config
raw_annotations = {
'metadata': {
'annotations': annotations
}
}
parsed_annotations = config_functions.get_annotations_config(raw_annotations)
# Test if parsed annotations has all the keys that it should have.
self.assertThat(parsed_annotations,
matchers.KeysEqual('core_pattern', 'file_size_config',
'file_compression_config', 'max_use_config', 'keep_free_config'))
# Test each value
self.assertEqual(parsed_annotations['core_pattern'],
annotations.get("starlingx.io/core_pattern"))
self.assertEqual(parsed_annotations['file_size_config'],
annotations.get("starlingx.io/core_max_size"))
self.assertEqual(parsed_annotations['file_compression_config'],
annotations.get("starlingx.io/core_compression"))
self.assertEqual(parsed_annotations['max_use_config'],
annotations.get("starlingx.io/core_max_used"))
self.assertEqual(parsed_annotations['keep_free_config'],
annotations.get("starlingx.io/core_min_free"))
def test_check_available_space(self):
"""Test for config_functions.check_available_space
Using a mocked disk space, the method config_functions.check_available_space
is tested to verify if the method is working as intended, returning the
disk space information.
"""
avail_space = config_functions.check_available_space('/any/path')
# Check if the returned available space is the same as the values mocked in the setup.
self.assertEqual(avail_space, self.space_info)
def test_convert_from_bytes(self):
"""Test for config_functions.convert_from_bytes
Using some test values, the method config_functions.convert_from_bytes
is tested to verify if the conversion of bytes to other size units
is working as intended.
"""
converted_gigabytes = config_functions.convert_from_bytes(536870912000, 'g')
self.assertEqual(converted_gigabytes, 500)
converted_kilobytes = config_functions.convert_from_bytes(536870912000, 'k')
self.assertEqual(converted_kilobytes, 524288000)
converted_megabytes = config_functions.convert_from_bytes(536870912000, 'm')
self.assertEqual(converted_megabytes, 512000)
def test_convert_to_bytes(self):
"""Test for config_functions.convert_to_bytes
Using some test values, the method config_functions.convert_to_bytes
is tested to verify if the conversion to bytes from other size units
is working as intended.
"""
converted_gigabytes = config_functions.convert_to_bytes(500, 'g')
self.assertEqual(converted_gigabytes, 536870912000)
converted_kilobytes = config_functions.convert_to_bytes(524288000, 'k')
self.assertEqual(converted_kilobytes, 536870912000)
converted_megabytes = config_functions.convert_to_bytes(512000, 'm')
self.assertEqual(converted_megabytes, 536870912000)
def test_get_percentage_byte_value(self):
"""Test for config_functions.get_percentage_byte_value
Using some test values, the method config_functions.get_percentage_byte_value
is tested to verify if the conversion from a percentage of the total disk
space to a byte value is working as intended.
"""
bytes_value = config_functions.get_percentage_byte_value(20, self.space_info)
self.assertEqual(bytes_value, self.space_info['total_space'] * 0.2)
bytes_value = config_functions.get_percentage_byte_value(50, self.space_info)
self.assertEqual(bytes_value, self.space_info['total_space'] * 0.5)
bytes_value = config_functions.get_percentage_byte_value(2, self.space_info)
self.assertEqual(bytes_value, self.space_info['total_space'] * 0.02)
def test_get_file_size_limit(self):
"""Test for config_functions.get_file_size_limit
Using the annotations_examples from test_data, the method config_functions.get_file_size_limit
is tested to verify if the method calculates the file size limit as intended, according to
the annotations configuration and disk space.
"""
for annotations in self.annotations_examples:
# Set up the data as expected by the method get_annotations_config
raw_annotations = {
'metadata': {
'annotations': annotations
}
}
parsed_annotations = config_functions.get_annotations_config(raw_annotations)
with mock.patch('k8s_coredump.config_functions.nsenter.Namespace'):
with mock.patch('k8s_coredump.config_functions.os.path.dirname'):
try:
size_value_to_truncate = config_functions.get_file_size_limit('/core/file/path',
parsed_annotations, '9999')
except SystemExit:
if annotations['expected_truncate_value'] == 0:
size_value_to_truncate = 0
else:
raise SystemExit()
self.assertEqual(size_value_to_truncate, annotations['expected_truncate_value'])
def test_write_coredump_file(self):
"""Test for config_functions.write_coredump_file
Using the annotations_examples from test_data, the method config_functions.write_coredump_file
is tested to verify if the method calculates the file size limit as intended,
according to the annotations configuration and disk space, and writes the file
with the correct content while respecting that limit.
"""
for annotations in self.annotations_examples:
# Set up the data as expected by the method get_annotations_config
raw_annotations = {
'metadata': {
'annotations': annotations
}
}
parsed_annotations = config_functions.get_annotations_config(raw_annotations)
def mocked_open(path, mode):
self.mocked_open_instance = MockedOpen(path, mode)
return self.mocked_open_instance
self.mock_stdin = MockedStdin(annotations['coredump_file_content'])
with mock.patch('k8s_coredump.config_functions.nsenter.Namespace'):
with mock.patch('k8s_coredump.config_functions.os.path.dirname'):
with mock.patch('k8s_coredump.config_functions.io.open', mocked_open):
with mock.patch('k8s_coredump.config_functions.sys.stdin', self.mock_stdin):
try:
config_functions.write_coredump_file('9999', '/core/file/path', parsed_annotations)
self.assertEqual(self.mocked_open_instance.path, '/core/file/path')
self.assertEqual(self.mocked_open_instance.mode, 'wb')
except SystemExit:
if annotations['expected_write_content'] != "":
raise SystemExit()
if annotations['expected_write_content'] != "":
self.assertEqual(annotations['expected_write_content'],
self.mocked_open_instance.opened_file.get_full_content())


@ -0,0 +1,194 @@
################################################################################
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
import collections
import json
import fixtures
from k8s_coredump import coredump
from k8s_coredump.tests.base import BaseTestCase
from k8s_coredump.tests.base import MockedOpen
from k8s_coredump.tests.base import MockedStdin
from k8s_coredump.tests.test_data import ANNOTATIONS_EXAMPLES
from k8s_coredump.tests.test_data import DISK_USAGE
from k8s_coredump.tests.test_data import EXPECTED_TOKEN
from k8s_coredump.tests.test_data import EXPECTED_TOKEN_MODE
from k8s_coredump.tests.test_data import EXPECTED_TOKEN_PATH
from k8s_coredump.tests.test_data import MOCKED_POD_INFO
from k8s_coredump.tests.test_data import MOCKED_PODS_REQUEST_RESPONSE
from k8s_coredump.tests.test_data import MOCKED_UID
import mock
class MockedGetResponse(object):
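# Stand-in for the requests.get() response: records the arguments the code
# under test passed to requests.get and exposes only the json() method that
# coredump._lookupPod consumes.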
def __init__(self, url, headers, timeout, verify, response=MOCKED_PODS_REQUEST_RESPONSE):
self.url = url
self.headers = headers
self.timeout = timeout
self.verify = verify
self.response = response
def json(self):
return json.loads(self.response)
class TestCoredump(BaseTestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCoredump, self).setUp()
self.expected_token = EXPECTED_TOKEN
self.expected_path = EXPECTED_TOKEN_PATH
self.expected_mode = EXPECTED_TOKEN_MODE
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.coredump.open', MockedOpen))
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.coredump.io.open', MockedOpen))
def mocked_run(cmd):
self.run_command = cmd
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.coredump.subprocess.run', mocked_run))
def mocked_requests_get(url, headers, timeout, verify):
self.mocked_get = MockedGetResponse(url, headers, timeout, verify)
return self.mocked_get
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.coredump.requests.get', mocked_requests_get))
self.annotations_examples = ANNOTATIONS_EXAMPLES
self.space_info = DISK_USAGE
# Mock the disk usage object to be as close to shutil.disk_usage as possible,
# building the same named tuple "usage" that shutil.disk_usage returns.
def mocked_disk_usage(path):
test_usage = collections.namedtuple('usage', 'total used free')
test_usage.total.__doc__ = 'Total space in bytes'
test_usage.used.__doc__ = 'Used space in bytes'
test_usage.free.__doc__ = 'Free space in bytes'
return test_usage(total=self.space_info['total_space'],
used=self.space_info['used_space'],
free=self.space_info['free_space'])
self.useFixture(
fixtures.MonkeyPatch('k8s_coredump.config_functions.shutil.disk_usage', mocked_disk_usage))
def test_getToken(self):
"""Test for coredump._getToken
Using some mocked data, the method coredump._getToken is tested to verify if
the method returns the token as expected, also verifying the path and mode
with which the JSON file containing the token was opened.
The setup data for this test is provided by MockedFile.read, since that is how
coredump._getToken reads the token.
"""
token = coredump._getToken()
expected_result = f"{self.expected_token}.{self.expected_path}.{self.expected_mode}"
self.assertEqual(token, expected_result)
def test_systemCoreFile(self):
"""Test for coredump._systemCoreFile
Using a mocked run method, the method coredump._systemCoreFile is tested to verify if
the method attempts to run the right command when invoking systemd-coredump.
See the mocked_run in the setup method of this test case class.
"""
self.run_command = ""
coredump._systemCoreFile()
self.assertEqual(self.run_command[0], '/usr/lib/systemd/systemd-coredump')
def test_getPodUID(self):
"""Test for coredump._getPodUID
Using a mocked open method, the method coredump._getPodUID is tested to verify if
the method manages to get the pod UID correctly.
See the MockedFile.__iter__ and MockedFile.__next__ in the base file for the methods
that are mocked for this test case.
"""
pod_uid = coredump._getPodUID("9999999")
self.assertEqual(pod_uid, MOCKED_UID)
def test_lookupPod(self):
"""Test for coredump._lookupPod
Using a mocked requests.get method, the method coredump._lookupPod is tested to verify if
the method manages to get the pod information correctly.
See mocked_requests_get in the setUp method of this test case class and the
MockedGetResponse class for the mocks used in this test case.
"""
pod_info = coredump._lookupPod("9999999")
mocked_pod_info_dict = json.loads(MOCKED_POD_INFO)
self.assertEqual(pod_info, mocked_pod_info_dict)
metadata = pod_info.get("metadata")
pod_uid = metadata.get("uid")
self.assertEqual(pod_uid, MOCKED_UID)
def test_CoreDumpHandler(self):
"""Test for coredump.CoreDumpHandler
Using the annotations_examples from test_data, the method coredump.CoreDumpHandler
is tested to verify if the method executes the whole process, validating annotations
configurations and writing the right content to the coredump file.
See MockedFile write, tell and get_full_content for the methods that are used
in this test case.
"""
for annotations in self.annotations_examples:
self.mocked_response = f"""
{{
"items": [
{{
"metadata": {{
"uid": "{MOCKED_UID}",
"namespace": "POD_NAME",
"name": "APPLICATION_NAME",
"annotations":
{{
"starlingx.io/core_pattern": "{annotations.get(
"starlingx.io/core_pattern", "")}",
"starlingx.io/core_max_size": "{annotations.get(
"starlingx.io/core_max_size", "")}",
"starlingx.io/core_compression": "{annotations.get(
"starlingx.io/core_compression", "")}",
"starlingx.io/core_max_used": "{annotations.get(
"starlingx.io/core_max_used", "")}",
"starlingx.io/core_min_free": "{annotations.get(
"starlingx.io/core_min_free", "")}"
}}
}}
}}
]
}}
"""
def mocked_requests_get(url, headers, timeout, verify):
self.mocked_get = MockedGetResponse(url, headers, timeout, verify, self.mocked_response)
return self.mocked_get
def mocked_open(path, mode):
self.mocked_open_instance = MockedOpen(path, mode)
return self.mocked_open_instance
self.mock_stdin = MockedStdin(annotations['coredump_file_content'])
with mock.patch('k8s_coredump.coredump.requests.get', mocked_requests_get):
with mock.patch('k8s_coredump.config_functions.nsenter.Namespace'):
with mock.patch('k8s_coredump.config_functions.os.path.dirname'):
with mock.patch('k8s_coredump.config_functions.io.open', mocked_open):
with mock.patch('k8s_coredump.config_functions.sys.stdin', self.mock_stdin):
try:
coredump.CoreDumpHandler(**self.input_kwargs)
except SystemExit:
if annotations['expected_write_content'] != "":
raise SystemExit()
if annotations['expected_write_content'] != "":
self.assertEqual(annotations['expected_write_content'],
self.mocked_open_instance.opened_file.get_full_content())


@ -0,0 +1,130 @@
################################################################################
# Copyright (c) 2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
################################################################################
import mock
# Mocking logging.basicConfig to avoid a "path not found" error in constants.py, which is imported by config_functions.
with mock.patch('logging.basicConfig') as mock_method:
from k8s_coredump import config_functions
from k8s_coredump.common import constants
# Mock for disk usage using values in bytes: Total = 500GB / Used = 250GB / Free = 250GB
DISK_USAGE = {'total_space': 536870912000, 'used_space': 268435456000, 'free_space': 268435456000}
# Dictionary with test input values and expected values for individual test cases.
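# Each entry pairs pod annotations with the values the code under test is
# expected to produce; e.g. expected_core_max_size mirrors the tuple returned
# by config_functions.parse_size_config.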
ANNOTATIONS_EXAMPLES = [
{
"starlingx.io/core_pattern": "test.core.%P.%U.%G.%S.%T.%E.%H", # All Upper Case
"starlingx.io/core_max_size": "200K", # Test Kilobytes and Upper case
"starlingx.io/core_compression": "lz4", # Test compression.
"starlingx.io/core_max_used": "20%", # Test maximum used space
"starlingx.io/core_min_free": "20%",
"expected_core_pattern": "test.core.999999.8.7.6.1671181200.process_name_for_k8s_handler.test_host",
"expected_core_max_size": (200.0, config_functions.file_size_properties['k']),
"expected_truncate_value": 0,
# The value here is 0 because the core_max_used is 20% and the test
# sets up the disk space to be 50% used.
"coredump_file_content": "0123456789012345678901234567890123456789",
"expected_write_content": "",
},
{
"starlingx.io/core_pattern": "test.core.%p.%u.%g.%s.%t.%e.%h", # All Lower Case
"starlingx.io/core_max_size": "20m", # Test Megabytes and Lower case
"expected_core_pattern": "test.core.999999.8.7.6.1671181200.process_name_for_k8s_handler.test_host",
"expected_core_max_size": (20.0, config_functions.file_size_properties['m']),
"expected_truncate_value": 20971520, # 20mb in Bytes
"coredump_file_content": "0123456789012345678901234567890123456789",
"expected_write_content": "0123456789012345678901234567890123456789",
},
{
"starlingx.io/core_pattern": "test.core.%P.%u.%G.%s.%t.%E.%h", # Mixed Case
"starlingx.io/core_max_size": "2G", # Test Gigabytes
"starlingx.io/core_min_free": "249G",
# The test is set up with 250GB of free space; configuring 249GB as
# core_min_free makes the file size limit 1GB.
"expected_core_pattern": "test.core.999999.8.7.6.1671181200.process_name_for_k8s_handler.test_host",
"expected_core_max_size": (2.0, config_functions.file_size_properties['g']),
"expected_truncate_value": 1073741824,
# 1GB in bytes, which is the last remaining 1GB free according to the core_min_free annotation.
"coredump_file_content": "0123456789012345678901234567890123456789",
"expected_write_content": "0123456789012345678901234567890123456789",
},
{
"starlingx.io/core_pattern": "", # Empty
"starlingx.io/core_max_size": "2%", # Percentage
"expected_core_pattern": "",
"expected_core_max_size": (2.0, config_functions.file_size_properties['%']),
"expected_truncate_value": 10737418240, # 10gb in Bytes, that is 2% of the 500GB of total disk space
"coredump_file_content": "0123456789012345678901234567890123456789",
"expected_write_content": "0123456789012345678901234567890123456789",
},
{
"starlingx.io/core_pattern": "test.core.%p.%u.%g.%s.%t.%e.%h", # All Lower Case
"starlingx.io/core_max_size": "10b", # Test bytes and Lower case
"expected_core_pattern": "test.core.999999.8.7.6.1671181200.process_name_for_k8s_handler.test_host",
"expected_core_max_size": (10.0, config_functions.file_size_properties['b']),
"expected_truncate_value": 10, # 10 Bytes
"coredump_file_content": "012345678901234567890123456789",
"expected_write_content": "0123456789",
},
{
"starlingx.io/core_pattern": "/var/log/coredump/test.core.%P.%u.%G.%s.%t.%E.%h", # With path
"expected_core_pattern":
"/var/log/coredump/test.core.999999.8.7.6.1671181200.process_name_for_k8s_handler.test_host",
"expected_truncate_value": 0, # No size limit
"coredump_file_content": "012345678901234567890123456789",
"expected_write_content": "012345678901234567890123456789",
},
]
# Expected values for token file
EXPECTED_TOKEN = "EXPECTED_KUBERNETES_COREDUMP_HANDLER_KUBECTL_TOKEN"
EXPECTED_TOKEN_PATH = constants.K8S_COREDUMP_CONF
EXPECTED_TOKEN_MODE = "r"
# Expected value for the pod UID
MOCKED_UID = "2284e2ba-cdaf-4558-907a-b9364b66f3e9"
# CGROUP file mock for coredump._getPodUID method test
CGROUP_FILE_MOCK = f"""
12:blkio:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
11:pids:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
10:hugetlb:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
9:perf_event:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
8:cpuset:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
7:memory:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
6:cpu,cpuacct:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
5:rdma:/
4:freezer:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
3:net_cls,net_prio:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
2:devices:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
1:name=systemd:/k8s-infra/kubepods/besteffort/pod{MOCKED_UID}/0123456789012345678901234567890123456789012345678901234567890123
0::/system.slice/containerd.service
"""
# Mocked pod information for the coredump._lookupPod method test
MOCKED_POD_INFO = f"""
{{
"metadata": {{
"uid": "{MOCKED_UID}",
"annotations":
{{
"starlingx.io/core_pattern": "test.core.%P.%U.%G.%S.%T.%E.%H",
"starlingx.io/core_max_size": "200K",
"starlingx.io/core_compression": "lz4",
"starlingx.io/core_max_used": "20%",
"starlingx.io/core_min_free": "20%"
}}
}}
}}
"""
MOCKED_PODS_REQUEST_RESPONSE = f"""
{{
"items": [
{MOCKED_POD_INFO}
]
}}
"""


@ -0,0 +1,364 @@
[MASTER]
# Specify a configuration file.
rcfile=pylint.rc
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. Should be base names, not paths.
ignore=
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=4
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=lxml.etree,greenlet
[MESSAGES CONTROL]
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time.
#
# Python3 checker:
#
# E1601: print-statement
# E1602: parameter-unpacking
# E1603: unpacking-in-except
# E1604: old-raise-syntax
# E1605: backtick
# E1606: long-suffix
# E1607: old-ne-operator
# E1608: old-octal-literal
# E1609: import-star-module-level
# E1610: non-ascii-bytes-literal
# E1611: invalid-unicode-literal
# W1601: apply-builtin
# W1602: basestring-builtin
# W1603: buffer-builtin
# W1604: cmp-builtin
# W1605: coerce-builtin
# W1606: execfile-builtin
# W1607: file-builtin
# W1608: long-builtin
# W1609: raw_input-builtin
# W1610: reduce-builtin
# W1611: standarderror-builtin
# W1612: unicode-builtin
# W1613: xrange-builtin
# W1614: coerce-method
# W1615: delslice-method
# W1616: getslice-method
# W1617: setslice-method
# W1618: no-absolute-import
# W1619: old-division
# W1620: dict-iter-method
# W1621: dict-view-method
# W1622: next-method-called
# W1623: metaclass-assignment
# W1624: indexing-exception
# W1625: raising-string
# W1626: reload-builtin
# W1627: oct-method
# W1628: hex-method
# W1629: nonzero-method
# W1630: cmp-method
# W1632: input-builtin
# W1633: round-builtin
# W1634: intern-builtin
# W1635: unichr-builtin
# W1636: map-builtin-not-iterating
# W1637: zip-builtin-not-iterating
# W1638: range-builtin-not-iterating
# W1639: filter-builtin-not-iterating
# W1640: using-cmp-argument
# W1641: eq-without-hash
# W1642: div-method
# W1643: idiv-method
# W1644: rdiv-method
# W1645: exception-message-attribute
# W1646: invalid-str-codec
# W1647: sys-max-int
# W1648: bad-python3-import
# W1649: deprecated-string-function
# W1650: deprecated-str-translate-call
# W1651: deprecated-itertools-function
# W1652: deprecated-types-field
# W1653: next-method-defined
# W1654: dict-items-not-iterating
# W1655: dict-keys-not-iterating
# W1656: dict-values-not-iterating
# W1657: deprecated-operator-function
# W1658: deprecated-urllib-function
# W1659: xreadlines-attribute
# W1660: deprecated-sys-function
# W1661: exception-escape
# W1662: comprehension-escape
enable=E1603,E1609,E1610,E1602,E1606,E1608,E1607,E1605,E1604,E1601,E1611,W1652,
W1651,W1649,W1657,W1660,W1658,W1659,W1623,W1622,W1620,W1621,W1645,W1641,
W1624,W1648,W1625,W1611,W1662,W1661,W1650,W1640,W1630,W1614,W1615,W1642,
W1616,W1628,W1643,W1629,W1627,W1644,W1617,W1601,W1602,W1603,W1604,W1605,
W1654,W1655,W1656,W1619,W1606,W1607,W1639,W1618,W1632,W1634,W1608,W1636,
W1653,W1646,W1638,W1609,W1610,W1626,W1633,W1647,W1635,W1612,W1613,W1637
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once).
# See "Messages Control" section of
# https://pylint.readthedocs.io/en/latest/user_guide
# We are disabling (C)onvention
# We are disabling (R)efactor
# We are selectively disabling (W)arning
# We are not disabling (F)atal, (E)rror
# The following warnings should be fixed:
# fixme (todo, xxx, fixme)
# W0101: unreachable
# W0105: pointless-string-statement
# W0106: expression-not-assigned
# W0107: unnecessary-pass
# W0108: unnecessary-lambda
# W0110: deprecated-lambda
# W0123: eval-used
# W0150: lost-exception
# W0201: attribute-defined-outside-init
# W0211: bad-staticmethod-argument
# W0212: protected-access
# W0221: arguments-differ
# W0223: abstract-method
# W0231: super-init-not-called
# W0235: useless-super-delegation
# W0311: bad-indentation
# W0402: deprecated-module
# W0403: relative-import
# W0404: reimported
# W0603: global-statement
# W0612: unused-variable
# W0613: unused-argument
# W0621: redefined-outer-name
# W0622: redefined-builtin
# W0631: undefined-loop-variable
# W0632: unbalanced-tuple-unpacking
# W0703: broad-except
# W0706: try-except-raise
# W1113: keyword-arg-before-vararg
# W1201: logging-not-lazy
# W1401: anomalous-backslash-in-string
# W1501: subprocess-popen-preexec-fn
# W1505: deprecated-method
# W1509: subprocess-popen-preexec-fn
# W1618: no-absolute-import
# All these errors should be fixed:
# E1101: no-member
# E1111: assignment-from-no-return
disable=C, R, fixme, W0101, W0105, W0106, W0107, W0108, W0110, W0123, W0150,
W0201, W0211, W0212, W0221, W0223, W0231, W0235, W0311, W0402, W0403,
W0404, W0603, W0612, W0613, W0621, W0622, W0631, W0632, W0701, W0703,
W0706, W1113, W1201, W1401, W1505, W1509, W1618,
E1101, E1111
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=yes
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=85
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually 4 spaces or "\t" (1 tab).
indent-string=' '
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis
ignored-modules=distutils,eventlet.green.subprocess,six,six.moves
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set).
# pylint is confused by sqlalchemy Table, as well as sqlalchemy Enum types
# ie: (unprovisioned, identity)
# LookupDict in requests library confuses pylint
ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local,
Table, unprovisioned, identity, LookupDict
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E0201 when accessed. Python regular
# expressions are accepted.
generated-members=REQUEST,acl_users,aq_parent
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input
# Regular expression which should only match correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression which should only match correct module level names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression which should only match correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match correct list comprehension
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Regular expression which should only match functions or classes name which do
# not require a docstring
no-docstring-rgx=__.*__
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the beginning of the name of dummy variables
# (i.e. not used).
dummy-variables-rgx=_|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branchs=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception


@ -0,0 +1,21 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=1.1.0,<=2.0.0 # Apache-2.0
astroid <= 2.2.5
bandit<1.7.2;python_version>="3.0"
coverage>=3.6
fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0.0 # BSD
python-subunit>=0.0.18
requests-mock>=0.6.0 # Apache-2.0
sphinx
oslosphinx
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testrepository>=0.0.18
testtools!=1.2.0,>=0.9.36
isort<5;python_version>="3.0"
pylint<2.1.0;python_version<"3.0" # GPLv2
pylint<2.4.0;python_version>="3.0" # GPLv2
pycryptodomex


@ -0,0 +1,181 @@
[tox]
envlist = flake8,py39,pylint
minversion = 1.6
skipsdist = True
# tox does not work if the path to the workdir is too long, so move it to /tmp
# tox 3.1.0 adds TOX_LIMITED_SHEBANG
toxworkdir = /tmp/{env:USER}_k8scoredumptox
stxdir = {toxinidir}/../../../..
distshare={toxworkdir}/.tox/distshare
[testenv]
usedevelop = True
basepython = python3
# tox is silly... these need to be separated by a newline....
whitelist_externals = bash
find
install_command = pip install -v -v -v \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \
{opts} {packages}
# Note the hash seed is set to 0 until it can be tested with a
# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
PIP_RESOLVER_DEBUG=1
PYTHONDONTWRITEBYTECODE=1
OS_TEST_PATH=./k8s_coredump/tests
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
TOX_WORK_DIR={toxworkdir}
PYLINTHOME={toxworkdir}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
find . -type f -name "*.pyc" -delete
[flake8]
# H series are hacking
# H101 is TODO
# H102 is apache license
# H104 file contains only comments (ie: license)
# H105 author tags
# H306 imports not in alphabetical order
# H401 docstring should not start with a space
# H403 multi line docstrings should end on a new line
# H404 multi line docstring should start without a leading new line
# H405 multi line docstring summary not separated with an empty line
# H701 Empty localization string
# H702 Formatting operation should be outside of localization method call
# H703 Multiple positional placeholders
# B series are bugbear
# B006 Do not use mutable data structures for argument defaults. Needs to be FIXED.
# B007 Loop control variable not used within the loop body.
# B009 Do not call getattr with a constant attribute value
# B010 Do not call setattr with a constant attribute value
# B012 return/continue/break inside finally blocks cause exceptions to be silenced
# B014 Redundant exception types
# B301 Python 3 does not include `.iter*` methods on dictionaries. (this should be suppressed on a per line basis)
# W series are warnings
# W503 line break before binary operator
# W504 line break after binary operator
# W605 invalid escape sequence
# E series are pep8
# E117 over-indented
# E126 continuation line over-indented for hanging indent
# E127 continuation line over-indented for visual indent
# E128 continuation line under-indented for visual indent
# E402 module level import not at top of file
# E741 ambiguous variable name
ignore = H101,H102,H104,H105,H306,H401,H403,H404,H405,H701,H702,H703,
B006,B007,B009,B010,B012,B014,B301
W503,W504,W605,
E117,E126,E127,E128,E402,E741
exclude = build,dist,tools,.eggs
max-line-length=120
[testenv:flake8]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands =
flake8 {posargs} . --exclude venv
[testenv:py39]
basepython = python3.9
install_command = pip install -v -v -v \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \
{opts} {packages}
commands =
stestr run {posargs}
stestr slowest
[testenv:pep8]
# testenv:flake8 clone
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
commands = {[testenv:flake8]commands}
[testenv:venv]
commands = {posargs}
[bandit]
# The following bandit tests are being skipped:
# B101: Test for use of assert
# B103: Test for setting permissive file permissions
# B104: Test for binding to all interfaces
# B105: Test for use of hard-coded password strings
# B108: Test for insecure usage of tmp file/directory
# B110: Try, Except, Pass detected.
# B303: Use of insecure MD2, MD4, MD5, or SHA1 hash function.
# B307: Blacklisted call to eval.
# B310: Audit url open for permitted schemes
# B311: Standard pseudo-random generators are not suitable for security/cryptographic purposes
# B314: Blacklisted calls to xml.etree.ElementTree
# B318: Blacklisted calls to xml.dom.minidom
# B320: Blacklisted calls to lxml.etree
# B404: Import of subprocess module
# B405: import xml.etree
# B408: import xml.minidom
# B410: import lxml
# B501: Test for missing certificate validation
# B506: Test for use of yaml load
# B602: Test for use of popen with shell equals true
# B603: Test for use of subprocess without shell equals true
# B604: Test for any function with shell equals true
# B605: Test for starting a process with a shell
# B607: Test for starting a process with a partial path
# B608: Possible SQL injection vector through string-based query
#
# Note: 'skips' entry cannot be split across multiple lines
#
skips = B101,B103,B104,B105,B108,B110,B303,B307,B310,B311,B314,B318,B320,B404,B405,B408,B410,B501,B506,B602,B603,B604,B605,B607,B608
exclude = tests
[testenv:bandit]
basepython = python3.9
deps = -r{toxinidir}/test-requirements.txt
commands = bandit --ini tox.ini -n 5 -r k8s_coredump
[testenv:pylint]
basepython = python3.9
commands =
pylint {posargs} k8s_coredump --rcfile=./pylint.rc
[testenv:cover]
basepython = python3.9
install_command = pip install -v -v -v \
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \
{opts} {packages}
passenv = CURL_CA_BUNDLE
deps = {[testenv]deps}
setenv = {[testenv]setenv}
PYTHON=coverage run --parallel-mode
commands =
{[testenv]commands}
coverage erase
stestr run {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
coverage report
[testenv:pip-missing-reqs]
# do not install test-requirements as that will pollute the virtualenv for
# determining missing packages
# this also means that pip-missing-reqs must be installed separately, outside
# of the requirements.txt files
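# Run on demand with: tox -e pip-missing-reqs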
deps = pip_missing_reqs
-rrequirements.txt
commands=pip-missing-reqs -d --ignore-file=/k8s_coredump/tests k8s_coredump