Address bandit gate failures
This change addresses the results that were found when running bandit against the templated python files in the various charts. This also makes the bandit gate only run when python template files are changed as well as makes the job voting. Change-Id: Ia158f5f9d6d791872568dafe8bce69575fece5aa
This commit is contained in:
parent
92dfac645a
commit
86e56b2aee
@ -1,6 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import subprocess
|
||||
import subprocess # nosec
|
||||
import json
|
||||
import sys
|
||||
from argparse import *
|
||||
@ -60,7 +60,7 @@ class cephCRUSH():
|
||||
if 'all' in poolName or 'All' in poolName:
|
||||
try:
|
||||
poolLs = 'ceph osd pool ls -f json-pretty'
|
||||
poolstr = subprocess.check_output(poolLs, shell=True)
|
||||
poolstr = subprocess.check_output(poolLs, shell=True) # nosec
|
||||
self.listPoolName = json.loads(poolstr)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print('{}'.format(e))
|
||||
@ -72,7 +72,7 @@ class cephCRUSH():
|
||||
try:
|
||||
"""Retrieve the crush hierarchies"""
|
||||
crushTree = "ceph osd crush tree -f json-pretty | jq .nodes"
|
||||
chstr = subprocess.check_output(crushTree, shell=True)
|
||||
chstr = subprocess.check_output(crushTree, shell=True) # nosec
|
||||
self.crushHierarchy = json.loads(chstr)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print('{}'.format(e))
|
||||
@ -107,8 +107,8 @@ class cephCRUSH():
|
||||
self.poolSize = 0
|
||||
|
||||
def isNautilus(self):
|
||||
grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True))
|
||||
return True if grepResult == 0 else False
|
||||
grepResult = int(subprocess.check_output('ceph mon versions | egrep -q "nautilus" | echo $?', shell=True)) # nosec
|
||||
return grepResult == 0
|
||||
|
||||
def getPoolSize(self, poolName):
|
||||
"""
|
||||
@ -119,7 +119,7 @@ class cephCRUSH():
|
||||
"""Get the size attribute of the poolName"""
|
||||
try:
|
||||
poolGet = 'ceph osd pool get ' + poolName + ' size -f json-pretty'
|
||||
szstr = subprocess.check_output(poolGet, shell=True)
|
||||
szstr = subprocess.check_output(poolGet, shell=True) # nosec
|
||||
pSize = json.loads(szstr)
|
||||
self.poolSize = pSize['size']
|
||||
except subprocess.CalledProcessError as e:
|
||||
@ -130,7 +130,7 @@ class cephCRUSH():
|
||||
|
||||
def checkPGs(self, poolName):
|
||||
poolPGs = self.poolPGs['pg_stats'] if self.isNautilus() else self.poolPGs
|
||||
if len(poolPGs) == 0:
|
||||
if not poolPGs:
|
||||
return
|
||||
print('Checking PGs in pool {} ...'.format(poolName)),
|
||||
badPGs = False
|
||||
@ -160,7 +160,8 @@ class cephCRUSH():
|
||||
"""traverse up (to the root) one level"""
|
||||
traverseID = self.crushFD[traverseID]['id']
|
||||
traverseLevel += 1
|
||||
assert (traverseLevel == self.osd_depth), "OSD depth mismatch"
|
||||
if not (traverseLevel == self.osd_depth):
|
||||
raise Exception("OSD depth mismatch")
|
||||
"""
|
||||
check_FD should have
|
||||
{
|
||||
@ -214,12 +215,13 @@ class cephCRUSH():
|
||||
elif self.poolSize == 0:
|
||||
print('Pool {} was not found.'.format(pool))
|
||||
continue
|
||||
assert (self.poolSize > 1), "Pool size was incorrectly set"
|
||||
if not self.poolSize > 1:
|
||||
raise Exception("Pool size was incorrectly set")
|
||||
|
||||
try:
|
||||
"""Get the list of PGs in the pool"""
|
||||
lsByPool = 'ceph pg ls-by-pool ' + pool + ' -f json-pretty'
|
||||
pgstr = subprocess.check_output(lsByPool, shell=True)
|
||||
pgstr = subprocess.check_output(lsByPool, shell=True) # nosec
|
||||
self.poolPGs = json.loads(pgstr)
|
||||
"""Check that OSDs in the PG are in separate failure domains"""
|
||||
self.checkPGs(pool)
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
import re
|
||||
import os
|
||||
import subprocess
|
||||
import subprocess # nosec
|
||||
import json
|
||||
|
||||
MON_REGEX = r"^\d: ([0-9\.]*):\d+/\d* mon.([^ ]*)$"
|
||||
@ -15,7 +15,7 @@ monmap_command = "ceph --cluster=${NAMESPACE} mon getmap > /tmp/monmap && monmap
|
||||
|
||||
|
||||
def extract_mons_from_monmap():
|
||||
monmap = subprocess.check_output(monmap_command, shell=True)
|
||||
monmap = subprocess.check_output(monmap_command, shell=True) # nosec
|
||||
mons = {}
|
||||
for line in monmap.split("\n"):
|
||||
m = re.match(MON_REGEX, line)
|
||||
@ -24,7 +24,7 @@ def extract_mons_from_monmap():
|
||||
return mons
|
||||
|
||||
def extract_mons_from_kubeapi():
|
||||
kubemap = subprocess.check_output(kubectl_command, shell=True)
|
||||
kubemap = subprocess.check_output(kubectl_command, shell=True) # nosec
|
||||
return json.loads(kubemap)
|
||||
|
||||
current_mons = extract_mons_from_monmap()
|
||||
@ -37,11 +37,11 @@ removed_mon = False
|
||||
for mon in current_mons:
|
||||
if not mon in expected_mons:
|
||||
print("removing zombie mon %s" % mon)
|
||||
subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon])
|
||||
subprocess.call(["ceph", "--cluster", os.environ["NAMESPACE"], "mon", "remove", mon]) # nosec
|
||||
removed_mon = True
|
||||
elif current_mons[mon] != expected_mons[mon]: # check if for some reason the ip of the mon changed
|
||||
print("ip change detected for pod %s" % mon)
|
||||
subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon])
|
||||
subprocess.call(["kubectl", "--namespace", os.environ["NAMESPACE"], "delete", "pod", mon]) # nosec
|
||||
removed_mon = True
|
||||
print("deleted mon %s via the kubernetes api" % mon)
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import subprocess
|
||||
import subprocess # nosec
|
||||
import json
|
||||
import sys
|
||||
import collections
|
||||
@ -11,7 +11,7 @@ if (int(len(sys.argv)) == 1):
|
||||
else:
|
||||
poolName = sys.argv[1]
|
||||
cmdRep = 'ceph osd map' + ' ' + str(poolName) + ' ' + 'testreplication -f json-pretty'
|
||||
objectRep = subprocess.check_output(cmdRep, shell=True)
|
||||
objectRep = subprocess.check_output(cmdRep, shell=True) # nosec
|
||||
repOut = json.loads(objectRep)
|
||||
osdNumbers = repOut['up']
|
||||
print("Test object got replicated on these osds: %s" % str(osdNumbers))
|
||||
@ -19,7 +19,7 @@ else:
|
||||
osdHosts= []
|
||||
for osd in osdNumbers:
|
||||
cmdFind = 'ceph osd find' + ' ' + str(osd)
|
||||
osdFind = subprocess.check_output(cmdFind , shell=True)
|
||||
osdFind = subprocess.check_output(cmdFind , shell=True) # nosec
|
||||
osdHost = json.loads(osdFind)
|
||||
osdHostLocation = osdHost['crush_location']
|
||||
osdHosts.append(osdHostLocation['host'])
|
||||
|
@ -21,7 +21,7 @@ import logging
|
||||
import os
|
||||
import select
|
||||
import signal
|
||||
import subprocess
|
||||
import subprocess # nosec
|
||||
import socket
|
||||
import sys
|
||||
import tempfile
|
||||
@ -142,7 +142,7 @@ def run_cmd_with_logging(popenargs,
|
||||
stderr_log_level=logging.INFO,
|
||||
**kwargs):
|
||||
"""Run subprocesses and stream output to logger."""
|
||||
child = subprocess.Popen(
|
||||
child = subprocess.Popen( # nosec
|
||||
popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
|
||||
log_level = {
|
||||
child.stdout: stdout_log_level,
|
||||
@ -266,7 +266,7 @@ def mysqld_bootstrap():
|
||||
], logger)
|
||||
if not mysql_dbaudit_username:
|
||||
template = (
|
||||
"DELETE FROM mysql.user ;\n"
|
||||
"DELETE FROM mysql.user ;\n" # nosec
|
||||
"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n"
|
||||
"GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n"
|
||||
"DROP DATABASE IF EXISTS test ;\n"
|
||||
@ -277,7 +277,7 @@ def mysqld_bootstrap():
|
||||
mysql_dbsst_username, mysql_dbsst_password))
|
||||
else:
|
||||
template = (
|
||||
"DELETE FROM mysql.user ;\n"
|
||||
"DELETE FROM mysql.user ;\n" # nosec
|
||||
"CREATE OR REPLACE USER '{0}'@'%' IDENTIFIED BY \'{1}\' ;\n"
|
||||
"GRANT ALL ON *.* TO '{0}'@'%' WITH GRANT OPTION ;\n"
|
||||
"DROP DATABASE IF EXISTS test ;\n"
|
||||
@ -537,7 +537,7 @@ def update_grastate_on_restart():
|
||||
|
||||
def recover_wsrep_position():
|
||||
"""Extract recoved wsrep position from uncleanly exited node."""
|
||||
wsrep_recover = subprocess.Popen(
|
||||
wsrep_recover = subprocess.Popen( # nosec
|
||||
[
|
||||
'mysqld', '--bind-address=127.0.0.1',
|
||||
'--wsrep_cluster_address=gcomm://', '--wsrep-recover'
|
||||
|
@ -59,7 +59,7 @@ def click_link_by_name(link_name):
|
||||
browser.quit()
|
||||
sys.exit(1)
|
||||
|
||||
def take_screenshot(page_name, artifacts_dir='/tmp/artifacts/'):
|
||||
def take_screenshot(page_name, artifacts_dir='/tmp/artifacts/'): # nosec
|
||||
file_name = page_name.replace(' ', '_')
|
||||
try:
|
||||
el = WebDriverWait(browser, 15)
|
||||
@ -130,7 +130,7 @@ except TimeoutException:
|
||||
sys.exit(1)
|
||||
|
||||
logger.info("The following screenshots were captured:")
|
||||
for root, dirs, files in os.walk("/tmp/artifacts/"):
|
||||
for root, dirs, files in os.walk("/tmp/artifacts/"): # nosec
|
||||
for name in files:
|
||||
logger.info(os.path.join(root, name))
|
||||
|
||||
|
@ -12,7 +12,7 @@ class RedisTest(object):
|
||||
|
||||
def test_connection(self):
|
||||
ping = self.redis_conn.ping()
|
||||
assert ping, "No connection to database"
|
||||
if not ping: raise Exception('No connection to database')
|
||||
print("Successfully connected to database")
|
||||
|
||||
def database_info(self):
|
||||
@ -20,29 +20,29 @@ class RedisTest(object):
|
||||
for client in self.redis_conn.client_list():
|
||||
ip_port.append(client["addr"])
|
||||
print(ip_port)
|
||||
assert self.redis_conn.client_list(), "Database client's list is null"
|
||||
if not self.redis_conn.client_list():
|
||||
raise Exception('Database client list is null')
|
||||
return ip_port
|
||||
|
||||
def test_insert_delete_data(self):
|
||||
key = "test"
|
||||
value = "it's working"
|
||||
result_set = self.redis_conn.set(key, value)
|
||||
assert result_set, "Error: SET command failed"
|
||||
if not result_set: raise Exception('ERROR: SET command failed')
|
||||
print("Successfully SET keyvalue pair")
|
||||
result_get = self.redis_conn.get(key)
|
||||
assert result_get, "Error: GET command failed"
|
||||
if not result_get: raise Exception('ERROR: GET command failed')
|
||||
print("Successfully GET keyvalue pair")
|
||||
db_size = self.redis_conn.dbsize()
|
||||
assert db_size > 0, "Database size not valid"
|
||||
if db_size <= 0: raise Exception("Database size not valid")
|
||||
result_delete = self.redis_conn.delete(key)
|
||||
assert result_delete == 1, "Error: Delete command failed"
|
||||
if not result_delete == 1: raise Exception("Error: Delete command failed")
|
||||
print("Successfully DELETED keyvalue pair")
|
||||
|
||||
def test_client_kill(self, client_ip_port_list):
|
||||
for client_ip_port in client_ip_port_list:
|
||||
result = self.redis_conn.client_kill(client_ip_port)
|
||||
print(result)
|
||||
assert result, "Client failed to be removed"
|
||||
if not result: raise Exception('Client failed to be removed')
|
||||
print("Successfully DELETED client")
|
||||
|
||||
|
||||
|
@ -34,11 +34,9 @@
|
||||
name: openstack-helm-infra-bandit
|
||||
run: playbooks/osh-infra-bandit.yaml
|
||||
nodeset: openstack-helm-single-node
|
||||
# Note(gagehugo): Uncomment this once it passes so that it only runs
|
||||
# when python related files are changed.
|
||||
# files:
|
||||
# - ^.*\.py\.tpl$
|
||||
# - ^.*\.py$
|
||||
files:
|
||||
- ^.*\.py\.tpl$
|
||||
- ^.*\.py$
|
||||
|
||||
- job:
|
||||
name: openstack-helm-infra
|
||||
|
@ -19,8 +19,7 @@
|
||||
check:
|
||||
jobs:
|
||||
- openstack-helm-lint
|
||||
- openstack-helm-infra-bandit:
|
||||
voting: false
|
||||
- openstack-helm-infra-bandit
|
||||
- openstack-helm-infra-aio-logging
|
||||
- openstack-helm-infra-aio-monitoring
|
||||
- openstack-helm-infra-federated-monitoring:
|
||||
|
Loading…
Reference in New Issue
Block a user