Enable service tenant deployment model by default in DevStack
The service tenant deployment model means Trove creates most of the resources (VMs, volumes, security groups, etc.) related to a database instance in the Trove service tenant rather than in the end user's project. With this deployment model, most of the related resources behind the scenes are invisible to the user, which is a more secure deployment model for both private and public cloud providers. DevStack should follow this model, as it is the recommended way to deploy Trove in production.

Changes included in this patch that are necessary to make that happen:
- Add the 'admin' role to the Trove service user (username: trove, project: service) in DevStack.
- Create the Trove management network resources for the Trove service user in DevStack.
- Enable the Trove remote client configuration by default in DevStack.
- Mainly use the alt_demo user in the alt_demo project for integration tests, and configure the trove user as the admin-role user in the integration tests.
- Disable the module-related tests (module_groups) for now: there is no current use case, and fixing all the tests that fail under the service tenant model would take significant effort.

Story: #2005445
Task: #30489
Change-Id: I2efb69d3d50344914a875b773f62a227dba2ccaf
parent 37e8dedac2
commit 2e052b0262
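The practical difference between the two deployment models is easiest to see from where the Nova servers end up. The following is a minimal verification sketch, not part of the patch itself; it assumes a stacked DevStack host with admin credentials loaded and the default project names used below.

    # With the service tenant model, the Nova servers, ports and volumes that
    # back Trove instances are owned by the 'service' project ...
    openstack server list --project service --long
    # ... while the database instances themselves are still listed per user.
    openstack database instance list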
@@ -51,8 +51,20 @@ function setup_trove_logging {

function create_trove_accounts {
if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
create_service_user "trove" "admin"

# Add trove user to the clouds.yaml
CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml}
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud trove \
--os-region-name $REGION_NAME \
$CA_CERT_ARG \
--os-auth-url $KEYSTONE_SERVICE_URI \
--os-username trove \
--os-password $SERVICE_PASSWORD \
--os-project-name $SERVICE_PROJECT_NAME

create_service_user "trove"

local trove_service=$(get_or_create_service "trove" \
"database" "Trove Service")
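For reference, the update_clouds_yaml.py call above registers a "trove" entry in clouds.yaml, so the plugin (and an operator debugging it) can run commands as the Trove service user. A hypothetical usage sketch:

    # Issue a token and list networks as the Trove service user registered above;
    # the cloud name 'trove' and the region come from the update_clouds_yaml.py call.
    openstack --os-cloud trove token issue
    openstack --os-cloud trove network list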
@@ -216,6 +228,15 @@ function configure_trove {

configure_auth_token_middleware $TROVE_CONF trove
iniset $TROVE_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF DEFAULT nova_proxy_admin_user trove
iniset $TROVE_CONF DEFAULT nova_proxy_admin_tenant_name $SERVICE_PROJECT_NAME
iniset $TROVE_CONF DEFAULT nova_proxy_admin_pass $SERVICE_PASSWORD
iniset $TROVE_CONF DEFAULT nova_proxy_admin_user_domain_name default
iniset $TROVE_CONF DEFAULT nova_proxy_admin_project_domain_name default
iniset $TROVE_CONF DEFAULT os_region_name $REGION_NAME
iniset $TROVE_CONF DEFAULT remote_nova_client trove.common.single_tenant_remote.nova_client_trove_admin
iniset $TROVE_CONF DEFAULT remote_cinder_client trove.common.single_tenant_remote.cinder_client_trove_admin
iniset $TROVE_CONF DEFAULT remote_neutron_client trove.common.single_tenant_remote.neutron_client_trove_admin
fi

# configure apache related files
@@ -239,10 +260,16 @@ function configure_trove {

iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove`
iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user trove
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name $SERVICE_PROJECT_NAME
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $SERVICE_PASSWORD
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user_domain_name default
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_project_domain_name default
iniset $TROVE_TASKMANAGER_CONF DEFAULT os_region_name $REGION_NAME
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_nova_client trove.common.single_tenant_remote.nova_client_trove_admin
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_cinder_client trove.common.single_tenant_remote.cinder_client_trove_admin
iniset $TROVE_TASKMANAGER_CONF DEFAULT remote_neutron_client trove.common.single_tenant_remote.neutron_client_trove_admin

iniset $TROVE_TASKMANAGER_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
iniset $TROVE_TASKMANAGER_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
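After stacking, the effect of these iniset calls (the single-tenant remote clients and the proxy admin credentials) can be spot-checked directly in the rendered config files. A quick sketch, assuming the plugin's default config paths:

    grep -E 'remote_(nova|cinder|neutron)_client|nova_proxy_admin' \
        /etc/trove/trove.conf /etc/trove/trove-taskmanager.conf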
@@ -329,33 +356,6 @@ function init_trove {
# Initialize the trove database
$TROVE_MANAGE db_sync

# Add an admin user to the 'tempest' alt_demo tenant.
# This is needed to test the guest_log functionality.
# The first part mimics the tempest setup, so make sure we have that.
ALT_USERNAME=${ALT_USERNAME:-alt_demo}
ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
ALT_TENANT_ID=$(get_or_create_project ${ALT_TENANT_NAME} default)
get_or_create_user ${ALT_USERNAME} "$ADMIN_PASSWORD" "default" "alt_demo@example.com"
get_or_add_user_project_role Member ${ALT_USERNAME} ${ALT_TENANT_NAME}

# The second part adds an admin user to the tenant.
ADMIN_ALT_USERNAME=${ADMIN_ALT_USERNAME:-admin_${ALT_USERNAME}}
get_or_create_user ${ADMIN_ALT_USERNAME} "$ADMIN_PASSWORD" "default" "admin_alt_demo@example.com"
get_or_add_user_project_role admin ${ADMIN_ALT_USERNAME} ${ALT_TENANT_NAME}
# Now add these credentials to the clouds.yaml file
ADMIN_ALT_DEMO_CLOUD=devstack-alt-admin
CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml}
$TOP_DIR/tools/update_clouds_yaml.py \
--file ${CLOUDS_YAML} \
--os-cloud ${ADMIN_ALT_DEMO_CLOUD} \
--os-region-name ${REGION_NAME} \
--os-identity-api-version 3 \
${CA_CERT_ARG} \
--os-auth-url ${KEYSTONE_AUTH_URI} \
--os-username ${ADMIN_ALT_USERNAME} \
--os-password ${ADMIN_PASSWORD} \
--os-project-name ${ALT_TENANT_NAME}

# build and upload sample Trove mysql instance if not set otherwise
TROVE_DISABLE_IMAGE_SETUP=`echo ${TROVE_DISABLE_IMAGE_SETUP} | tr '[:upper:]' '[:lower:]'`
if [[ ${TROVE_DISABLE_IMAGE_SETUP} != "true" ]]; then
@@ -399,44 +399,29 @@ function init_trove {
fi
}

# Create private IPv4 subnet
# Note: This was taken from devstack:lib/neutron_plugins/services/l3 and will need to be maintained
function _create_private_subnet_v4 {
function create_mgmt_subnet_v4 {
local project_id=$1
local net_id=$2
local name=${3:-$PRIVATE_SUBNET_NAME}
local os_cloud=${4:-devstack-admin}
local name=$3
local ip_range=$4

local subnet_params="--project $project_id "
subnet_params+="--ip-version 4 "
if [[ -n "$NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $NETWORK_GATEWAY "
fi
if [ -n "$SUBNETPOOL_V4_ID" ]; then
subnet_params+="--subnet-pool $SUBNETPOOL_V4_ID "
else
subnet_params+="--subnet-range $FIXED_RANGE "
fi
subnet_params+="--network $net_id $name"
local subnet_id
subnet_id=$(openstack --os-cloud $os_cloud --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
subnet_id=$(openstack subnet create --project ${project_id} --ip-version 4 --subnet-range ${ip_range} --gateway none --network ${net_id} $name -c id -f value)
die_if_not_set $LINENO subnet_id "Failed to create private IPv4 subnet for network: ${net_id}, project: ${project_id}"
echo $subnet_id
}

# Create private IPv6 subnet
# Note: This was taken from devstack:lib/neutron_plugins/services/l3 and will need to be maintained
function _create_private_subnet_v6 {
# Note: Trove is not fully tested in IPv6.
function _create_subnet_v6 {
local project_id=$1
local net_id=$2
local name=${3:-$IPV6_PRIVATE_SUBNET_NAME}
local os_cloud=${4:-devstack-admin}
local name=$3
local subnet_params="--ip-version 6 "

die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
local subnet_params="--project $project_id "
subnet_params+="--ip-version 6 "

if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
fi
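A hypothetical call of the new create_mgmt_subnet_v4 helper, mirroring how it is used later in finalize_trove_network; the network and subnet names and the CIDR are the plugin defaults introduced by this patch, not new requirements:

    # Create the management subnet in the Trove service project; the helper
    # prints the new subnet ID on success.
    project_id=$(openstack project show service -c id -f value)
    net_id=$(openstack network show trove-mgmt -c id -f value)
    subnet_id=$(create_mgmt_subnet_v4 ${project_id} ${net_id} trove-mgmt-subnet 192.168.254.0/24)
    echo "Created management subnet: ${subnet_id}"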
@@ -446,42 +431,40 @@ function _create_private_subnet_v6 {
subnet_params+="--subnet-range $FIXED_RANGE_V6 $ipv6_modes} "
fi
subnet_params+="--network $net_id $name "
local ipv6_subnet_id
ipv6_subnet_id=$(openstack --os-cloud $os_cloud --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"

ipv6_subnet_id=$(openstack --project ${project_id} subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failed to create private IPv6 subnet for network: ${net_id}, project: ${project_id}"
echo $ipv6_subnet_id
}

# Set up a network on the alt_demo tenant. Requires ROUTER_ID, REGION_NAME and IP_VERSION to be set
function set_up_network() {
local CLOUD_USER=$1
local PROJECT_ID=$2
local NET_NAME=$3
local SUBNET_NAME=$4
local IPV6_SUBNET_NAME=$5
local SHARED=$6
function setup_mgmt_network() {
local PROJECT_ID=$1
local NET_NAME=$2
local SUBNET_NAME=$3
local SUBNET_RANGE=$4
local SHARED=$5

local share_flag=""
if [[ "${SHARED}" == "TRUE" ]]; then
share_flag="--share"
fi

NEW_NET_ID=$(openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" network create --project ${PROJECT_ID} ${share_flag} "$NET_NAME" | grep ' id ' | get_field 2)
if [[ "$IP_VERSION" =~ 4.* ]]; then
NEW_SUBNET_ID=$(_create_private_subnet_v4 ${PROJECT_ID} ${NEW_NET_ID} ${SUBNET_NAME} ${CLOUD_USER})
openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $NEW_SUBNET_ID
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
NEW_IPV6_SUBNET_ID=$(_create_private_subnet_v6 ${PROJECT_ID} ${NEW_NET_ID} ${IPV6_SUBNET_NAME} ${CLOUD_USER})
openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $NEW_IPV6_SUBNET_ID
fi
network_id=$(openstack network create --project ${PROJECT_ID} ${share_flag} $NET_NAME -c id -f value)
die_if_not_set $LINENO network_id "Failed to create network: $NET_NAME, project: ${PROJECT_ID}"

echo $NEW_NET_ID
if [[ "$IP_VERSION" =~ 4.* ]]; then
NEW_SUBNET_ID=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
openstack router add subnet $ROUTER_ID $NEW_SUBNET_ID
fi
# Trove doesn't support IPv6 for now.
# if [[ "$IP_VERSION" =~ .*6 ]]; then
# NEW_IPV6_SUBNET_ID=$(_create_subnet_v6 ${PROJECT_ID} ${network_id} ${IPV6_SUBNET_NAME})
# openstack router add subnet $ROUTER_ID $NEW_IPV6_SUBNET_ID
# fi
}

# finalize_trove_network() - do the last thing(s) before starting Trove
# Set up Trove management network and make configuration change.
function finalize_trove_network {

echo "Finalizing Neutron networking for Trove"
echo "Dumping current network parameters:"
echo " SERVICE_HOST: $SERVICE_HOST"
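For clarity, this is roughly how the new setup_mgmt_network helper is driven (a sketch using the default TROVE_MGMT_* values defined elsewhere in this patch; ROUTER_ID and IP_VERSION are expected to already be set by DevStack):

    project_id=$(openstack project show service -c id -f value)
    setup_mgmt_network ${project_id} trove-mgmt trove-mgmt-subnet 192.168.254.0/24 FALSE
    # The resulting network ID is what ends up in management_networks below.
    openstack network show trove-mgmt -c id -f value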
@@ -496,47 +479,65 @@ function finalize_trove_network {
echo " SUBNETPOOL_SIZE_V4: $SUBNETPOOL_SIZE_V4"
echo " SUBNETPOOL_V4_ID: $SUBNETPOOL_V4_ID"
echo " ROUTER_GW_IP: $ROUTER_GW_IP"
echo " TROVE_MGMT_SUBNET_RANGE: ${TROVE_MGMT_SUBNET_RANGE}"

# Create the net/subnet for the alt_demo tenant so the int-tests have a proper network
echo "Creating network/subnets for ${ALT_TENANT_NAME} project"
ALT_PRIVATE_NETWORK_NAME=${TROVE_PRIVATE_NETWORK_NAME}
ALT_PRIVATE_SUBNET_NAME=${TROVE_PRIVATE_SUBNET_NAME}
ALT_PRIVATE_IPV6_SUBNET_NAME=ipv6-${ALT_PRIVATE_SUBNET_NAME}
ALT_NET_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_PRIVATE_NETWORK_NAME $ALT_PRIVATE_SUBNET_NAME $ALT_PRIVATE_IPV6_SUBNET_NAME $TROVE_SHARE_NETWORKS)
echo "Created network ${ALT_PRIVATE_NETWORK_NAME} (${ALT_NET_ID})"
echo "Creating Trove management network/subnet for Trove service project."
trove_service_project_id=$(openstack project show $SERVICE_PROJECT_NAME -c id -f value)
setup_mgmt_network ${trove_service_project_id} ${TROVE_MGMT_NETWORK_NAME} ${TROVE_MGMT_SUBNET_NAME} ${TROVE_MGMT_SUBNET_RANGE}
mgmt_net_id=$(openstack network show ${TROVE_MGMT_NETWORK_NAME} -c id -f value)
echo "Created Trove management network ${TROVE_MGMT_NETWORK_NAME}(${mgmt_net_id})"

# Set up a management network to test that functionality
ALT_MGMT_NETWORK_NAME=trove-mgmt
ALT_MGMT_SUBNET_NAME=${ALT_MGMT_NETWORK_NAME}-subnet
ALT_MGMT_IPV6_SUBNET_NAME=ipv6-${ALT_MGMT_SUBNET_NAME}
ALT_MGMT_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_MGMT_NETWORK_NAME $ALT_MGMT_SUBNET_NAME $ALT_MGMT_IPV6_SUBNET_NAME $TROVE_SHARE_NETWORKS)
echo "Created network ${ALT_MGMT_NETWORK_NAME} (${ALT_MGMT_ID})"
# Create security group for trove management network. For testing purpose,
# we allow everything. In production, the security group should be managed
# by the cloud admin.
SG_NAME=trove-mgmt
openstack security group create --project ${trove_service_project_id} ${SG_NAME}
openstack security group rule create --proto icmp --project ${trove_service_project_id} ${SG_NAME}
openstack security group rule create --protocol tcp --dst-port 1:65535 --project ${trove_service_project_id} ${SG_NAME}
openstack security group rule create --protocol udp --dst-port 1:65535 --project ${trove_service_project_id} ${SG_NAME}

# Make sure we can reach the VMs
local replace_range=${SUBNETPOOL_PREFIX_V4}
if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
replace_range=${FIXED_RANGE}
# Share the private network to other projects for testing purpose. We make
# the private network accessible to control plane below so that we could
# reach the private network for integration tests without floating ips
# associated, no matter which user the tests are using.
shared=$(openstack network show ${PRIVATE_NETWORK_NAME} -c shared -f value)
if [[ "$shared" == "False" ]]; then
openstack network set ${PRIVATE_NETWORK_NAME} --share
fi
sudo ip route replace $replace_range via $ROUTER_GW_IP
sudo ip route replace ${IPV4_ADDRS_SAFE_TO_USE} via $ROUTER_GW_IP

# Make sure we can reach the management port of the service VM, this
# configuration is only for testing purpose. In production, it's
# recommended to config the router in the cloud infrastructure for the
# communication between Trove control plane and service VMs.
INTERFACE=trove-mgmt
MGMT_PORT_ID=$(openstack port create --project ${trove_service_project_id} --security-group ${SG_NAME} --device-owner trove --network ${TROVE_MGMT_NETWORK_NAME} --host=$(hostname) -c id -f value ${INTERFACE}-port)
MGMT_PORT_MAC=$(openstack port show -c mac_address -f value $MGMT_PORT_ID)
MGMT_PORT_IP=$(openstack port show -f value -c fixed_ips $MGMT_PORT_ID | awk '{FS=",| "; gsub(",",""); gsub("'\''",""); for(i = 1; i <= NF; ++i) {if ($i ~ /^ip_address/) {n=index($i, "="); if (substr($i, n+1) ~ "\\.") print substr($i, n+1)}}}')
sudo ovs-vsctl -- --may-exist add-port ${OVS_BRIDGE:-br-int} $INTERFACE -- set Interface $INTERFACE type=internal -- set Interface $INTERFACE external-ids:iface-status=active -- set Interface $INTERFACE external-ids:attached-mac=$MGMT_PORT_MAC -- set Interface $INTERFACE external-ids:iface-id=$MGMT_PORT_ID -- set Interface $INTERFACE external-ids:skip_cleanup=true
sudo ip link set dev $INTERFACE address $MGMT_PORT_MAC
mask=$(echo ${TROVE_MGMT_SUBNET_RANGE} | awk -F'/' '{print $2}')
sudo ip addr add ${MGMT_PORT_IP}/${mask} dev $INTERFACE
sudo ip link set $INTERFACE up
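Once the trove-mgmt OVS internal port is plugged and addressed as above, the DevStack host can reach guest agents over the management subnet directly. A quick sanity check might look like this (the guest address is a placeholder in the default 192.168.254.0/24 range):

    # The host side of the management network.
    ip addr show trove-mgmt
    sudo ovs-vsctl list-ports ${OVS_BRIDGE:-br-int} | grep trove-mgmt
    # Ping a guest's management IP.
    ping -c 3 192.168.254.10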
echo "Neutron network list:"
|
||||
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network list
|
||||
openstack network list
|
||||
echo "Neutron subnet list:"
|
||||
openstack subnet list
|
||||
echo "ip route:"
|
||||
sudo ip route
|
||||
|
||||
# Now make sure the conf settings are right
|
||||
iniset $TROVE_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
|
||||
iniset $TROVE_CONF DEFAULT network_label_regex ${PRIVATE_NETWORK_NAME}
|
||||
iniset $TROVE_CONF DEFAULT ip_regex ""
|
||||
iniset $TROVE_CONF DEFAULT black_list_regex ""
|
||||
# Don't use a default network for now, until the neutron issues are figured out
|
||||
#iniset $TROVE_CONF DEFAULT management_networks "${ALT_MGMT_ID}"
|
||||
iniset $TROVE_CONF DEFAULT management_networks ""
|
||||
iniset $TROVE_CONF DEFAULT management_networks ${mgmt_net_id}
|
||||
iniset $TROVE_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
|
||||
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex ${PRIVATE_NETWORK_NAME}
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT ip_regex ""
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT black_list_regex ""
|
||||
# Don't use a default network for now, until the neutron issues are figured out
|
||||
#iniset $TROVE_TASKMANAGER_CONF DEFAULT management_networks "${ALT_MGMT_ID}"
|
||||
iniset $TROVE_CONF DEFAULT management_networks ""
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT management_networks ${mgmt_net_id}
|
||||
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
|
||||
}
|
||||
|
||||
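The net effect of this block is that both trove.conf and trove-taskmanager.conf point management_networks at the new management network ID and match user instances on the regular private network. A quick check after stacking (paths are the plugin defaults):

    grep -E 'network_label_regex|management_networks|network_driver' \
        /etc/trove/trove.conf /etc/trove/trove-taskmanager.conf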
@@ -624,15 +625,22 @@ function _setup_minimal_image {
export ELEMENTS_PATH+=:$TRIPLEO_IMAGES_DIR/elements
fi

export DIB_APT_CONF_DIR=/etc/apt/apt.conf.d
export DIB_CLOUD_INIT_ETC_HOSTS=true
export QEMU_IMG_OPTIONS="--qemu-img-options compat=1.1"
export RELEASE=${RELEASE:-'xenial'}
export DIB_APT_CONF_DIR=/etc/apt/apt.conf.d
export DIB_CLOUD_INIT_ETC_HOSTS=true
export DIB_RELEASE=${RELEASE:-'xenial'}

# https://cloud-images.ubuntu.com/releases is more stable than the daily
# builds(https://cloud-images.ubuntu.com/xenial/current/),
# e.g. sometimes SHA256SUMS file is missing in the daily builds
declare -A releasemapping=( ["xenial"]="16.04" ["bionic"]="18.04")
export DIB_CLOUD_IMAGES="https://cloud-images.ubuntu.com/releases/${DIB_RELEASE}/release/"
export BASE_IMAGE_FILE="ubuntu-${releasemapping[${DIB_RELEASE}]}-server-cloudimg-amd64-root.tar.gz"

export TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-'/etc/trove/trove-guestagent.conf'}

if [ -d ${SSH_DIR} ]; then
if [[ -d ${SSH_DIR} && -f ${SSH_DIR}/id_rsa.pub ]]; then
cat ${SSH_DIR}/id_rsa.pub >> ${SSH_DIR}/authorized_keys
sort ${SSH_DIR}/authorized_keys | uniq > ${SSH_DIR}/authorized_keys.uniq
mv ${SSH_DIR}/authorized_keys.uniq ${SSH_DIR}/authorized_keys
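The releasemapping array simply translates the Ubuntu code name into the version string used in the released-image file names. For example (only xenial and bionic are mapped above):

    declare -A releasemapping=( ["xenial"]="16.04" ["bionic"]="18.04" )
    DIB_RELEASE=bionic
    echo "https://cloud-images.ubuntu.com/releases/${DIB_RELEASE}/release/ubuntu-${releasemapping[${DIB_RELEASE}]}-server-cloudimg-amd64-root.tar.gz"
    # -> .../releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64-root.tar.gz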
@@ -46,15 +46,16 @@ TROVE_MAX_VOLUMES_PER_TENANT=${TROVE_MAX_VOLUMES_PER_TENANT}
TROVE_AGENT_CALL_LOW_TIMEOUT=${TROVE_AGENT_CALL_LOW_TIMEOUT}
TROVE_AGENT_CALL_HIGH_TIMEOUT=${TROVE_AGENT_CALL_HIGH_TIMEOUT:-1200}
TROVE_RESIZE_TIME_OUT=${TROVE_RESIZE_TIME_OUT}
TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT}
TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-900}
TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME}
TROVE_COMMAND_PROCESS_TIMEOUT=${TROVE_COMMAND_PROCESS_TIMEOUT:-60}

# Set up the host gateway
if is_service_enabled neutron; then
TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
TROVE_PRIVATE_NETWORK_NAME=${TROVE_PRIVATE_NETWORK_NAME:-alt-private}
TROVE_PRIVATE_SUBNET_NAME=${TROVE_PRIVATE_SUBNET_NAME:-${TROVE_PRIVATE_NETWORK_NAME}-subnet}
TROVE_MGMT_NETWORK_NAME=${TROVE_MGMT_NETWORK_NAME:-"trove-mgmt"}
TROVE_MGMT_SUBNET_NAME=${TROVE_MGMT_SUBNET_NAME:-${TROVE_MGMT_NETWORK_NAME}-subnet}
TROVE_MGMT_SUBNET_RANGE=${TROVE_MGMT_SUBNET_RANGE:-"192.168.254.0/24"}
else
TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
fi
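Because the new TROVE_MGMT_* variables follow the usual DevStack default pattern, they can be overridden per deployment before stacking. A hypothetical local.conf override (not part of the patch):

    # local.conf
    [[local|localrc]]
    TROVE_MGMT_NETWORK_NAME=trove-mgmt
    TROVE_MGMT_SUBNET_RANGE=192.168.200.0/24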
@@ -45,13 +45,13 @@
"trove_max_volumes_per_user": 100,
"use_reaper":false,
"users": [
{ "auth_user":"admin",
"auth_key":"%admin_password%",
"tenant":"admin",
"tenant_id":"%admin_tenant_id%",
{ "auth_user":"trove",
"auth_key":"%service_password%",
"tenant":"service",
"tenant_id":"%service_tenant_id%",
"requirements": {
"is_admin":true,
"services": ["trove"]
"services": ["trove", "swift"]
}
},
{ "auth_user":"alt_demo",
@@ -60,16 +60,7 @@
"tenant_id":"%alt_demo_tenant_id%",
"requirements": {
"is_admin":false,
"services": ["trove"]
}
},
{ "auth_user":"admin_alt_demo",
"auth_key":"%admin_password%",
"tenant":"alt_demo",
"tenant_id":"%alt_demo_tenant_id%",
"requirements": {
"is_admin":true,
"services": ["swift"]
"services": ["trove", "swift"]
}
},
{ "auth_user":"demo",
@@ -99,6 +90,7 @@
"neutron_enabled": %neutron_enabled%,
"swift_enabled": %swift_enabled%,
"shared_network": "%shared_network%",
"trove_mgmt_network": "trove-mgmt",
"shared_network_subnet": "%shared_network_subnet%",
"instance_fault_1_flavor_name": "test.fault_1-1",
"instance_fault_1_eph_flavor_name": "test.eph.fault_1-1",
@@ -722,10 +722,11 @@ function mod_confs() {
sed -i "s/%region_name%/${REGION_NAME}/g" $TEST_CONF

# Add the tenant id's into test.conf
sed -i "s/%admin_tenant_id%/$(get_attribute_id project admin 1)/g" $TEST_CONF
sed -i "s/%service_tenant_id%/$(get_attribute_id project service 1)/g" $TEST_CONF
sed -i "s/%alt_demo_tenant_id%/$(get_attribute_id project alt_demo 1)/g" $TEST_CONF
sed -i "s/%demo_tenant_id%/$(get_attribute_id project demo 1)/g" $TEST_CONF
sed -i "s/%admin_password%/$ADMIN_PASSWORD/g" $TEST_CONF
sed -i "s/%service_password%/$SERVICE_PASSWORD/g" $TEST_CONF

# Enable neutron tests if needed
sed -i "s/%neutron_enabled%/$ENABLE_NEUTRON/g" $TEST_CONF
@@ -733,9 +734,8 @@ function mod_confs() {
# Enable backup related tests if Swift is enabled
sed -i "s/%swift_enabled%/$ENABLE_SWIFT/g" $TEST_CONF

# If neutron is enabled, the devstack plugin will have created an alt_demo
# network - write this info to the confs so that the integration tests can
# use it.
# If neutron is enabled, the devstack plugin has already set up the shared
# private network for testing.
if [[ $ENABLE_NEUTRON = true ]]; then
TROVE_NET_ID=$(openstack $CLOUD_ADMIN_ARG network list | grep " $TROVE_PRIVATE_NETWORK_NAME " | awk '{print $2}')
TROVE_SUBNET_ID=$(openstack $CLOUD_ADMIN_ARG subnet list | grep " $TROVE_PRIVATE_SUBNET_NAME " | awk '{print $2}')
@@ -69,8 +69,8 @@ TROVE_DASHBOARD_REPO=${TROVE_DASHBOARD_REPO:-${TROVEDASHBOARD_REPO:-${GIT_OPENST
TROVE_DASHBOARD_DIR=${TROVE_DASHBOARD_DIR:-${TROVEDASHBOARD_DIR:-${PATH_TROVE_DASHBOARD}}}
TROVE_DASHBOARD_BRANCH=${TROVE_DASHBOARD_BRANCH:-${TROVEDASHBOARD_BRANCH:-master}}
# Trove specific networking options
TROVE_PRIVATE_NETWORK_NAME=alt-private
TROVE_PRIVATE_SUBNET_NAME=alt-private-subnet
TROVE_PRIVATE_NETWORK_NAME=private
TROVE_PRIVATE_SUBNET_NAME=private-subnet

# Destination for working data
DATA_DIR=${DEST}/data
@@ -87,7 +87,7 @@ MYSQL_PASSWORD=e1a2c042c828d3566d0a
RABBIT_PASSWORD=f7999d1955c5014aa32c
SERVICE_TOKEN=be19c524ddc92109a224
ADMIN_PASSWORD=${ADMIN_PASSWORD:-${OS_PASSWORD:-3de4922d8b6ac5a1aad9}}
SERVICE_PASSWORD=7de4162d826bc5a11ad9
SERVICE_PASSWORD=${SERVICE_PASSWORD:-"secretservice"}

# Swift hash used by devstack.
SWIFT_HASH=12go358snjw24501
@@ -27,3 +27,4 @@ doc8>=0.6.0 # Apache-2.0
astroid==1.6.5 # LGPLv2.1
pylint==1.9.2 # GPLv2
oslotest>=3.2.0 # Apache-2.0
tenacity>=4.9.0 # Apache-2.0
@@ -67,7 +67,7 @@ class InnoBackupEx(base.BackupRunner):

@property
def user_and_pass(self):
return (' --user=%(user)s --password=%(password)s ' %
return (' --user=%(user)s --password=%(password)s --host=127.0.0.1 ' %
{'user': ADMIN_USER_NAME,
'password': MySqlApp.get_auth_password()})
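With the --host=127.0.0.1 addition, the backup runner connects to MySQL over TCP on the loopback address instead of the default socket. The expanded command line looks roughly like the sketch below; the user name and password are placeholders standing in for ADMIN_USER_NAME and MySqlApp.get_auth_password(), and the remaining options are assumptions about the runner's command template:

    innobackupex --user=os_admin --password=SECRET --host=127.0.0.1 \
        --stream=xbstream /var/lib/mysql/data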
@@ -1015,8 +1015,10 @@ class Instance(BuiltInstance):
if not nics:
nics = []
if CONF.management_networks:
nics = [{"net-id": net_id}
for net_id in CONF.management_networks] + nics
# Make sure management network interface is always configured after
# user defined instance.
nics = nics + [{"net-id": net_id}
for net_id in CONF.management_networks]
if nics:
call_args['nics'] = nics
if cluster_config:
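The reordering above keeps the user-requested NIC in the first position and appends the management NIC after it, so the guest's first interface stays on the user network. This can be spot-checked against Nova once an instance is up (a sketch; the network names follow the DevStack defaults in this patch):

    # Take the compute server ID from the Trove management API or Horizon.
    openstack server show <nova-server-id> -c addresses -f value
    # expected: the user/shared network listed first, then trove-mgmt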
@@ -1,10 +1,26 @@
# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

DBAAS_API = "dbaas.api"
PRE_INSTANCES = "dbaas.api.pre_instances"
INSTANCES = "dbaas.api.instances"
POST_INSTANCES = "dbaas.api.post_instances"
SSH_CMD = ('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no '
+ ('-o LogLevel=quiet -i '
+ os.environ.get('TROVE_TEST_SSH_KEY_FILE')
if 'TROVE_TEST_SSH_KEY_FILE' in os.environ else ""))

# Use '-t' to avoid the warning message 'mesg: ttyname failed: Inappropriate
# ioctl for device'
SSH_CMD = ("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
"-o LogLevel=quiet -t -i %s" %
os.environ.get("TROVE_TEST_SSH_KEY_FILE", ""))
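With this change, the integration tests always pass the key file through -i and allocate a TTY. The expanded command looks roughly like this (key path and guest address are placeholders):

    export TROVE_TEST_SSH_KEY_FILE=$HOME/.ssh/id_rsa
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
        -o LogLevel=quiet -t -i $TROVE_TEST_SSH_KEY_FILE ubuntu@192.168.254.10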
@@ -30,12 +30,13 @@ from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.api import instances_actions
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements

GROUP = "dbaas.api.backups"
BACKUP_GROUP = "dbaas.api.backups"
BACKUP_NAME = 'backup_test'
BACKUP_DESC = 'test description'

@@ -51,8 +52,8 @@ backup_count_prior_to_create = 0
backup_count_for_instance_prior_to_create = 0

@test(depends_on_classes=[WaitForGuestInstallationToFinish],
groups=[GROUP, tests.INSTANCES],
@test(depends_on_groups=[instances_actions.GROUP_STOP_MYSQL],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class CreateBackups(object):

@@ -142,13 +143,11 @@ class BackupRestoreMixin(object):
time_out=TIMEOUT_INSTANCE_CREATE)

@test(runs_after=[CreateBackups],
groups=[GROUP, tests.INSTANCES],
@test(depends_on_classes=[CreateBackups],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class WaitForBackupCreateToFinish(BackupRestoreMixin):
"""
Wait until the backup create is finished.
"""
"""Wait until the backup creation is finished."""

@test
@time_out(TIMEOUT_BACKUP_CREATE)
@@ -158,7 +157,7 @@ class WaitForBackupCreateToFinish(BackupRestoreMixin):

@test(depends_on=[WaitForBackupCreateToFinish],
groups=[GROUP, tests.INSTANCES],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class ListBackups(object):

@@ -247,7 +246,7 @@ class ListBackups(object):

@test(runs_after=[ListBackups],
depends_on=[WaitForBackupCreateToFinish],
groups=[GROUP, tests.INSTANCES],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class IncrementalBackups(BackupRestoreMixin):

@@ -275,7 +274,7 @@ class IncrementalBackups(BackupRestoreMixin):
assert_equal(backup_info.id, incremental_info.parent_id)

@test(groups=[GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled)
@test(groups=[BACKUP_GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled)
class RestoreUsingBackup(object):

@classmethod
@@ -299,15 +298,15 @@ class RestoreUsingBackup(object):
incremental_restore_instance_id = self._restore(incremental_info.id)

@test(depends_on_classes=[WaitForGuestInstallationToFinish],
runs_after_groups=['dbaas.api.configurations.define'],
groups=[GROUP, tests.INSTANCES],
@test(depends_on_classes=[RestoreUsingBackup],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class WaitForRestoreToFinish(object):

@classmethod
def _poll(cls, instance_id_to_poll):
"""Shared "instance restored" test logic."""

# This version just checks the REST API status.
def result_is_active():
instance = instance_info.dbaas.instances.get(instance_id_to_poll)
@@ -324,9 +323,6 @@ class WaitForRestoreToFinish(object):
poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_CREATE,
sleep_time=10)

"""
Wait until the instance is finished restoring from incremental backup.
"""
@test(depends_on=[RestoreUsingBackup.test_restore_incremental])
def test_instance_restored_incremental(self):
try:
@@ -336,7 +332,8 @@ class WaitForRestoreToFinish(object):

@test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled),
groups=[GROUP, tests.INSTANCES])
depends_on=[WaitForRestoreToFinish],
groups=[BACKUP_GROUP, tests.INSTANCES])
class VerifyRestore(object):

@classmethod
@@ -361,7 +358,8 @@ class VerifyRestore(object):
fail('Timed out')

@test(groups=[GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled)
@test(groups=[BACKUP_GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled,
depends_on=[VerifyRestore])
class DeleteRestoreInstance(object):

@classmethod
@@ -389,8 +387,8 @@ class DeleteRestoreInstance(object):
fail('Timed out')

@test(runs_after=[DeleteRestoreInstance],
groups=[GROUP, tests.INSTANCES],
@test(depends_on=[DeleteRestoreInstance],
groups=[BACKUP_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class DeleteBackups(object):
@@ -34,13 +34,13 @@ import six
from troveclient.compat import exceptions

from trove.common.utils import poll_until
from trove.tests.api.backups import RestoreUsingBackup
from trove import tests
from trove.tests.api import backups
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import InstanceTestInfo
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util.check import CollectionCheck
@@ -50,8 +50,9 @@ from trove.tests.util.mysql import create_mysql_connection
from trove.tests.util.users import Requirements

GROUP = "dbaas.api.configurations"
CONFIGURATION_GROUP = "dbaas.api.configurations"
GROUP_CONFIG_DEFINE = "dbaas.api.configurations.define"
CONFIG_NEW_INSTANCE_GROUP = "dbaas.api.configurations.newinstance"
CONFIG_NAME = "test_configuration"
CONFIG_DESC = "configuration description"

@@ -77,11 +78,12 @@ def _is_valid_timestamp(time_string):

# helper methods to validate configuration is applied to instance
def _execute_query(host, user_name, password, query):
print(host, user_name, password, query)
print("Starting to query database, host: %s, user: %s, password: %s, "
"query: %s" % (host, user_name, password, query))

with create_mysql_connection(host, user_name, password) as db:
result = db.execute(query)
return result
assert_true(False, "something went wrong in the sql connection")

def _get_address(instance_id):
@@ -178,9 +180,8 @@ class ConfigurationsTestBase(object):
return datastore_test_configs.get("configurations", {})

@test(depends_on_classes=[WaitForGuestInstallationToFinish],
runs_after=[RestoreUsingBackup],
groups=[GROUP, GROUP_CONFIG_DEFINE])
@test(depends_on_groups=[backups.BACKUP_GROUP],
groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
class CreateConfigurations(ConfigurationsTestBase):

@test
@@ -314,8 +315,8 @@ class CreateConfigurations(ConfigurationsTestBase):
assert_equal(resp.status, 200)

@test(runs_after=[CreateConfigurations],
groups=[GROUP, GROUP_CONFIG_DEFINE])
@test(depends_on=[CreateConfigurations],
groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
class AfterConfigurationsCreation(ConfigurationsTestBase):

@test
@@ -341,32 +342,6 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)

@test
def test_assign_name_to_instance_using_patch(self):
# test assigning a name to an instance
new_name = 'new_name_1'
report = CONFIG.get_report()
report.log("instance_info.id: %s" % instance_info.id)
report.log("instance name:%s" % instance_info.name)
report.log("instance new name:%s" % new_name)
instance_info.dbaas.instances.edit(instance_info.id, name=new_name)
assert_equal(202, instance_info.dbaas.last_http_code)
check = instance_info.dbaas.instances.get(instance_info.id)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal(check.name, new_name)
# Restore instance name
instance_info.dbaas.instances.edit(instance_info.id,
name=instance_info.name)
assert_equal(202, instance_info.dbaas.last_http_code)

@test
def test_assign_configuration_to_invalid_instance_using_patch(self):
# test assign config group to an invalid instance
invalid_id = "invalid-inst-id"
assert_raises(exceptions.NotFound,
instance_info.dbaas.instances.edit,
invalid_id, configuration=configuration_info.id)

@test(depends_on=[test_assign_configuration_to_valid_instance])
def test_assign_configuration_to_instance_with_config(self):
# test assigning a configuration to an instance that
@@ -384,7 +359,7 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
inst = instance_info.dbaas.instances.get(instance_info.id)
configuration_id = inst.configuration['id']
print("configuration_info: %s" % configuration_id)
assert_not_equal(None, inst.configuration['id'])
assert_not_equal(None, configuration_id)
_test_configuration_is_applied_to_instance(instance_info,
configuration_id)

@@ -453,8 +428,8 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
configuration_info.id)

@test(runs_after=[AfterConfigurationsCreation],
groups=[GROUP, GROUP_CONFIG_DEFINE])
@test(depends_on=[AfterConfigurationsCreation],
groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
class ListConfigurations(ConfigurationsTestBase):

@test
@@ -571,8 +546,8 @@ class ListConfigurations(ConfigurationsTestBase):
assert_equal(list_config[0].updated, details_config.updated)

@test(runs_after=[ListConfigurations],
groups=[GROUP, GROUP_CONFIG_DEFINE])
@test(depends_on=[ListConfigurations],
groups=[CONFIGURATION_GROUP, CONFIG_NEW_INSTANCE_GROUP, tests.INSTANCES])
class StartInstanceWithConfiguration(ConfigurationsTestBase):

@test
@@ -614,8 +589,7 @@ class StartInstanceWithConfiguration(ConfigurationsTestBase):

@test(depends_on_classes=[StartInstanceWithConfiguration],
runs_after_groups=['dbaas.api.backups'],
groups=[GROUP])
groups=[CONFIGURATION_GROUP, CONFIG_NEW_INSTANCE_GROUP, tests.INSTANCES])
class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):

@test
@@ -637,15 +611,16 @@ class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
@test(depends_on=[test_instance_with_configuration_active])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
# validate that the configuration was applied correctly to the instance
"""Test configuration is applied correctly to the instance."""
inst = instance_info.dbaas.instances.get(configuration_instance.id)
configuration_id = inst.configuration['id']
assert_not_equal(None, inst.configuration['id'])
assert_not_equal(None, configuration_id)
_test_configuration_is_applied_to_instance(configuration_instance,
configuration_id)

@test(runs_after=[WaitForConfigurationInstanceToFinish], groups=[GROUP])
@test(depends_on=[WaitForConfigurationInstanceToFinish],
groups=[CONFIGURATION_GROUP, tests.INSTANCES])
class DeleteConfigurations(ConfigurationsTestBase):

@before_class
@@ -849,7 +824,7 @@ class DeleteConfigurations(ConfigurationsTestBase):

@test(runs_after=[test_assign_config_and_name_to_instance_using_patch])
def test_unassign_configuration_after_patch(self):
# remove the configuration from the instance
"""Remove the configuration from the instance"""
instance_info.dbaas.instances.edit(instance_info.id,
remove_configuration=True)
assert_equal(202, instance_info.dbaas.last_http_code)
@@ -15,13 +15,11 @@

import netaddr
import os
import re
import time
from time import sleep
import unittest
import uuid

from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
@@ -85,14 +83,17 @@ class InstanceTestInfo(object):
self.dbaas_inactive_datastore_version = None # The DS inactive id
self.id = None # The ID of the instance in the database.
self.local_id = None

# The IP address of the database instance for the user.
self.address = None
# The management network IP address.
self.mgmt_address = None

self.nics = None # The dict of type/id for nics used on the instance.
shared_network = CONFIG.get('shared_network', None)
if shared_network:
self.nics = [{'net-id': shared_network}]
self.initial_result = None # The initial result from the create call.
self.user_ip = None # The IP address of the instance, given to user.
self.infra_ip = None # The infrastructure network IP address.
self.result = None # The instance info returned by the API
self.nova_client = None # The instance of novaclient.
self.volume_client = None # The instance of the volume client.
@@ -126,17 +127,30 @@ class InstanceTestInfo(object):
"Flavor href '%s' not found!" % flavor_name)
return flavor, flavor_href

def get_address(self):
def get_address(self, mgmt=False):
if mgmt:
if self.mgmt_address:
return self.mgmt_address

mgmt_netname = test_config.get("trove_mgmt_network", "trove-mgmt")
result = self.dbaas_admin.mgmt.instances.show(self.id)
if not hasattr(result, 'hostname'):
try:
return next(str(ip) for ip in result.ip
if netaddr.valid_ipv4(ip))
except StopIteration:
fail("No IPV4 ip found")
mgmt_interfaces = result.server['addresses'].get(mgmt_netname, [])
mgmt_addresses = [str(inf["addr"]) for inf in mgmt_interfaces
if inf["version"] == 4]
if len(mgmt_addresses) == 0:
fail("No IPV4 ip found for management network.")
self.mgmt_address = mgmt_addresses[0]
return self.mgmt_address
else:
return [str(ip) for ip in result.server['addresses']
if netaddr.valid_ipv4(ip)]
if self.address:
return self.address

result = self.dbaas.instances.get(self.id)
addresses = [str(ip) for ip in result.ip if netaddr.valid_ipv4(ip)]
if len(addresses) == 0:
fail("No IPV4 ip found for database network.")
self.address = addresses[0]
return self.address

def get_local_id(self):
mgmt_instance = self.dbaas_admin.management.show(self.id)
@@ -985,12 +999,6 @@ class TestGuestProcess(object):
assert_true(isinstance(hwinfo.hwinfo['mem_total'], int))
assert_true(isinstance(hwinfo.hwinfo['num_cpus'], int))

@test
def grab_diagnostics_before_tests(self):
if CONFIG.test_mgmt:
diagnostics = dbaas_admin.diagnostics.get(instance_info.id)
diagnostic_tests_helper(diagnostics)

@test(depends_on_classes=[WaitForGuestInstallationToFinish],
groups=[GROUP, GROUP_TEST, "dbaas.dns"])
@@ -1243,34 +1251,18 @@ class TestCreateNotification(object):
**expected)

@test(depends_on_groups=['dbaas.api.instances.actions'],
groups=[GROUP, tests.INSTANCES, "dbaas.diagnostics"])
class CheckDiagnosticsAfterTests(object):
"""Check the diagnostics after running api commands on an instance."""
@test
def test_check_diagnostics_on_instance_after_tests(self):
diagnostics = dbaas_admin.diagnostics.get(instance_info.id)
assert_equal(200, dbaas.last_http_code)
diagnostic_tests_helper(diagnostics)
msg = "Fat Pete has emerged. size (%s > 30MB)" % diagnostics.vmPeak
assert_true(diagnostics.vmPeak < (30 * 1024), msg)

@test(depends_on=[WaitForGuestInstallationToFinish],
depends_on_groups=[GROUP_USERS, GROUP_DATABASES, GROUP_ROOT],
groups=[GROUP, GROUP_STOP],
runs_after_groups=[GROUP_START,
GROUP_START_SIMPLE, GROUP_TEST, tests.INSTANCES])
GROUP_START_SIMPLE, GROUP_TEST, tests.INSTANCES],
enabled=not do_not_delete_instance())
class DeleteInstance(object):
"""Delete the created instance."""

@time_out(3 * 60)
@test
def test_delete(self):
if do_not_delete_instance():
CONFIG.get_report().log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
"specified, skipping delete...")
raise SkipTest("TESTS_DO_NOT_DELETE_INSTANCE was specified.")
global dbaas
if not hasattr(instance_info, "initial_result"):
raise SkipTest("Instance was never created, skipping test...")
@@ -1294,27 +1286,10 @@ class DeleteInstance(object):
fail("A failure occurred when trying to GET instance %s for the %d"
" time: %s" % (str(instance_info.id), attempts, str(ex)))

@time_out(30)
@test(enabled=VOLUME_SUPPORT,
depends_on=[test_delete])
def test_volume_is_deleted(self):
try:
while True:
instance = dbaas.instances.get(instance_info.id)
assert_equal(instance.volume['status'], "available")
time.sleep(1)
except exceptions.NotFound:
pass
except Exception as ex:
fail("Failure: %s" % str(ex))

# TODO(tim-simpson): make sure that the actual instance, volume,
# guest status, and DNS entries are deleted.

@test(depends_on=[WaitForGuestInstallationToFinish],
runs_after=[DeleteInstance],
groups=[GROUP, GROUP_STOP, 'dbaas.usage'])
@test(depends_on=[DeleteInstance],
groups=[GROUP, GROUP_STOP, 'dbaas.usage'],
enabled=not do_not_delete_instance())
class AfterDeleteChecks(object):

@test
@@ -1616,27 +1591,3 @@ class BadInstanceStatusBug(object):
self.instances.remove(id)
except exceptions.UnprocessableEntity:
sleep(1.0)

def diagnostic_tests_helper(diagnostics):
print("diagnostics : %r" % diagnostics._info)
allowed_attrs = ['version', 'fdSize', 'vmSize', 'vmHwm', 'vmRss',
'vmPeak', 'threads']
CheckInstance(None).contains_allowed_attrs(
diagnostics._info, allowed_attrs,
msg="Diagnostics")
assert_true(isinstance(diagnostics.fdSize, int))
assert_true(isinstance(diagnostics.threads, int))
assert_true(isinstance(diagnostics.vmHwm, int))
assert_true(isinstance(diagnostics.vmPeak, int))
assert_true(isinstance(diagnostics.vmRss, int))
assert_true(isinstance(diagnostics.vmSize, int))
actual_version = diagnostics.version
update_test_conf = CONFIG.values.get("guest-update-test", None)
if update_test_conf is not None:
if actual_version == update_test_conf['next-version']:
return # This is acceptable but may not match the regex.
version_pattern = re.compile(r'[a-f0-9]+')
msg = "Version %s does not match pattern %s." % (actual_version,
version_pattern)
assert_true(version_pattern.match(actual_version), msg)
@@ -22,8 +22,6 @@ from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from sqlalchemy import exc as sqlalchemy_exc
from sqlalchemy.sql.expression import text
from troveclient.compat.exceptions import BadRequest
from troveclient.compat.exceptions import HTTPNotImplemented

@@ -38,7 +36,6 @@ from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.config import CONFIG
import trove.tests.util as testsutil
from trove.tests.util.check import Checker
from trove.tests.util.check import TypeCheck
from trove.tests.util import LocalSqlClient
from trove.tests.util.server_connection import create_server_connection
@@ -46,7 +43,7 @@ from trove.tests.util.server_connection import create_server_connection
GROUP = "dbaas.api.instances.actions"
GROUP_REBOOT = "dbaas.api.instances.actions.reboot"
GROUP_RESTART = "dbaas.api.instances.actions.restart"
GROUP_RESIZE = "dbaas.api.instances.actions.resize.instance"
GROUP_RESIZE = "dbaas.api.instances.actions.resize"
GROUP_STOP_MYSQL = "dbaas.api.instances.actions.stop"
MYSQL_USERNAME = "test_user"
MYSQL_PASSWORD = "abcde"
@@ -73,19 +70,27 @@ class MySqlConnection(object):
self.client = LocalSqlClient(sql_engine, use_flush=False)

def is_connected(self):
cmd = "SELECT 1;"
try:
with self.client:
self.client.execute(text("""SELECT "Hello.";"""))
self.client.execute(cmd)
return True
except (sqlalchemy_exc.OperationalError,
sqlalchemy_exc.DisconnectionError,
sqlalchemy_exc.TimeoutError):
except Exception as e:
print(
"Failed to execute command: %s, error: %s" % (cmd, str(e))
)
return False

def execute(self, cmd):
try:
with self.client:
self.client.execute(cmd)
return True
except Exception as e:
print(
"Failed to execute command: %s, error: %s" % (cmd, str(e))
)
return False
except Exception as ex:
print("EX WAS:")
print(type(ex))
print(ex)
raise ex

# Use default value from trove.common.cfg, and it could be overridden by
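The simplified is_connected() check amounts to running a trivial query against the instance's user-network address. From the test host, the equivalent manual check is roughly (placeholder address; the credentials are the test constants defined above):

    mysql -h 10.1.0.25 -u test_user -pabcde -e "SELECT 1;"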
@ -124,6 +129,10 @@ class ActionTestBase(object):
|
||||
def instance_address(self):
|
||||
return instance_info.get_address()
|
||||
|
||||
@property
|
||||
def instance_mgmt_address(self):
|
||||
return instance_info.get_address(mgmt=True)
|
||||
|
||||
@property
|
||||
def instance_id(self):
|
||||
return instance_info.id
|
||||
@ -144,28 +153,34 @@ class ActionTestBase(object):
|
||||
time.sleep(5)
|
||||
|
||||
def ensure_mysql_is_running(self):
|
||||
"""Make sure MySQL is accessible before restarting."""
|
||||
with Checker() as check:
|
||||
if USE_IP:
|
||||
self.connection.connect()
|
||||
check.true(self.connection.is_connected(),
|
||||
"Able to connect to MySQL.")
|
||||
asserts.assert_true(self.connection.is_connected(),
|
||||
"Unable to connect to MySQL.")
|
||||
|
||||
self.proc_id = self.find_mysql_proc_on_instance()
|
||||
check.true(self.proc_id is not None,
|
||||
asserts.assert_is_not_none(self.proc_id,
|
||||
"MySQL process can not be found.")
|
||||
instance = self.instance
|
||||
check.false(instance is None)
|
||||
check.equal(instance.status, "ACTIVE")
|
||||
|
||||
asserts.assert_is_not_none(self.instance)
|
||||
asserts.assert_equal(self.instance.status, "ACTIVE")
|
||||
|
||||
def find_mysql_proc_on_instance(self):
|
||||
server = create_server_connection(self.instance_id)
|
||||
cmd = "ps acux | grep mysqld " \
|
||||
server = create_server_connection(
|
||||
self.instance_id,
|
||||
ip_address=self.instance_mgmt_address
|
||||
)
|
||||
cmd = "sudo ps acux | grep mysqld " \
|
||||
"| grep -v mysqld_safe | awk '{print $2}'"
|
||||
stdout, _ = server.execute(cmd)
|
||||
|
||||
try:
|
||||
stdout = server.execute(cmd)
|
||||
return int(stdout)
|
||||
except ValueError:
|
||||
return None
|
||||
except Exception as e:
|
||||
asserts.fail("Failed to execute command: %s, error: %s" %
|
||||
(cmd, str(e)))
|
||||
|
||||
def log_current_users(self):
|
||||
users = self.dbaas.users.list(self.instance_id)
|
||||
@ -246,22 +261,36 @@ class RebootTestBase(ActionTestBase):
|
||||
|
||||
def mess_up_mysql(self):
|
||||
"""Ruin MySQL's ability to restart."""
|
||||
server = create_server_connection(self.instance_id)
|
||||
cmd = "sudo cp /dev/null /var/lib/mysql/data/ib_logfile%d"
|
||||
server = create_server_connection(self.instance_id,
|
||||
self.instance_mgmt_address)
|
||||
cmd_template = "sudo cp /dev/null /var/lib/mysql/data/ib_logfile%d"
|
||||
instance_info.dbaas_admin.management.stop(self.instance_id)
|
||||
|
||||
for index in range(2):
|
||||
server.execute(cmd % index)
|
||||
cmd = cmd_template % index
|
||||
try:
|
||||
server.execute(cmd)
|
||||
except Exception as e:
|
||||
asserts.fail("Failed to execute command %s, error: %s" %
|
||||
(cmd, str(e)))
|
||||
|
||||
def fix_mysql(self):
|
||||
"""Fix MySQL's ability to restart."""
|
||||
if not FAKE_MODE:
|
||||
server = create_server_connection(self.instance_id)
|
||||
cmd = "sudo rm /var/lib/mysql/data/ib_logfile%d"
|
||||
server = create_server_connection(self.instance_id,
|
||||
self.instance_mgmt_address)
|
||||
cmd_template = "sudo rm /var/lib/mysql/data/ib_logfile%d"
|
||||
# We want to stop mysql so that upstart does not keep trying to
|
||||
# respawn it and block the guest agent from accessing the logs.
|
||||
instance_info.dbaas_admin.management.stop(self.instance_id)
|
||||
|
||||
for index in range(2):
|
||||
server.execute(cmd % index)
|
||||
cmd = cmd_template % index
|
||||
try:
|
||||
server.execute(cmd)
|
||||
except Exception as e:
|
||||
asserts.fail("Failed to execute command %s, error: %s" %
|
||||
(cmd, str(e)))
|
||||
|
||||
def wait_for_failure_status(self):
|
||||
"""Wait until status becomes running."""
|
||||
@ -404,8 +433,7 @@ class RebootTests(RebootTestBase):
|
||||
self.successful_restart()
|
||||
|
||||
|
||||
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP,
|
||||
GROUP_RESIZE],
|
||||
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESIZE],
|
||||
depends_on_groups=[GROUP_START], depends_on=[create_user],
|
||||
runs_after=[RebootTests])
|
||||
class ResizeInstanceTest(ActionTestBase):
|
||||
@ -534,24 +562,18 @@ class ResizeInstanceTest(ActionTestBase):
|
||||
# a resize. The code below is an attempt to catch this while proceeding
|
||||
# with the rest of the test (note the use of runs_after).
|
||||
if USE_IP:
|
||||
self.connection.connect()
|
||||
if not self.connection.is_connected():
|
||||
# Ok, this is def. a failure, but before we toss up an error
|
||||
# lets recreate to see how far we can get.
|
||||
CONFIG.get_report().log(
|
||||
"Having to recreate the test_user! Resizing killed it!")
|
||||
self.log_current_users()
|
||||
users = self.dbaas.users.list(self.instance_id)
|
||||
usernames = [user.name for user in users]
|
||||
if MYSQL_USERNAME not in usernames:
|
||||
self.create_user()
|
||||
asserts.fail(
|
||||
"Somehow, the resize made the test user disappear.")
|
||||
asserts.fail("Resize made the test user disappear.")
|
||||
|
||||
@test(depends_on=[test_instance_returns_to_active_after_resize],
|
||||
runs_after=[resize_should_not_delete_users])
|
||||
def test_make_sure_mysql_is_running_after_resize(self):
|
||||
self.ensure_mysql_is_running()
|
||||
|
||||
@test(depends_on=[test_instance_returns_to_active_after_resize],
|
||||
runs_after=[test_make_sure_mysql_is_running_after_resize])
|
||||
@test(depends_on=[test_make_sure_mysql_is_running_after_resize])
|
||||
def test_instance_has_new_flavor_after_resize(self):
|
||||
actual = self.get_flavor_href(self.instance.flavor['id'])
|
||||
expected = self.get_flavor_href(flavor_id=self.expected_new_flavor_id)
|
||||
@ -597,7 +619,7 @@ def resize_should_not_delete_users():
|
||||
asserts.fail("Somehow, the resize made the test user disappear.")
|
||||
|
||||
|
||||
@test(runs_after=[ResizeInstanceTest], depends_on=[create_user],
|
||||
@test(depends_on=[ResizeInstanceTest],
|
||||
groups=[GROUP, tests.INSTANCES, INSTANCE_GROUP, GROUP_RESIZE],
|
||||
enabled=VOLUME_SUPPORT)
|
||||
class ResizeInstanceVolume(ActionTestBase):
|
||||
|
@ -32,13 +32,6 @@ from trove.tests.util.users import Requirements
GROUP = "dbaas.api.mgmt.ds_versions"


@test(groups=[GROUP])
def mgmt_datastore_version_list_requires_admin_account():
"""Verify that an admin context is required to call this function."""
client = create_client(is_admin=False)
assert_raises(exceptions.Unauthorized, client.mgmt_datastore_versions.list)


@test(groups=[GROUP])
class MgmtDataStoreVersion(object):
"""Tests the mgmt datastore version methods."""
@ -71,6 +64,13 @@ class MgmtDataStoreVersion(object):
# datastore-versions should exist for a functional Trove deployment.
assert_true(len(self.ds_versions) > 0)

@test
def mgmt_datastore_version_list_requires_admin_account(self):
"""Test admin is required to list datastore versions."""
client = create_client(is_admin=False)
assert_raises(exceptions.Unauthorized,
client.mgmt_datastore_versions.list)

@test(depends_on=[test_mgmt_ds_version_list_original_count])
def test_mgmt_ds_version_list_fields_present(self):
"""Verify that all expected fields are returned by list method."""
@ -17,6 +17,7 @@ from time import sleep
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
@ -24,11 +25,12 @@ from troveclient.compat import exceptions

from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api import configurations
from trove.tests.api.instances import CheckInstance
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.scenario import runners
from trove.tests.scenario.runners.test_runners import SkipKnownBug
@ -42,7 +44,7 @@ class SlaveInstanceTestInfo(object):
self.replicated_db = generate_uuid()


GROUP = "dbaas.api.replication"
REPLICATION_GROUP = "dbaas.api.replication"
slave_instance = SlaveInstanceTestInfo()
existing_db_on_master = generate_uuid()
backup_count = None
@ -52,19 +54,29 @@ def _get_user_count(server_info):
cmd = ('mysql -BNq -e \\\'select count\\(*\\) from mysql.user'
' where user like \\\"slave_%\\\"\\\'')
server = create_server_connection(server_info.id)
stdout, stderr = server.execute(cmd)

try:
stdout = server.execute(cmd)
return int(stdout)
except Exception as e:
fail("Failed to execute command: %s, error: %s" % (cmd, str(e)))


def slave_is_running(running=True):

def check_slave_is_running():
server = create_server_connection(slave_instance.id)
cmd = ("mysqladmin extended-status "
"| awk '/Slave_running/{print $4}'")
stdout, stderr = server.execute(cmd)
expected = "ON" if running else "OFF"
return stdout.rstrip() == expected

try:
stdout = server.execute(cmd)
stdout = stdout.rstrip()
except Exception as e:
fail("Failed to execute command %s, error: %s" %
(cmd, str(e)))

expected = b"ON" if running else b"OFF"
return stdout == expected

return check_slave_is_running

@ -119,8 +131,8 @@ def validate_master(master, slaves):
assert_true(asserted_ids.issubset(master_ids))


@test(depends_on_classes=[WaitForGuestInstallationToFinish],
groups=[GROUP],
@test(depends_on_groups=[configurations.CONFIGURATION_GROUP],
groups=[REPLICATION_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled)
class CreateReplicationSlave(object):

@ -153,11 +165,13 @@ class CreateReplicationSlave(object):
slave_instance.id = create_slave()


@test(groups=[GROUP], enabled=CONFIG.swift_enabled)
@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
enabled=CONFIG.swift_enabled,
depends_on=[CreateReplicationSlave])
class WaitForCreateSlaveToFinish(object):
"""Wait until the instance is created and set up as slave."""

@test(depends_on=[CreateReplicationSlave.test_create_slave])
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_slave_created(self):
poll_until(lambda: instance_is_active(slave_instance.id))
@ -165,7 +179,7 @@ class WaitForCreateSlaveToFinish(object):

@test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled),
depends_on=[WaitForCreateSlaveToFinish],
groups=[GROUP])
groups=[REPLICATION_GROUP, tests.INSTANCES])
class VerifySlave(object):

def db_is_found(self, database_to_find):
@ -191,8 +205,15 @@ class VerifySlave(object):
def test_slave_is_read_only(self):
cmd = "mysql -BNq -e \\\'select @@read_only\\\'"
server = create_server_connection(slave_instance.id)
stdout, stderr = server.execute(cmd)
assert_equal(stdout, "1\n")

try:
stdout = server.execute(cmd)
stdout = int(stdout.rstrip())
except Exception as e:
fail("Failed to execute command %s, error: %s" %
(cmd, str(e)))

assert_equal(stdout, 1)

@test(depends_on=[test_slave_is_read_only])
def test_create_db_on_master(self):
@ -216,7 +237,7 @@ class VerifySlave(object):
assert_equal(_get_user_count(instance_info), 1)


@test(groups=[GROUP],
@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
depends_on=[WaitForCreateSlaveToFinish],
runs_after=[VerifySlave],
enabled=CONFIG.swift_enabled)
@ -232,7 +253,7 @@ class TestInstanceListing(object):
validate_master(instance_info, [slave_instance])


@test(groups=[GROUP],
@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
depends_on=[WaitForCreateSlaveToFinish],
runs_after=[TestInstanceListing],
enabled=CONFIG.swift_enabled)
@ -317,8 +338,15 @@ class TestReplicationFailover(object):

cmd = "sudo service trove-guestagent stop"
server = create_server_connection(self._third_slave.id)
stdout, stderr = server.execute(cmd)
assert_equal(stdout, "1\n")

try:
stdout = server.execute(cmd)
stdout = int(stdout.rstrip())
except Exception as e:
fail("Failed to execute command %s, error: %s" %
(cmd, str(e)))

assert_equal(stdout, 1)

@test(depends_on=[disable_master], enabled=False)
def test_eject_replica_master(self):
@ -333,7 +361,7 @@ class TestReplicationFailover(object):
validate_slave(instance_info, slave_instance)


@test(groups=[GROUP],
@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
depends_on=[WaitForCreateSlaveToFinish],
runs_after=[TestReplicationFailover],
enabled=CONFIG.swift_enabled)
@ -367,12 +395,19 @@ class DetachReplica(object):
def check_not_read_only():
cmd = "mysql -BNq -e \\\'select @@read_only\\\'"
server = create_server_connection(slave_instance.id)
stdout, stderr = server.execute(cmd)
return stdout.rstrip() == "0"

try:
stdout = server.execute(cmd)
stdout = int(stdout)
except Exception:
return False

return stdout == 0

poll_until(check_not_read_only)


@test(groups=[GROUP],
@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
depends_on=[WaitForCreateSlaveToFinish],
runs_after=[DetachReplica],
enabled=CONFIG.swift_enabled)
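The replication checks above now parse the single string that execute() returns (for example the @@read_only value or the Slave_running flag) instead of unpacking a (stdout, stderr) tuple. A tiny standalone sketch of that parsing, with a hypothetical run() helper standing in for the SSH connection:

def run(cmd):
    # Hypothetical stand-in for server.execute(cmd); returns raw command output.
    return "1\n" if "read_only" in cmd else "ON\n"


def is_read_only():
    try:
        stdout = run("select @@read_only")
        return int(stdout.rstrip()) == 1
    except Exception:
        # Treat any execution failure as "not verified yet" so pollers retry.
        return False


def slave_running(expected="ON"):
    stdout = run("awk '/Slave_running/{print $4}'").rstrip()
    return stdout == expected


if __name__ == "__main__":
    print(is_read_only(), slave_running())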
@ -195,31 +195,28 @@ class TestUserAccessPasswordChange(UserAccessBase):
user = self._pick_a_user()
password = user["password"]
self.dbaas.users.change_passwords(instance_info.id, [user])

asserts.assert_equal(202, self.dbaas.last_http_code)
self._check_mysql_connection(user["name"], password)

@test(depends_on=[test_change_password])
def test_change_password_back(self):
"""Test change and restore user password."""
user = self._pick_a_user()
old_password = user["password"]
new_password = "NEWPASSWORD"

user["password"] = new_password
self.dbaas.users.change_passwords(instance_info.id, [user])

asserts.assert_equal(202, self.dbaas.last_http_code)
self._check_mysql_connection(user["name"], new_password)

user["password"] = old_password
self.dbaas.users.change_passwords(instance_info.id, [user])
self._check_mysql_connection(user["name"], old_password)

@test(depends_on=[test_change_password_back])
def test_change_password_twice(self):
# Changing the password twice isn't a problem.
user = self._pick_a_user()
password = "NEWPASSWORD"
user["password"] = password
self.dbaas.users.change_passwords(instance_info.id, [user])
self.dbaas.users.change_passwords(instance_info.id, [user])
self._check_mysql_connection(user["name"], password)
asserts.assert_equal(202, self.dbaas.last_http_code)
self._check_mysql_connection(user["name"], old_password)

@after_class(always_run=True)
def tearDown(self):
@ -124,6 +124,7 @@ class TestConfig(object):
},
"redis": {"volume_support": False},
"swift_enabled": True,
"trove_mgmt_network": "trove-mgmt",
}
self._frozen_values = FrozenDict(self._values)
self._users = None
@ -94,20 +94,20 @@ black_box_groups = [
GROUP_SERVICES_INITIALIZE,
instances.GROUP_START,
instances.GROUP_QUOTAS,
backups.GROUP,
replication.GROUP,
configurations.GROUP,
datastores.GROUP,
backups.BACKUP_GROUP,
replication.REPLICATION_GROUP,
configurations.CONFIGURATION_GROUP,
instances_actions.GROUP_RESIZE,
instances_actions.GROUP_STOP_MYSQL,
instances.GROUP_STOP,
instances.GROUP_GUEST,
versions.GROUP,
datastores.GROUP,
datastore_versions.GROUP,
# TODO(SlickNik): The restart tests fail intermittently so pulling
# them out of the blackbox group temporarily. Refer to Trove bug:
# https://bugs.launchpad.net/trove/+bug/1204233
# instances_actions.GROUP_RESTART,
instances_actions.GROUP_STOP_MYSQL,
instances.GROUP_STOP,
versions.GROUP,
instances.GROUP_GUEST,
datastore_versions.GROUP,
]
proboscis.register(groups=["blackbox", "mysql"],
depends_on_groups=black_box_groups)
@ -245,7 +245,9 @@ user_actions_groups.extend([user_actions_group.GROUP])

# groups common to all datastores
common_groups = list(instance_create_groups)
common_groups.extend([guest_log_groups, instance_init_groups, module_groups])
# NOTE(lxkong): Remove the module related tests(module_groups) for now because
# of no use case.
common_groups.extend([guest_log_groups, instance_init_groups])

# Register: Component based groups
register(["backup"], backup_groups)
@ -139,7 +139,8 @@ class SqlHelper(TestHelper):
root_client = self.get_client(host, *args, **kwargs)
root_client.execute("SELECT 1;")
return True
except Exception:
except Exception as e:
print("Failed to execute sql command, error: %s" % str(e))
return False

def get_configuration_value(self, property_name, host, *args, **kwargs):
@ -101,7 +101,8 @@ class GuestLogRunner(TestRunner):
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
expected_published=None, expected_pending=None,
is_admin=False):
self.report.log("Executing log_show for log '%s'" % log_name)
log_details = client.instances.log_show(
self.instance_info.id, log_name)
@ -111,12 +112,14 @@ class GuestLogRunner(TestRunner):
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
expected_pending=expected_pending,
is_admin=is_admin)

def assert_log_details(self, log_details, expected_log_name,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
expected_published=None, expected_pending=None,
is_admin=False):
"""Check that the action generates the proper response data.
For log_published and log_pending, setting the value to 'None'
will skip that check (useful when using an existing instance,
@ -162,18 +165,23 @@ class GuestLogRunner(TestRunner):
"expected %d, got %d" %
(expected_log_name, expected_pending,
log_details.pending))

container = self.container
prefix = self.prefix_pattern % {
'instance_id': self.instance_info.id,
'datastore': CONFIG.dbaas_datastore,
'log': expected_log_name}
metafile = prefix.rstrip('/') + '_metafile'

if expected_published == 0:
self.assert_storage_gone(container, prefix, metafile)
self.assert_storage_gone(container, prefix, metafile,
is_admin=is_admin)
container = 'None'
prefix = 'None'
else:
self.assert_storage_exists(container, prefix, metafile)
self.assert_storage_exists(container, prefix, metafile,
is_admin=is_admin)

self.assert_equal(container, log_details.container,
"Wrong log container for '%s' log" %
expected_log_name)
@ -220,7 +228,8 @@ class GuestLogRunner(TestRunner):
expected_http_code=200,
expected_type=guest_log.LogType.USER.name,
expected_status=guest_log.LogStatus.Disabled.name,
expected_published=None, expected_pending=None):
expected_published=None, expected_pending=None,
is_admin=False):
self.report.log("Executing log_publish for log '%s' (disable: %s "
"discard: %s)" %
(log_name, disable, discard))
@ -232,7 +241,8 @@ class GuestLogRunner(TestRunner):
expected_type=expected_type,
expected_status=expected_status,
expected_published=expected_published,
expected_pending=expected_pending)
expected_pending=expected_pending,
is_admin=is_admin)

def assert_log_discard(self, client, log_name,
expected_http_code=200,
@ -250,9 +260,14 @@ class GuestLogRunner(TestRunner):
expected_published=expected_published,
expected_pending=expected_pending)

def assert_storage_gone(self, container, prefix, metafile):
def assert_storage_gone(self, container, prefix, metafile, is_admin=False):
if is_admin:
swift_client = self.admin_swift_client
else:
swift_client = self.swift_client

try:
headers, container_files = self.swift_client.get_container(
headers, container_files = swift_client.get_container(
container, prefix=prefix)
self.assert_equal(0, len(container_files),
"Found files in %s/%s: %s" %
@ -265,7 +280,7 @@ class GuestLogRunner(TestRunner):
else:
raise
try:
self.swift_client.get_object(container, metafile)
swift_client.get_object(container, metafile)
self.fail("Found metafile after discard: %s" % metafile)
except ClientException as ex:
if ex.http_status == 404:
@ -275,9 +290,15 @@ class GuestLogRunner(TestRunner):
else:
raise

def assert_storage_exists(self, container, prefix, metafile):
def assert_storage_exists(self, container, prefix, metafile,
is_admin=False):
if is_admin:
swift_client = self.admin_swift_client
else:
swift_client = self.swift_client

try:
headers, container_files = self.swift_client.get_container(
headers, container_files = swift_client.get_container(
container, prefix=prefix)
self.assert_true(len(container_files) > 0,
"No files found in %s/%s" %
@ -288,7 +309,7 @@ class GuestLogRunner(TestRunner):
else:
raise
try:
self.swift_client.get_object(container, metafile)
swift_client.get_object(container, metafile)
except ClientException as ex:
if ex.http_status == 404:
self.fail("Missing metafile: %s" % metafile)
@ -507,7 +528,7 @@ class GuestLogRunner(TestRunner):
def run_test_log_publish_again_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_publish(
self.admin_client,
self.auth_client,
log_name,
expected_status=[guest_log.LogStatus.Published.name,
guest_log.LogStatus.Partial.name],
@ -696,7 +717,9 @@ class GuestLogRunner(TestRunner):
expected_type=guest_log.LogType.SYS.name,
expected_status=[guest_log.LogStatus.Ready.name,
guest_log.LogStatus.Partial.name],
expected_published=0, expected_pending=1)
expected_published=0, expected_pending=1,
is_admin=True
)

def run_test_log_publish_sys(self):
log_name = self._get_unexposed_sys_log_name()
@ -705,7 +728,8 @@ class GuestLogRunner(TestRunner):
log_name,
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Partial.name,
expected_published=1, expected_pending=1)
expected_published=1, expected_pending=1,
is_admin=True)

def run_test_log_publish_again_sys(self):
log_name = self._get_unexposed_sys_log_name()
@ -715,7 +739,8 @@ class GuestLogRunner(TestRunner):
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Partial.name,
expected_published=self._get_last_log_published(log_name) + 1,
expected_pending=1)
expected_pending=1,
is_admin=True)

def run_test_log_generator_sys(self):
log_name = self._get_unexposed_sys_log_name()
@ -737,7 +762,7 @@ class GuestLogRunner(TestRunner):
self.admin_client,
log_name, publish=True,
lines=4, expected_lines=4,
swift_client=self.swift_client)
swift_client=self.admin_swift_client)

def run_test_log_save_sys(self):
log_name = self._get_unexposed_sys_log_name()
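The guest-log hunks above thread a new is_admin flag down to the storage assertions so that system (SYS) logs, which are now published under the service tenant, are verified with an admin Swift client while user logs keep using the regular one. A minimal standalone sketch of that routing; the fake client classes below are illustrative and not the real swiftclient API:

class FakeSwiftClient(object):
    # Illustrative stand-in for a swiftclient Connection.
    def __init__(self, name):
        self.name = name

    def get_container(self, container, prefix=None):
        return {}, []  # (headers, object listing)


class LogStorageChecker(object):
    def __init__(self):
        self.swift_client = FakeSwiftClient("user")
        self.admin_swift_client = FakeSwiftClient("admin")

    def assert_storage_gone(self, container, prefix, is_admin=False):
        # Route to the admin client for SYS logs, the user client otherwise.
        swift_client = self.admin_swift_client if is_admin else self.swift_client
        headers, files = swift_client.get_container(container, prefix=prefix)
        assert len(files) == 0, "Found files in %s/%s" % (container, prefix)
        return swift_client.name


if __name__ == "__main__":
    checker = LogStorageChecker()
    print(checker.assert_storage_gone("log-container", "prefix", is_admin=True))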
@ -47,24 +47,40 @@ class ModuleRunner(TestRunner):
self.MODULE_BINARY_CONTENTS2 = b'\x00\xFF\xea\x9c\x11\xfeok\xb1\x8ax'

self.module_name_order = [
# 0
{'suffix': self.MODULE_BINARY_SUFFIX,
'priority': True, 'order': 1},
# 1
{'suffix': self.MODULE_BINARY_SUFFIX2,
'priority': True, 'order': 2},
# 2
{'suffix': '_hidden_all_tenant_auto_priority',
'priority': True, 'order': 3},
# 3
{'suffix': '_hidden', 'priority': True, 'order': 4},
# 4
{'suffix': '_auto', 'priority': True, 'order': 5},
# 5
{'suffix': '_live', 'priority': True, 'order': 6},
# 6
{'suffix': '_priority', 'priority': True, 'order': 7},
# 7
{'suffix': '_ds', 'priority': False, 'order': 1},
# 8
{'suffix': '_ds_ver', 'priority': False, 'order': 2},
# 9
{'suffix': '_all_tenant_ds_ver', 'priority': False, 'order': 3},
# 10
{'suffix': '', 'priority': False, 'order': 4},
# 11
{'suffix': '_ds_diff', 'priority': False, 'order': 5},
# 12
{'suffix': '_diff_tenant', 'priority': False, 'order': 6},
# 13
{'suffix': '_full_access', 'priority': False, 'order': 7},
# 14
{'suffix': '_for_update', 'priority': False, 'order': 8},
# 15
{'suffix': '_updated', 'priority': False, 'order': 8},
]

@ -80,7 +96,6 @@ class ModuleRunner(TestRunner):
self.module_count_prior_to_create = 0
self.module_ds_count_prior_to_create = 0
self.module_ds_all_count_prior_to_create = 0
self.module_all_tenant_count_prior_to_create = 0
self.module_auto_apply_count_prior_to_create = 0
self.module_admin_count_prior_to_create = 0
self.module_other_count_prior_to_create = 0
@ -106,10 +121,12 @@ class ModuleRunner(TestRunner):

@property
def main_test_module(self):
# The module named "test_module_1"
return self._get_test_module(0)

@property
def update_test_module(self):
# The module named "test_module_1_updated"
return self._get_test_module(1)

@property
@ -205,6 +222,7 @@ class ModuleRunner(TestRunner):

# Tests start here
def run_module_delete_existing(self):
"""Delete all the testing modules if exist."""
modules = self.admin_client.modules.list()
for module in modules:
if module.name.startswith(self.MODULE_NAME):
@ -222,6 +240,7 @@ class ModuleRunner(TestRunner):
def run_module_create_non_admin_auto(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
"""Non-admin cannot create modules by specifying auto_apply."""
client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
@ -232,6 +251,7 @@ class ModuleRunner(TestRunner):
def run_module_create_non_admin_all_tenant(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
"""Non-admin cannot create modules by specifying all_tenants."""
client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
@ -242,6 +262,7 @@ class ModuleRunner(TestRunner):
def run_module_create_non_admin_hidden(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
"""Non-admin cannot create modules by specifying visible."""
client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
@ -252,6 +273,7 @@ class ModuleRunner(TestRunner):
def run_module_create_non_admin_priority(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
"""Non-admin cannot create modules by specifying priority_apply."""
client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
@ -262,6 +284,7 @@ class ModuleRunner(TestRunner):
def run_module_create_non_admin_no_full_access(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
"""Non-admin cannot create modules by specifying full_access."""
client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
@ -272,6 +295,7 @@ class ModuleRunner(TestRunner):
def run_module_create_full_access_with_admin_opt(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
"""full_access cannot be used together with auto_apply."""
client = self.admin_client
self.assert_raises(
expected_exception, expected_http_code,
@ -320,8 +344,6 @@ class ModuleRunner(TestRunner):
self.module_ds_all_count_prior_to_create = len(
self.auth_client.modules.list(
datastore=models.Modules.MATCH_ALL_NAME))
self.module_all_tenant_count_prior_to_create = len(
self.unauth_client.modules.list())
self.module_auto_apply_count_prior_to_create = len(
[module for module in self.admin_client.modules.list()
if module.auto_apply])
@ -329,6 +351,8 @@ class ModuleRunner(TestRunner):
self.admin_client.modules.list())
self.module_other_count_prior_to_create = len(
self.unauth_client.modules.list())

# Create module "test_module_1" for datastore "all"
self.assert_module_create(self.auth_client, 10)

def assert_module_create(self, client, name_order,
@ -361,23 +385,31 @@ class ModuleRunner(TestRunner):
priority_apply=priority_apply,
apply_order=apply_order,
full_access=full_access)

username = client.real_client.client.username
if (('alt' in username and 'admin' not in username) or
('admin' in username and visible)):
if username == self.instance_info.user.auth_user:
self.module_create_count += 1
if datastore:
if datastore == self.instance_info.dbaas_datastore:
self.module_ds_create_count += 1
else:
self.module_ds_all_create_count += 1
elif not visible:
self.module_admin_create_count += 1
else:
elif (username != self.instance_info.admin_user.auth_user and
username != self.instance_info.user.auth_user):
self.module_other_create_count += 1
else:
self.module_admin_create_count += 1

if all_tenants and visible:
self.module_all_tenant_create_count += 1
if datastore:
if datastore == self.instance_info.dbaas_datastore:
self.module_ds_create_count += 1
else:
self.module_ds_all_create_count += 1
if auto_apply and visible:
self.module_auto_apply_create_count += 1

self.test_modules.append(result)

tenant_id = None
@ -400,7 +432,11 @@ class ModuleRunner(TestRunner):
expected_datastore_version=datastore_version,
expected_auto_apply=auto_apply,
expected_contents=contents,
expected_is_admin=('admin' in username and not full_access))
expected_is_admin=(
username == self.instance_info.admin_user.auth_user and
not full_access
)
)

def validate_module(self, module, validate_all=False,
expected_name=None,
@ -485,6 +521,7 @@ class ModuleRunner(TestRunner):
'Unexpected visible')

def run_module_create_for_update(self):
# Create module "test_module_1_updated"
self.assert_module_create(self.auth_client, 14)

def run_module_create_dupe(
@ -547,7 +584,12 @@ class ModuleRunner(TestRunner):
def run_module_list(self):
self.assert_module_list(
self.auth_client,
self.module_count_prior_to_create + self.module_create_count)
(
self.module_count_prior_to_create +
self.module_create_count +
self.module_all_tenant_create_count
)
)

def assert_module_list(self, client, expected_count, datastore=None):
if datastore:
@ -576,7 +618,7 @@ class ModuleRunner(TestRunner):
def run_module_list_unauth_user(self):
self.assert_module_list(
self.unauth_client,
(self.module_all_tenant_count_prior_to_create +
(self.module_other_count_prior_to_create +
self.module_all_tenant_create_count +
self.module_other_create_count))

@ -665,7 +707,12 @@ class ModuleRunner(TestRunner):
def run_module_list_again(self):
self.assert_module_list(
self.auth_client,
self.module_count_prior_to_create + self.module_create_count)
(
self.module_count_prior_to_create +
self.module_create_count +
self.module_all_tenant_create_count
)
)

def run_module_list_ds(self):
self.assert_module_list(
@ -698,7 +745,7 @@ class ModuleRunner(TestRunner):
self.module_other_create_count))

def run_module_update(self):
self.assert_module_update(
self.assert_module_update_description(
self.auth_client,
self.main_test_module.id,
description=self.MODULE_DESC + " modified")
@ -732,8 +779,8 @@ class ModuleRunner(TestRunner):
"MD5 changed with same contents")

def run_module_update_auto_toggle(self,
expected_exception=exceptions.Forbidden,
expected_http_code=403):
expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_auto_apply_module()
toggle_off_args = {'auto_apply': False}
toggle_on_args = {'auto_apply': True}
@ -762,8 +809,8 @@ class ModuleRunner(TestRunner):
**toggle_on_args)

def run_module_update_all_tenant_toggle(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_all_tenant_module()
toggle_off_args = {'all_tenants': False}
toggle_on_args = {'all_tenants': True}
@ -772,8 +819,8 @@ class ModuleRunner(TestRunner):
expected_http_code=expected_http_code)

def run_module_update_invisible_toggle(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_invisible_module()
toggle_off_args = {'visible': True}
toggle_on_args = {'visible': False}
@ -782,8 +829,8 @@ class ModuleRunner(TestRunner):
expected_http_code=expected_http_code)

def run_module_update_priority_toggle(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_priority_apply_module()
toggle_off_args = {'priority_apply': False}
toggle_on_args = {'priority_apply': True}
@ -810,8 +857,8 @@ class ModuleRunner(TestRunner):
self.main_test_module.id, visible=False)

def run_module_update_non_admin_auto_off(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_auto_apply_module()
client = self.auth_client
self.assert_raises(
@ -819,8 +866,8 @@ class ModuleRunner(TestRunner):
client, client.modules.update, module.id, auto_apply=False)

def run_module_update_non_admin_auto_any(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_auto_apply_module()
client = self.auth_client
self.assert_raises(
@ -936,11 +983,6 @@ class ModuleRunner(TestRunner):
self.auth_client, self.instance_info.id,
self.module_auto_apply_count_prior_to_create)

def run_module_query_after_remove(self):
self.assert_module_query(
self.auth_client, self.instance_info.id,
self.module_auto_apply_count_prior_to_create + 2)

def assert_module_query(self, client, instance_id, expected_count,
expected_http_code=200, expected_results=None):
modquery_list = client.instances.module_query(instance_id)
@ -1063,7 +1105,10 @@ class ModuleRunner(TestRunner):

def run_module_list_instance_after_apply(self):
self.assert_module_list_instance(
self.auth_client, self.instance_info.id, self.apply_count)
self.auth_client,
self.instance_info.id,
self.apply_count + self.module_auto_apply_count_prior_to_create
)

def run_module_apply_another(self):
self.assert_module_apply(self.auth_client, self.instance_info.id,
@ -1072,7 +1117,10 @@ class ModuleRunner(TestRunner):

def run_module_list_instance_after_apply_another(self):
self.assert_module_list_instance(
self.auth_client, self.instance_info.id, self.apply_count)
self.auth_client,
self.instance_info.id,
self.apply_count + self.module_auto_apply_count_prior_to_create
)

def run_module_update_after_remove(self):
name, description, contents, priority, order = (
@ -1095,7 +1143,8 @@ class ModuleRunner(TestRunner):
{self.main_test_module.md5: 1})

def run_module_query_after_apply(self):
expected_count = self.module_auto_apply_count_prior_to_create + 2
expected_count = (self.module_auto_apply_count_prior_to_create +
self.apply_count)
expected_results = self.create_default_query_expected_results(
[self.main_test_module])
self.assert_module_query(self.auth_client, self.instance_info.id,
@ -1138,7 +1187,8 @@ class ModuleRunner(TestRunner):
{self.main_test_module.md5: 1})

def run_module_query_after_apply_another(self):
expected_count = self.module_auto_apply_count_prior_to_create + 3
expected_count = (self.module_auto_apply_count_prior_to_create +
self.apply_count)
expected_results = self.create_default_query_expected_results(
[self.main_test_module, self.update_test_module])
self.assert_module_query(self.auth_client, self.instance_info.id,
@ -1162,7 +1212,10 @@ class ModuleRunner(TestRunner):

def run_module_list_instance_after_apply_live(self):
self.assert_module_list_instance(
self.auth_client, self.instance_info.id, self.apply_count)
self.auth_client,
self.instance_info.id,
self.apply_count + self.module_auto_apply_count_prior_to_create
)

def run_module_update_live_update(self):
module = self.live_update_test_module
@ -1232,6 +1285,7 @@ class ModuleRunner(TestRunner):
def run_module_remove(self):
self.assert_module_remove(self.auth_client, self.instance_info.id,
self.update_test_module.id)
self.apply_count -= 1

def assert_module_remove(self, client, instance_id, module_id,
expected_http_code=200):
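The bookkeeping above stops guessing the caller's role from substrings such as 'alt' or 'admin' in the username and instead compares against the configured test user and admin user. A rough standalone sketch of that bucketing; the usernames and bucket names below are illustrative only, not the runner's real counters:

def bucket_module_creator(username, test_user="alt_demo", admin_user="trove"):
    # Decide which counter a newly created module belongs to, mirroring the
    # exact-username comparison the runner now uses.
    if username == test_user:
        return "created_by_test_user"
    elif username != admin_user and username != test_user:
        return "created_by_other_tenant"
    else:
        return "created_by_admin"


if __name__ == "__main__":
    for name in ("alt_demo", "demo", "trove"):
        print(name, "->", bucket_module_creator(name))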
@ -87,17 +87,22 @@ class RootActionsRunner(TestRunner):
def assert_can_connect(self, instance_id, test_connect_creds):
self._assert_connect(instance_id, True, test_connect_creds)

def _assert_connect(
self, instance_id, expected_response, test_connect_creds):
def _assert_connect(self, instance_id, expected_response,
test_connect_creds):
host = self.get_instance_host(instance_id=instance_id)
self.report.log("Pinging instance %s with credentials: %s"
% (instance_id, test_connect_creds))
self.report.log(
"Pinging instance %s with credentials: %s, database: %s" %
(instance_id, test_connect_creds,
self.test_helper.credentials.get("database"))
)

ping_response = self.test_helper.ping(
host,
username=test_connect_creds[0],
password=test_connect_creds[1]
password=test_connect_creds[1],
database=self.test_helper.credentials.get("database")
)

self.assert_equal(expected_response, ping_response)

def run_check_root_enabled(self, expected_http_code=200):
@ -182,6 +182,7 @@ class InstanceTestInfo(object):
self.databases = None # The databases created on the instance.
self.helper_user = None # Test helper user if exists.
self.helper_database = None # Test helper database if exists.
self.admin_user = None


class LogOnFail(type):
@ -330,7 +331,10 @@ class TestRunner(object):
self.instance_info.dbaas_datastore = CONFIG.dbaas_datastore
self.instance_info.dbaas_datastore_version = (
CONFIG.dbaas_datastore_version)
self.instance_info.user = CONFIG.users.find_user_by_name('alt_demo')
self.instance_info.user = CONFIG.users.find_user_by_name("alt_demo")
self.instance_info.admin_user = CONFIG.users.find_user(
Requirements(is_admin=True)
)
if self.VOLUME_SUPPORT:
self.instance_info.volume_size = CONFIG.get('trove_volume_size', 1)
self.instance_info.volume = {
@ -469,17 +473,20 @@ class TestRunner(object):

def _create_admin_client(self):
"""Create a client from an admin user."""
requirements = Requirements(is_admin=True, services=["swift"])
requirements = Requirements(is_admin=True, services=["trove"])
admin_user = CONFIG.users.find_user(requirements)
return create_dbaas_client(admin_user)

@property
def swift_client(self):
return self._create_swift_client()
return self._create_swift_client(admin=False)

def _create_swift_client(self):
"""Create a swift client from the admin user details."""
requirements = Requirements(is_admin=True, services=["swift"])
@property
def admin_swift_client(self):
return self._create_swift_client(admin=True)

def _create_swift_client(self, admin=True):
requirements = Requirements(is_admin=admin, services=["swift"])
user = CONFIG.users.find_user(requirements)
os_options = {'region_name': CONFIG.trove_client_region_name}
return swiftclient.client.Connection(
@ -492,7 +499,7 @@ class TestRunner(object):

@property
def nova_client(self):
return create_nova_client(self.instance_info.user)
return create_nova_client(self.instance_info.admin_user)

def register_debug_inst_ids(self, inst_ids):
"""Method to 'register' an instance ID (or list of instance IDs)
@ -768,7 +775,7 @@ class TestRunner(object):
self.fail("Found left-over server group: %s" % server_group)

def get_instance(self, instance_id, client=None):
client = client or self.auth_client
client = client or self.admin_client
return client.instances.get(instance_id)

def extract_ipv4s(self, ips):
@ -233,6 +233,7 @@ class BackupAgentTest(trove_testtools.TestCase):
' --stream=xbstream'
' %(extra_opts)s '
' --user=os_admin --password=123'
' --host=127.0.0.1'
' /var/lib/mysql/data 2>/tmp/innobackupex.log'
' | gzip |'
' openssl enc -aes-256-cbc -salt '
@ -79,14 +79,14 @@ UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s "
" --user=os_admin --password=password"
" --user=os_admin --password=password --host=127.0.0.1"
" /var/lib/mysql/data 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
' --incremental --incremental-lsn=%(lsn)s'
' %(extra_opts)s '
' --user=os_admin --password=password'
' --user=os_admin --password=password --host=127.0.0.1'
' /var/lib/mysql/data'
' 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
@ -34,6 +34,7 @@ from proboscis.asserts import fail
from proboscis import SkipTest
from six.moves.urllib.parse import unquote
from sqlalchemy import create_engine
import tenacity
from troveclient.compat import Dbaas

from trove.common import cfg
@ -41,6 +42,7 @@ from trove.common.utils import import_class
from trove.common.utils import import_object
from trove.tests.config import CONFIG as test_config
from trove.tests.util.client import TestClient
from trove.tests.util import mysql
from trove.tests.util import test_config as CONFIG
from trove.tests.util.users import Requirements

@ -192,10 +194,8 @@ def dns_checker(mgmt_instance):


def process(cmd):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = process.communicate()
return result
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return output


def string_in_list(str, substr_list):
@ -248,9 +248,7 @@ def mysql_connection():


class MySqlConnection(object):

def assert_fails(self, ip, user_name, password):
from trove.tests.util import mysql
try:
with mysql.create_mysql_connection(ip, user_name, password):
pass
@ -262,8 +260,15 @@ class MySqlConnection(object):
fail("Expected to see permissions failure. Instead got message:"
"%s" % mcf.message)

@tenacity.retry(
wait=tenacity.wait_fixed(3),
stop=tenacity.stop_after_attempt(5),
reraise=True
)
def create(self, ip, user_name, password):
from trove.tests.util import mysql
print("Connecting mysql, host: %s, user: %s, password: %s" %
(ip, user_name, password))

return mysql.create_mysql_connection(ip, user_name, password)
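The test utilities above start decorating flaky connection calls with tenacity: retry a fixed number of attempts with a fixed wait, then re-raise the last error. A small runnable sketch of the same pattern; the flaky_connect function and the attempt counter are illustrative only:

import tenacity

attempts = {"count": 0}


@tenacity.retry(
    wait=tenacity.wait_fixed(0.1),
    stop=tenacity.stop_after_attempt(5),
    reraise=True
)
def flaky_connect():
    # Fail the first two attempts to show the retry loop in action.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise ConnectionError("MySQL not ready yet")
    return "connected after %d attempts" % attempts["count"]


if __name__ == "__main__":
    print(flaky_connect())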
@ -13,43 +13,62 @@
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import os
import subprocess

from proboscis.asserts import fail
import tenacity

from trove import tests
from trove.tests.config import CONFIG
from trove.tests import util
from trove.tests.util.users import Requirements


def create_server_connection(instance_id):
def create_server_connection(instance_id, ip_address=None):
if util.test_config.use_local_ovz:
return OpenVZServerConnection(instance_id)
return ServerSSHConnection(instance_id)
return ServerSSHConnection(instance_id, ip_address=ip_address)


class ServerSSHConnection(object):
def __init__(self, instance_id):
self.instance_id = instance_id
def __init__(self, instance_id, ip_address=None):
if not ip_address:
req_admin = Requirements(is_admin=True)
self.user = util.test_config.users.find_user(req_admin)
self.dbaas_admin = util.create_dbaas_client(self.user)
self.instance = self.dbaas_admin.management.show(self.instance_id)
try:
self.ip_address = [str(ip) for ip in self.instance.ip
if netaddr.valid_ipv4(ip)][0]
except Exception:
fail("No IPV4 ip found")
user = util.test_config.users.find_user(req_admin)
dbaas_admin = util.create_dbaas_client(user)
instance = dbaas_admin.management.show(instance_id)

mgmt_interfaces = instance.server["addresses"].get(
CONFIG.trove_mgmt_network, []
)
mgmt_addresses = [str(inf["addr"]) for inf in mgmt_interfaces
if inf["version"] == 4]

if len(mgmt_addresses) == 0:
fail("No IPV4 ip found for management network.")
else:
self.ip_address = mgmt_addresses[0]
else:
self.ip_address = ip_address

TROVE_TEST_SSH_USER = os.environ.get('TROVE_TEST_SSH_USER')
if TROVE_TEST_SSH_USER and '@' not in self.ip_address:
self.ip_address = TROVE_TEST_SSH_USER + '@' + self.ip_address

@tenacity.retry(
wait=tenacity.wait_fixed(5),
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(subprocess.CalledProcessError)
)
def execute(self, cmd):
exe_cmd = "%s %s %s" % (tests.SSH_CMD, self.ip_address, cmd)
print("RUNNING COMMAND: %s" % exe_cmd)
stdout, stderr = util.process(exe_cmd)
return (stdout.decode(), stderr.decode())

output = util.process(exe_cmd)

print("OUTPUT: %s" % output)
return output


class OpenVZServerConnection(object):
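The SSH helper above now resolves the guest's address from the Trove management network entry of the server's addresses dict instead of taking the instance's first IPv4 address. A standalone sketch of that selection; the addresses dict below is fabricated sample data, not output from a real Nova API call:

def pick_mgmt_ipv4(addresses, mgmt_network="trove-mgmt"):
    # Keep only IPv4 entries attached to the management network.
    interfaces = addresses.get(mgmt_network, [])
    candidates = [str(inf["addr"]) for inf in interfaces if inf["version"] == 4]
    if not candidates:
        raise RuntimeError("No IPV4 ip found for management network.")
    return candidates[0]


if __name__ == "__main__":
    sample = {
        "trove-mgmt": [{"version": 4, "addr": "192.168.254.10"}],
        "private": [{"version": 4, "addr": "10.0.0.5"}],
    }
    print(pick_mgmt_ipv4(sample))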