Fix 'existing_users' context against keystone v3

And restore disabled tests

Change-Id: Iea75a9733e8f23aafb19171ae6eb706f8a980870

parent 393b32ce66
commit 42c2e0cba5
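The substance of the fix, before the diff: python-keystoneclient's v2.0 client exposes `tenant_id`/`tenant_name`, while the v3 client exposes `project_id`/`project_name` instead, and `project_id` may be unset when the session is scoped by project name only. A minimal sketch of the dispatch the patched context performs (`resolve_tenant` is an illustrative helper name, not rally API):

```python
def resolve_tenant(kclient):
    """Return (tenant_id, tenant_name) for a keystone v2.0 or v3 client."""
    if kclient.version == "2.0":
        # v2.0 keystone clients carry tenant_* attributes
        tenant_id, tenant_name = kclient.tenant_id, kclient.tenant_name
    else:
        # v3 clients expose project_* instead
        tenant_id, tenant_name = kclient.project_id, kclient.project_name
    if not tenant_id:
        # a name-scoped v3 client may not know its project id up front
        tenant_id = kclient.get_project_id(tenant_name)
    return tenant_id, tenant_name
```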
@@ -14,6 +14,22 @@
         failure_rate:
           max: 20
 
+  NeutronNetworks.create_and_list_subnets:
+    -
+      args:
+        network_create_args:
+        subnet_create_args:
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: {{smoke or 20 }}
+        concurrency: {{smoke or 10}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
 
   NeutronSecurityGroup.create_and_list_security_groups:
     -
@@ -65,6 +81,130 @@
         failure_rate:
           max: 0
 
+  NeutronNetworks.create_and_list_routers:
+    -
+      args:
+        network_create_args:
+        subnet_create_args:
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args:
+      runner:
+        type: "constant"
+        times: {{smoke or 15}}
+        concurrency: {{smoke or 5}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
+
+  NeutronNetworks.create_and_list_ports:
+    -
+      args:
+        network_create_args:
+        port_create_args:
+        ports_per_network: 4
+      runner:
+        type: "constant"
+        times: {{smoke or 15}}
+        concurrency: {{smoke or 5}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
+
+# TODO(ikhudoshyn): We need to setup 'pool'
+# quota properly in rally-gate.sh in order to run it
+#
+#  NeutronLoadbalancerV1.create_and_list_pools:
+#    -
+#      args:
+#        pool_create_args: {}
+#      runner:
+#        type: "constant"
+#        times: {{smoke or 20}}
+#        concurrency: {{smoke or 10}}
+#      context:
+#        network: {}
+#        lbaas:
+#          pool: {}
+#          lbaas_version: 1
+#      sla:
+#        failure_rate:
+#          max: 0
+
+# TODO(ikhudoshyn): We need to setup 'pool'
+# quota properly in rally-gate.sh in order to run it
+#
+#  NeutronLoadbalancerV1.create_and_delete_pools:
+#    -
+#      args:
+#        pool_create_args: {}
+#      runner:
+#        type: "constant"
+#        times: {{smoke or 20}}
+#        concurrency: {{smoke or 10}}
+#      context:
+#        network: {}
+#      sla:
+#        failure_rate:
+#          max: 0
+
+# TODO(ikhudoshyn): We need to setup 'pool'
+# quota properly in rally-gate.sh in order to run it
+#
+#  NeutronLoadbalancerV1.create_and_update_pools:
+#    -
+#      args:
+#        pool_create_args: {}
+#        pool_update_args: {}
+#      runner:
+#        type: "constant"
+#        times: {{smoke or 20}}
+#        concurrency: {{smoke or 10}}
+#      context:
+#        network: {}
+#      sla:
+#        failure_rate:
+#          max: 0
+
+# TODO(ikhudoshyn): We need to setup 'pool'
+# quota properly in rally-gate.sh in order to run it
+#
+#  NeutronLoadbalancerV1.create_and_list_vips:
+#    -
+#      args:
+#        vip_create_args: {}
+#      runner:
+#        type: "constant"
+#        times: {{smoke or 20}}
+#        concurrency: {{smoke or 10}}
+#      context:
+#        network: {}
+#      sla:
+#        failure_rate:
+#          max: 0
+
+# TODO(ikhudoshyn): We need to setup 'pool'
+# quota properly in rally-gate.sh in order to run it
+#
+#  NeutronLoadbalancerV1.create_and_update_vips:
+#    -
+#      args:
+#        vip_create_args: {}
+#        vip_update_args: {}
+#        pool_create_args: {}
+#      runner:
+#        type: "constant"
+#        times: {{smoke or 20}}
+#        concurrency: {{smoke or 10}}
+#      context:
+#        network: {}
+#      sla:
+#        failure_rate:
+#          max: 0
 
   NeutronLoadbalancerV1.create_and_list_healthmonitors:
     -
@@ -119,6 +259,46 @@
         failure_rate:
           max: 20
 
+  NeutronNetworks.create_and_update_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.4.0.0/16"
+        subnets_per_network: 2
+        subnet_update_args:
+          enable_dhcp: False
+          name: "_subnet_updated"
+      runner:
+        type: "constant"
+        times: {{smoke or 20}}
+        concurrency: {{smoke or 10}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
+
+  NeutronNetworks.create_and_update_routers:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args: {}
+        router_update_args:
+          admin_state_up: False
+          name: "_router_updated"
+      runner:
+        type: "constant"
+        times: {{smoke or 15}}
+        concurrency: {{smoke or 5}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
 
   NeutronNetworks.create_and_delete_networks:
     -
@@ -132,6 +312,22 @@
         failure_rate:
           max: 20
 
+  NeutronNetworks.create_and_delete_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: {{smoke or 20}}
+        concurrency: {{smoke or 10}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
 
   NeutronNetworks.create_and_delete_floating_ips:
     -
@@ -146,15 +342,91 @@
         failure_rate:
           max: 0
 
 
-  Quotas.neutron_update:
+  NeutronNetworks.create_and_delete_routers:
     -
       args:
-        max_quota: 1024
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args: {}
       runner:
         type: "constant"
-        times: {{smoke or 10}}
-        concurrency: {{smoke or 2}}
+        times: {{smoke or 15}}
+        concurrency: {{smoke or 5}}
+      context:
+        network: {}
       sla:
         failure_rate:
           max: 20
+
+  NeutronNetworks.create_and_delete_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 10
+      runner:
+        type: "constant"
+        times: {{smoke or 8}}
+        concurrency: {{smoke or 4}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
+
+  NeutronNetworks.create_and_update_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 5
+        port_update_args:
+          admin_state_up: False
+          device_id: "dummy_id"
+          device_owner: "dummy_owner"
+          name: "_port_updated"
+      runner:
+        type: "constant"
+        times: {{smoke or 20}}
+        concurrency: {{smoke or 10}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 20
+
+  NovaServers.boot_and_delete_server:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: {{image_name}}
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      sla:
+        failure_rate:
+          max: 0
+
+    -
+      args:
+        auto_assign_nic: True
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: {{image_name}}
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        network:
+          start_cidr: "10.2.0.0/24"
+          networks_per_tenant: 2
+      sla:
+        failure_rate:
+          max: 0
@@ -184,3 +456,67 @@
       sla:
         failure_rate:
           max: 0
+
+  VMTasks.boot_runcommand_delete:
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: {{image_name}}
+        command:
+          script_file: "~/.rally/extra/instance_test.sh"
+          interpreter: "/bin/sh"
+        username: "cirros"
+      runner:
+        type: "constant"
+        times: {{smoke or 4}}
+        concurrency: {{smoke or 2}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 0
+    -
+      args:
+        flavor:
+          name: "m1.tiny"
+        image:
+          name: {{image_name}}
+        command:
+          script_file: "~/.rally/extra/instance_dd_test.sh"
+          interpreter: "/bin/sh"
+        username: "cirros"
+      runner:
+        type: "constant"
+        times: {{smoke or 4}}
+        concurrency: {{smoke or 2}}
+      context:
+        network: {}
+      sla:
+        failure_rate:
+          max: 0
+
+  VMTasks.boot_runcommand_delete_custom_image:
+    -
+      args:
+        command:
+          remote_path: "./dd_test.sh"
+        flavor:
+          name: "m1.tiny"
+        username: "cirros"
+      runner:
+        type: "constant"
+        times: 1
+        concurrency: 1
+      context:
+        image_command_customizer:
+          command:
+            local_path: "/home/jenkins/.rally/extra/install_benchmark.sh"
+            remote_path: "./install_benchmark.sh"
+          flavor:
+            name: "m1.tiny"
+          image:
+            name: {{image_name}}
+          username: "cirros"
+        network: {}
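A note on the `{{smoke or 20}}` values above: rally task files are Jinja2 templates, so each such expression resolves to the `smoke` template argument when the gate passes one (smoke mode) and falls back to the literal otherwise. A self-contained illustration of that rendering:

```python
import jinja2

template = jinja2.Template("times: {{ smoke or 20 }}\n"
                           "concurrency: {{ smoke or 10 }}")
print(template.render())         # full run:  times: 20, concurrency: 10
print(template.render(smoke=1))  # smoke run: times: 1,  concurrency: 1
```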
@@ -52,16 +52,26 @@ class ExistingUsers(users.UserContextMixin, context.Context):
             user_credential = objects.Credential(**user)
             user_kclient = osclients.Clients(user_credential).keystone()
 
-            if user_kclient.tenant_id not in self.context["tenants"]:
-                self.context["tenants"][user_kclient.tenant_id] = {
-                    "id": user_kclient.tenant_id,
-                    "name": user_kclient.tenant_name
+            if user_kclient.version == "2.0":
+                tenant_id = user_kclient.tenant_id
+                tenant_name = user_kclient.tenant_name
+            else:
+                tenant_name = user_kclient.project_name
+                tenant_id = user_kclient.project_id
+
+            if not tenant_id:
+                tenant_id = user_kclient.get_project_id(tenant_name)
+
+            if tenant_id not in self.context["tenants"]:
+                self.context["tenants"][tenant_id] = {
+                    "id": tenant_id,
+                    "name": tenant_name
                 }
 
             self.context["users"].append({
                 "credential": user_credential,
                 "id": user_kclient.user_id,
-                "tenant_id": user_kclient.tenant_id
+                "tenant_id": tenant_id
             })
 
     @logging.log_task_wrapper(LOG.info, _("Exit context: `existing_users`"))
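The `if not tenant_id` fallback above is the subtle part: a keystone v3 client built from a name-scoped credential can report `project_id` as `None`, so the context must resolve the id by name. A quick spot-check of that branch with `mock` (mirroring the new `test_setup_keystonev3` test below):

```python
import mock  # on Python 3, equivalently: from unittest import mock

kclient = mock.MagicMock(version="3", project_id=None, project_name="demo")
kclient.get_project_id.return_value = "2"

# same branch the patched context takes for keystone v3
tenant_id = kclient.project_id or kclient.get_project_id(kclient.project_name)
assert tenant_id == "2"
```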
@ -71,7 +71,9 @@ if [ "$DEVSTACK_GATE_PREPOPULATE_USERS" -eq "1" ]; then
|
||||
if [ "$NEUTRON_EXISTS" ]; then
|
||||
OS_QUOTA_STR="--networks -1 --subnets -1 --routers -1 --vips -1 --floating-ips -1 --subnetpools -1 --secgroups -1 --secgroup-rules -1 --ports -1 --health-monitors -1"
|
||||
openstack --os-interface admin quota set $OS_QUOTA_STR rally-test-project-1
|
||||
openstack --os-interface admin quota show rally-test-project-1
|
||||
openstack --os-interface admin quota set $OS_QUOTA_STR rally-test-project-2
|
||||
openstack --os-interface admin quota show rally-test-project-2
|
||||
fi
|
||||
|
||||
DEPLOYMENT_CONFIG_FILE=~/.rally/with-existing-users-config
|
||||
|
@@ -24,10 +24,10 @@ class ExistingUserTestCase(test.TestCase):
 
     @mock.patch("%s.keystone.existing_users.osclients.Clients" % CTX)
     @mock.patch("%s.keystone.existing_users.objects.Credential" % CTX)
-    def test_setup(self, mock_credential, mock_clients):
-        user1 = mock.MagicMock(tenant_id="1")
-        user2 = mock.MagicMock(tenant_id="1")
-        user3 = mock.MagicMock(tenant_id="2")
+    def test_setup_keystonev2(self, mock_credential, mock_clients):
+        user1 = mock.MagicMock(tenant_id="1", version="2.0")
+        user2 = mock.MagicMock(tenant_id="1", version="2.0")
+        user3 = mock.MagicMock(tenant_id="2", version="2.0")
 
         mock_clients.return_value.keystone.side_effect = [
             user1, user2, user3
@@ -58,6 +58,44 @@
         self.assertEqual({"id": "2", "name": user3.tenant_name},
                          context["tenants"]["2"])
 
+    @mock.patch("%s.keystone.existing_users.osclients.Clients" % CTX)
+    @mock.patch("%s.keystone.existing_users.objects.Credential" % CTX)
+    def test_setup_keystonev3(self, mock_credential, mock_clients):
+        user1 = mock.MagicMock(project_id="1", version="3")
+        user2 = mock.MagicMock(project_id="1", version="3")
+        user3 = mock.MagicMock(project_id=None, version="3")
+
+        user3.get_project_id.return_value = "2"
+
+        mock_clients.return_value.keystone.side_effect = [
+            user1, user2, user3
+        ]
+
+        context = {
+            "task": mock.MagicMock(),
+            "config": {
+                "existing_users": [user1, user2, user3]
+            }
+        }
+        existing_users.ExistingUsers(context).setup()
+
+        self.assertIn("users", context)
+        self.assertIn("tenants", context)
+        self.assertEqual(3, len(context["users"]))
+        self.assertEqual(
+            {
+                "id": user1.user_id,
+                "credential": mock_credential.return_value,
+                "tenant_id": "1"
+            },
+            context["users"][0]
+        )
+        self.assertEqual(["1", "2"], sorted(context["tenants"].keys()))
+        self.assertEqual({"id": "1", "name": user1.project_name},
+                         context["tenants"]["1"])
+        self.assertEqual({"id": "2", "name": user3.project_name},
+                         context["tenants"]["2"])
+
     def test_cleanup(self):
         # NOTE(boris-42): Test that cleanup is not abstract
         existing_users.ExistingUsers({"task": mock.MagicMock()}).cleanup()