Features/Fixes about L2 Agent, NIC name, and MongoDB

New:
Adding support to store test_description;
Adding support to store L2 Agent version;
Remove credentials when storing RAW command;

Bugfix:
Fix the logic for fetching the NIC name

Document enhancements:
Enhance documents for storing results to MongoDB;
Adding a link to an example JSON file generated by VMTP;

Change-Id: Id9181c6ba7900cedb7b19fc8e39cd659ee129374
This commit is contained in:
Yichen Wang 2015-03-04 23:28:38 -08:00
parent dc8719279a
commit 8697935d86
9 changed files with 571 additions and 47 deletions

View File

@ -86,7 +86,7 @@ VMTP will display the results to stdout with the following data:
| | - ICMP
| | | average, min, max and stddev round trip time in ms
Detailed results can also be stored in a file in JSON format using the *--json* command line argument and/or stored directly into a MongoDB server.
Detailed results can also be stored in a file in JSON format using the *--json* command line argument and/or stored directly into a MongoDB server. See :download:`here <_static/example.json>` for an example JSON file that is generated by VMTP.
Limitations and Caveats

View File

@ -164,21 +164,21 @@ udp_loss_rate_range: [2, 5]
vm_bandwidth: 0
#######################################
# PNS MongoDB Connection information
# VMTP MongoDB Connection information
#######################################
########################################
# Default MongoDB port is 27017, to override
#pns_mongod_port: <port no>
#vmtp_mongod_port: <port no>
########################################
# MongoDB pns database.
# use "official_db" for offical runs only.
########################################
pns_db: "pnsdb"
vmtp_db: "client_db"
########################################
# MongoDB collection.
# use "officialdata" for offical runs only.
# MongoDB collection name.
########################################
pns_collection: "testdata"
vmtp_collection: "pns_web_entry"

View File

@ -0,0 +1,409 @@
{
"args": "vmtp.py -c cfg.default.yaml -r ../admin-openrc.sh -p <MASKED> --json juno_ovs_vxlan_2.json --mongod_server 172.29.87.29 --controller-node <MASKED> -d --test_description Yichen's testbed",
"auth_url": "http://172.29.87.180:5000/v2.0",
"cpu_info": "40 * Intel(R) Xeon(R) CPU E5-2660 v2 @ 2.20GHz",
"date": "2015-03-04 22:33:40",
"distro": "CentOS Linux 7",
"encapsulation": "vxlan",
"flows": [
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM same network fixed IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.1.4",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.28,
"throughput_kbps": 14318464,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.12,
"throughput_kbps": 14426352,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"rtt_ms": 0.13,
"throughput_kbps": 14247563,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.11,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 127744,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.12,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1021703,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.17,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 2496542,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.321",
"rtt_max_ms": "0.741",
"rtt_min_ms": "0.187",
"rtt_stddev": "0.212",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network fixed IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.2",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 116,
"rtt_ms": 0.67,
"throughput_kbps": 1292957,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 218,
"rtt_ms": 0.58,
"throughput_kbps": 1602299,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 606,
"rtt_ms": 0.59,
"throughput_kbps": 1583186,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.94,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 152745,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.39,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1222784,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 2.52,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1342442,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.771",
"rtt_max_ms": "1.126",
"rtt_min_ms": "0.677",
"rtt_stddev": "0.180",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-6",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network floating IP (intra-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.2",
"ip_to": "172.29.87.183",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 560,
"rtt_ms": 0.69,
"throughput_kbps": 1407148,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 184,
"rtt_ms": 0.62,
"throughput_kbps": 1475068,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 310,
"rtt_ms": 0.59,
"throughput_kbps": 1529674,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 3.62,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 153493,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 4.14,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1241424,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 4.37,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1311624,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.646",
"rtt_max_ms": "0.693",
"rtt_min_ms": "0.613",
"rtt_stddev": "0.043",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM same network fixed IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.1.5",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 99,
"rtt_ms": 0.34,
"throughput_kbps": 2340466,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 67,
"rtt_ms": 0.43,
"throughput_kbps": 2313315,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 63,
"rtt_ms": 0.32,
"throughput_kbps": 2020005,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 50.66,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 76095,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 24.04,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 920877,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 28.84,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1901142,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.657",
"rtt_max_ms": "1.555",
"rtt_min_ms": "0.331",
"rtt_stddev": "0.453",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network fixed IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.4",
"ip_to": "192.168.1.2",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 121,
"rtt_ms": 0.68,
"throughput_kbps": 1344370,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 224,
"rtt_ms": 0.61,
"throughput_kbps": 1448398,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 75,
"rtt_ms": 0.5,
"throughput_kbps": 1301634,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 1.04,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 161581,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 0.98,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1207335,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 3.82,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1330237,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.648",
"rtt_max_ms": "0.984",
"rtt_min_ms": "0.489",
"rtt_stddev": "0.175",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
},
{
"az_from": "nova:hh23-5",
"az_to": "nova:hh23-6",
"desc": "VM to VM different network floating IP (inter-node)",
"distro_id": "Ubuntu",
"distro_version": "14.04",
"ip_from": "192.168.2.4",
"ip_to": "172.29.87.183",
"results": [
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 201,
"rtt_ms": 0.65,
"throughput_kbps": 1371518,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 133,
"rtt_ms": 0.57,
"throughput_kbps": 1388169,
"tool": "nuttcp-7.3.2"
},
{
"pkt_size": 65536,
"protocol": "TCP",
"retrans": 68,
"rtt_ms": 0.56,
"throughput_kbps": 1250003,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 2.66,
"pkt_size": 128,
"protocol": "UDP",
"throughput_kbps": 148525,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 2.02,
"pkt_size": 1024,
"protocol": "UDP",
"throughput_kbps": 1174606,
"tool": "nuttcp-7.3.2"
},
{
"loss_rate": 1.12,
"pkt_size": 8192,
"protocol": "UDP",
"throughput_kbps": 1310265,
"tool": "nuttcp-7.3.2"
},
{
"protocol": "ICMP",
"rtt_avg_ms": "0.606",
"rtt_max_ms": "0.698",
"rtt_min_ms": "0.462",
"rtt_stddev": "0.086",
"rx_packets": "5",
"tool": "ping",
"tx_packets": "5"
}
]
}
],
"l2agent_type": "Open vSwitch agent",
"l2agent_version": "OVS 2.3.1",
"nic_name": "Cisco Systems Inc VIC Ethernet NIC (rev a2)",
"openstack_version": "Juno (2014.2.1)",
"test_description": "Yichen's testbed",
"version": "2.0.1"
}

View File

@ -91,6 +91,12 @@ For exchanging purposes, the image could be saved to a tar archive. You can dist
$ sudo docker save <IMAGE_ID> -o <IMAGE_FILE>
To publish the image to Docker Hub::
$ sudo docker login
$ sudo docker push $USER/vmtp:2.0.0
$ sudo docker push $USER/vmtp:latest
.. _developer_guide_of_openstack:

View File

@ -5,3 +5,5 @@ Caveats and Known Issues
* UDP throughput is not available if iperf is selected (the iperf UDP reported results are not reliable enough for iterating)
* If VMTP hangs for native hosts throughputs, check firewall rules on the hosts to allow TCP/UDP ports 5001 and TCP port 5002
* When storing the results to JSON or MongoDB, the quotes in the command-line will not be saved. In a unix-like environment, the shell processes the command line before Python ever sees it: quotes get consumed, variables get interpolated, etc. Keep this in mind when you want to execute the command stored in "*args*", and pay extra attention to any parameter that may contain quotes, like *test_description*.

View File

@ -18,8 +18,9 @@ VMTP Usage
[--bandwidth <bandwidth>] [--tcpbuf <tcp_pkt_size1,...>]
[--udpbuf <udp_pkt_size1,...>] [--no-env] [-d] [-v]
[--stop-on-error] [--vm_image_url <url_to_image>]
[--test_description <test_description>]
OpenStack VM Throughput V2.0.1
OpenStack VM Throughput V2.0.2
optional arguments:
-h, --help show this help message and exit
@ -67,6 +68,8 @@ VMTP Usage
--vm_image_url <url_to_image>
URL to a Linux image in qcow2 format that can be
downloaded from
--test_description <test_description>
The test description to be stored in JSON or MongoDB
Configuration File
@ -208,6 +211,16 @@ Run VMTP on an OpenStack cloud, fetch the details of the deployment and store it
python vmtp.py -r admin-openrc.sh -p admin --json res.json --controller-node root@192.168.12.34:admin
In addition, VMTP also supports to store the results to a MongoDB server::
python vmtp.py -r admin-openrc.sh -p admin --json res.json --mongod_server 172.29.87.29 --controller-node root@192.168.12.34:admin
Before storing info into MongoDB, some configuration changes are needed to fit your environment. By default, VMTP will store to database "client_db" with collection name "pns_web_entry"; these can be changed in the configuration file. Below are the fields which are related to accessing MongoDB::
vmtp_mongod_port
vmtp_db
vmtp_collection
Example 4: Specify which compute nodes to spawn VMs
"""""""""""""""""""""""""""""""""""""""""""""""""""

View File

@ -43,8 +43,6 @@ class Network(object):
# Store state if the network is ipv4/ipv6 dual stack
self.ipv6_enabled = False
self.agent_type = self._get_agent_type()
# If reusing existing management network just find this network
if self.config.reuse_network_name:
# An existing management network must be reused
@ -120,6 +118,9 @@ class Network(object):
config.dns_nameservers)
self.vm_int_net.append(int_net)
self.l2agent_type = self._get_l2agent_type()
self.internal_iface_dict = self._get_internal_iface_dict()
# Add both internal networks to router interface to enable network to network connectivity
self.__add_router_interface()
@ -315,7 +316,7 @@ class Network(object):
except TypeError:
print "No external router set"
def _get_agent_type(self):
def _get_l2agent_type(self):
'''
Retrieve the list of agents
return 'Linux bridge agent' or 'Open vSwitch agent' or 'Unknown agent'
@ -325,4 +326,45 @@ class Network(object):
agent_type = agent['agent_type']
if 'Linux bridge' in agent_type or 'Open vSwitch' in agent_type:
return agent_type
return 'Unknown agent'
def _get_internal_iface_dict(self):
'''
Return a dictionary with the information needed to determine which
physical interface(s) are carrying the internal (tenant) traffic,
keyed by agent hostname.
For Linux Bridge, the Neutron L2 Agent automatically publishes the
Linux Bridge configuration into the Neutron config, so the Neutron
API alone is enough to fetch the interface name.
For OVS, the Neutron L2 Agent does not push all information to the
Neutron config, so a second-step look-up is needed later in
sshutils.get_nic_name(). Here we just maintain:
In the case of VLAN:
{ '<HOSTNAME>' : '<The bridge which has the interface for internal traffic>' }
In the case of GRE/VxLAN:
{ '<HOSTNAME>' : '<IP Address of local interface>' }
'''
# One entry per L2 agent host; non-L2 agents (DHCP, L3, ...) are skipped
# because they match neither branch below.
agents = self.neutron_client.list_agents()['agents']
internal_iface_dict = {}
for agent in agents:
agent_type = agent['agent_type']
hostname = agent['host']
if 'Linux bridge' in agent_type:
# Linux Bridge publishes the physical interface name directly in
# its agent configurations.
# NOTE(review): assumes a single 'physnet1' physical network
# mapping — TODO confirm against deployments with multiple physnets.
agent_detail = self.neutron_client.show_agent(agent['id'])['agent']
ifname = agent_detail['configurations']['interface_mappings']['physnet1']
internal_iface_dict[hostname] = ifname
elif 'Open vSwitch' in agent_type:
# For OVS the stored value depends on the tenant network type of
# the first internal network created by VMTP.
network_type = self.vm_int_net[0]['provider:network_type']
agent_detail = self.neutron_client.show_agent(agent['id'])['agent']
if network_type == "vlan":
# VLAN: store the bridge holding the internal-traffic interface.
brname = agent_detail['configurations']['bridge_mappings']['physnet1']
internal_iface_dict[hostname] = brname
elif network_type == "vxlan" or network_type == 'gre':
# Tunnel overlays: store the local tunnel endpoint IP; the
# physical interface is resolved later by reverse IP lookup.
ipaddr = agent_detail['configurations']['tunneling_ip']
internal_iface_dict[hostname] = ipaddr
return internal_iface_dict

View File

@ -493,7 +493,7 @@ class SSH(object):
return (cores + " * " + model_name)
def get_nic_name(self, agent_type, encap):
def get_nic_name(self, agent_type, encap, internal_iface_dict):
'''
Get the NIC info of the controller.
@ -501,24 +501,30 @@ class SSH(object):
hardware as the compute nodes.
'''
# The internal_iface_dict is a dictionary that contains the mapping between
# hostname and the internal interface name like below:
# {u'hh23-4': u'eth1', u'hh23-5': u'eth1', u'hh23-6': u'eth1'}
cmd = "hostname"
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
hostname = std_output.strip()
if hostname in internal_iface_dict:
iface = internal_iface_dict[hostname]
else:
return "Unknown"
# Figure out which interface is for internal traffic
if 'Linux bridge' in agent_type:
cmd = "brctl show | grep 'br-inst' | awk -F' ' '{print $4}'"
# [root@gg34-2 ~]# brctl show
# bridge name bridge id STP enabled interfaces
# br-inst 8000.f872eaad26c7 no eth0
# brq8b0e63e6-d6 8000.ea1393ff32ca no eth1
# tap9e06a20b-28
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
ifname = std_output.strip()
ifname = iface
elif 'Open vSwitch' in agent_type:
if encap == 'vlan':
# [root@hh23-10 ~]# ovs-vsctl list-ports br-inst
# eth1
# phy-br-inst
cmd = 'ovs-vsctl list-ports br-inst | grep "eth"'
cmd = 'ovs-vsctl list-ports ' + iface + ' | grep -E "^[^phy].*"'
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
@ -527,20 +533,17 @@ class SSH(object):
# This is complicated. We need to first get the local IP address on
# br-tun, then do a reverse lookup to get the physical interface.
#
# [root@hh23-4 ~]# ovs-vsctl show | grep -E -m1 -o 'local_ip="[^"]+"'
# local_ip="23.23.2.14"
# [root@hh23-4 ~]# ip addr show to "23.23.2.14"
# 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
# inet 23.23.2.14/24 brd 23.23.2.255 scope global eth1
# valid_lft forever preferred_lft forever
cmd = "ovs-vsctl show | grep -E -m1 -o 'local_ip=\"[^\"]+\"'"
cmd = cmd + " | sed -r 's/local_ip=\"([^\"]+)\"/\\1/'"
cmd = "ip addr show to `" + cmd + "`"
cmd = cmd + " | grep -E -m1 -o 'eth[0-9]+'"
cmd = "ip addr show to " + iface + " | awk -F: '{print $2}'"
(status, std_output, _) = self.execute(cmd)
if status:
return "Unknown"
ifname = std_output.strip()
else:
return "Unknown"
cmd = 'ethtool -i ' + ifname + ' | grep bus-info'
(status, std_output, _) = self.execute(cmd)
@ -556,6 +559,27 @@ class SSH(object):
return (nic_name)
def get_l2agent_version(self, agent_type):
    '''Return the version string of the Neutron L2 agent on this host.

    agent_type -- agent type as reported by Neutron, e.g.
                  'Linux bridge agent' or 'Open vSwitch agent'

    Returns a string like "Linux Bridge 2.3.1" or "OVS 2.3.1", or
    "Unknown" when the agent type is unrecognized or the remote
    version command fails.
    '''
    # Pick the shell probe and the human-readable prefix for the result.
    if 'Linux bridge' in agent_type:
        probe = "brctl --version | awk -F',' '{print $2}'"
        prefix = "Linux Bridge "
    elif 'Open vSwitch' in agent_type:
        probe = "ovs-vsctl --version | awk -F')' '{print $2}'"
        prefix = "OVS "
    else:
        return "Unknown"
    status, output, _ = self.execute(probe)
    # Non-zero status means the probe failed on the remote host.
    return "Unknown" if status else prefix + output.strip()
##################################################
@ -565,6 +589,7 @@ class SSH(object):
def main():
# ssh = SSH('localadmin', '172.29.87.29', key_filename='./ssh/id_rsa')
ssh = SSH('localadmin', '172.22.191.173', key_filename='./ssh/id_rsa')
print 'ID=' + ssh.distro_id
print 'ID_LIKE=' + ssh.distro_id_like
print 'VERSION_ID=' + ssh.distro_version
@ -574,7 +599,7 @@ def main():
# print ssh.stat('/tmp')
print ssh.check_openstack_version()
print ssh.get_cpu_info()
# print ssh.get_nic_name("Linux bridge", "vxlan")
print ssh.get_l2agent_version("Open vSwitch agent")
if __name__ == "__main__":
main()

61
vmtp.py
View File

@ -39,7 +39,7 @@ from neutronclient.v2_0 import client as neutronclient
from novaclient.client import Client
from novaclient.exceptions import ClientException
__version__ = '2.0.1'
__version__ = '2.0.2'
from perf_instance import PerfInstance as PerfInstance
@ -109,7 +109,7 @@ class ResultsCollector(object):
def pprint(self, res):
self.ppr.pprint(res)
def get_controller_info(self, cfg):
def get_controller_info(self, cfg, net):
if cfg.ctrl_username and cfg.ctrl_host:
print 'Fetching OpenStack deployment details...'
if cfg.ctrl_password:
@ -125,14 +125,30 @@ class ResultsCollector(object):
self.results['distro'] = sshcon.get_host_os_version()
self.results['openstack_version'] = sshcon.check_openstack_version()
self.results['cpu_info'] = sshcon.get_cpu_info()
if 'agent_type' in self.results and 'encapsulation' in self.results:
if 'l2agent_type' in self.results and 'encapsulation' in self.results:
self.results['nic_name'] = sshcon.get_nic_name(
self.results['agent_type'], self.results['encapsulation'])
self.results['l2agent_type'], self.results['encapsulation'],
net.internal_iface_dict)
self.results['l2agent_version'] = sshcon.get_l2agent_version(
self.results['l2agent_type'])
else:
self.results['nic_name'] = "Unknown"
else:
print 'ERROR: Cannot connect to the controller node.'
def mask_credentials(self):
    '''Mask credential-bearing values in self.results['args'].

    Replaces the value following each sensitive command-line flag with
    the literal string '<MASKED>' so passwords and host credentials are
    never written to the JSON file or MongoDB. No-op when 'args' is
    missing or empty.
    '''
    # .get() so a results dict without 'args' is a silent no-op
    # instead of raising KeyError.
    args = self.results.get('args')
    if not args:
        return

    # Flags whose argument may carry credentials: '-p' is the password,
    # the host flags may embed 'user@host:password'.
    # (Renamed from 'list'/'string', which shadowed the builtin and the
    # stdlib module name.)
    sensitive_flags = ['-p', '--host', '--external-host', '--controller-node']
    for keyword in sensitive_flags:
        # Replace "<flag> <value>" with "<flag> <MASKED>".
        args = re.sub(keyword + r'\s+\S+', keyword + ' <MASKED>', args)

    self.results['args'] = args
def save(self, cfg):
'''Save results in json format file.'''
print('Saving results in json file: ' + cfg.json_file + "...")
@ -143,10 +159,10 @@ class ResultsCollector(object):
'''Save results to MongoDB database.'''
print "Saving results to MongoDB database..."
post_id = pns_mongo.\
pns_add_test_result_to_mongod(cfg.pns_mongod_ip,
cfg.pns_mongod_port,
cfg.pns_db,
cfg.pns_collection,
pns_add_test_result_to_mongod(cfg.vmtp_mongod_ip,
cfg.vmtp_mongod_port,
cfg.vmtp_db,
cfg.vmtp_collection,
self.results)
if post_id is None:
print "ERROR: Failed to add result to DB"
@ -177,7 +193,6 @@ class VmtpTest(object):
self.sec_group = None
self.image_instance = None
self.flavor_type = None
self.agent_type = None
# Create an instance on a particular availability zone
def create_instance(self, inst, az, int_net):
@ -233,8 +248,8 @@ class VmtpTest(object):
self.flavor_type = self.comp.find_flavor(config.flavor_type)
self.net = network.Network(neutron, config)
rescol.add_property('agent_type', self.net.agent_type)
print "OpenStack agent: " + self.net.agent_type
rescol.add_property('l2agent_type', self.net.l2agent_type)
print "OpenStack agent: " + self.net.l2agent_type
try:
network_type = self.net.vm_int_net[0]['provider:network_type']
print "OpenStack network type: " + network_type
@ -618,6 +633,11 @@ if __name__ == '__main__':
help='URL to a Linux image in qcow2 format that can be downloaded from',
metavar='<url_to_image>')
parser.add_argument('--test_description', dest='test_description',
action='store',
help='The test description to be stored in JSON or MongoDB',
metavar='<test_description>')
(opts, args) = parser.parse_known_args()
@ -667,17 +687,23 @@ if __name__ == '__main__':
else:
config.vm_image_url = None
###################################################
# Test Description
###################################################
if opts.test_description:
rescol.add_property('test_description', opts.test_description)
###################################################
# MongoDB Server connection info.
###################################################
if opts.mongod_server:
config.pns_mongod_ip = opts.mongod_server
config.vmtp_mongod_ip = opts.mongod_server
else:
config.pns_mongod_ip = None
config.vmtp_mongod_ip = None
if 'pns_mongod_port' not in config:
if 'vmtp_mongod_port' not in config:
# Set MongoDB default port if not set.
config.pns_mongod_port = 27017
config.vmtp_mongod_port = 27017
# the bandwidth limit for VMs
if opts.vm_bandwidth:
@ -801,10 +827,11 @@ if __name__ == '__main__':
# If saving the results to JSON or MongoDB, get additional details:
if config.json_file or config.pns_mongod_ip:
rescol.get_controller_info(config)
rescol.get_controller_info(config, vmtp.net)
rescol.mask_credentials()
if config.json_file:
rescol.save(config)
if config.pns_mongod_ip:
if config.vmtp_mongod_ip:
rescol.save_to_db(config)