NFP - Devstack and Diskimage Create Scripts

Change-Id: I2f49329206a925d2fd5b3935d21f0bde0b95e42e
Implements: blueprint gbp-network-services-framework
Co-Authored-By: Deepak S <in.live.in@live.in>
Co-Authored-By: Yogesh Rajmane <yogesh.rajmane@oneconvergence.com>
Co-Authored-By: DhuldevValekar <dhuldev.valekar@oneconvergence.com>
Co-Authored-By: ashutosh mishra <mca.ashu4@gmail.com>
This commit is contained in:
Rajendra Machani
2016-04-21 23:49:44 +05:30
committed by ashutosh mishra
parent 623e6fafe1
commit 1af3c0e892
20 changed files with 1107 additions and 0 deletions

79
devstack/README-NFP Normal file
View File

@@ -0,0 +1,79 @@
Fresh Installation Steps:
=========================
(1) Clone stable mitaka devstack.
# git clone https://git.openstack.org/openstack-dev/devstack -b stable/mitaka
(2) Get local.conf.nfp from NFP devstack patch and copy to devstack directory
as local.conf
# cd devstack/
# wget -O local.conf.zip https://review.openstack.org/cat/309145,103,devstack/local.conf.nfp
# unzip local.conf.zip
# mv local.conf*.nfp local.conf
# rm local.conf.zip
(3) Configure local.conf
# Edit local.conf to point HOST_IP to the devstack setup IP address
# Modify the GBPSERVICE_BRANCH to point to the top patch in devstack changeset
(4) Install devstack.
# ./stack.sh
Re-installation Steps:
======================
(1) Follow these steps for cleanup.
# cd devstack
# ./unstack.sh
# ./clean.sh
# sudo rm -rf /opt/stack
# cd ..
# sudo rm -rf devstack
(2) Follow the fresh installation steps.
Steps to test Base Mode:
========================
(1) Create a test chain with the service VM.
# cd /opt/stack/gbp/devstack/exercises/nfp_service
# bash lb_base.sh
(2) Login to the UI, create one member in consumer group and two members
in provider group. Consumer member is HTTP client and provider members
should have webserver deployed.
(3) Test loadbalancer with traffic from consumer VM getting loadbalanced
between the two provider members.
(4) Delete members created in the consumer and provider groups in step 2.
(5) Delete the test chain.
# cd /opt/stack/gbp/devstack/exercises/nfp_service
# bash lb_base_clean.sh
Steps to test Base Mode with VM:
================================
(1) Create a test chain with the service VM.
# cd /opt/stack/gbp/devstack/exercises/nfp_service
# bash fw_base_vm.sh
(2) Login to the UI, create a member in the consumer and provider groups.
(3) Test firewall with traffic from consumer VM.
(4) Log-in into service VM
# cd /opt/stack/gbp/gbpservice/tests/contrib/diskimage-create/output
# sudo ip netns exec nfp-proxy /bin/bash
# ssh -i ./nfp_reference_service_<timestamp> ubuntu@<nfp_service mgmt-ip>
(5) Delete members created in the consumer and provider groups in step 2.
(6) Delete the test chain.
# cd /opt/stack/gbp/devstack/exercises/nfp_service
# bash fw_base_vm_clean.sh

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Create a firewall-in-service-VM test chain using GBP:
# service chain node + spec, policy actions/classifiers/rules/rule-set,
# and E-W consumer/provider groups.  Torn down by fw_base_vm_clean.sh.
source /home/stack/devstack/openrc neutron service
#service chain node and spec creation
# The custom_json config defines the firewall rules pushed to the service VM:
# log tcp/80 and tcp/8080, accept tcp/22 and icmp.
gbp servicechain-node-create --service-profile base_mode_fw_vm --config 'custom_json:{"mimetype": "config/custom+json","rules": [{"action": "log", "name": "tcp", "service": "tcp/80"}, {"action": "log", "name": "tcp", "service": "tcp/8080"}, {"action": "accept", "name": "tcp", "service": "tcp/22"}, {"action": "accept", "name": "icmp", "service": "icmp"}]}' FWNODE
gbp servicechain-spec-create --nodes "FWNODE" fw-chainspec
# Redirect action, rule, classifier and rule-set
gbp policy-action-create --action-type REDIRECT --action-value fw-chainspec redirect-to-fw
gbp policy-action-create --action-type ALLOW allow-to-fw
gbp policy-classifier-create --protocol tcp --direction bi fw-web-classifier-tcp
gbp policy-classifier-create --protocol udp --direction bi fw-web-classifier-udp
gbp policy-classifier-create --protocol icmp --direction bi fw-web-classifier-icmp
# The tcp classifier is reused for both the REDIRECT and the ALLOW rule.
gbp policy-rule-create --classifier fw-web-classifier-tcp --actions redirect-to-fw fw-web-redirect-rule
gbp policy-rule-create --classifier fw-web-classifier-tcp --actions allow-to-fw fw-web-allow-rule-tcp
gbp policy-rule-create --classifier fw-web-classifier-udp --actions allow-to-fw fw-web-allow-rule-udp
gbp policy-rule-create --classifier fw-web-classifier-icmp --actions allow-to-fw fw-web-allow-rule-icmp
gbp policy-rule-set-create --policy-rules "fw-web-redirect-rule fw-web-allow-rule-tcp fw-web-allow-rule-udp fw-web-allow-rule-icmp" fw-webredirect-ruleset
#provider, consumer E-W groups creation
gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None"
gbp group-create fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None"

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Tear down every resource created by fw_base_vm.sh, in reverse
# dependency order: groups first, then rule-set, rules, classifiers,
# actions, and finally the service chain spec and node.
source /home/stack/devstack/openrc neutron service
gbp group-delete fw-provider
gbp group-delete fw-consumer
gbp policy-rule-set-delete fw-webredirect-ruleset
gbp policy-rule-delete fw-web-redirect-rule
gbp policy-rule-delete fw-web-allow-rule-tcp
gbp policy-rule-delete fw-web-allow-rule-icmp
gbp policy-rule-delete fw-web-allow-rule-udp
gbp policy-classifier-delete fw-web-classifier-tcp
gbp policy-classifier-delete fw-web-classifier-icmp
gbp policy-classifier-delete fw-web-classifier-udp
gbp policy-action-delete redirect-to-fw
gbp policy-action-delete allow-to-fw
gbp servicechain-spec-delete fw-chainspec
gbp servicechain-node-delete FWNODE

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Create a haproxy loadbalancer test chain (base mode, no service VM):
# service chain node + spec from a Heat template, REDIRECT policy,
# a network service policy allocating a VIP, and consumer/provider PTGs.
# Torn down by lb_base_clean.sh.  Note: runs as demo/demo, unlike the
# firewall exercises which run as neutron/service.
source /home/stack/devstack/openrc demo demo
# Service chain node and spec creation
gbp servicechain-node-create --service-profile base_mode_lb --template-file ./templates/haproxy_base_mode.template LBNODE
gbp servicechain-spec-create --nodes "LBNODE" lb_chainspec
# REDIRECT action, classifier, rule and rule-set
gbp policy-action-create --action-type REDIRECT --action-value lb_chainspec redirect-to-lb
gbp policy-classifier-create --protocol tcp --direction bi lb-webredirect
gbp policy-rule-create --classifier lb-webredirect --actions redirect-to-lb lb-web-redirect-rule
gbp policy-rule-set-create --policy-rules "lb-web-redirect-rule" lb-webredirect-ruleset
# Network service policy
# ip_single/self_subnet reserves a single VIP address on the provider
# group's own subnet.
gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet lb_nsp
# Consumer PTG
gbp group-create lb-consumer --consumed-policy-rule-sets "lb-webredirect-ruleset=None"
# Provider PTG
gbp group-create lb-provider --provided-policy-rule-sets "lb-webredirect-ruleset=None" --network-service-policy lb_nsp

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Tear down every resource created by lb_base.sh, in reverse
# dependency order.  Policy targets (VM ports) attached to the PTGs
# must be removed by the operator first, or the group deletes fail.
source /home/stack/devstack/openrc demo demo
echo "Make sure that policy-targets associated to PTGs are deleted!!"
# Delete PTG
gbp group-delete lb-consumer
gbp group-delete lb-provider
# Delete network service policy
gbp network-service-policy-delete lb_nsp
# Delete rule-set
gbp policy-rule-set-delete lb-webredirect-ruleset
# Delete rules
gbp policy-rule-delete lb-web-redirect-rule
# Delete classifier
gbp policy-classifier-delete lb-webredirect
# Delete actions
gbp policy-action-delete redirect-to-lb
# Delete service chain node and specs
gbp servicechain-spec-delete lb_chainspec
gbp servicechain-node-delete LBNODE

View File

@@ -0,0 +1,65 @@
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description": "Template to test Haproxy Loadbalacer service",
"Parameters": {
"Subnet": {
"Description": "Pool Subnet CIDR, on which VIP port should be created",
"Type": "String"
},
"vip_ip": {
"Description": "VIP IP Address",
"Type": "String"
},
"service_chain_metadata": {
"Description": "sc metadata",
"Type": "String"
}
},
"Resources" : {
"HttpHM": {
"Type": "OS::Neutron::HealthMonitor",
"Properties": {
"admin_state_up": true,
"delay": 20,
"expected_codes": "200",
"http_method": "GET",
"max_retries": 3,
"timeout": 10,
"type": "HTTP",
"url_path": "/"
}
},
"HaproxyPool": {
"Type": "OS::Neutron::Pool",
"Properties": {
"admin_state_up": true,
"description": "Haproxy pool from teplate",
"lb_method": "ROUND_ROBIN",
"monitors": [{"Ref":"HttpHM"}],
"name": "Haproxy pool",
"provider": "haproxy",
"protocol": "HTTP",
"subnet_id": {"Ref":"Subnet"},
"vip": {
"subnet": {"Ref":"Subnet"},
"address": {"Ref":"vip_ip"},
"name": "Haproxy vip",
"description": {"Ref":"service_chain_metadata"},
"protocol_port": 80,
"connection_limit": -1,
"admin_state_up": true
}
}
},
"HaproxyLb": {
"Type": "OS::Neutron::LoadBalancer",
"Properties": {
"pool_id": {"Ref":"HaproxyPool"},
"protocol_port": 80
}
}
}
}

235
devstack/lib/nfp Normal file
View File

@@ -0,0 +1,235 @@
#!/bin/bash
# devstack/lib/nfp - devstack library for NFP (Network Function
# Plugin).  Sourced by the gbp devstack plugin when ENABLE_NFP=True.
# Relies on devstack globals: $DEST, $Q_PLUGIN_CONF_FILE, and the
# iniset/git_clone/setup_develop/run_process helper functions.
NFPSERVICE_DIR=$DEST/gbp
DISK_IMAGE_DIR=$DEST/gbp/gbpservice/tests/contrib
TOP_DIR=$PWD
NEUTRON_CONF_DIR=/etc/neutron
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
NFP_CONF_DIR=/etc/nfp
echo "TOP-DIR-NFP : $PWD"
# Install the prerequisites for building the NFP service VM disk image:
# python deps for the diskimage-create tool plus qemu-utils.
# NOTE(review): --force-yes is deprecated on newer apt; works on the
# Ubuntu releases this targets.
function prepare_nfp_image_builder {
#setup_develop $NFPSERVICE_DIR
sudo -H -E pip install -r $DISK_IMAGE_DIR/diskimage-create/requirements.txt
sudo apt-get install -y --force-yes qemu-utils
}
# Initialize the GBP service for NFP: run DB migrations and point
# neutron's policy_dirs at the NFP policy directory (/etc/nfp).
function init_nfpgbpservice {
# Run GBP db migrations
gbp-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
iniset $NEUTRON_CONF DEFAULT policy_dirs $NFP_CONF_DIR
}
# Clone and pip-develop-install the gbpservice tree, and swap in the
# NFP policy.json (original preserved as policy.json.original).
# test-requirements.txt is renamed around setup_develop — presumably to
# keep it from installing test dependencies; TODO confirm.
function install_nfpgbpservice {
git_clone $GBPSERVICE_REPO $NFPSERVICE_DIR $GBPSERVICE_BRANCH
mv $NFPSERVICE_DIR/test-requirements.txt $NFPSERVICE_DIR/_test-requirements.txt
setup_develop $NFPSERVICE_DIR
# '; true' tolerates a missing policy.json (mv error is discarded).
mv -f $NEUTRON_CONF_DIR/policy.json $NEUTRON_CONF_DIR/policy.json.original 2>/dev/null; true
cp -f $NFPSERVICE_DIR/etc/policy.json $NEUTRON_CONF_DIR/policy.json
mv $NFPSERVICE_DIR/_test-requirements.txt $NFPSERVICE_DIR/test-requirements.txt
}
# Build the NFP service VM image, upload it to glance and create the
# m1.nfp-tiny flavor.  The builder writes the image path to
# /tmp/nfp_image_path, which is also read later by
# create_nfp_gbp_resources.
function create_nfp_image {
# NOTE(review): self-assignment is a no-op; other functions here take
# TOP_DIR as $1, this one does not.
TOP_DIR=$TOP_DIR
sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/conf.json
BUILT_IMAGE_PATH=$(cat /tmp/nfp_image_path)
upload_image file://$BUILT_IMAGE_PATH $TOKEN
openstack --os-cloud=devstack-admin flavor create --ram 512 --disk 3 --vcpus 1 m1.nfp-tiny
}
# Grant the nova user the 'service' role and the neutron user the
# 'admin' role in the service tenant, using the (deprecated) keystone
# v2 CLI.
# NOTE(review): 'grep "service"' against role-list/tenant-list matches
# any row containing the substring — fragile if names overlap.
function assign_user_role_credential {
TOP_DIR=$TOP_DIR
source $TOP_DIR/openrc admin admin
#set -x
serviceTenantID=`keystone tenant-list | grep "service" | awk '{print $2}'`
serviceRoleID=`keystone role-list | grep "service" | awk '{print $2}'`
adminRoleID=`keystone role-list | grep "admin" | awk '{print $2}'`
keystone user-role-add --user nova --tenant $serviceTenantID --role $serviceRoleID
keystone user-role-add --user neutron --tenant $serviceTenantID --role $adminRoleID
}
# Remove any leftover nfp-proxy plumbing from a previous run: the
# nfp-proxy namespace, the pt1 veth endpoint, and its br-int ovs port.
# Each step is conditional on the artifact actually existing, so this
# is safe to call on a clean host.  Always runs before namespace_create.
function namespace_delete {
TOP_DIR=$TOP_DIR
source $TOP_DIR/openrc neutron service
#Deletion namespace
NFP_P=`sudo ip netns | grep "nfp-proxy"`
if [ ${#NFP_P} -ne 0 ]; then
sudo ip netns delete nfp-proxy
echo "namespace removed"
fi
#Delete veth peer
PEER=`ip a | grep pt1`
if [ ${#PEER} -ne 0 ]; then
echo "veth peer removed"
sudo ip link delete pt1
fi
#pt1 port removing from ovs
PORT=`sudo ovs-vsctl show | grep "pt1"`
if [ ${#PORT} -ne 0 ]; then
sudo ovs-vsctl del-port br-int pt1
# NOTE(review): message says "ptr1" but the port is named pt1.
echo "ovs port ptr1 is removed"
fi
echo "nfp-proxy cleaning success.... "
}
# Create the nfp-proxy namespace, connect it to br-int via a veth pair
# (pt0 inside the namespace, pt1 on the bridge), give it a port on the
# service management network, tag the ovs port with the network's VLAN
# tag, then start nfp_proxy inside the namespace (blocks; run under
# run_process).
# Arguments: $1 - TOP_DIR (unused here; openrc is already sourced by
#                 namespace_delete, which always runs first)
#            $2 - IP address the proxy must be able to reach
function namespace_create {
TOP_DIR=$TOP_DIR
#doing it in namespace_delete, so no need to do it again
#source $1/openrc neutron service
SERVICE_MGMT_NET="l2p_svc_management_ptg"
cidr="/24"
echo "Creating new namespace nfp-proxy...."
# BUG FIX: 'ip netns add' and 'ip link add' print nothing on success
# and report errors on *stderr*, so the previously captured stdout was
# empty in both cases and failures were never detected.  Check the
# exit status instead, and exit non-zero on failure (was 'exit 0',
# which reported success to the caller).
if sudo ip netns add nfp-proxy; then
echo "New namespace nfp-proxy created"
else
echo "nfp-proxy creation failed"
exit 1
fi
#Create veth peer
if sudo ip link add pt0 type veth peer name pt1; then
echo "New veth pair created"
else
echo "veth pair creation failed"
exit 1
fi
sleep 1
#move one side of veth into namespace
sudo ip link set pt0 netns nfp-proxy
#create new neutron port in service mgmt network
new_ip=`neutron port-create $SERVICE_MGMT_NET | grep "fixed_ips" | awk '{print $7}' | sed 's/^\"\(.*\)\"}$/\1/'`
if [ ${#new_ip} -lt 5 ]; then
echo "new_ip =$new_ip"
echo "Neutron port creation failed (check source) "
exit 1
else
echo "New Neutron Port Created on Service management network with ip =$new_ip"
fi
new_ip_cidr+="$new_ip/24"
sleep 2
#get the ip address of new port eg : 11.0.0.6 and assign to namespace
sudo ip netns exec nfp-proxy ip addr add $new_ip_cidr dev pt0
#move other side of veth into ovs : br-int
sudo ovs-vsctl add-port br-int pt1
#get id of service management network
smn_id=`neutron net-list | grep "$SERVICE_MGMT_NET" | awk '{print $2}'`
#get the dhcp namespace of service management network
nm_space=`sudo ip netns | grep "$smn_id"`
#get port id from dhcp namespace
port=`sudo ip netns exec $nm_space ip a | grep "tap" | tail -n 1 | awk '{print $7}'`
#get tag_id from port in ovs-bridge
tag_id=`sudo ovs-vsctl list port $port | grep "tag" | tail -n 1 | awk '{print $3}'`
sudo ovs-vsctl set port pt1 tag=$tag_id
#up both ports
sudo ip netns exec nfp-proxy ip link set pt0 up
sudo ip netns exec nfp-proxy ip link set lo up
sudo ip link set pt1 up
# BUG FIX: ping output was redirected to /dev/null, so the captured
# string was always empty and the reachability check always reported
# success.  Use ping's exit status instead.
if sudo ip netns exec nfp-proxy ping -q -c 2 "$2" > /dev/null; then
echo "nfp-proxy namespace creation success and reaching to $2"
else
echo "Fails reaching to $2"
fi
sudo ip netns exec nfp-proxy /usr/bin/nfp_proxy --config-file=/etc/nfp_proxy.ini
}
# Create the NFP base GBP resources: a floating-ip network service
# policy, the LB (haproxy, no VM) and FW (service VM) profiles, and the
# service management PTG.  When the image build was enabled, the image
# name/flavor come from the freshly built image recorded in
# /tmp/nfp_image_path; otherwise a pre-existing image name is assumed.
function create_nfp_gbp_resources {
TOP_DIR=$TOP_DIR
source $TOP_DIR/openrc neutron service
if [[ $DISABLE_BUILD_IMAGE = False ]]; then
IMAGE_PATH=$(cat /tmp/nfp_image_path)
IMAGE_NAME=`basename "$IMAGE_PATH"`
# Strip the file extension; glance image names omit it.
IMAGE_NAME_FLAT="${IMAGE_NAME%.*}"
FLAVOR=m1.nfp-tiny
else
IMAGE_NAME_FLAT="reference_configurator_image"
FLAVOR=m1.small
fi
gbp network-service-policy-create --network-service-params type=ip_pool,name=vip_ip,value=nat_pool svc_mgmt_fip_policy
gbp service-profile-create --servicetype LOADBALANCER --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy,device_type=None --vendor NFP base_mode_lb
gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=nfp,device_type=nova,image_name=$IMAGE_NAME_FLAT,flavor=$FLAVOR --vendor NFP base_mode_fw_vm
gbp group-create svc_management_ptg --service_management True
}
# Walk svc_management_ptg -> l2 policy -> l3 policy -> router and store
# the router id in the global RouterId, consumed later by
# copy_nfp_files_and_start_process.
function get_router_namespace {
TOP_DIR=$TOP_DIR
source $TOP_DIR/openrc neutron service
GROUP="svc_management_ptg"
echo "GroupName: $GROUP"
l2p_id=`gbp ptg-show svc_management_ptg | grep l2_policy_id | awk '{print $4}'`
l3p_id=`gbp l2p-show $l2p_id | grep l3_policy_id | awk '{print $4}'`
RouterId=`gbp l3p-show $l3p_id | grep routers | awk '{print $4}'`
}
# Install the nfp binaries/configs onto the host and start the four NFP
# processes under devstack screens: orchestrator, proxy_agent, the
# proxy itself (inside the nfp-proxy namespace, via
# namespace_delete/namespace_create), and the base configurator.
# Finally re-run the GBP DB migrations.  Requires $RouterId to have
# been set by get_router_namespace.
function copy_nfp_files_and_start_process {
TOP_DIR=$TOP_DIR
cd /opt/stack/gbp/gbpservice/nfp
sudo cp -r bin/nfp /usr/bin/
sudo chmod +x /usr/bin/nfp
sudo rm -rf /etc/nfp_*
sudo cp -r bin/nfp_orchestrator.ini /etc/
sudo cp -r bin/nfp_proxy_agent.ini /etc/
sudo cp -r bin/nfp_proxy.ini /etc/nfp_proxy.ini
sudo cp -r bin/nfp_proxy /usr/bin/
IpAddr=127.0.0.1
echo "Configuring proxy.ini .... with rest_server_address as $IpAddr"
sudo sed -i "s/rest_server_address=*.*/rest_server_address=$IpAddr/g" /etc/nfp_proxy.ini
sudo sed -i "s/rest_server_port= *.*/rest_server_port=8080/g" /etc/nfp_proxy.ini
# NOTE(review): ipnetns_router is computed but never used below.
ipnetns_router=`sudo ip netns |grep $RouterId`
# Rewrite the exercise scripts' 'source' line to this devstack tree.
sed -i 's#source.*#source '$TOP_DIR'/openrc demo demo#g' /opt/stack/gbp/devstack/exercises/nfp_service/*.sh
source $TOP_DIR/functions-common
echo "Starting orchestrator >>>> under screen named : orchestrator"
run_process nfp_orchestrator "sudo /usr/bin/nfp --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/nfp_orchestrator.ini --log-file /opt/stack/logs/nfp_orchestrator.log"
sleep 1
echo "Starting proxy_agent >>>> under screen named : proxy_agent"
run_process nfp_proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file /opt/stack/logs/nfp_proxy_agent.log"
sleep 1
echo "Starting proxy server under Namespace : nfp-proxy namespace >>>> under screen named : proxy"
run_process nfp_proxy "source /opt/stack/gbp/devstack/lib/nfp;namespace_delete $TOP_DIR;namespace_create $TOP_DIR $IpAddr"
sleep 10
cd pecan/api
sudo python setup.py develop
echo "Starting base_configurator >>>> under screen named : base_configurator"
run_process nfp_base_configurator "cd /opt/stack/gbp/gbpservice/nfp/pecan/api;sudo ip netns exec nfp-proxy pecan configurator_decider config.py --mode base"
sleep 1
echo "Running gbp-db-manage"
source $TOP_DIR/openrc neutron service
gbp-db-manage --config-file /etc/neutron/neutron.conf upgrade head
sleep 2
echo "Configuration success ... "
}

25
devstack/local.conf.nfp Normal file
View File

@@ -0,0 +1,25 @@
[[local|localrc]]
# Sample devstack local.conf for an NFP-enabled GBP deployment.
DEST=/opt/stack
# Logging
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=False
SCREEN_LOGDIR=$DEST/logs/screen
# Credentials
ADMIN_PASSWORD=mysecret
MYSQL_PASSWORD=mysqlsecret
RABBIT_PASSWORD=myrabbitsecret
SERVICE_PASSWORD=$ADMIN_PASSWORD
SERVICE_TOKEN=admin
# Must be set to this host's IP address before stacking.
HOST_IP=
# Gerrit ref of the GBP changeset to deploy (see README-NFP step 3).
GBPSERVICE_BRANCH=refs/changes/45/309145/103
enable_plugin gbp https://git.openstack.org/openstack/group-based-policy $GBPSERVICE_BRANCH
ENABLE_NFP=True
#e.g. IMAGE_URLS+=http://192.168.6.1/images/cirros-0.3.0-x86_64-disk.img #image extension should be .img OR .img.gz
#IMAGE_URLS+=
# Set True to skip building the NFP service VM image during stack.
DISABLE_BUILD_IMAGE=False

View File

@@ -1,4 +1,5 @@
GBP="Group-Based Policy"
[[ $ENABLE_NFP = True ]] && NFP="Network Function Plugin"
function gbp_configure_nova {
iniset $NOVA_CONF neutron allow_duplicate_networks "True"
@@ -25,29 +26,62 @@ function gbp_configure_neutron {
iniset $NEUTRON_CONF quotas quota_floatingip "-1"
}
# Configure neutron.conf for NFP: service-tenant credentials, the GBP
# policy/node drivers, the NFP node driver's plumbing owner, and the
# implicit-policy IP pools.
function nfp_configure_neutron {
iniset $NEUTRON_CONF keystone_authtoken admin_tenant_name "service"
iniset $NEUTRON_CONF keystone_authtoken admin_user "neutron"
iniset $NEUTRON_CONF keystone_authtoken admin_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF group_policy policy_drivers "implicit_policy,resource_mapping,chain_mapping"
iniset $NEUTRON_CONF node_composition_plugin node_plumber "admin_owned_resources_apic_plumber"
iniset $NEUTRON_CONF node_composition_plugin node_drivers "nfp_node_driver"
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_user "neutron"
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF admin_owned_resources_apic_tscp plumbing_resource_owner_tenant_name "service"
iniset $NEUTRON_CONF group_policy_implicit_policy default_ip_pool "11.0.0.0/8"
iniset $NEUTRON_CONF group_policy_implicit_policy default_proxy_ip_pool "192.169.0.0/16"
iniset $NEUTRON_CONF group_policy_implicit_policy default_external_segment_name "default"
iniset $NEUTRON_CONF nfp_node_driver is_service_admin_owned "True"
iniset $NEUTRON_CONF nfp_node_driver svc_management_ptg_name "svc_management_ptg"
}
# Process contract
if is_service_enabled group-policy; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
echo_summary "Preparing $GBP"
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing $GBP"
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Installing $NFP"
[[ $DISABLE_BUILD_IMAGE = False ]] && prepare_nfp_image_builder
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring $GBP"
gbp_configure_nova
gbp_configure_heat
gbp_configure_neutron
[[ $ENABLE_NFP = True ]] && echo_summary "Configuring $NFP"
[[ $ENABLE_NFP = True ]] && nfp_configure_neutron
# install_apic_ml2
# install_aim
# init_aim
install_gbpclient
install_gbpservice
[[ $ENABLE_NFP = True ]] && install_nfpgbpservice
init_gbpservice
[[ $ENABLE_NFP = True ]] && init_nfpgbpservice
install_gbpheat
install_gbpui
stop_apache_server
start_apache_server
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
echo_summary "Initializing $GBP"
if [[ $ENABLE_NFP = True ]]; then
echo_summary "Initializing $NFP"
[[ $DISABLE_BUILD_IMAGE = False ]] && create_nfp_image
assign_user_role_credential
create_nfp_gbp_resources
get_router_namespace
copy_nfp_files_and_start_process
fi
fi
if [[ "$1" == "unstack" ]]; then

View File

@@ -1,5 +1,8 @@
# Make sure the plugin name in local.conf is "gbp", as in: enable_plugin gbp <remote> <branch>
source $DEST/gbp/devstack/lib/gbp
ENABLE_NFP=${ENABLE_NFP:-False}
[[ $ENABLE_NFP = True ]] && source $DEST/gbp/devstack/lib/nfp
[[ $ENABLE_NFP = True ]] && DISABLE_BUILD_IMAGE=${DISABLE_BUILD_IMAGE:-False}
# Enable necessary Neutron plugins, including group_policy and ncp
Q_SERVICE_PLUGIN_CLASSES=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,group_policy,ncp
@@ -33,6 +36,8 @@ enable_service neutron
enable_service group-policy
disable_service tempest
ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
# NFP services
enable_service nfp_orchestrator,nfp_proxy,nfp_proxy_agent,nfp_base_configurator
# Deployment preferences
SYSLOG=${SYSLOG:-True}

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env bash
# **fw_vm.sh**
# Sanity check that firewall(in service VM) service is created with NFP
echo "*********************************************************************"
echo "Begin NFP Exercise: $0"
echo "*********************************************************************"
# Settings
# ========
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
# NOTE: devstack's copy_nfp_files_and_start_process rewrites this line
# to source the deployment's own openrc.
source $TOP_DIR/openrc neutron service
# Create the firewall test chain: node + spec, actions, classifiers,
# rules, rule-set, and E-W consumer/provider groups.  Group creation
# triggers the service chain instantiation.
create_gbp_resources() {
gbp servicechain-node-create --service-profile base_mode_fw_vm --config 'custom_json:{"mimetype": "config/custom+json","rules": [{"action": "log", "name": "tcp", "service": "tcp/80"}, {"action": "log", "name": "tcp", "service": "tcp/8080"}, {"action": "accept", "name": "tcp", "service": "tcp/22"}, {"action": "accept", "name": "icmp", "service": "icmp"}]}' FWNODE
gbp servicechain-spec-create --nodes "FWNODE" fw-chainspec
gbp policy-action-create --action-type REDIRECT --action-value fw-chainspec redirect-to-fw
gbp policy-action-create --action-type ALLOW allow-to-fw
gbp policy-classifier-create --protocol tcp --direction bi fw-web-classifier-tcp
gbp policy-classifier-create --protocol udp --direction bi fw-web-classifier-udp
gbp policy-classifier-create --protocol icmp --direction bi fw-web-classifier-icmp
gbp policy-rule-create --classifier fw-web-classifier-tcp --actions redirect-to-fw fw-web-redirect-rule
gbp policy-rule-create --classifier fw-web-classifier-tcp --actions allow-to-fw fw-web-allow-rule-tcp
gbp policy-rule-create --classifier fw-web-classifier-udp --actions allow-to-fw fw-web-allow-rule-udp
gbp policy-rule-create --classifier fw-web-classifier-icmp --actions allow-to-fw fw-web-allow-rule-icmp
gbp policy-rule-set-create --policy-rules "fw-web-redirect-rule fw-web-allow-rule-tcp fw-web-allow-rule-udp fw-web-allow-rule-icmp" fw-webredirect-ruleset
gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None"
gbp group-create fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None"
}
# Delete everything create_gbp_resources made, in reverse dependency
# order (groups first so the chain is torn down before its spec/node).
delete_gbp_resources() {
gbp group-delete fw-provider
gbp group-delete fw-consumer
gbp policy-rule-set-delete fw-webredirect-ruleset
gbp policy-rule-delete fw-web-redirect-rule
gbp policy-rule-delete fw-web-allow-rule-tcp
gbp policy-rule-delete fw-web-allow-rule-icmp
gbp policy-rule-delete fw-web-allow-rule-udp
gbp policy-classifier-delete fw-web-classifier-tcp
gbp policy-classifier-delete fw-web-classifier-icmp
gbp policy-classifier-delete fw-web-classifier-udp
gbp policy-action-delete redirect-to-fw
gbp policy-action-delete allow-to-fw
gbp servicechain-spec-delete fw-chainspec
gbp servicechain-node-delete FWNODE
}
# Verify that exactly one service chain instance exists for the
# fw-provider group.
# Uses 'grep -c' instead of the original 'grep | wc -l' pipeline: one
# process fewer and the count is a bare number with no surrounding
# whitespace.  Note: counting is still substring-based on the sci-list
# output.
validate_gbp_resources() {
ServiceChainInstanceCount=`gbp sci-list -f value | grep -c fw-provider`
if [ "$ServiceChainInstanceCount" -eq "1" ]; then
echo "Chain creation Succeded"
else
echo "Chain creation failed"
fi
}
# Check that the chain materialized the expected neutron FWaaS
# resources: 4 firewall rules (matching the custom_json config), one
# policy, and one firewall whose status is reported.
# NOTE(review): the counts assume no other firewalls/policies exist in
# the tenant — valid only on a fresh exercise run.
validate_firewall_resources() {
FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l`
if [ "$FirewallRuleCount" -eq "4" ]; then
echo "Firewall Rule resource created"
else
echo "Firewall Rule resource not created"
fi
FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l`
if [ "$FirewallPolicyCount" -eq "1" ]; then
echo "Firewall Policy resource created"
else
echo "Firewall Policy resource not created"
fi
FirewallCount=`neutron firewall-list -f value | wc -l`
if [ "$FirewallCount" -eq "1" ]; then
echo "Firewall resource created"
FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'`
FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status`
echo "Firewall resource is in $FirewallStatus state"
else
echo "Firewall resource not created"
fi
}
# Exercise chain deletion/re-creation through group and PRS updates:
# delete the groups (chain should go away), recreate the provider with
# no provided rule-set (no chain expected), then attach the rule-set
# via group-update (chain should appear).
update_gbp_resources() {
# Update existing chain, by removing 2 rules
#gbp servicechain-node-update FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml
#FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l`
#if [ "$FirewallRuleCount" -eq "2" ]; then
#    echo "Chain created"
#else
#    echo "Chain not created"
#fi
gbp group-delete fw-provider
gbp group-delete fw-consumer
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "0" ]; then
echo "Chain deleted"
else
echo "Chain not deleted"
fi
# Service chain creation/deletion through PRS update
gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None"
gbp group-create fw-provider
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "0" ]; then
# Zero instances is the expected outcome here (provider has no PRS yet).
echo "Chain not created"
else
# NOTE(review): this message looks wrong for this branch — an instance
# existing here means a chain WAS created unexpectedly.
echo "Chain not deleted"
fi
gbp group-update fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None"
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "1" ]; then
echo "Chain created"
else
echo "Chain not created"
fi
}
# Exercise flow: create, validate, exercise updates, then clean up.
create_gbp_resources
validate_gbp_resources
validate_firewall_resources
update_gbp_resources
delete_gbp_resources

View File

@@ -0,0 +1,204 @@
#!/usr/bin/env bash
# **fw_vm_lb.sh**
# Sanity check that firewall(in service VM) and loadbalancer service chain is created with NFP
echo "*********************************************************************"
echo "Begin NFP Exercise: $0"
echo "*********************************************************************"
# Settings
# ========
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
# NOTE: devstack's copy_nfp_files_and_start_process rewrites this line
# to source the deployment's own openrc.
source $TOP_DIR/openrc neutron service
# Create a two-node (firewall VM + haproxy LB) E-W chain: nodes, spec,
# REDIRECT policy, a VIP network service policy, and the
# consumer/provider groups that trigger chain instantiation.
create_gbp_resources() {
# E-W insertion
gbp servicechain-node-create --service-profile base_mode_fw_vm --config 'custom_json:{"mimetype": "config/custom+json","rules": [{"action": "log", "name": "tcp", "service": "tcp/80"}, {"action": "log", "name": "tcp", "service": "tcp/8080"}, {"action": "accept", "name": "tcp", "service": "tcp/22"}, {"action": "accept", "name": "icmp", "service": "icmp"}]}' FW_LB-FWNODE
gbp servicechain-node-create --service-profile base_mode_lb --template-file $TOP_DIR/nfp-templates/haproxy.template FW_LB-LBNODE
gbp servicechain-spec-create --nodes "FW_LB-FWNODE FW_LB-LBNODE" fw_lb_chainspec
gbp policy-action-create --action-type REDIRECT --action-value fw_lb_chainspec redirect-to-fw_lb
gbp policy-classifier-create --protocol tcp --direction bi fw_lb-webredirect
gbp policy-rule-create --classifier fw_lb-webredirect --actions redirect-to-fw_lb fw_lb-web-redirect-rule
gbp policy-rule-set-create --policy-rules "fw_lb-web-redirect-rule" fw_lb-webredirect-ruleset
gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet fw_lb_nsp
gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None"
gbp group-create fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp
}
# Delete everything create_gbp_resources made, in reverse dependency
# order.
delete_gbp_resources() {
gbp group-delete fw_lb-provider
gbp group-delete fw_lb-consumer
gbp network-service-policy-delete fw_lb_nsp
gbp policy-rule-set-delete fw_lb-webredirect-ruleset
gbp policy-rule-delete fw_lb-web-redirect-rule
gbp policy-classifier-delete fw_lb-webredirect
gbp policy-action-delete redirect-to-fw_lb
gbp servicechain-spec-delete fw_lb_chainspec
gbp servicechain-node-delete FW_LB-LBNODE
gbp servicechain-node-delete FW_LB-FWNODE
}
# Verify one service chain instance exists for fw_lb-provider and that
# both FW_LB-* chain nodes were created.
validate_gbp_resources() {
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "1" ]; then
echo "Chain creation Succeded"
else
echo "Chain creation failed"
fi
ServiceChainNodeCount=`gbp scn-list -f value | grep FW_LB | wc -l`
if [ "$ServiceChainNodeCount" -eq "2" ]; then
echo "Network function creation Succeded"
else
echo "Network function creation failed"
fi
}
# Check the chain's neutron FWaaS resources: 4 rules, one policy, one
# firewall (status reported).  Same checks as in fw_vm.sh.
# NOTE(review): counts assume a fresh tenant with no other firewalls.
validate_firewall_resources() {
FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l`
if [ "$FirewallRuleCount" -eq "4" ]; then
echo "Firewall Rule resource created"
else
echo "Firewall Rule resource not created"
fi
FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l`
if [ "$FirewallPolicyCount" -eq "1" ]; then
echo "Firewall Policy resource created"
else
echo "Firewall Policy resource not created"
fi
FirewallCount=`neutron firewall-list -f value | wc -l`
if [ "$FirewallCount" -eq "1" ]; then
echo "Firewall resource created"
FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'`
FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status`
echo "Firewall resource is in $FirewallStatus state"
else
echo "Firewall resource not created"
fi
}
# Check the chain's LBaaS v1 resources (pool, VIP, health monitor),
# then exercise member scale-out/scale-in by creating and deleting
# policy targets on the provider group and recounting lb-members after
# each change.  The 5-second sleeps give the chain driver time to
# reconcile — timing-sensitive by design.
validate_loadbalancer_resources() {
LBPoolCount=`neutron lb-pool-list -f value | wc -l`
if [ "$LBPoolCount" -eq "1" ]; then
echo "LB Pool resource created"
LBPoolUUID=`neutron lb-pool-list -f value | awk '{print $1}'`
LBPoolStatus=`neutron lb-pool-show $LBPoolUUID -f value -c status`
echo "LB Pool resource is in $LBPoolStatus state"
else
echo "LB Pool resource not created"
fi
LBVIPCount=`neutron lb-vip-list -f value | wc -l`
if [ "$LBVIPCount" -eq "1" ]; then
echo "LB VIP resource created"
LBVIPUUID=`neutron lb-vip-list -f value | awk '{print $1}'`
LBVIPStatus=`neutron lb-vip-show $LBVIPUUID -f value -c status`
echo "LB VIP resource is in $LBVIPStatus state"
else
echo "LB VIP resource not created"
fi
LBHMCount=`neutron lb-healthmonitor-list -f value | wc -l`
if [ "$LBHMCount" -eq "1" ]; then
echo "LB Healthmonitor resource created"
else
echo "LB Healthmonitor resource not created"
fi
# Scale out: each policy target on the provider should become a member.
gbp policy-target-create --policy-target-group fw_lb-provider provider_pt1
sleep 5
LBMemberCount=`neutron lb-member-list -f value | wc -l`
if [ "$LBMemberCount" -eq "1" ]; then
echo "LB Member resource created"
else
echo "LB Member resource not created"
fi
gbp policy-target-create --policy-target-group fw_lb-provider provider_pt2
sleep 5
LBMemberCount=`neutron lb-member-list -f value | wc -l`
if [ "$LBMemberCount" -eq "2" ]; then
echo "LB Member resource created"
else
echo "LB Member resource not created"
fi
# Scale in: deleting the targets should remove the members again.
gbp policy-target-delete provider_pt1
sleep 5
LBMemberCount=`neutron lb-member-list -f value | wc -l`
if [ "$LBMemberCount" -eq "1" ]; then
echo "LB Member resource deleted"
else
echo "LB Member resource not deleted"
fi
gbp policy-target-delete provider_pt2
sleep 5
LBMemberCount=`neutron lb-member-list -f value | wc -l`
if [ "$LBMemberCount" -eq "0" ]; then
echo "LB Member resource deleted"
else
echo "LB Member resource not deleted"
fi
}
# Exercise chain deletion/re-creation through group and PRS updates,
# mirroring fw_vm.sh: delete groups (chain gone), recreate provider
# without a provided rule-set (no chain), then attach rule-set + NSP
# via group-update (chain reappears).
update_gbp_resources() {
# Update existing chain, by removing 2 rules
#gbp servicechain-node-update FW_LB-FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml
#FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l`
#if [ "$FirewallRuleCount" -eq "2" ]; then
#    echo "Chain created"
#else
#    echo "Chain not created"
#fi
gbp group-delete fw_lb-provider
gbp group-delete fw_lb-consumer
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "0" ]; then
echo "Chain deleted"
else
echo "Chain not deleted"
fi
# Service chain creation/deletion through PRS update
gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None"
gbp group-create fw_lb-provider
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "0" ]; then
# Zero instances is the expected outcome here (provider has no PRS yet).
echo "Chain not created"
else
# NOTE(review): this message looks wrong for this branch — an instance
# existing here means a chain WAS created unexpectedly.
echo "Chain not deleted"
fi
gbp group-update fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l`
if [ "$ServiceChainInstanceCount" -eq "1" ]; then
echo "Chain created"
else
echo "Chain not created"
fi
}
# Driver sequence: build the GBP/NFP topology, validate each resource
# type (GBP, firewall, loadbalancer), exercise chain updates, then tear
# everything down.
create_gbp_resources
validate_gbp_resources
validate_firewall_resources
validate_loadbalancer_resources
update_gbp_resources
delete_gbp_resources

View File

@@ -1,5 +1,21 @@
#!/bin/bash
NEW_BASE="$BASE/new"
DISK_IMAGE_DIR=$NEW_BASE/group-based-policy/gbpservice/tests/contrib
# Install build-time dependencies for NFP disk-image creation:
# diskimage-builder and friends via pip, qemu-utils for qcow2 handling.
# Globals: DISK_IMAGE_DIR (read)
function prepare_nfp_image_builder {
    #setup_develop $NFPSERVICE_DIR
    sudo -H -E pip install -r "$DISK_IMAGE_DIR/diskimage-create/requirements.txt"
    sudo apt-get install -y --force-yes qemu-utils
}
# Build the NFP reference-service VM image, upload it to Glance and
# register a small flavor for it.
# Arguments: $1 - devstack TOP_DIR
# Globals:   DISK_IMAGE_DIR (read), TOP_DIR/BUILT_IMAGE_PATH (written)
function create_nfp_image {
    TOP_DIR=$1
    sudo python "$DISK_IMAGE_DIR/diskimage-create/disk_image_create.py" "$DISK_IMAGE_DIR/diskimage-create/conf.json"
    # disk_image_create.py records the built image location in this file.
    BUILT_IMAGE_PATH=$(cat /tmp/nfp_image_path)
    upload_image "file://$BUILT_IMAGE_PATH"
    openstack --os-cloud=devstack-admin flavor create --ram 512 --disk 3 --vcpus 1 m1.nfp-tiny
}
function assign_user_role_credential {
TOP_DIR=$1
@@ -120,9 +136,15 @@ function namespace_create {
# Create the base-mode GBP resources: the service-management network
# service policy, LB/FW service profiles (config-only haproxy/vyos and
# the VM-based NFP firewall) and the service-management PTG.
# Arguments: $1 - devstack TOP_DIR
function create_nfp_gbp_resources {
    TOP_DIR=$1
    source "$TOP_DIR/openrc" neutron service
    # Image built earlier by create_nfp_image; its basename without the
    # extension is the Glance image name for the VM-based profile.
    IMAGE_PATH=$(cat /tmp/nfp_image_path)
    IMAGE_NAME=$(basename "$IMAGE_PATH")
    IMAGE_NAME_FLAT="${IMAGE_NAME%.*}"
    FLAVOR=m1.nfp-tiny
    gbp network-service-policy-create --network-service-params type=ip_pool,name=vip_ip,value=nat_pool svc_mgmt_fip_policy
    gbp service-profile-create --servicetype LOADBALANCER --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy,device_type=None --vendor NFP base_mode_lb
    gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=vyos,device_type=None --vendor NFP base_mode_fw
    gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=nfp,device_type=nova,image_name=$IMAGE_NAME_FLAT,flavor=$FLAVOR --vendor NFP base_mode_fw_vm
    gbp group-create svc_management_ptg --service_management True
}
@@ -132,7 +154,12 @@ function delete_nfp_gbp_resources {
neutron port-delete nfp-proxy_port
gbp ptg-show svc_management_ptg -f value -c policy_targets
gbp ptg-show svc_management_ptg -f value -c policy_targets | xargs -I {} gbp pt-show {}
nova list
gbp group-delete svc_management_ptg
gbp service-profile-delete base_mode_fw_vm
gbp service-profile-delete base_mode_fw
gbp service-profile-delete base_mode_lb
gbp network-service-policy-delete svc_mgmt_fip_policy
@@ -196,6 +223,8 @@ function copy_nfp_files_and_start_process {
echo "Configuration success ... "
}
function nfp_setup {
prepare_nfp_image_builder
create_nfp_image $1
assign_user_role_credential $1
create_nfp_gbp_resources $1
get_router_namespace $1

View File

@@ -0,0 +1,13 @@
{
"dib":
{
"image_size_in_GB": 3,
"ram_size": 512,
"cache_path": "~/.cache/image-create",
"elements": ["nfp-reference-configurator"]
},
"ubuntu_release":
{
"release": "wily"
}
}

View File

@@ -0,0 +1,124 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#! /usr/bin/python
import datetime
import os
from oslo_serialization import jsonutils
import subprocess
import sys
conf = []
cur_dir = ''
def parse_json(j_file):
    """Load the JSON config file into the module-global ``conf``."""
    global conf
    with open(j_file) as fh:
        conf = jsonutils.load(fh)
def dib():
    """Build the NFP reference-service VM image with diskimage-builder.

    Reads the parsed configuration from the module-global ``conf`` and the
    script location from ``cur_dir``, exports the environment variables the
    DIB elements expect, invokes ``disk-image-create`` and, on success,
    writes the resulting qcow2 path to /tmp/nfp_image_path for the devstack
    scripts to pick up.
    """
    dib = conf['dib']
    elems = cur_dir + '/elements/'
    # set the elements path in environment variable
    os.environ['ELEMENTS_PATH'] = elems
    # set the Ubuntu Release for the build in environment variable
    os.environ['DIB_RELEASE'] = conf['ubuntu_release']['release']
    image_name = 'nfp_reference_service'
    # basic elements
    dib_args = ['disk-image-create', 'base', 'vm', 'ubuntu', 'devuser',
                'dhcp-all-interfaces']
    # create user
    os.environ['DIB_DEV_USER_USERNAME'] = 'ubuntu'
    os.environ['DIB_DEV_USER_SHELL'] = '/bin/bash'
    # configure elements
    for element in dib['elements']:
        dib_args.append(element)
        # root login enabled, set password environment variable
        if element == 'root-passwd':
            os.environ['DIB_PASSWORD'] = dib['root_password']
        if element == 'nfp-reference-configurator':
            # set environment variables, needed by 'extra-data.d'
            service_dir = cur_dir + '/../nfp_service/'
            pecan_dir = os.path.abspath(os.path.join(cur_dir,
                                                     '../../../nfp'))
            service_dir = os.path.realpath(service_dir)
            pecan_dir = os.path.realpath(pecan_dir)
            os.environ['PECAN_GIT_PATH'] = pecan_dir
            os.environ['SERVICE_GIT_PATH'] = service_dir

    # offline mode, assuming the image cache (tar) already exists
    dib_args.append('--offline')
    # Expand a leading '~' to the user's home. expanduser is used instead
    # of str.replace('~', ...) so only a leading tilde is substituted and
    # an unset HOME no longer produces a bogus '-1' path component.
    cache_path = os.path.expanduser(dib['cache_path'])
    dib_args.append('--image-cache')
    dib_args.append(cache_path)

    dib_args.append('--image-size')
    dib_args.append(str(dib['image_size_in_GB']))
    # timestamped image name keeps successive builds distinct
    timestamp = datetime.datetime.now().strftime('%I%M%p-%d-%m-%Y')
    image_name = image_name + '_' + timestamp
    dib_args.append('-o')
    dib_args.append(str(image_name))

    # set environment variable, needed by 'extra-data.d'
    os.environ['NFP_IMAGE_NAME'] = image_name
    if 'nfp-reference-configurator' in dib['elements']:
        # an SSH key pair is generated next to the image output; the public
        # key is injected as the dev user's authorized key
        os.environ['SSH_RSS_KEY'] = (
            "%s/output/%s" % (cur_dir, image_name))
        os.environ['DIB_DEV_USER_AUTHORIZED_KEYS'] = (
            "%s.pub" % os.environ['SSH_RSS_KEY'])

    # build from <cur_dir>/output so all artifacts land in one place
    os.chdir(cur_dir)
    out_dir = 'output'
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    os.chdir(out_dir)

    print("DIB-ARGS: %r" % dib_args)
    ret = subprocess.call(dib_args)
    if not ret:
        # disk-image-create exited 0: publish the image location for the
        # calling devstack scripts
        image_path = cur_dir + '/output/' + image_name + '.qcow2'
        print("Image location: %s" % image_path)
        with open("/tmp/nfp_image_path", "w") as f:
            f.write(image_path)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("ERROR: Invalid Usage")
        print("Usage:\n\t%s <json config file>" % sys.argv[0])
        print("\twhere: <json config file> contains all the configuration")
        # exit non-zero so shell callers can detect the usage error
        # (bare exit() returned status 0 here)
        sys.exit(1)
    # resolve the directory this script lives in (elements/ and output/
    # are located relative to it)
    cur_dir = os.path.dirname(__file__)
    cur_dir = os.path.realpath(cur_dir)
    if not cur_dir:
        # if script is executed from current dir, get abs path
        cur_dir = os.path.realpath('./')
    # parse args from json file
    parse_json(sys.argv[1])
    # NOTE(review): 'elements' is not referenced in the visible code
    # (dib() reads conf directly) -- kept for compatibility, confirm use.
    elements = conf['dib']['elements']
    # run Disk Image Builder to create VM image
    dib()

View File

@@ -0,0 +1,5 @@
#! /bin/bash
set -eu
# Generate a password-less RSA key pair at ${SSH_RSS_KEY}; the public key
# is injected into the image as the dev user's authorized key (see
# DIB_DEV_USER_AUTHORIZED_KEYS in disk_image_create.py). Quoted in case
# the path contains spaces.
ssh-keygen -t rsa -N "" -f "${SSH_RSS_KEY}"
sudo cat "${SSH_RSS_KEY}.pub"

View File

@@ -0,0 +1,9 @@
#! /bin/bash
set -eu
# copy the reference_configurator and pecan folders to VM at /root/
# (paths quoted so whitespace in the build paths cannot split words)
cp -rL "${SERVICE_GIT_PATH}/reference_configurator" "${TMP_MOUNT_PATH}/root/"
cp -rL "${PECAN_GIT_PATH}/pecan" "${TMP_MOUNT_PATH}/root/"
ls -lR "${TMP_MOUNT_PATH}/root/"

View File

@@ -0,0 +1,36 @@
#! /bin/bash
set -eu
# Install the reference_configurator and pecan folders (staged under
# /root/ by the extra-data.d hook) into the image's dist-packages tree
# and register the pecan service with systemd.
cd /usr/local/lib/python2.7/dist-packages/
service_path='gbpservice/tests/contrib/nfp_service'
pecan_path='gbpservice/nfp'
mkdir -p "$service_path"
mkdir -p "$pecan_path"
# make every directory in the tree an importable python package
find gbpservice/ -type d -exec touch {}/__init__.py \;
mv /root/reference_configurator "$service_path/"
mv /root/pecan "$pecan_path/"
cd "$service_path"
# capture the location once; do not clobber the shell's special PWD
# variable (the original assigned to PWD via backticks)
here=$(pwd)
ls -lR "$here/"
# install the pecan systemd unit file; unit files should not be
# world-writable (was chmod 777), 644 is the conventional mode
cp -L "$here/reference_configurator/config/pecan.service" /etc/systemd/system/pecan.service
chmod 644 /etc/systemd/system/pecan.service
ls -l /etc/systemd/system/pecan.service
# enable the service at boot by placing it in multi-user.target.wants
cp -L "$here/reference_configurator/config/pecan.service" /etc/systemd/system/multi-user.target.wants/pecan.service
chmod 644 /etc/systemd/system/multi-user.target.wants/pecan.service
ls -l /etc/systemd/system/multi-user.target.wants/pecan.service
# install the launcher referenced by the unit file; executable but not
# world-writable (was chmod 777)
cp -rL "$here/reference_configurator/bin/nfp-pecan" /usr/bin/nfp-pecan
chmod 755 /usr/bin/nfp-pecan
ls -l /usr/bin/nfp-pecan
chmod 755 "$here/reference_configurator/bin/nfp-pecan"
ls -l "$here/reference_configurator/bin/nfp-pecan"

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Install the python runtime dependencies for the pecan-based reference
# configurator inside the image, then install the pecan api project in
# develop mode.
apt-get install -y --force-yes python-pecan
# NOTE(review): 'oslo.serialization' and 'oslo.log' look like pip package
# names rather than apt package names (Ubuntu ships them as
# python-oslo.serialization / python-oslo.log) -- confirm these resolve
# on the target release.
apt-get install -y --force-yes oslo.serialization oslo.log python-yaml
ls -lR /home/ubuntu/
cd /usr/local/lib/python2.7/dist-packages/gbpservice/nfp/pecan/api && python setup.py develop

View File

@@ -0,0 +1,4 @@
pep8>=1.5.7
pytz
diskimage-builder>=1.15.0
dib-utils>=0.0.8