From 02295e7283ced04c6e76ce0660635fe9139f8d3f Mon Sep 17 00:00:00 2001
From: Eduardo Gonzalez <dabarren@gmail.com>
Date: Wed, 21 Nov 2018 13:48:36 +0100
Subject: [PATCH] Rework documentation into more specific folders

Change the index to make it easier to identify which service to look for.
Split the docs into more specific folders such as networking
and storage.

Change-Id: Ic7ac12b3dd555fa5c018eeb897ccd4a5a2dfe8f3
---
 doc/source/reference/bare-metal/index.rst     |  11 +
 .../{ => bare-metal}/ironic-guide.rst         |   6 +-
 .../reference/{ => compute}/hyperv-guide.rst  |   6 +-
 doc/source/reference/compute/index.rst        |  15 +
 .../{ => compute}/nova-fake-driver.rst        |   0
 .../reference/{ => compute}/vmware-guide.rst  |   6 +-
 .../{ => compute}/xenserver-guide.rst         |   6 +-
 .../reference/{ => compute}/zun-guide.rst     |   8 +-
 doc/source/reference/containers/index.rst     |  11 +
 .../{ => containers}/kuryr-guide.rst          |   6 +-
 .../external-mariadb-guide.rst                |   0
 doc/source/reference/databases/index.rst      |  10 +
 .../bifrost.rst                               |  12 +-
 .../deployment-and-bootstrapping/index.rst    |  11 +
 .../reference/deployment-config/index.rst     |  11 +
 .../resource-constraints.rst                  |   6 +-
 doc/source/reference/index.rst                |  42 +-
 .../central-logging-guide.rst                 |   6 +-
 .../logging-and-monitoring/index.rst          |  14 +
 .../monasca-guide.rst                         |   6 +-
 .../osprofiler-guide.rst                      |   6 +-
 .../skydive-guide.rst                         |   6 +-
 doc/source/reference/networking-guide.rst     | 490 ------------------
 .../{ => networking}/designate-guide.rst      |   6 +-
 doc/source/reference/networking/dpdk.rst      | 113 ++++
 doc/source/reference/networking/index.rst     |  18 +
 .../networking/neutron-extensions.rst         |  80 +++
 .../reference/networking/opendaylight.rst     |  56 ++
 .../networking/provider-networks.rst          |  21 +
 doc/source/reference/networking/sriov.rst     | 224 ++++++++
 .../reference/orchestration-and-nfv/index.rst |  10 +
 .../tacker-guide.rst                          |   6 +-
 .../{ => shared-services}/glance-guide.rst    |  17 +-
 .../{ => shared-services}/horizon-guide.rst   |   6 +-
 .../reference/shared-services/index.rst       |  12 +
 .../reference/{ => storage}/ceph-guide.rst    |   6 +-
 .../{ => storage}/cinder-guide-hnas.rst       |   0
 .../reference/{ => storage}/cinder-guide.rst  |   6 +-
 .../{ => storage}/external-ceph-guide.rst     |   0
 doc/source/reference/storage/index.rst        |  17 +
 .../reference/{ => storage}/manila-guide.rst  |   6 +-
 .../{ => storage}/manila-hnas-guide.rst       |   0
 .../reference/{ => storage}/swift-guide.rst   |   6 +-
 43 files changed, 718 insertions(+), 587 deletions(-)
 create mode 100644 doc/source/reference/bare-metal/index.rst
 rename doc/source/reference/{ => bare-metal}/ironic-guide.rst (98%)
 rename doc/source/reference/{ => compute}/hyperv-guide.rst (98%)
 create mode 100644 doc/source/reference/compute/index.rst
 rename doc/source/reference/{ => compute}/nova-fake-driver.rst (100%)
 rename doc/source/reference/{ => compute}/vmware-guide.rst (99%)
 rename doc/source/reference/{ => compute}/xenserver-guide.rst (98%)
 rename doc/source/reference/{ => compute}/zun-guide.rst (96%)
 create mode 100644 doc/source/reference/containers/index.rst
 rename doc/source/reference/{ => containers}/kuryr-guide.rst (95%)
 rename doc/source/reference/{ => databases}/external-mariadb-guide.rst (100%)
 create mode 100644 doc/source/reference/databases/index.rst
 rename doc/source/reference/{ => deployment-and-bootstrapping}/bifrost.rst (97%)
 create mode 100644 doc/source/reference/deployment-and-bootstrapping/index.rst
 create mode 100644 doc/source/reference/deployment-config/index.rst
 rename doc/source/reference/{ => deployment-config}/resource-constraints.rst (95%)
 rename doc/source/reference/{ => logging-and-monitoring}/central-logging-guide.rst (99%)
 create mode 100644 doc/source/reference/logging-and-monitoring/index.rst
 rename doc/source/reference/{ => logging-and-monitoring}/monasca-guide.rst (99%)
 rename doc/source/reference/{ => logging-and-monitoring}/osprofiler-guide.rst (93%)
 rename doc/source/reference/{ => logging-and-monitoring}/skydive-guide.rst (90%)
 delete mode 100644 doc/source/reference/networking-guide.rst
 rename doc/source/reference/{ => networking}/designate-guide.rst (98%)
 create mode 100644 doc/source/reference/networking/dpdk.rst
 create mode 100644 doc/source/reference/networking/index.rst
 create mode 100644 doc/source/reference/networking/neutron-extensions.rst
 create mode 100644 doc/source/reference/networking/opendaylight.rst
 create mode 100644 doc/source/reference/networking/provider-networks.rst
 create mode 100644 doc/source/reference/networking/sriov.rst
 create mode 100644 doc/source/reference/orchestration-and-nfv/index.rst
 rename doc/source/reference/{ => orchestration-and-nfv}/tacker-guide.rst (98%)
 rename doc/source/reference/{ => shared-services}/glance-guide.rst (88%)
 rename doc/source/reference/{ => shared-services}/horizon-guide.rst (89%)
 create mode 100644 doc/source/reference/shared-services/index.rst
 rename doc/source/reference/{ => storage}/ceph-guide.rst (99%)
 rename doc/source/reference/{ => storage}/cinder-guide-hnas.rst (100%)
 rename doc/source/reference/{ => storage}/cinder-guide.rst (98%)
 rename doc/source/reference/{ => storage}/external-ceph-guide.rst (100%)
 create mode 100644 doc/source/reference/storage/index.rst
 rename doc/source/reference/{ => storage}/manila-guide.rst (99%)
 rename doc/source/reference/{ => storage}/manila-hnas-guide.rst (100%)
 rename doc/source/reference/{ => storage}/swift-guide.rst (98%)

diff --git a/doc/source/reference/bare-metal/index.rst b/doc/source/reference/bare-metal/index.rst
new file mode 100644
index 0000000000..f0cf85dc31
--- /dev/null
+++ b/doc/source/reference/bare-metal/index.rst
@@ -0,0 +1,11 @@
+==========
+Bare Metal
+==========
+
+This section describes configuring bare metal provisioning services
+such as Ironic.
+
+.. toctree::
+   :maxdepth: 1
+
+   ironic-guide
diff --git a/doc/source/reference/ironic-guide.rst b/doc/source/reference/bare-metal/ironic-guide.rst
similarity index 98%
rename from doc/source/reference/ironic-guide.rst
rename to doc/source/reference/bare-metal/ironic-guide.rst
index a08d0c4e5d..9a8c6f88f4 100644
--- a/doc/source/reference/ironic-guide.rst
+++ b/doc/source/reference/bare-metal/ironic-guide.rst
@@ -1,8 +1,8 @@
 .. _ironic-guide:
 
-===============
-Ironic in Kolla
-===============
+================================
+Ironic - Bare Metal provisioning
+================================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/hyperv-guide.rst b/doc/source/reference/compute/hyperv-guide.rst
similarity index 98%
rename from doc/source/reference/hyperv-guide.rst
rename to doc/source/reference/compute/hyperv-guide.rst
index d337915931..4039c90d26 100644
--- a/doc/source/reference/hyperv-guide.rst
+++ b/doc/source/reference/compute/hyperv-guide.rst
@@ -1,8 +1,8 @@
 .. _hyperv-guide:
 
-====================
-Nova-HyperV in Kolla
-====================
+===========
+Nova HyperV
+===========
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/compute/index.rst b/doc/source/reference/compute/index.rst
new file mode 100644
index 0000000000..ba17f64e0e
--- /dev/null
+++ b/doc/source/reference/compute/index.rst
@@ -0,0 +1,15 @@
+=======
+Compute
+=======
+
+This section describes configuring Nova hypervisors and
+compute services such as HyperV, XenServer and so on.
+
+.. toctree::
+   :maxdepth: 1
+
+   hyperv-guide
+   nova-fake-driver
+   vmware-guide
+   xenserver-guide
+   zun-guide
diff --git a/doc/source/reference/nova-fake-driver.rst b/doc/source/reference/compute/nova-fake-driver.rst
similarity index 100%
rename from doc/source/reference/nova-fake-driver.rst
rename to doc/source/reference/compute/nova-fake-driver.rst
diff --git a/doc/source/reference/vmware-guide.rst b/doc/source/reference/compute/vmware-guide.rst
similarity index 99%
rename from doc/source/reference/vmware-guide.rst
rename to doc/source/reference/compute/vmware-guide.rst
index 7878f1f550..b3f4983997 100644
--- a/doc/source/reference/vmware-guide.rst
+++ b/doc/source/reference/compute/vmware-guide.rst
@@ -1,8 +1,8 @@
 .. _vmware-guide:
 
-===============
-VMware in Kolla
-===============
+======
+VMware
+======
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/xenserver-guide.rst b/doc/source/reference/compute/xenserver-guide.rst
similarity index 98%
rename from doc/source/reference/xenserver-guide.rst
rename to doc/source/reference/compute/xenserver-guide.rst
index 0a212e55cc..70d5ac8708 100644
--- a/doc/source/reference/xenserver-guide.rst
+++ b/doc/source/reference/compute/xenserver-guide.rst
@@ -1,8 +1,8 @@
 .. _XenServer-guide:
 
-==================
-XenServer in Kolla
-==================
+=========
+XenServer
+=========
 
 Overview
 ========
diff --git a/doc/source/reference/zun-guide.rst b/doc/source/reference/compute/zun-guide.rst
similarity index 96%
rename from doc/source/reference/zun-guide.rst
rename to doc/source/reference/compute/zun-guide.rst
index 26e7c98e76..18e607e5e4 100644
--- a/doc/source/reference/zun-guide.rst
+++ b/doc/source/reference/compute/zun-guide.rst
@@ -1,6 +1,6 @@
-============
-Zun in Kolla
-============
+=======================
+Zun - Container service
+=======================
 
 "Zun is an OpenStack Container service. It aims to provide an
 OpenStack API for provisioning and managing containerized
@@ -12,7 +12,7 @@ Preparation and Deployment
 --------------------------
 
 Zun requires kuryr and etcd services, for more information about how to
-configure kuryr refer to :doc:`kuryr-guide`.
+configure kuryr refer to :doc:`../containers/kuryr-guide`.
 
 To allow Zun Compute connect to the Docker Daemon, add the following in the
 ``docker.service`` file on each zun-compute node.
diff --git a/doc/source/reference/containers/index.rst b/doc/source/reference/containers/index.rst
new file mode 100644
index 0000000000..54d94e65ee
--- /dev/null
+++ b/doc/source/reference/containers/index.rst
@@ -0,0 +1,11 @@
+==========
+Containers
+==========
+
+This section describes configuring and running container-based services
+including Kuryr.
+
+.. toctree::
+   :maxdepth: 1
+
+   kuryr-guide
diff --git a/doc/source/reference/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst
similarity index 95%
rename from doc/source/reference/kuryr-guide.rst
rename to doc/source/reference/containers/kuryr-guide.rst
index 7071af3d49..68532e9120 100644
--- a/doc/source/reference/kuryr-guide.rst
+++ b/doc/source/reference/containers/kuryr-guide.rst
@@ -1,6 +1,6 @@
-==============
-Kuryr in Kolla
-==============
+============================
+Kuryr - Container networking
+============================
 
 "Kuryr is a Docker network plugin that uses Neutron to provide networking
 services to Docker containers. It provides containerized images for the common
diff --git a/doc/source/reference/external-mariadb-guide.rst b/doc/source/reference/databases/external-mariadb-guide.rst
similarity index 100%
rename from doc/source/reference/external-mariadb-guide.rst
rename to doc/source/reference/databases/external-mariadb-guide.rst
diff --git a/doc/source/reference/databases/index.rst b/doc/source/reference/databases/index.rst
new file mode 100644
index 0000000000..d9c135f04a
--- /dev/null
+++ b/doc/source/reference/databases/index.rst
@@ -0,0 +1,10 @@
+=========
+Databases
+=========
+
+This section describes configuration of database services.
+
+.. toctree::
+   :maxdepth: 1
+
+   external-mariadb-guide
diff --git a/doc/source/reference/bifrost.rst b/doc/source/reference/deployment-and-bootstrapping/bifrost.rst
similarity index 97%
rename from doc/source/reference/bifrost.rst
rename to doc/source/reference/deployment-and-bootstrapping/bifrost.rst
index 422eb570f3..2385199fed 100644
--- a/doc/source/reference/bifrost.rst
+++ b/doc/source/reference/deployment-and-bootstrapping/bifrost.rst
@@ -1,6 +1,6 @@
-=============
-Bifrost Guide
-=============
+===========================
+Bifrost - Standalone Ironic
+===========================
 
 From the ``Bifrost`` developer documentation:
     Bifrost (pronounced bye-frost) is a set of Ansible playbooks that automates
@@ -63,7 +63,7 @@ Preparation
 Prepare the Control Host
 ------------------------
 
-Follow the **Install dependencies** section of the :doc:`../user/quickstart`
+Follow the **Install dependencies** section of the :doc:`../../user/quickstart`
 guide instructions to set up kolla and kolla-ansible dependencies.  Follow
 the instructions in either the **Install kolla for development** section or
 the **Install kolla for deployment or evaluation** section to install kolla
@@ -180,8 +180,8 @@ host on which bifrost is deployed.
 Prepare Kolla-Ansible Configuration
 -----------------------------------
 
-Follow the instructions in :doc:`../user/quickstart` to prepare kolla-ansible's
-global configuration file ``globals.yml``.  For bifrost, the
+Follow the instructions in :doc:`../../user/quickstart` to prepare
+kolla-ansible's global configuration file ``globals.yml``.  For bifrost, the
 ``bifrost_network_interface`` variable should be set to the name of the
 interface that will be used to provision bare metal cloud hosts if this is
 different than ``network_interface``.  For example to use ``eth1``:
diff --git a/doc/source/reference/deployment-and-bootstrapping/index.rst b/doc/source/reference/deployment-and-bootstrapping/index.rst
new file mode 100644
index 0000000000..3f03172ba1
--- /dev/null
+++ b/doc/source/reference/deployment-and-bootstrapping/index.rst
@@ -0,0 +1,11 @@
+============================
+Deployment and bootstrapping
+============================
+
+This section describes deployment and provisioning of bare metal control
+plane hosts.
+
+.. toctree::
+   :maxdepth: 1
+
+   bifrost
diff --git a/doc/source/reference/deployment-config/index.rst b/doc/source/reference/deployment-config/index.rst
new file mode 100644
index 0000000000..29afc39f97
--- /dev/null
+++ b/doc/source/reference/deployment-config/index.rst
@@ -0,0 +1,11 @@
+========================
+Deployment configuration
+========================
+
+This section describes configuration of kolla containers, including limiting
+their resources.
+
+.. toctree::
+   :maxdepth: 1
+
+   resource-constraints
diff --git a/doc/source/reference/resource-constraints.rst b/doc/source/reference/deployment-config/resource-constraints.rst
similarity index 95%
rename from doc/source/reference/resource-constraints.rst
rename to doc/source/reference/deployment-config/resource-constraints.rst
index 3649f26d0c..4413b59920 100644
--- a/doc/source/reference/resource-constraints.rst
+++ b/doc/source/reference/deployment-config/resource-constraints.rst
@@ -1,8 +1,8 @@
 .. _resource-constraints:
 
-=====================================
-Resource Constraints in Kolla Ansible
-=====================================
+====================
+Resource Constraints
+====================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index 71feacc8ab..0e1211be07 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -1,32 +1,18 @@
-Projects Deployment References
-==============================
+===========================================
+Projects Deployment Configuration Reference
+===========================================
 
 .. toctree::
    :maxdepth: 2
 
-   ceph-guide
-   external-ceph-guide
-   central-logging-guide
-   external-mariadb-guide
-   cinder-guide
-   cinder-guide-hnas
-   designate-guide
-   hyperv-guide
-   ironic-guide
-   manila-guide
-   manila-hnas-guide
-   monasca-guide
-   nova-fake-driver
-   swift-guide
-   bifrost
-   networking-guide
-   kuryr-guide
-   zun-guide
-   osprofiler-guide
-   skydive-guide
-   vmware-guide
-   tacker-guide
-   xenserver-guide
-   horizon-guide
-   resource-constraints
-   glance-guide
+   compute/index
+   bare-metal/index
+   storage/index
+   networking/index
+   shared-services/index
+   orchestration-and-nfv/index
+   logging-and-monitoring/index
+   containers/index
+   databases/index
+   deployment-config/index
+   deployment-and-bootstrapping/index
diff --git a/doc/source/reference/central-logging-guide.rst b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst
similarity index 99%
rename from doc/source/reference/central-logging-guide.rst
rename to doc/source/reference/logging-and-monitoring/central-logging-guide.rst
index cb494db718..20f3c26f8d 100644
--- a/doc/source/reference/central-logging-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/central-logging-guide.rst
@@ -1,8 +1,8 @@
 .. _central-logging-guide:
 
-========================
-Central Logging in Kolla
-========================
+===============
+Central Logging
+===============
 
 An OpenStack deployment generates vast amounts of log data. In order to
 successfully monitor this and use it to diagnose problems, the standard "ssh
diff --git a/doc/source/reference/logging-and-monitoring/index.rst b/doc/source/reference/logging-and-monitoring/index.rst
new file mode 100644
index 0000000000..2ab7bd4a4c
--- /dev/null
+++ b/doc/source/reference/logging-and-monitoring/index.rst
@@ -0,0 +1,14 @@
+======================
+Logging and monitoring
+======================
+
+This section describes configuration for the different
+logging and monitoring services available in kolla.
+
+.. toctree::
+   :maxdepth: 1
+
+   central-logging-guide
+   monasca-guide
+   osprofiler-guide
+   skydive-guide
diff --git a/doc/source/reference/monasca-guide.rst b/doc/source/reference/logging-and-monitoring/monasca-guide.rst
similarity index 99%
rename from doc/source/reference/monasca-guide.rst
rename to doc/source/reference/logging-and-monitoring/monasca-guide.rst
index db140fa05a..3af3399464 100644
--- a/doc/source/reference/monasca-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/monasca-guide.rst
@@ -1,8 +1,8 @@
 .. _monasca-guide:
 
-================
-Monasca in Kolla
-================
+============================
+Monasca - Monitoring service
+============================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/osprofiler-guide.rst b/doc/source/reference/logging-and-monitoring/osprofiler-guide.rst
similarity index 93%
rename from doc/source/reference/osprofiler-guide.rst
rename to doc/source/reference/logging-and-monitoring/osprofiler-guide.rst
index 2b8284b5d9..9951d44721 100644
--- a/doc/source/reference/osprofiler-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/osprofiler-guide.rst
@@ -1,8 +1,8 @@
 .. _osprofiler-guide:
 
-===================
-OSprofiler in Kolla
-===================
+====================================
+OSprofiler - Cross-project profiling
+====================================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/skydive-guide.rst b/doc/source/reference/logging-and-monitoring/skydive-guide.rst
similarity index 90%
rename from doc/source/reference/skydive-guide.rst
rename to doc/source/reference/logging-and-monitoring/skydive-guide.rst
index 56732bb6ec..686031107a 100644
--- a/doc/source/reference/skydive-guide.rst
+++ b/doc/source/reference/logging-and-monitoring/skydive-guide.rst
@@ -1,8 +1,8 @@
 .. _skydive-guide:
 
-================
-Skydive in Kolla
-================
+====================================
+Skydive - Real time network analyzer
+====================================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/networking-guide.rst b/doc/source/reference/networking-guide.rst
deleted file mode 100644
index 67a8063da6..0000000000
--- a/doc/source/reference/networking-guide.rst
+++ /dev/null
@@ -1,490 +0,0 @@
-.. _networking-guide:
-
-===================
-Networking in Kolla
-===================
-
-Kolla deploys Neutron by default as OpenStack networking component.
-This section describes configuring and running Neutron extensions like
-LBaaS, Networking-SFC, QoS, and so on.
-
-Enabling Provider Networks
-==========================
-
-Provider networks allow to connect compute instances directly to physical
-networks avoiding tunnels. This is necessary for example for some performance
-critical applications. Only administrators of OpenStack can create such
-networks. For provider networks compute hosts must have external bridge
-created and configured by Ansible tasks like it is already done for tenant
-DVR mode networking. Normal tenant non-DVR networking does not need external
-bridge on compute hosts and therefore operators don't need additional
-dedicated network interface.
-
-To enable provider networks, modify the ``/etc/kolla/globals.yml`` file
-as the following example shows:
-
-.. code-block:: yaml
-
-   enable_neutron_provider_networks: "yes"
-
-Enabling Neutron Extensions
-===========================
-
-Networking-SFC
-~~~~~~~~~~~~~~
-
-Preparation and deployment
---------------------------
-
-Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
-
-.. code-block:: yaml
-
-   enable_neutron_sfc: "yes"
-
-Verification
-------------
-
-For setting up a testbed environment and creating a port chain, please refer
-to `networking-sfc documentation
-<https://docs.openstack.org/networking-sfc/latest/contributor/system_design_and_workflow.html>`__.
-
-Neutron VPNaaS (VPN-as-a-Service)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Preparation and deployment
---------------------------
-
-Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
-
-.. code-block:: yaml
-
-   enable_neutron_vpnaas: "yes"
-
-Verification
-------------
-
-VPNaaS is a complex subject, hence this document provides directions for a
-simple smoke test to verify the service is up and running.
-
-On the network node(s), the ``neutron_vpnaas_agent`` should be up (image naming
-and versioning may differ depending on deploy configuration):
-
-.. code-block:: console
-
-   # docker ps --filter name=neutron_vpnaas_agent
-
-   CONTAINER ID   IMAGE                                                               COMMAND         CREATED          STATUS        PORTS  NAMES
-   97d25657d55e   operator:5000/kolla/oraclelinux-source-neutron-vpnaas-agent:4.0.0   "kolla_start"   44 minutes ago   Up 44 minutes        neutron_vpnaas_agent
-
-Kolla-Ansible includes a small script that can be used in tandem with
-``tools/init-runonce`` to verify the VPN using two routers and two Nova VMs:
-
-.. code-block:: console
-
-   tools/init-runonce
-   tools/init-vpn
-
-Verify both VPN services are active:
-
-.. code-block:: console
-
-   # neutron vpn-service-list
-
-   +--------------------------------------+----------+--------------------------------------+--------+
-   | id                                   | name     | router_id                            | status |
-   +--------------------------------------+----------+--------------------------------------+--------+
-   | ad941ec4-5f3d-4a30-aae2-1ab3f4347eb1 | vpn_west | 051f7ce3-4301-43cc-bfbd-7ffd59af539e | ACTIVE |
-   | edce15db-696f-46d8-9bad-03d087f1f682 | vpn_east | 058842e0-1d01-4230-af8d-0ba6d0da8b1f | ACTIVE |
-   +--------------------------------------+----------+--------------------------------------+--------+
-
-Two VMs can now be booted, one on vpn_east, the other on vpn_west, and
-encrypted ping packets observed being sent from one to the other.
-
-For more information on this and VPNaaS in Neutron refer to the
-`Neutron VPNaaS Testing <https://docs.openstack.org/neutron-vpnaas/latest/contributor/index.html#testing>`__
-and the `OpenStack wiki <https://wiki.openstack.org/wiki/Neutron/VPNaaS>`_.
-
-Networking-ODL
-~~~~~~~~~~~~~~
-
-Preparation and deployment
---------------------------
-
-Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
-
-.. code-block:: yaml
-
-   enable_opendaylight: "yes"
-
-Networking-ODL is an additional Neutron plugin that allows the OpenDaylight
-SDN Controller to utilize its networking virtualization features.
-For OpenDaylight to work, the Networking-ODL plugin has to be installed in
-the ``neutron-server`` container. In this case, one could use the
-neutron-server-opendaylight container and the opendaylight container by
-pulling from Kolla dockerhub or by building them locally.
-
-OpenDaylight ``globals.yml`` configurable options with their defaults include:
-
-.. code-block:: yaml
-
-   opendaylight_mechanism_driver: "opendaylight_v2"
-   opendaylight_l3_service_plugin: "odl-router_v2"
-   opendaylight_acl_impl: "learn"
-   enable_opendaylight_qos: "no"
-   enable_opendaylight_l3: "yes"
-   enable_opendaylight_legacy_netvirt_conntrack: "no"
-   opendaylight_port_binding_type: "pseudo-agentdb-binding"
-   opendaylight_features: "odl-mdsal-apidocs,odl-netvirt-openstack"
-   opendaylight_allowed_network_types: '"flat", "vlan", "vxlan"'
-
-Clustered OpenDaylight Deploy
------------------------------
-
-High availability clustered OpenDaylight requires modifying the inventory file
-and placing three or more hosts in the OpenDaylight or Networking groups.
-
-.. note::
-
-   The OpenDaylight role will allow deploy of one or three plus hosts for
-   OpenDaylight/Networking role.
-
-Verification
-------------
-
-Verify the build and deploy operation of Networking-ODL containers. Successful
-deployment will bring up an Opendaylight container in the list of running
-containers on network/opendaylight node.
-
-For the source code, please refer to the following link:
-https://github.com/openstack/networking-odl
-
-OVS with DPDK
-~~~~~~~~~~~~~
-
-Introduction
-------------
-
-Open vSwitch (ovs) is an open source software virtual switch developed
-and distributed via openvswitch.org.
-The Data Plane Development Kit (dpdk) is a collection of userspace
-libraries and tools that facilitate the development of high-performance
-userspace networking applications.
-
-As of the ovs 2.2 release, the ovs netdev datapath has supported integration
-with dpdk for accelerated userspace networking. As of the pike release
-of kolla support for deploying ovs with dpdk (ovs-dpdk) has been added
-to kolla ansible. The ovs-dpdk role introduced in the pike release has been
-tested on centos 7 and ubuntu 16.04 hosts, however, ubuntu is recommended due
-to conflicts with the cgroup configuration created by the default systemd
-version shipped with centos 7.
-
-Prerequisites
--------------
-
-DPDK is a high-performance userspace networking library, as such it has
-several requirements to function correctly that are not required when
-deploying ovs without dpdk.
-
-To function efficiently one of the mechanisms dpdk uses to accelerate
-memory access is the utilisation of kernel hugepages. The use of hugepage
-memory minimises the chance of a translation lookaside buffer(TLB) miss when
-translating virtual to physical memory as it increases the total amount of
-addressable memory that can be cached via the TLB. Hugepage memory pages are
-unswappable contiguous blocks of memory of typically 2MiB or 1GiB in size,
-that can be used to facilitate efficient sharing of memory between guests and
-a vSwitch or DMA mapping between physical nics and the userspace ovs datapath.
-
-To deploy ovs-dpdk on a platform a proportion of system memory should
-be allocated hugepages. While it is possible to allocate hugepages at runtime
-it is advised to allocate them via the kernel command line instead to prevent
-memory fragmentation. This can be achieved by adding the following to the grub
-config and regenerating your grub file.
-
-.. code-block:: console
-
-   default_hugepagesz=2M hugepagesz=2M hugepages=25000
-
-As dpdk is a userspace networking library it requires userspace compatible
-drivers to be able to control the physical interfaces on the platform.
-dpdk technically support 3 kernel drivers ``igb_uio``,``uio_pci_generic``, and
-``vfio_pci``.
-While it is technically possible to use all 3 only ``uio_pci_generic`` and
-``vfio_pci`` are recommended for use with kolla. ``igb_uio`` is BSD licenced
-and distributed as part of the dpdk library. While it has some advantages over
-``uio_pci_generic`` loading the ``igb_uio`` module will taint the kernel and
-possibly invalidate distro support. To successfully deploy ``ovs-dpdk``,
-``vfio_pci`` or ``uio_pci_generic`` kernel module must be present on the
-platform. Most distros include ``vfio_pci`` or ``uio_pci_generic`` as part of
-the default kernel though on some distros you may need to install
-``kernel-modules-extra`` or the distro equivalent prior to running
-:command:`kolla-ansible deploy`.
-
-Installation
-------------
-
-To enable ovs-dpdk, add the following configuration to
-``/etc/kolla/globals.yml`` file:
-
-.. code-block:: yaml
-
-   ovs_datapath: "netdev"
-   enable_ovs_dpdk: yes
-   enable_openvswitch: yes
-   tunnel_interface: "dpdk_bridge"
-   neutron_bridge_name: "dpdk_bridge"
-
-Unlike standard Open vSwitch deployments, the interface specified by
-neutron_external_interface should have an ip address assigned.
-The ip address assigned to neutron_external_interface will be moved to
-the "dpdk_bridge" as part of deploy action.
-When using ovs-dpdk the tunnel_interface must be an ovs bridge with a physical
-interfaces attached for tunnelled traffic to be accelerated by dpdk.
-Note that due to a limitation in ansible variable names which excluded
-the use of - in a variable name it is not possible to use the default
-br-ex name for the neutron_bridge_name or tunnel_interface.
-
-At present, the tunnel interface ip is configured using network manager on
-on ubuntu and systemd on centos family operating systems. systemd is used
-to work around a limitation of the centos network manager implementation which
-does not consider the creation of an ovs bridge to be a hotplug event. In
-the future, a new config option will be introduced to allow systemd to be used
-on all host distros for those who do not wish to enable the network manager
-service on ubuntu.
-
-Limitations
------------
-
-Reconfiguration from kernel ovs to ovs dpdk is currently not supported.
-Changing ovs datapaths on a deployed node requires neutron config changes
-and libvirt xml changes for all running instances including a hard reboot
-of the vm.
-
-When upgrading ovs-dpdk it should be noted that this will always involve
-a dataplane outage. Unlike kernel OVS the dataplane for ovs-dpdk executes in
-the ovs-vswitchd process. This means the lifetime of the dpdk dataplane is
-tied to the lifetime of the ovsdpdk_vswitchd container. As such it is
-recommended to always evacuate all vm workloads from a node running ovs-dpdk
-prior to upgrading.
-
-On ubuntu network manager is required for tunnel networking.
-This requirement will be removed in the future.
-
-Neutron SRIOV
-~~~~~~~~~~~~~
-
-Preparation and deployment
---------------------------
-
-SRIOV requires specific NIC and BIOS configuration and is not supported on all
-platforms. Consult NIC and platform specific documentation for instructions
-on enablement.
-
-Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
-
-.. code-block:: yaml
-
-   enable_neutron_sriov: "yes"
-
-Modify the ``/etc/kolla/config/neutron/ml2_conf.ini`` file and add
-``sriovnicswitch`` to the ``mechanism_drivers``. Also, the provider
-networks used by SRIOV should be configured. Both flat and VLAN are configured
-with the same physical network name in this example:
-
-.. path /etc/kolla/config/neutron/ml2_conf.ini
-.. code-block:: ini
-
-   [ml2]
-   mechanism_drivers = openvswitch,l2population,sriovnicswitch
-
-   [ml2_type_vlan]
-   network_vlan_ranges = sriovtenant1:1000:1009
-
-   [ml2_type_flat]
-   flat_networks = sriovtenant1
-
-Add ``PciPassthroughFilter`` to scheduler_default_filters
-
-The ``PciPassthroughFilter``, which is required by Nova Scheduler service
-on the Controller, should be added to ``scheduler_default_filters``
-
-Modify the ``/etc/kolla/config/nova.conf`` file and add
-``PciPassthroughFilter`` to ``scheduler_default_filters``. this filter is
-required by The Nova Scheduler service on the controller node.
-
-.. path /etc/kolla/config/nova.conf
-.. code-block:: ini
-
-   [DEFAULT]
-   scheduler_default_filters = <existing filters>, PciPassthroughFilter
-   scheduler_available_filters = nova.scheduler.filters.all_filters
-
-Edit the ``/etc/kolla/config/nova.conf`` file and add PCI device whitelisting.
-this is needed by OpenStack Compute service(s) on the Compute.
-
-.. path /etc/kolla/config/nova.conf
-.. code-block:: ini
-
-   [pci]
-   passthrough_whitelist = [{"devname": "ens785f0", "physical_network": "sriovtenant1"}]
-
-Modify the ``/etc/kolla/config/neutron/sriov_agent.ini`` file. Add physical
-network to interface mapping. Specific VFs can also be excluded here. Leaving
-blank means to enable all VFs for the interface:
-
-.. path /etc/kolla/config/neutron/sriov_agent.ini
-.. code-block:: ini
-
-   [sriov_nic]
-   physical_device_mappings = sriovtenant1:ens785f0
-   exclude_devices =
-
-Run deployment.
-
-Verification
-------------
-
-Check that VFs were created on the compute node(s). VFs will appear in the
-output of both ``lspci`` and ``ip link show``.  For example:
-
-.. code-block:: console
-
-   # lspci | grep net
-   05:10.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
-
-
-   # ip -d link show ens785f0
-   4: ens785f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovs-system state UP mode DEFAULT qlen 1000
-   link/ether 90:e2:ba:ba:fb:20 brd ff:ff:ff:ff:ff:ff promiscuity 1
-   openvswitch_slave addrgenmode eui64
-   vf 0 MAC 52:54:00:36:57:e0, spoof checking on, link-state auto, trust off
-   vf 1 MAC 52:54:00:00:62:db, spoof checking on, link-state auto, trust off
-   vf 2 MAC fa:16:3e:92:cf:12, spoof checking on, link-state auto, trust off
-   vf 3 MAC fa:16:3e:00:a3:01, vlan 1000, spoof checking on, link-state auto, trust off
-
-Verify the SRIOV Agent container is running on the compute node(s):
-
-.. code-block:: console
-
-   # docker ps --filter name=neutron_sriov_agent
-   CONTAINER ID   IMAGE                                                                COMMAND        CREATED         STATUS         PORTS  NAMES
-   b03a8f4c0b80   10.10.10.10:4000/registry/centos-source-neutron-sriov-agent:17.04.0  "kolla_start"  18 minutes ago  Up 18 minutes         neutron_sriov_agent
-
-Verify the SRIOV Agent service is present and UP:
-
-.. code-block:: console
-
-   # openstack network agent list
-
-   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
-   | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
-   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
-   | 7c06bda9-7b87-487e-a645-cc6c289d9082 | NIC Switch agent   | av09-18-wcp | None              | :-)   | UP    | neutron-sriov-nic-agent   |
-   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
-
-Create a new provider network. Set ``provider-physical-network`` to the
-physical network name that was configured in ``/etc/kolla/config/nova.conf``.
-Set ``provider-network-type`` to the desired type. If using VLAN, ensure
-``provider-segment`` is set to the correct VLAN ID. This example uses ``VLAN``
-network type:
-
-
-.. code-block:: console
-
-   # openstack network create --project=admin \
-     --provider-network-type=vlan \
-     --provider-physical-network=sriovtenant1 \
-     --provider-segment=1000 \
-     sriovnet1
-
-Create a subnet with a DHCP range for the provider network:
-
-.. code-block:: console
-
-   # openstack subnet create --network=sriovnet1 \
-     --subnet-range=11.0.0.0/24 \
-     --allocation-pool start=11.0.0.5,end=11.0.0.100 \
-     sriovnet1_sub1
-
-Create a port on the provider network with ``vnic_type`` set to ``direct``:
-
-.. code-block:: console
-
-   # openstack port create --network sriovnet1 --vnic-type=direct sriovnet1-port1
-
-Start a new instance with the SRIOV port assigned:
-
-.. code-block:: console
-
-   # openstack server create --flavor flavor1 \
-     --image fc-26 \
-     --nic port-id=`openstack port list | grep sriovnet1-port1 | awk '{print $2}'` \
-     vm1
-
-Verify the instance boots with the SRIOV port. Verify VF assignment by running
-dmesg on the compute node where the instance was placed.
-
-.. code-block:: console
-
-   # dmesg
-   [ 2896.849970] ixgbe 0000:05:00.0: setting MAC fa:16:3e:00:a3:01 on VF 3
-   [ 2896.850028] ixgbe 0000:05:00.0: Setting VLAN 1000, QOS 0x0 on VF 3
-   [ 2897.403367] vfio-pci 0000:05:10.4: enabling device (0000 -> 0002)
-
-For more information see `OpenStack SRIOV documentation <https://docs.openstack.org/neutron/pike/admin/config-sriov.html>`_.
-
-Nova SRIOV
-~~~~~~~~~~
-
-Preparation and deployment
---------------------------
-
-Nova provides a separate mechanism to attach PCI devices to instances that
-is independent from Neutron.  Using the PCI alias configuration option in
-nova.conf, any PCI device (PF or VF) that supports passthrough can be attached
-to an instance.  One major drawback to be aware of when using this method is
-that the PCI alias option uses a device's product id and vendor id only,
-so in environments that have NICs with multiple ports configured for SRIOV,
-it is impossible to specify a specific NIC port to pull VFs from.
-
-Modify the file ``/etc/kolla/config/nova.conf``.  The Nova Scheduler service
-on the control node requires the ``PciPassthroughFilter`` to be added to the
-list of filters and the Nova Compute service(s) on the compute node(s) need
-PCI device whitelisting.  The Nova API service on the control node and the Nova
-Compute service on the compute node also require the ``alias`` option under the
-``[pci]`` section.  The alias can be configured as 'type-VF' to pass VFs or
-'type-PF' to pass the PF. Type-VF is shown in this example:
-
-.. path /etc/kolla/config/nova.conf
-.. code-block:: ini
-
-   [DEFAULT]
-   scheduler_default_filters = <existing filters>, PciPassthroughFilter
-   scheduler_available_filters = nova.scheduler.filters.all_filters
-
-   [pci]
-   passthrough_whitelist = [{"vendor_id": "8086", "product_id": "10fb"}]
-   alias = [{"vendor_id":"8086", "product_id":"10ed", "device_type":"type-VF", "name":"vf1"}]
-
-Run deployment.
-
-Verification
-------------
-
-Create (or use an existing) flavor, and then configure it to request one PCI
-device from the PCI alias:
-
-.. code-block:: console
-
-   # openstack flavor set sriov-flavor --property "pci_passthrough:alias"="vf1:1"
-
-Start a new instance using the flavor:
-
-.. code-block:: console
-
-   # openstack server create --flavor sriov-flavor --image fc-26 vm2
-
-Verify VF devices were created and the instance starts successfully as in
-the Neutron SRIOV case.
-
-For more information see `OpenStack PCI passthrough documentation <https://docs.openstack.org/nova/pike/admin/pci-passthrough.html>`_.
diff --git a/doc/source/reference/designate-guide.rst b/doc/source/reference/networking/designate-guide.rst
similarity index 98%
rename from doc/source/reference/designate-guide.rst
rename to doc/source/reference/networking/designate-guide.rst
index eda5476c4e..ab91cb632c 100644
--- a/doc/source/reference/designate-guide.rst
+++ b/doc/source/reference/networking/designate-guide.rst
@@ -1,8 +1,8 @@
 .. _designate-guide:
 
-==================
-Designate in Kolla
-==================
+=======================
+Designate - DNS service
+=======================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/networking/dpdk.rst b/doc/source/reference/networking/dpdk.rst
new file mode 100644
index 0000000000..e0e1e19532
--- /dev/null
+++ b/doc/source/reference/networking/dpdk.rst
@@ -0,0 +1,113 @@
+.. _dpdk:
+
+====
+DPDK
+====
+
+Introduction
+------------
+
+Open vSwitch (ovs) is an open source software virtual switch developed
+and distributed via openvswitch.org.
+The Data Plane Development Kit (dpdk) is a collection of userspace
+libraries and tools that facilitate the development of high-performance
+userspace networking applications.
+
+As of the ovs 2.2 release, the ovs netdev datapath has supported integration
+with dpdk for accelerated userspace networking. Support for deploying ovs
+with dpdk (ovs-dpdk) was added to kolla ansible in the pike release. The
+ovs-dpdk role introduced in the pike release has been tested on centos 7 and
+ubuntu 16.04 hosts; however, ubuntu is recommended due to conflicts with the
+cgroup configuration created by the default systemd version shipped with
+centos 7.
+
+Prerequisites
+-------------
+
+DPDK is a high-performance userspace networking library; as such, it has
+several requirements to function correctly that do not apply when deploying
+ovs without dpdk.
+
+One of the mechanisms dpdk uses to accelerate memory access is the
+utilisation of kernel hugepages. The use of hugepage memory minimises the
+chance of a translation lookaside buffer (TLB) miss when translating virtual
+to physical memory, as it increases the total amount of addressable memory
+that can be cached via the TLB. Hugepages are unswappable contiguous blocks
+of memory, typically 2MiB or 1GiB in size, that can be used to facilitate
+efficient sharing of memory between guests and a vSwitch, or DMA mapping
+between physical nics and the userspace ovs datapath.
+
+To deploy ovs-dpdk on a platform, a proportion of system memory should be
+allocated as hugepages. While it is possible to allocate hugepages at
+runtime, it is advised to allocate them via the kernel command line instead
+to prevent memory fragmentation. This can be achieved by adding the following
+to the grub config and regenerating your grub file:
+
+.. code-block:: console
+
+   default_hugepagesz=2M hugepagesz=2M hugepages=25000
+
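+Whether the requested pages were actually reserved can be verified after a
+reboot. This is a minimal check only, assuming the 2MiB page size used in the
+example above:
+
+.. code-block:: console
+
+   # grep HugePages_Total /proc/meminfo
+   HugePages_Total:   25000
+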
+As dpdk is a userspace networking library, it requires userspace-compatible
+drivers to be able to control the physical interfaces on the platform. dpdk
+technically supports three kernel drivers: ``igb_uio``, ``uio_pci_generic``,
+and ``vfio_pci``. While it is technically possible to use all three, only
+``uio_pci_generic`` and ``vfio_pci`` are recommended for use with kolla.
+``igb_uio`` is BSD licenced and distributed as part of the dpdk library.
+While it has some advantages over ``uio_pci_generic``, loading the
+``igb_uio`` module will taint the kernel and possibly invalidate distro
+support. To successfully deploy ``ovs-dpdk``, the ``vfio_pci`` or
+``uio_pci_generic`` kernel module must be present on the platform. Most
+distros include ``vfio_pci`` or ``uio_pci_generic`` as part of the default
+kernel, though on some distros you may need to install
+``kernel-modules-extra`` or the distro equivalent prior to running
+:command:`kolla-ansible deploy`.
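+
+As a quick sanity check before deploying, the chosen driver can be loaded
+manually on each host. This is only a sketch; the module name depends on the
+driver selected for your platform:
+
+.. code-block:: console
+
+   # modprobe vfio-pci
+   # lsmod | grep vfio_pci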
+
+Installation
+------------
+
+To enable ovs-dpdk, add the following configuration to
+``/etc/kolla/globals.yml`` file:
+
+.. code-block:: yaml
+
+   ovs_datapath: "netdev"
+   enable_ovs_dpdk: yes
+   enable_openvswitch: yes
+   tunnel_interface: "dpdk_bridge"
+   neutron_bridge_name: "dpdk_bridge"
+
+Unlike standard Open vSwitch deployments, the interface specified by
+``neutron_external_interface`` should have an ip address assigned. The ip
+address assigned to ``neutron_external_interface`` will be moved to the
+``dpdk_bridge`` as part of the deploy action. When using ovs-dpdk, the
+``tunnel_interface`` must be an ovs bridge with a physical interface attached
+for tunnelled traffic to be accelerated by dpdk. Note that, due to a
+limitation in ansible variable names which excludes the use of ``-`` in a
+variable name, it is not possible to use the default ``br-ex`` name for
+``neutron_bridge_name`` or ``tunnel_interface``.
+
+At present, the tunnel interface ip is configured using network manager on
+ubuntu and systemd on centos family operating systems. systemd is used to
+work around a limitation of the centos network manager implementation, which
+does not consider the creation of an ovs bridge to be a hotplug event. In the
+future, a new config option will be introduced to allow systemd to be used on
+all host distros for those who do not wish to enable the network manager
+service on ubuntu.
+
+Limitations
+-----------
+
+Reconfiguration from kernel ovs to ovs-dpdk is currently not supported.
+Changing ovs datapaths on a deployed node requires neutron config changes and
+libvirt xml changes for all running instances, including a hard reboot of the
+vm.
+
+When upgrading ovs-dpdk, it should be noted that this will always involve a
+dataplane outage. Unlike kernel OVS, the dataplane for ovs-dpdk executes in
+the ovs-vswitchd process. This means the lifetime of the dpdk dataplane is
+tied to the lifetime of the ``ovsdpdk_vswitchd`` container. As such, it is
+recommended to always evacuate all vm workloads from a node running ovs-dpdk
+prior to upgrading.
+
+On ubuntu, network manager is required for tunnel networking. This
+requirement will be removed in the future.
diff --git a/doc/source/reference/networking/index.rst b/doc/source/reference/networking/index.rst
new file mode 100644
index 0000000000..ad6bbdc98a
--- /dev/null
+++ b/doc/source/reference/networking/index.rst
@@ -0,0 +1,18 @@
+==========
+Networking
+==========
+
+Kolla deploys Neutron by default as the OpenStack networking component.
+This section describes configuring and running Neutron extensions such as
+LBaaS, Networking-SFC, QoS, and so on.
+
+.. toctree::
+   :maxdepth: 1
+
+   designate-guide
+   dpdk
+   neutron-extensions
+   opendaylight
+   provider-networks
+   sriov
diff --git a/doc/source/reference/networking/neutron-extensions.rst b/doc/source/reference/networking/neutron-extensions.rst
new file mode 100644
index 0000000000..6b1d8b6ce8
--- /dev/null
+++ b/doc/source/reference/networking/neutron-extensions.rst
@@ -0,0 +1,80 @@
+.. _neutron-extensions:
+
+==================
+Neutron Extensions
+==================
+
+Networking-SFC
+~~~~~~~~~~~~~~
+
+Preparation and deployment
+--------------------------
+
+Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
+
+.. code-block:: yaml
+
+   enable_neutron_sfc: "yes"
+
+Verification
+------------
+
+For setting up a testbed environment and creating a port chain, please refer
+to `networking-sfc documentation
+<https://docs.openstack.org/networking-sfc/latest/contributor/system_design_and_workflow.html>`__.
+
+Neutron VPNaaS (VPN-as-a-Service)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Preparation and deployment
+--------------------------
+
+Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
+
+.. code-block:: yaml
+
+   enable_neutron_vpnaas: "yes"
+
+Verification
+------------
+
+VPNaaS is a complex subject; hence, this document provides directions for a
+simple smoke test to verify the service is up and running.
+
+On the network node(s), the ``neutron_vpnaas_agent`` should be up (image naming
+and versioning may differ depending on deploy configuration):
+
+.. code-block:: console
+
+   # docker ps --filter name=neutron_vpnaas_agent
+
+   CONTAINER ID   IMAGE                                                               COMMAND         CREATED          STATUS        PORTS  NAMES
+   97d25657d55e   operator:5000/kolla/oraclelinux-source-neutron-vpnaas-agent:4.0.0   "kolla_start"   44 minutes ago   Up 44 minutes        neutron_vpnaas_agent
+
+Kolla-Ansible includes a small script that can be used in tandem with
+``tools/init-runonce`` to verify the VPN using two routers and two Nova VMs:
+
+.. code-block:: console
+
+   tools/init-runonce
+   tools/init-vpn
+
+Verify both VPN services are active:
+
+.. code-block:: console
+
+   # neutron vpn-service-list
+
+   +--------------------------------------+----------+--------------------------------------+--------+
+   | id                                   | name     | router_id                            | status |
+   +--------------------------------------+----------+--------------------------------------+--------+
+   | ad941ec4-5f3d-4a30-aae2-1ab3f4347eb1 | vpn_west | 051f7ce3-4301-43cc-bfbd-7ffd59af539e | ACTIVE |
+   | edce15db-696f-46d8-9bad-03d087f1f682 | vpn_east | 058842e0-1d01-4230-af8d-0ba6d0da8b1f | ACTIVE |
+   +--------------------------------------+----------+--------------------------------------+--------+
+
+Two VMs can now be booted, one on vpn_east, the other on vpn_west, and
+encrypted ping packets observed being sent from one to the other.
+
+For more information on this and VPNaaS in Neutron refer to the
+`Neutron VPNaaS Testing <https://docs.openstack.org/neutron-vpnaas/latest/contributor/index.html#testing>`__
+and the `OpenStack wiki <https://wiki.openstack.org/wiki/Neutron/VPNaaS>`_.
diff --git a/doc/source/reference/networking/opendaylight.rst b/doc/source/reference/networking/opendaylight.rst
new file mode 100644
index 0000000000..a7f34b825d
--- /dev/null
+++ b/doc/source/reference/networking/opendaylight.rst
@@ -0,0 +1,56 @@
+.. _opendaylight:
+
+=============================
+OpenDaylight - SDN controller
+=============================
+
+Preparation and deployment
+--------------------------
+
+Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
+
+.. code-block:: yaml
+
+   enable_opendaylight: "yes"
+
+Networking-ODL is an additional Neutron plugin that allows the OpenDaylight
+SDN controller to utilize its network virtualization features. For
+OpenDaylight to work, the Networking-ODL plugin has to be installed in the
+``neutron-server`` container. In this case, one could use the
+``neutron-server-opendaylight`` and ``opendaylight`` containers, either by
+pulling them from the Kolla Docker Hub registry or by building them locally.
+
+OpenDaylight ``globals.yml`` configurable options with their defaults include:
+
+.. code-block:: yaml
+
+   opendaylight_mechanism_driver: "opendaylight_v2"
+   opendaylight_l3_service_plugin: "odl-router_v2"
+   opendaylight_acl_impl: "learn"
+   enable_opendaylight_qos: "no"
+   enable_opendaylight_l3: "yes"
+   enable_opendaylight_legacy_netvirt_conntrack: "no"
+   opendaylight_port_binding_type: "pseudo-agentdb-binding"
+   opendaylight_features: "odl-mdsal-apidocs,odl-netvirt-openstack"
+   opendaylight_allowed_network_types: '"flat", "vlan", "vxlan"'
+
+Clustered OpenDaylight Deploy
+-----------------------------
+
+High availability clustered OpenDaylight requires modifying the inventory file
+and placing three or more hosts in the OpenDaylight or Networking groups.
+
+.. note::
+
+   The OpenDaylight role allows deployment of either one host or three or
+   more hosts for the OpenDaylight/Networking role.
+
+Verification
+------------
+
+Verify the build and deploy operation of the Networking-ODL containers.
+Successful deployment will bring up an OpenDaylight container in the list of
+running containers on the network/opendaylight node.
+
+For the source code, please refer to the following link:
+https://github.com/openstack/networking-odl
diff --git a/doc/source/reference/networking/provider-networks.rst b/doc/source/reference/networking/provider-networks.rst
new file mode 100644
index 0000000000..7d2a56ba8f
--- /dev/null
+++ b/doc/source/reference/networking/provider-networks.rst
@@ -0,0 +1,21 @@
+.. _provider-networks:
+
+=================
+Provider Networks
+=================
+
+Provider networks allow connecting compute instances directly to physical
+networks, avoiding tunnels. This is necessary, for example, for some
+performance-critical applications. Only OpenStack administrators can create
+such networks. For provider networks, compute hosts must have an external
+bridge created and configured by Ansible tasks, as is already done for tenant
+DVR mode networking. Normal tenant non-DVR networking does not need an
+external bridge on compute hosts, and therefore operators do not need an
+additional dedicated network interface.
+
+To enable provider networks, modify the ``/etc/kolla/globals.yml`` file
+as the following example shows:
+
+.. code-block:: yaml
+
+   enable_neutron_provider_networks: "yes"
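+
+After deployment, an administrator can create a provider network mapped to a
+physical network. The following is an illustrative sketch only; the network
+type, physical network name and segmentation ID must match your Neutron
+configuration:
+
+.. code-block:: console
+
+   # openstack network create --external \
+     --provider-network-type vlan \
+     --provider-physical-network physnet1 \
+     --provider-segment 100 \
+     provider-net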
diff --git a/doc/source/reference/networking/sriov.rst b/doc/source/reference/networking/sriov.rst
new file mode 100644
index 0000000000..be92c6d71f
--- /dev/null
+++ b/doc/source/reference/networking/sriov.rst
@@ -0,0 +1,224 @@
+.. _sriov:
+
+=====
+SRIOV
+=====
+
+Neutron SRIOV
+~~~~~~~~~~~~~
+
+Preparation and deployment
+--------------------------
+
+SRIOV requires specific NIC and BIOS configuration and is not supported on
+all platforms. Consult the NIC and platform-specific documentation for
+instructions on enablement.
+
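+Depending on the NIC and driver, VFs can typically be created through sysfs.
+The interface name and VF count below are illustrative placeholders only:
+
+.. code-block:: console
+
+   # cat /sys/class/net/ens785f0/device/sriov_totalvfs
+   # echo 8 > /sys/class/net/ens785f0/device/sriov_numvfs
+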
+Modify the ``/etc/kolla/globals.yml`` file as the following example shows:
+
+.. code-block:: yaml
+
+   enable_neutron_sriov: "yes"
+
+Modify the ``/etc/kolla/config/neutron/ml2_conf.ini`` file and add
+``sriovnicswitch`` to the ``mechanism_drivers``. Also, the provider
+networks used by SRIOV should be configured. Both flat and VLAN are configured
+with the same physical network name in this example:
+
+.. path /etc/kolla/config/neutron/ml2_conf.ini
+.. code-block:: ini
+
+   [ml2]
+   mechanism_drivers = openvswitch,l2population,sriovnicswitch
+
+   [ml2_type_vlan]
+   network_vlan_ranges = sriovtenant1:1000:1009
+
+   [ml2_type_flat]
+   flat_networks = sriovtenant1
+
+Modify the ``/etc/kolla/config/nova.conf`` file and add
+``PciPassthroughFilter`` to ``scheduler_default_filters``. This filter is
+required by the Nova Scheduler service on the controller node.
+
+.. path /etc/kolla/config/nova.conf
+.. code-block:: ini
+
+   [DEFAULT]
+   scheduler_default_filters = <existing filters>, PciPassthroughFilter
+   scheduler_available_filters = nova.scheduler.filters.all_filters
+
+Edit the ``/etc/kolla/config/nova.conf`` file and add PCI device
+whitelisting. This is needed by the OpenStack Compute service(s) on the
+compute nodes:
+
+.. path /etc/kolla/config/nova.conf
+.. code-block:: ini
+
+   [pci]
+   passthrough_whitelist = [{"devname": "ens785f0", "physical_network": "sriovtenant1"}]
+
+Modify the ``/etc/kolla/config/neutron/sriov_agent.ini`` file and add the
+physical network to interface mapping. Specific VFs can also be excluded
+here; leaving this blank enables all VFs for the interface:
+
+.. path /etc/kolla/config/neutron/sriov_agent.ini
+.. code-block:: ini
+
+   [sriov_nic]
+   physical_device_mappings = sriovtenant1:ens785f0
+   exclude_devices =
+
+Run the deployment, for example (``<inventory>`` is a placeholder for your
+inventory file):
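+
+.. code-block:: console
+
+   # kolla-ansible -i <inventory> deploy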
+
+Verification
+------------
+
+Check that VFs were created on the compute node(s). VFs will appear in the
+output of both ``lspci`` and ``ip link show``.  For example:
+
+.. code-block:: console
+
+   # lspci | grep net
+   05:10.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
+
+
+   # ip -d link show ens785f0
+   4: ens785f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovs-system state UP mode DEFAULT qlen 1000
+   link/ether 90:e2:ba:ba:fb:20 brd ff:ff:ff:ff:ff:ff promiscuity 1
+   openvswitch_slave addrgenmode eui64
+   vf 0 MAC 52:54:00:36:57:e0, spoof checking on, link-state auto, trust off
+   vf 1 MAC 52:54:00:00:62:db, spoof checking on, link-state auto, trust off
+   vf 2 MAC fa:16:3e:92:cf:12, spoof checking on, link-state auto, trust off
+   vf 3 MAC fa:16:3e:00:a3:01, vlan 1000, spoof checking on, link-state auto, trust off
+
+Verify the SRIOV Agent container is running on the compute node(s):
+
+.. code-block:: console
+
+   # docker ps --filter name=neutron_sriov_agent
+   CONTAINER ID   IMAGE                                                                COMMAND        CREATED         STATUS         PORTS  NAMES
+   b03a8f4c0b80   10.10.10.10:4000/registry/centos-source-neutron-sriov-agent:17.04.0  "kolla_start"  18 minutes ago  Up 18 minutes         neutron_sriov_agent
+
+Verify the SRIOV Agent service is present and UP:
+
+.. code-block:: console
+
+   # openstack network agent list
+
+   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
+   | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
+   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
+   | 7c06bda9-7b87-487e-a645-cc6c289d9082 | NIC Switch agent   | av09-18-wcp | None              | :-)   | UP    | neutron-sriov-nic-agent   |
+   +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
+
+Create a new provider network. Set ``provider-physical-network`` to the
+physical network name that was configured in ``/etc/kolla/config/nova.conf``
+and in ``/etc/kolla/config/neutron/ml2_conf.ini``. Set
+``provider-network-type`` to the desired type. If using VLAN, ensure
+``provider-segment`` is set to the correct VLAN ID. This example uses the
+``vlan`` network type:
+
+.. code-block:: console
+
+   # openstack network create --project=admin \
+     --provider-network-type=vlan \
+     --provider-physical-network=sriovtenant1 \
+     --provider-segment=1000 \
+     sriovnet1
+
+Create a subnet with a DHCP range for the provider network:
+
+.. code-block:: console
+
+   # openstack subnet create --network=sriovnet1 \
+     --subnet-range=11.0.0.0/24 \
+     --allocation-pool start=11.0.0.5,end=11.0.0.100 \
+     sriovnet1_sub1
+
+Create a port on the provider network with ``vnic_type`` set to ``direct``:
+
+.. code-block:: console
+
+   # openstack port create --network sriovnet1 --vnic-type=direct sriovnet1-port1
+
+Start a new instance with the SRIOV port assigned:
+
+.. code-block:: console
+
+   # openstack server create --flavor flavor1 \
+     --image fc-26 \
+     --nic port-id=`openstack port list | grep sriovnet1-port1 | awk '{print $2}'` \
+     vm1
+
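+The port ID can equivalently be looked up with ``openstack port show``,
+avoiding the ``grep``/``awk`` pipeline:
+
+.. code-block:: console
+
+   # openstack server create --flavor flavor1 \
+     --image fc-26 \
+     --nic port-id=$(openstack port show sriovnet1-port1 -f value -c id) \
+     vm1
+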
+Verify the instance boots with the SRIOV port. Verify VF assignment by
+running ``dmesg`` on the compute node where the instance was placed:
+
+.. code-block:: console
+
+   # dmesg
+   [ 2896.849970] ixgbe 0000:05:00.0: setting MAC fa:16:3e:00:a3:01 on VF 3
+   [ 2896.850028] ixgbe 0000:05:00.0: Setting VLAN 1000, QOS 0x0 on VF 3
+   [ 2897.403367] vfio-pci 0000:05:10.4: enabling device (0000 -> 0002)
+
+For more information see `OpenStack SRIOV documentation <https://docs.openstack.org/neutron/pike/admin/config-sriov.html>`_.
+
+Nova SRIOV
+~~~~~~~~~~
+
+Preparation and deployment
+--------------------------
+
+Nova provides a separate mechanism to attach PCI devices to instances that
+is independent of Neutron. Using the PCI alias configuration option in
+``nova.conf``, any PCI device (PF or VF) that supports passthrough can be
+attached to an instance. One major drawback of this method is that the PCI
+alias option matches only on a device's product ID and vendor ID, so in
+environments with NICs that have multiple ports configured for SRIOV, it is
+impossible to specify a particular NIC port to pull VFs from.
+
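+The vendor and product IDs used below can be read from ``lspci -nn`` on the
+compute node. The output shown is an illustrative example for an Intel 82599
+PF and one of its VFs:
+
+.. code-block:: console
+
+   # lspci -nn | grep -i 82599
+   05:00.0 Ethernet controller [0200]: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection [8086:10fb] (rev 01)
+   05:10.0 Ethernet controller [0200]: Intel Corporation 82599 Ethernet Controller Virtual Function [8086:10ed] (rev 01)
+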
+Modify the file ``/etc/kolla/config/nova.conf``.  The Nova Scheduler service
+on the control node requires the ``PciPassthroughFilter`` to be added to the
+list of filters and the Nova Compute service(s) on the compute node(s) need
+PCI device whitelisting.  The Nova API service on the control node and the Nova
+Compute service on the compute node also require the ``alias`` option under the
+``[pci]`` section.  The alias can be configured as ``type-VF`` to pass VFs or
+``type-PF`` to pass the PF. ``type-VF`` is shown in this example:
+
+.. path /etc/kolla/config/nova.conf
+.. code-block:: ini
+
+   [DEFAULT]
+   scheduler_default_filters = <existing filters>, PciPassthroughFilter
+   scheduler_available_filters = nova.scheduler.filters.all_filters
+
+   [pci]
+   passthrough_whitelist = [{"vendor_id": "8086", "product_id": "10fb"}]
+   alias = [{"vendor_id":"8086", "product_id":"10ed", "device_type":"type-VF", "name":"vf1"}]
+
+Run deployment.
+
+Verification
+------------
+
+Create (or use an existing) flavor, and then configure it to request one PCI
+device from the PCI alias:
+
+.. code-block:: console
+
+   # openstack flavor set sriov-flavor --property "pci_passthrough:alias"="vf1:1"
+
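+The property can be confirmed on the flavor, for example:
+
+.. code-block:: console
+
+   # openstack flavor show sriov-flavor -c properties
+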
+Start a new instance using the flavor:
+
+.. code-block:: console
+
+   # openstack server create --flavor sriov-flavor --image fc-26 vm2
+
+Verify VF devices were created and the instance starts successfully as in
+the Neutron SRIOV case.
+
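+For example, confirm the instance reaches the ``ACTIVE`` state (the output
+shown is illustrative) and check ``dmesg`` on its compute node as in the
+Neutron SRIOV case:
+
+.. code-block:: console
+
+   # openstack server show vm2 -c status
+   +--------+--------+
+   | Field  | Value  |
+   +--------+--------+
+   | status | ACTIVE |
+   +--------+--------+
+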
+For more information see `OpenStack PCI passthrough documentation <https://docs.openstack.org/nova/pike/admin/pci-passthrough.html>`_.
diff --git a/doc/source/reference/orchestration-and-nfv/index.rst b/doc/source/reference/orchestration-and-nfv/index.rst
new file mode 100644
index 0000000000..3e47ef8cc7
--- /dev/null
+++ b/doc/source/reference/orchestration-and-nfv/index.rst
@@ -0,0 +1,10 @@
+=====================
+Orchestration and NFV
+=====================
+
+This section describes configuration of orchestration and NFV services.
+
+.. toctree::
+   :maxdepth: 2
+
+   tacker-guide
diff --git a/doc/source/reference/tacker-guide.rst b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
similarity index 98%
rename from doc/source/reference/tacker-guide.rst
rename to doc/source/reference/orchestration-and-nfv/tacker-guide.rst
index 72f1fb4add..c22f8a00d2 100644
--- a/doc/source/reference/tacker-guide.rst
+++ b/doc/source/reference/orchestration-and-nfv/tacker-guide.rst
@@ -1,6 +1,6 @@
-===============
-Tacker in Kolla
-===============
+==========================
+Tacker - NFV orchestration
+==========================
 
 "Tacker is an OpenStack service for NFV Orchestration with a general purpose
 VNF Manager to deploy and operate Virtual Network Functions (VNFs) and
diff --git a/doc/source/reference/glance-guide.rst b/doc/source/reference/shared-services/glance-guide.rst
similarity index 88%
rename from doc/source/reference/glance-guide.rst
rename to doc/source/reference/shared-services/glance-guide.rst
index ce1d52b9f8..6a7bce200e 100644
--- a/doc/source/reference/glance-guide.rst
+++ b/doc/source/reference/shared-services/glance-guide.rst
@@ -1,8 +1,8 @@
 .. _glance-guide:
 
-===============
-Glance in Kolla
-===============
+======================
+Glance - Image service
+======================
 
 Glance backends
 ---------------
@@ -41,8 +41,8 @@ Ceph backend
 
 To make use of ``ceph`` backend in glance, simply enable ceph or external ceph.
 By default will enable backend ceph automatically.
-Please refer to :doc:`ceph-guide` or :doc:`external-ceph-guide` on how to
-configure this backend.
+Please refer to :doc:`../storage/ceph-guide`
+or :doc:`../storage/external-ceph-guide` on how to configure this backend.
 
 To enable the ceph backend manually:
 
@@ -54,7 +54,7 @@ VMware backend
 ~~~~~~~~~~~~~~
 
 To make use of VMware datastores as a glance backend,
-enable `glance_backend_vmware` and refer to :doc:`vmware-guide` for
+enable `glance_backend_vmware` and refer to :doc:`../compute/vmware-guide` for
 further VMware configuration.
 
 To enable the vmware backend manually:
@@ -67,7 +67,8 @@ Swift backend
 ~~~~~~~~~~~~~
 
 To store glance images in a swift cluster, the ``swift`` backend should
-be enabled.  Refer to :doc:`swift-guide` on how to configure swift in kolla.
+be enabled.  Refer to :doc:`../storage/swift-guide` on how to configure
+swift in kolla.
 If ceph is enabled, will have higher precedence over swift as glance backend.
 
 To enable the swift backend manually:
@@ -103,7 +104,7 @@ need to be enabled.
 
 .. warning::
 
-    When using glance backend ``file`` without a shared filesytem, this method cannot
+    When using glance backend ``file`` without a shared filesystem, this method cannot
     be used or will end up with a corrupt state of glance services.
     Reasoning behind is because glance api is only running in one host, blocking the
     orchestration of a rolling upgrade.
diff --git a/doc/source/reference/horizon-guide.rst b/doc/source/reference/shared-services/horizon-guide.rst
similarity index 89%
rename from doc/source/reference/horizon-guide.rst
rename to doc/source/reference/shared-services/horizon-guide.rst
index 65e9cdeb64..89d19c72f2 100644
--- a/doc/source/reference/horizon-guide.rst
+++ b/doc/source/reference/shared-services/horizon-guide.rst
@@ -1,8 +1,8 @@
 .. _horizon-guide:
 
-================
-Horizon in Kolla
-================
+=============================
+Horizon - OpenStack dashboard
+=============================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/shared-services/index.rst b/doc/source/reference/shared-services/index.rst
new file mode 100644
index 0000000000..74f82f0ed7
--- /dev/null
+++ b/doc/source/reference/shared-services/index.rst
@@ -0,0 +1,12 @@
+===============
+Shared services
+===============
+
+This section describes the configuration of shared services, such as the
+image service backends and the dashboard.
+
+.. toctree::
+   :maxdepth: 2
+
+   glance-guide
+   horizon-guide
diff --git a/doc/source/reference/ceph-guide.rst b/doc/source/reference/storage/ceph-guide.rst
similarity index 99%
rename from doc/source/reference/ceph-guide.rst
rename to doc/source/reference/storage/ceph-guide.rst
index 34d27a9969..8cf1c11229 100644
--- a/doc/source/reference/ceph-guide.rst
+++ b/doc/source/reference/storage/ceph-guide.rst
@@ -1,8 +1,8 @@
 .. _ceph-guide:
 
-=============
-Ceph in Kolla
-=============
+===============================
+Ceph - Software Defined Storage
+===============================
 
 .. note::
    The out-of-the-box Ceph deployment requires 3 hosts with at least one block
diff --git a/doc/source/reference/cinder-guide-hnas.rst b/doc/source/reference/storage/cinder-guide-hnas.rst
similarity index 100%
rename from doc/source/reference/cinder-guide-hnas.rst
rename to doc/source/reference/storage/cinder-guide-hnas.rst
diff --git a/doc/source/reference/cinder-guide.rst b/doc/source/reference/storage/cinder-guide.rst
similarity index 98%
rename from doc/source/reference/cinder-guide.rst
rename to doc/source/reference/storage/cinder-guide.rst
index 50b2fccd57..cd37f879cb 100644
--- a/doc/source/reference/cinder-guide.rst
+++ b/doc/source/reference/storage/cinder-guide.rst
@@ -1,8 +1,8 @@
 .. _cinder-guide:
 
-===============
-Cinder in Kolla
-===============
+======================
+Cinder - Block storage
+======================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/external-ceph-guide.rst b/doc/source/reference/storage/external-ceph-guide.rst
similarity index 100%
rename from doc/source/reference/external-ceph-guide.rst
rename to doc/source/reference/storage/external-ceph-guide.rst
diff --git a/doc/source/reference/storage/index.rst b/doc/source/reference/storage/index.rst
new file mode 100644
index 0000000000..43559d2277
--- /dev/null
+++ b/doc/source/reference/storage/index.rst
@@ -0,0 +1,17 @@
+=======
+Storage
+=======
+
+This section describes configuration of the different storage backends
+supported by kolla.
+
+.. toctree::
+   :maxdepth: 1
+
+   ceph-guide
+   external-ceph-guide
+   cinder-guide
+   cinder-guide-hnas
+   manila-guide
+   manila-hnas-guide
+   swift-guide
diff --git a/doc/source/reference/manila-guide.rst b/doc/source/reference/storage/manila-guide.rst
similarity index 99%
rename from doc/source/reference/manila-guide.rst
rename to doc/source/reference/storage/manila-guide.rst
index 475f4c4946..5a9dec4226 100644
--- a/doc/source/reference/manila-guide.rst
+++ b/doc/source/reference/storage/manila-guide.rst
@@ -1,8 +1,8 @@
 .. _manila-guide:
 
-===============
-Manila in Kolla
-===============
+===================================
+Manila - Shared filesystems service
+===================================
 
 Overview
 ~~~~~~~~
diff --git a/doc/source/reference/manila-hnas-guide.rst b/doc/source/reference/storage/manila-hnas-guide.rst
similarity index 100%
rename from doc/source/reference/manila-hnas-guide.rst
rename to doc/source/reference/storage/manila-hnas-guide.rst
diff --git a/doc/source/reference/swift-guide.rst b/doc/source/reference/storage/swift-guide.rst
similarity index 98%
rename from doc/source/reference/swift-guide.rst
rename to doc/source/reference/storage/swift-guide.rst
index 92b780404d..c5c4e23d36 100644
--- a/doc/source/reference/swift-guide.rst
+++ b/doc/source/reference/storage/swift-guide.rst
@@ -1,8 +1,8 @@
 .. _swift-guide:
 
-==============
-Swift in Kolla
-==============
+==============================
+Swift - Object storage service
+==============================
 
 Overview
 ~~~~~~~~