diff --git a/doc/training-guide/figures/image18.jpg b/doc/training-guide/figures/image18.jpg
deleted file mode 100644
index 023d0be223..0000000000
Binary files a/doc/training-guide/figures/image18.jpg and /dev/null differ
diff --git a/doc/training-guide/figures/image33.png b/doc/training-guide/figures/image33.png
new file mode 100644
index 0000000000..50c70a2812
Binary files /dev/null and b/doc/training-guide/figures/image33.png differ
diff --git a/doc/training-guide/figures/image34.png b/doc/training-guide/figures/image34.png
new file mode 100644
index 0000000000..d648973b81
Binary files /dev/null and b/doc/training-guide/figures/image34.png differ
diff --git a/doc/training-guide/figures/image35.png b/doc/training-guide/figures/image35.png
new file mode 100644
index 0000000000..9e02a40311
Binary files /dev/null and b/doc/training-guide/figures/image35.png differ
diff --git a/doc/training-guide/figures/image36.png b/doc/training-guide/figures/image36.png
new file mode 100644
index 0000000000..6391845cb6
Binary files /dev/null and b/doc/training-guide/figures/image36.png differ
diff --git a/doc/training-guide/figures/image37.png b/doc/training-guide/figures/image37.png
new file mode 100644
index 0000000000..0beed5b218
Binary files /dev/null and b/doc/training-guide/figures/image37.png differ
diff --git a/doc/training-guide/figures/image38.png b/doc/training-guide/figures/image38.png
new file mode 100644
index 0000000000..610d511210
Binary files /dev/null and b/doc/training-guide/figures/image38.png differ
diff --git a/doc/training-guide/figures/image39.png b/doc/training-guide/figures/image39.png
new file mode 100644
index 0000000000..9454065c28
Binary files /dev/null and b/doc/training-guide/figures/image39.png differ
diff --git a/doc/training-guide/figures/image40.png b/doc/training-guide/figures/image40.png
new file mode 100644
index 0000000000..8499ca1ead
Binary files /dev/null and b/doc/training-guide/figures/image40.png differ
diff --git a/doc/training-guide/figures/image41.png b/doc/training-guide/figures/image41.png
new file mode 100644
index 0000000000..22ef31201a
Binary files /dev/null and b/doc/training-guide/figures/image41.png differ
diff --git a/doc/training-guide/figures/image42.png b/doc/training-guide/figures/image42.png
new file mode 100644
index 0000000000..ee5ffbf72c
Binary files /dev/null and b/doc/training-guide/figures/image42.png differ
diff --git a/doc/training-guide/figures/image43.png b/doc/training-guide/figures/image43.png
new file mode 100644
index 0000000000..4df7326a80
Binary files /dev/null and b/doc/training-guide/figures/image43.png differ
diff --git a/doc/training-guide/figures/image44.png b/doc/training-guide/figures/image44.png
new file mode 100644
index 0000000000..7e319ca0b7
Binary files /dev/null and b/doc/training-guide/figures/image44.png differ
diff --git a/doc/training-guide/figures/image45.png b/doc/training-guide/figures/image45.png
new file mode 100644
index 0000000000..8ce1309131
Binary files /dev/null and b/doc/training-guide/figures/image45.png differ
diff --git a/doc/training-guide/figures/image46.png b/doc/training-guide/figures/image46.png
new file mode 100644
index 0000000000..5d7c8f421e
Binary files /dev/null and b/doc/training-guide/figures/image46.png differ
diff --git a/doc/training-guide/figures/image47.png b/doc/training-guide/figures/image47.png
new file mode 100644
index 0000000000..3b7978b673
Binary files /dev/null and b/doc/training-guide/figures/image47.png differ
diff --git a/doc/training-guide/figures/image48.png b/doc/training-guide/figures/image48.png
new file mode 100644
index 0000000000..e7a0396f5f
Binary files /dev/null and b/doc/training-guide/figures/image48.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image00.png b/doc/training-guide/figures/lab000-virtual-box/image00.png
new file mode 100644
index 0000000000..af132f31e3
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image00.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image01.png b/doc/training-guide/figures/lab000-virtual-box/image01.png
new file mode 100644
index 0000000000..d55c6d52bf
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image01.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image02.png b/doc/training-guide/figures/lab000-virtual-box/image02.png
new file mode 100644
index 0000000000..2bd1e8a397
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image02.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image03.png b/doc/training-guide/figures/lab000-virtual-box/image03.png
new file mode 100644
index 0000000000..de2e23f9f3
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image03.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image04.png b/doc/training-guide/figures/lab000-virtual-box/image04.png
new file mode 100644
index 0000000000..dd810fa615
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image04.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image05.png b/doc/training-guide/figures/lab000-virtual-box/image05.png
new file mode 100644
index 0000000000..dbce3a4f5b
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image05.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image07.png b/doc/training-guide/figures/lab000-virtual-box/image07.png
new file mode 100644
index 0000000000..ea2ce7b9a4
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image07.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image08.png b/doc/training-guide/figures/lab000-virtual-box/image08.png
new file mode 100644
index 0000000000..3ada9f1dac
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image08.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image09.png b/doc/training-guide/figures/lab000-virtual-box/image09.png
new file mode 100644
index 0000000000..79a96f726b
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image09.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image10.png b/doc/training-guide/figures/lab000-virtual-box/image10.png
new file mode 100644
index 0000000000..1e78bf2731
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image10.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image11.png b/doc/training-guide/figures/lab000-virtual-box/image11.png
new file mode 100644
index 0000000000..a9494a6acf
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image11.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image12.png b/doc/training-guide/figures/lab000-virtual-box/image12.png
new file mode 100644
index 0000000000..64bd966806
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image12.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image13.png b/doc/training-guide/figures/lab000-virtual-box/image13.png
new file mode 100644
index 0000000000..b7c9a22a6a
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image13.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image14.png b/doc/training-guide/figures/lab000-virtual-box/image14.png
new file mode 100644
index 0000000000..f99a2ad3a8
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image14.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image15.png b/doc/training-guide/figures/lab000-virtual-box/image15.png
new file mode 100644
index 0000000000..abd4a2d688
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image15.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image16.png b/doc/training-guide/figures/lab000-virtual-box/image16.png
new file mode 100644
index 0000000000..37f381fc17
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image16.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image17.png b/doc/training-guide/figures/lab000-virtual-box/image17.png
new file mode 100644
index 0000000000..a12c6a3ac8
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image17.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image18.png b/doc/training-guide/figures/lab000-virtual-box/image18.png
new file mode 100644
index 0000000000..f7f3a86e02
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image18.png differ
diff --git a/doc/training-guide/figures/lab000-virtual-box/image19.png b/doc/training-guide/figures/lab000-virtual-box/image19.png
new file mode 100644
index 0000000000..d08afb80a4
Binary files /dev/null and b/doc/training-guide/figures/lab000-virtual-box/image19.png differ
diff --git a/doc/training-guide/lab000-important-terms.xml b/doc/training-guide/lab000-important-terms.xml
new file mode 100644
index 0000000000..70ee594cd3
--- /dev/null
+++ b/doc/training-guide/lab000-important-terms.xml
@@ -0,0 +1,60 @@
+
+
+ Important Terms
+ Host Operating System (Host)
+ :
+ ‘Host OS’ or ‘Host’ refers to the operating
+ system installed on your hardware (laptop/desktop) that hosts the
+ virtual machines. In short, it is the machine on which VirtualBox
+ is installed.
+ Guest Operating System
+ (Guest):
+ ‘Guest OS’ or ‘Guest’ refers to the
+ operating system installed on a VirtualBox virtual machine; it is
+ a virtual instance independent of the host OS.
+ Node :
+ Node in this context refers specifically to servers.
+ Each OpenStack server is a node.
+ Control Node:
+ The Control Node hosts the database, Keystone (middleware) and
+ the servers for the current OpenStack deployment. It acts as
+ the ‘brains behind OpenStack’ and drives services such as
+ authentication and the database.
+ Compute Node:
+ The Compute Node runs the required hypervisor (QEMU/KVM) and
+ is your virtual machine host.
+ Network Node:
+ Network Node provides Network as a Service and is
+ responsible for providing virtual networks for OpenStack.
+ Using OpenSSH
+
+
+ Once you are done setting up the network interfaces
+ file, you can switch to an SSH session by logging in remotely
+ to the required server node (Control, Network or Compute) with
+ the OpenSSH client.
+
+
+ Open a terminal on your host machine and type in the
+ following command:
+
+
+ $ssh-keygen -t rsa
+
+
+ You should see output similar to:
+ Generating public/private rsa key pair.
+Enter file in which to save the key (/u/kim/.ssh/id_rsa): [RETURN]
+Enter passphrase (empty for no passphrase): <can be left empty>
+Enter same passphrase again: <can be left empty>
+Your identification has been saved in /home/user/.ssh/id_rsa.
+Your public key has been saved in /home/user/.ssh/id_rsa.pub.
+The key fingerprint is:
+b7:18:ad:3b:0b:50:5c:e1:da:2d:6f:5b:65:82:94:c5 xyz@example
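+
+ Once the key pair exists, a typical next step is to copy the
+ public key to a node and log in to it over the management
+ network. A minimal sketch (the user name is a placeholder;
+ adjust the IP address to the node you want to reach, for
+ example the Control Node at 10.10.10.51):
+
+ $ssh-copy-id user@10.10.10.51
+ $ssh user@10.10.10.51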
+
+
+
diff --git a/doc/training-guide/lab000-openstack-training-labs.xml b/doc/training-guide/lab000-openstack-training-labs.xml
new file mode 100644
index 0000000000..10c3550dfa
--- /dev/null
+++ b/doc/training-guide/lab000-openstack-training-labs.xml
@@ -0,0 +1,12 @@
+
+
+ OpenStack Training Labs
+
+
+
+
+
+
+
diff --git a/doc/training-guide/lab000-virtualbox-basics.xml b/doc/training-guide/lab000-virtualbox-basics.xml
new file mode 100644
index 0000000000..8c5b26504b
--- /dev/null
+++ b/doc/training-guide/lab000-virtualbox-basics.xml
@@ -0,0 +1,652 @@
+
+
+ VirtualBox Basics
+ Getting Started
+ The following are the conventional methods of deploying
+ OpenStack on VirtualBox for a test/sandbox environment, or just to
+ try out OpenStack on commodity hardware:
+ 1. DevStack
+ 2. Vagrant
+ DevStack and Vagrant automate much of the deployment:
+ running their scripts configures your VirtualBox
+ instances as the required OpenStack deployment. We
+ will instead deploy OpenStack on VirtualBox instances manually, to
+ get a better view of how OpenStack works.
+ Prerequisite:
+ It is a daunting task to cover all of OpenStack’s
+ concepts, let alone virtualization and networking, so some basic
+ knowledge of virtualization, networking and Linux is
+ required. That said, I will try to keep the level as accessible as
+ possible so that it is easy for Linux newcomers as well as
+ experts.
+ These virtual machines and virtual networks are given the
+ same privileges as a physical machine on a physical
+ network.
+ For those who want to do deeper research or
+ study, you may refer to the following
+ links:
+ OpenStack: OpenStack
+ Official Documentation (docs.openstack.org)
+ Networking: Computer
+ Networks (5th Edition) by Andrew S. Tanenbaum
+ VirtualBox: VirtualBox
+ Manual (http://www.virtualbox.org/manual/UserManual.html)
+ Requirements:
+ Operating systems - I recommend Ubuntu Server 12.04 LTS,
+ Ubuntu Server 13.10 or Debian Wheezy.
+ Note: Ubuntu 12.10 does not
+ support OpenStack Grizzly packages; the Ubuntu team decided not
+ to package Grizzly for Ubuntu 12.10.
+
+
+ Recommended Requirements.
+
+
+
+
+
+
VT Enabled PC:
+
Intel Core i-series (i3/i5/i7) or AMD quad-core
+
+
+
4 GB RAM:
+
DDR2/DDR3
+
+
+
+
+
+ Minimum Requirements.
+
+
+
+
+
+
Non-VT PCs:
+
Intel Core 2 Duo or AMD Dual
+ Core
+
+
+
2 GB RAM:
+
DDR2/DDR3
+
+
+
+ If you don't know whether your processor is VT enabled, you
+ can check it by installing cpu-checker and running
+ kvm-ok:
+
+
+
$sudo apt-get install cpu-checker
$sudo kvm-ok
+
+
+
+ If your device does not support VT, kvm-ok will report:
+ INFO: Your CPU does not support KVM extensions
+ KVM acceleration can NOT be used
+ You will still be able to use VirtualBox, but the instances
+ will be very slow.
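+ For reference, on a VT-capable processor kvm-ok normally
+ reports something like the following (the exact wording may
+ vary between cpu-checker versions):
+ INFO: /dev/kvm exists
+ KVM acceleration can be used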
+ There are many ways to configure your OpenStack setup; we
+ will deploy OpenStack multi-node using Open vSwitch (OVS) as the
+ network plugin and QEMU/KVM as the hypervisor.
+ Host Only Connections:
+
+
+ Host-only connections provide an internal network
+ between your host and the virtual machine instances running
+ on your host machine. This network is not reachable from
+ other networks.
+
+
+ You may use a bridged connection instead if you have a
+ router/switch. I am assuming the worst case (a single IP without
+ any router), so that it is simple to get the required
+ networks running without the hassle of iptables.
+
+
+ The following are the host-only connections that you
+ will set up later on:
+
+
+
+
+ vboxnet0 - OpenStack Management Network - Host static IP
+ 10.10.10.1
+
+
+ vboxnet1 - VM Conf.Network - Host Static IP
+ 10.20.20.1
+
+
+ vboxnet2 - VM External Network Access (Host
+ Machine)
+ Network Diagram :
+
+
+
+ Vboxnet0, Vboxnet1 and Vboxnet2 are virtual networks set up
+ by VirtualBox with your host machine. They are the way your host
+ can communicate with the virtual machines. These networks are in
+ turn used by the VirtualBox VMs for the OpenStack networks, so that
+ OpenStack’s services can communicate with each other.
+ Setup Your VM Environment
+ Before you can start configuring your environment you need to
+ download the following:
+
+
+
+
+ Oracle Virtual Box
+
+
+ Note: You cannot set up an amd64 VM on an x86 machine.
+
+
+
+ Ubuntu 12.04 Server or Ubuntu 13.04 Server
+
+
+ Note: You need an x86 image for the VMs if kvm-ok fails, even
+ if you are on an amd64 machine.
+ Note: Even though I'm using Ubuntu as the host, the same is
+ applicable to Windows, Mac and other Linux hosts.
+
+
+ If you have a 2nd-generation i5 or i7 processor you can have VT
+ technology inside VMs provided by VMware. This means that
+ your OpenStack nodes (which are in turn VMs) will give a
+ positive result on kvm-ok. (I call it nesting of type-2
+ hypervisors.) The rest of the configuration remains the same except
+ for the UI and a few other trivial differences.
+
+
+ Configure Virtual Networks
+
+
+ This section of the guide will help you set up the
+ networks for your virtual machines.
+
+
+ Launch Virtual Box
+
+
+
+
+ Click File > Preferences on the menu bar of
+ VirtualBox.
+
+
+ Select the Network
+ tab.
+
+
+ On the right side you will see an option to add
+ Host-Only networks.
+
+
+
+
+
+ Create three host-only network connections, as shown
+ above.
+
+
+ Edit the Host-Only Connections to have the following
+ settings.
+
+
+ Vboxnet0
+
+
+
+
+
+
Option
+
Value
+
+
+
IPv4 Address:
+
10.10.10.1
+
+
+
IPv4 Network Mask:
+
255.255.255.0
+
+
+
IPv6 Address:
+
Can be Left Blank
+
+
+
IPv6 Network Mask Length :
+
Can be Left Blank
+
+
+
+
+ Vboxnet1
+
+
+
+
+
+
Option
+
Value
+
+
+
IPv4 Address:
+
10.20.20.1
+
+
+
IPv4 Network Mask:
+
255.255.255.0
+
+
+
IPv6 Address:
+
Can be Left Blank
+
+
+
IPv6 Network Mask Length :
+
Can be Left Blank
+
+
+
+
+ Vboxnet2
+
+
+
+
+
+
Option
+
Value
+
+
+
IPv4 Address:
+
192.168.100.1
+
+
+
IPv4 Network Mask:
+
255.255.255.0
+
+
+
IPv6 Address:
+
Can be Left Blank
+
+
+
IPv6 Network Mask Length :
+
Can be Left Blank
+
+
+
+
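+ If you prefer the command line over the GUI, the same
+ host-only networks can also be created with VBoxManage. This is
+ only a sketch; it assumes VirtualBox names the newly created
+ interfaces vboxnet0, vboxnet1 and vboxnet2 in order of
+ creation:
+
+ $VBoxManage hostonlyif create
+ $VBoxManage hostonlyif create
+ $VBoxManage hostonlyif create
+ $VBoxManage hostonlyif ipconfig vboxnet0 --ip 10.10.10.1 --netmask 255.255.255.0
+ $VBoxManage hostonlyif ipconfig vboxnet1 --ip 10.20.20.1 --netmask 255.255.255.0
+ $VBoxManage hostonlyif ipconfig vboxnet2 --ip 192.168.100.1 --netmask 255.255.255.0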
+ Install SSH and FTP
+
+
+ You may benefit from installing SSH and FTP so that you
+ can log in to the machines from a remote shell and
+ use your own terminal, which is more convenient than using the
+ virtual machine's tty through the VirtualBox UI. You also get a
+ few added comforts, such as copying and pasting commands into the
+ remote terminal, which is not possible directly on the VM.
+
+
+ FTP is for transferring files back and forth; you can
+ also use SFTP or install an FTP daemon on both the host and the VMs.
+
+
+ Installation and configuration of SSH and FTP are out of
+ the scope of this guide; I may add it later as free time
+ permits. If someone wants to contribute this section,
+ please do so.
+
+
+ Note: Please set up the
+ networks from inside the VMs before trying to SSH or FTP into the
+ machines. I would suggest doing this right after the
+ installation of the server on the VMs is over.
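+ As a minimal starting point on Ubuntu (a hedged sketch only;
+ vsftpd is just one of several FTP daemons you could pick), the
+ server packages can be installed on the VMs with:
+
+ $sudo apt-get install openssh-server vsftpd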
+ Install Your VM's Instances
+
+
+ During installation of the operating systems you will be
+ asked for custom software to install; if you are confused
+ or not sure about this, just skip this step by pressing the
+ Enter key without selecting any of the given options.
+
+
+ Warning - Please do not
+ install any packages other than the ones mentioned
+ below unless you know what you are doing. There is a good chance
+ that you may otherwise end up with unwanted errors and package
+ conflicts.
+ Control Node:
+ Create a new virtual machine. Select Ubuntu Server.
+
+ Select an appropriate amount of RAM, at least 512 MB for the
+ Control Node. Everything else can be left at the default settings.
+ The default hard disk size of 8 GB is sufficient.
+ Configure the networks
+ (Ignore the IP Address for now, you will set it up from
+ inside the VM)
+
+
+
+
+
+
Network Adapter
+
Host-Only Adapter Name
+
IP Address
+
+
+
eth0
+
Vboxnet0
+
10.10.10.51
+
+
+
eth1
+
Vboxnet2
+
10.20.20.51
+
+
+
eth2
+
NAT
+
DHCP
+
+
+
+ Adapter 1 (Vboxnet0)
+
+ Adapter 2 (Vboxnet2)
+
+ Adapter 3 (NAT)
+
+ Now Install Ubuntu Server 12.04 or 13.04 on this
+ machine.
+ Note: Install the SSH server
+ when asked for custom software to install. The rest of the packages
+ are not required and may get in the way of the OpenStack packages
+ (for example, DNS servers), so leave them out unless you know what
+ you are doing.
+ Network Node:
+ Create a new virtual machine.
+ The minimum RAM is 512 MB. Everything else can be left at the
+ defaults. The minimum HDD space is 8 GB.
+
+ Configure the networks
+ (Ignore the IP Address for now, you will set it up from
+ inside the VM)
+
+
+
+
+
+
Network Adapter
+
Host-Only Adapter Name
+
IP Address
+
+
+
eth0
+
Vboxnet0
+
10.10.10.52
+
+
+
eth1
+
Vboxnet1
+
10.20.20.52
+
+
+
eth2
+
Vboxnet2
+
192.168.100.51
+
+
+
eth3
+
NAT
+
DHCP
+
+
+
+ Adapter 1 (Vboxnet0)
+
+ Adapter 2 (Vboxnet1)
+
+ Adapter 3 (Vboxnet2)
+
+ Adapter 4 (NAT)
+
+ Now Install Ubuntu Server 12.04 or 13.04 on this
+ machine.
+ Note: Install the SSH server
+ when asked for custom software to install. The rest of the packages
+ are not required and may get in the way of the OpenStack packages
+ (for example, DNS servers), so leave them out unless you know what
+ you are doing.
+ Compute Node:
+ Create a new virtual machine and give it at least 1,000 MB of RAM.
+ Everything else can be left at the defaults. Give it at least an 8 GB HDD.
+
+ Configure the networks
+ (Ignore the IP Address for now, you will set it up from
+ inside the VM)
+
+
+
+
+
+
Network Adapter
+
Host-Only Adapter Name
+
IP Address
+
+
+
eth0
+
Vboxnet0
+
10.10.10.53
+
+
+
eth1
+
Vboxnet1
+
10.20.20.53
+
+
+
eth2
+
NAT
+
DHCP
+
+
+
+ Adapter 1 (Vboxnet0)
+
+ Adapter 2 (Vboxnet1)
+
+ Adapter 3 (NAT)
+
+ Now Install Ubuntu Server 12.04 or 13.04 on this
+ machine.
+ Note: Install the SSH server
+ when asked for custom software to install. The rest of the packages
+ are not required and may get in the way of the OpenStack packages
+ (for example, DNS servers), so leave them out unless you know what
+ you are doing.
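+ To illustrate the kind of configuration you will later apply from
+ inside the VMs, a minimal /etc/network/interfaces sketch for the
+ Compute Node above could look like the following (this assumes the
+ classic eth0/eth1/eth2 interface names; the authoritative,
+ node-specific configuration is covered in the later
+ sections):
+
+ auto lo
+ iface lo inet loopback
+
+ auto eth0
+ iface eth0 inet static
+     address 10.10.10.53
+     netmask 255.255.255.0
+
+ auto eth1
+ iface eth1 inet static
+     address 10.20.20.53
+     netmask 255.255.255.0
+
+ auto eth2
+ iface eth2 inet dhcp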
+ Warnings/Advice :
+
+
+ There are a few warnings that I must give you out
+ of experience, due to common habits that most people may
+ have:
+
+
+ Sometimes shutting down your virtual machines abruptly may lead to
+ malfunctioning OpenStack services, so try not to power off your
+ VMs directly. If your VMs do not get Internet access:
+
+
+ From your VM instance, use the ping command to see whether
+ the Internet connection is up.
+
+
+ $ping www.google.com
+
+
+ If it is not connected, restart the networking
+ service:
+
+
+ $sudo service networking restart
+$ping www.google.com
+
+
+ If this doesn't work, you need to check your network
+ settings in VirtualBox; you may have missed something or
+ misconfigured it.
+
+
+ This should reconnect your network about 99% of the
+ time. If you are really unlucky you have some
+ other problem, or your Internet connection itself is not
+ functioning.
+
+
+ Note: There are known bugs with ping under NAT.
+ Although the latest versions of VirtualBox have better
+ behavior, sometimes ping may not work even if your
+ network is connected to the Internet.
+
+
+ Congratulations, you are ready with the infrastructure for
+ deploying OpenStack. Just make sure that you have installed
+ Ubuntu Server on the VirtualBox instances set up above. In the
+ next section we will go through deploying OpenStack using these
+ VirtualBox instances.
+
+
\ No newline at end of file
diff --git a/doc/training-guide/lab001-compute-node.xml b/doc/training-guide/lab001-compute-node.xml
new file mode 100644
index 0000000000..3dd7bbd4b4
--- /dev/null
+++ b/doc/training-guide/lab001-compute-node.xml
@@ -0,0 +1,53 @@
+
+
+ Compute Node
+
+
+ Network Diagram :
+
+
+
+ Vboxnet0, Vboxnet1 and Vboxnet2 are virtual networks set up by
+ VirtualBox with your host machine. They are the way your host can
+ communicate with the virtual machines. These networks are in turn
+ used by the VirtualBox VMs for the OpenStack networks, so that
+ OpenStack’s services can communicate with each other.
+ Compute Node
+ Start your Compute Node, the one you set up in the previous
+ section.
+ Preparing Ubuntu
+ 13.04/12.04
+
+
+ After you install Ubuntu Server, switch to the root user:
+
+ $sudo su
+
+
+
+ Add Grizzly repositories:
+ #apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
+# echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list
+
+
+ Update your system:
+ #apt-get update
+#apt-get upgrade
+#apt-get dist-upgrade
+
+
+ More Content To be Added soon.
+
\ No newline at end of file
diff --git a/doc/training-guide/lab001-control-node.xml b/doc/training-guide/lab001-control-node.xml
new file mode 100644
index 0000000000..15513915b6
--- /dev/null
+++ b/doc/training-guide/lab001-control-node.xml
@@ -0,0 +1,49 @@
+
+
+ Control Node
+ Network Diagram :
+
+ Vboxnet0, Vboxnet1 and Vboxnet2 are virtual networks set up by
+ VirtualBox with your host machine. They are the way your host can
+ communicate with the virtual machines. These networks are in turn
+ used by the VirtualBox VMs for the OpenStack networks, so that
+ OpenStack’s services can communicate with each other.
+ Controller Node
+ Start your Controller Node, the one you set up in the previous
+ section.
+ Preparing Ubuntu
+ 13.04/12.04
+
+
+ After you install Ubuntu Server, switch to the root user:
+
+ $sudo su
+
+
+
+ Add Grizzly repositories:
+ #apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
+# echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list
+
+
+ Update your system:
+ #apt-get update
+#apt-get upgrade
+#apt-get dist-upgrade
+
+
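+ As an optional sanity check (my suggestion, not part of the
+ official steps), you can verify that the Grizzly cloud archive is
+ now the candidate source for an OpenStack package, for
+ example:
+ # apt-cache policy keystone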
+ More Content To be Added soon.
+
\ No newline at end of file
diff --git a/doc/training-guide/lab002-network-node.xml b/doc/training-guide/lab002-network-node.xml
new file mode 100644
index 0000000000..c5b8c01727
--- /dev/null
+++ b/doc/training-guide/lab002-network-node.xml
@@ -0,0 +1,53 @@
+
+
+ Network Node
+
+
+ Network Diagram :
+
+
+
+ Vboxnet0, Vboxnet1 and Vboxnet2 are virtual networks set up by
+ VirtualBox with your host machine. They are the way your host can
+ communicate with the virtual machines. These networks are in turn
+ used by the VirtualBox VMs for the OpenStack networks, so that
+ OpenStack’s services can communicate with each other.
+ Network Node
+ Start your Network Node, the one you set up in the previous
+ section.
+ Preparing Ubuntu
+ 13.04/12.04
+
+
+ After you install Ubuntu Server, switch to the root user:
+
+ $sudo su
+
+
+
+ Add Grizzly repositories:
+ #apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
+# echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list
+
+
+ Update your system:
+ #apt-get update
+#apt-get upgrade
+#apt-get dist-upgrade
+
+
+ More Content To be Added soon.
+
\ No newline at end of file
diff --git a/doc/training-guide/module001-ch010-block-storage.xml b/doc/training-guide/lab003-openstack-production.xml
similarity index 58%
rename from doc/training-guide/module001-ch010-block-storage.xml
rename to doc/training-guide/lab003-openstack-production.xml
index b08f384c6b..25ee780b30 100644
--- a/doc/training-guide/module001-ch010-block-storage.xml
+++ b/doc/training-guide/lab003-openstack-production.xml
@@ -3,7 +3,7 @@
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
- xml:id="module001-ch011-block-storage">
-
OpenStack Block Storage
- To be Added
-
+ xml:id="lab003-openstack-production">
+ OpenStack In Production
+ More Content To be Added.
+
\ No newline at end of file
diff --git a/doc/training-guide/module001-ch001-intro-text.xml b/doc/training-guide/module001-ch001-intro-text.xml
index ff4608ba68..c2edd1bddc 100644
--- a/doc/training-guide/module001-ch001-intro-text.xml
+++ b/doc/training-guide/module001-ch001-intro-text.xml
@@ -8,7 +8,7 @@
Cloud Computing’s hype has certainly attracted IT Giants and
the entire Open Source community's attention, amidst all this
chaos of disrupting the software world and the way we saw and
- thought about it, we need to enlighten ourselves with this massive
+ thought about it. We need to enlighten ourselves with this massive
change as time again, entire Open Source community has seen the
rise of yet another project which is called as OpenStack. It is
believed that OpenStack will have a similar impact on the future
@@ -23,7 +23,7 @@
locked in at high level scientific and corporate places which are
usually tough to get in touch with, further innovating the pace of
education and expanding its possibilities. Being open in nature,
- these resources are free to use and modify, talor them at no cost,
+ these resources are free to use and modify, tailor them at no cost,
shape them as per your requirements and let the cloud do all the
hard work. In data centers today, many computers suffer under-utilization
diff --git a/doc/training-guide/module001-ch003-core-projects.xml b/doc/training-guide/module001-ch003-core-projects.xml
index 72030b4816..119d3cf11b 100644
--- a/doc/training-guide/module001-ch003-core-projects.xml
+++ b/doc/training-guide/module001-ch003-core-projects.xml
@@ -1,9 +1,654 @@
-
- Core Projects
- More Content To be Added ...
-
+
+ OpenStack Projects, History and Releases Overview
+ Project history and releases overview.
+ OpenStack is a cloud computing project to provide an
+ infrastructure as a service (IaaS). It is free open source
+ software released under the terms of the Apache License. The
+ project is managed by the OpenStack Foundation, a non-profit
+ corporate entity established in September 2012 to promote
+ OpenStack software and its community.
+ More than 200 companies have joined the project, among them
+ AMD, Brocade Communications Systems, Canonical, Cisco, Dell, EMC,
+ Ericsson, Groupe Bull, HP, IBM, Inktank, Intel, NEC, Rackspace
+ Hosting, Red Hat, SUSE Linux, VMware, and Yahoo!
+ The technology consists of a series of interrelated projects
+ that control pools of processing, storage, and networking
+ resources throughout a datacenter, all managed through a dashboard
+ that gives administrators control while empowering its users to
+ provision resources through a web interface.
+ The OpenStack community collaborates around a six-month,
+ time-based release cycle with frequent development milestones.
+ During the planning phase of each release, the community gathers
+ for the OpenStack Design Summit to facilitate developer working
+ sessions and assemble plans.
+ In July 2010 Rackspace Hosting and NASA jointly launched an
+ open-source cloud-software initiative known as OpenStack. The
+ OpenStack project intended to help organizations which offer
+ cloud-computing services running on standard hardware. The
+ community’s first official release, code-named Austin, appeared
+ four months later, with plans to release regular updates of the
+ software every few months. The early code came from NASA’s Nebula
+ platform as well as from Rackspace’s Cloud Files platform. In July
+ 2011 developers of the Ubuntu Linux distribution decided to adopt
+ OpenStack.
+ OpenStack Releases
+
+
+
Nova, Glance, Swift,
+ Horizon, Keystone, Neutron, Cinder, (More to be
+ added)
+
+
+
+ Some OpenStack users include:
+
+
+ PayPal / eBay
+
+
+ NASA
+
+
+ CERN
+
+
+ Yahoo!
+
+
+ Rackspace Cloud
+
+
+ HP Public Cloud
+
+
+ MercadoLibre.com
+
+
+ AT&T
+
+
+ KT (formerly Korea Telecom)
+
+
+ Deutsche Telekom
+
+
+ Wikimedia Labs
+
+
+ Hostalia of Telefónica Group
+
+
+ SUSE Cloud solution
+
+
+ Red Hat OpenShift PaaS solution
+
+
+ Zadara Storage
+
+
+ Mint Services
+
+
+ GridCentric
+
+
+ and many more. Such users of OpenStack make it a true open
+ standard, innovating and driving the world's biggest open cloud
+ standards (more on user stories here: http://goo.gl/aF4lsL).
+ Release Cycle
+
+ OpenStack is based on a coordinated 6-month release cycle
+ with frequent development milestones. You can find a link to the
+ current development release schedule here. The release cycle is
+ made up of four major stages. The various OpenStack releases are
+ named as shown in the figures, which also list the various
+ companies contributing to OpenStack.
+
+ In a Nutshell, OpenStack...
+
+
+ has had 64,396 commits made by 1,128 contributors
+
+
+ representing 908,491 lines of code
+
+
+ is mostly written in Python
+
+
+ with an average number of source code comments
+
+
+ has a codebase with a long source history
+
+
+ maintained by a very large development team
+
+
+ with increasing Y-O-Y commits
+
+
+ took an estimated 249 years of effort (COCOMO
+ model)
+
+
+ starting with its first commit in May 2010 (I have
+ deliberately not included the last commit date since this is an
+ active project with people working on it from all around the
+ world).
+
+
+
+ For more of an overview of OpenStack, refer to
+ http://www.openstack.org or http://goo.gl/4q7nVI, where most of the
+ common questions and queries are covered.
+ Core Projects Overview
+ Let’s take a dive into some technical aspects of OpenStack.
+ Its amazing scalability and flexibility are a few of the
+ features that make it a rock-solid cloud computing platform, but
+ being open source and community driven, it is explicitly meant to
+ serve the open source community and its demands.
+ Being a cloud computing platform, OpenStack consists of many
+ core and incubated projects which, as a whole, make it really good
+ as an IaaS cloud computing platform/operating system. The
+ following are the main components of OpenStack that are
+ necessary to be present in a cloud to call it an OpenStack
+ cloud.
+ Components of OpenStack
+ OpenStack has a modular architecture with various code names
+ for its components. OpenStack has several shared services that
+ span the three pillars of compute, storage and networking,
+ making it easier to implement and operate your cloud. These
+ services - including identity, image management and a web
+ interface - integrate the OpenStack components with each other
+ as well as external systems to provide a unified experience for
+ users as they interact with different cloud resources.
+ Compute (Nova)
+ The OpenStack cloud operating system enables enterprises
+ and service providers to offer on-demand computing resources,
+ by provisioning and managing large networks of virtual
+ machines. Compute resources are accessible via APIs for
+ developers building cloud applications and via web interfaces
+ for administrators and users. The compute architecture is
+ designed to scale horizontally on standard hardware, enabling
+ the cloud economics companies have come to expect.
+
+ OpenStack Compute (Nova) is a cloud computing fabric
+ controller (the main part of an IaaS system). It is written in
+ Python and uses many external libraries such as Eventlet (for
+ concurrent programming), Kombu (for AMQP communication), and
+ SQLAlchemy (for database access). Nova's architecture is
+ designed to scale horizontally on standard hardware with no
+ proprietary hardware or software requirements and provide the
+ ability to integrate with legacy systems and third party
+ technologies. It is designed to manage and automate pools of
+ computer resources and can work with widely available
+ virtualization technologies, as well as bare metal and
+ high-performance computing (HPC) configurations. KVM and
+ XenServer are available choices for hypervisor technology,
+ together with Hyper-V and Linux container technology such as
+ LXC. In addition to different hypervisors, OpenStack runs on
+ ARM.
+ Popular Use Cases:
+
+
+ Service providers offering an IaaS compute platform
+ or services higher up the stack
+
+
+ IT departments acting as cloud service providers for
+ business units and project teams
+
+
+ Processing big data with tools like Hadoop
+
+
+ Scaling compute up and down to meet demand for web
+ resources and applications
+
+
+ High-performance computing (HPC) environments
+ processing diverse and intensive workloads
+
+
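+ To make this concrete, here is a hedged example of how an end
+ user typically launches an instance with the nova command-line
+ client (the flavor, image, key pair and instance names are
+ placeholders, and the client must already be configured with your
+ cloud's credentials):
+ $nova boot --flavor m1.small --image ubuntu-12.04 --key-name mykey my-first-instance
+ $nova list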
+ Object Storage (Swift)
+ In addition to traditional enterprise-class storage
+ technology, many organizations now have a variety of storage
+ needs with varying performance and price requirements.
+ OpenStack has support for both Object Storage and Block
+ Storage, with many deployment options for each depending on
+ the use case.
+
+ OpenStack Object Storage (Swift) is a scalable redundant
+ storage system. Objects and files are written to multiple disk
+ drives spread throughout servers in the data center, with the
+ OpenStack software responsible for ensuring data replication
+ and integrity across the cluster. Storage clusters scale
+ horizontally simply by adding new servers. Should a server or
+ hard drive fail, OpenStack replicates its content from other
+ active nodes to new locations in the cluster. Because
+ OpenStack uses software logic to ensure data replication and
+ distribution across different devices, inexpensive commodity
+ hard drives and servers can be used.
+ Object Storage is ideal for cost effective, scale-out
+ storage. It provides a fully distributed, API-accessible
+ storage platform that can be integrated directly into
+ applications or used for backup, archiving and data retention.
+ Block Storage allows block devices to be exposed and connected
+ to compute instances for expanded storage, better performance
+ and integration with enterprise storage platforms, such as
+ NetApp, Nexenta and SolidFire.
+ A few details on OpenStack’s Object Storage
+
+
+ OpenStack provides redundant, scalable object storage using
+ clusters of standardized servers capable of storing
+ petabytes of data
+
+
+ Object Storage is not a traditional file system, but rather a
+ distributed storage system for static data such as
+ virtual machine images, photo storage, email storage,
+ backups and archives. Having no central "brain" or
+ master point of control provides greater scalability,
+ redundancy and durability.
+
+
+ Objects and files are written to multiple disk drives spread
+ throughout servers in the data center, with the
+ OpenStack software responsible for ensuring data
+ replication and integrity across the cluster.
+
+
+ Storage clusters scale horizontally simply by adding new servers.
+ Should a server or hard drive fail, OpenStack
+ replicates its content from other active nodes to new
+ locations in the cluster. Because OpenStack uses
+ software logic to ensure data replication and
+ distribution across different devices, inexpensive
+ commodity hard drives and servers can be used in lieu
+ of more expensive equipment.
+
+
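+ As a small illustration (a sketch, not part of the original
+ text), the swift command-line client lets a user create a
+ container and store an object in it; the names below are
+ placeholders:
+ $swift upload my-backups backup-2013-08-01.tar.gz
+ $swift list my-backups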
+ Block Storage (Cinder)
+ OpenStack Block Storage (Cinder) provides persistent block
+ level storage devices for use with OpenStack compute
+ instances. The block storage system manages the creation,
+ attaching and detaching of the block devices to servers. Block
+ storage volumes are fully integrated into OpenStack Compute
+ and the Dashboard allowing for cloud users to manage their own
+ storage needs. In addition to local Linux server storage, it
+ can use storage platforms including Ceph, CloudByte, Coraid,
+ EMC (VMAX and VNX), GlusterFS, IBM Storage (Storwize family,
+ SAN Volume Controller, and XIV Storage System), Linux LIO,
+ NetApp, Nexenta, Scality, SolidFire and HP (Store Virtual and
+ StoreServ 3Par families). Block storage is appropriate for
+ performance sensitive scenarios such as database storage,
+ expandable file systems, or providing a server with access to
+ raw block level storage. Snapshot management provides powerful
+ functionality for backing up data stored on block storage
+ volumes. Snapshots can be restored or used to create a new
+ block storage volume.
+ A few points on OpenStack Block
+ Storage:
+
+
+ OpenStack provides persistent block level storage
+ devices for use with OpenStack compute instances.
+
+
+ The block storage system manages the creation,
+ attaching and detaching of the block devices to servers.
+ Block storage volumes are fully integrated into OpenStack
+ Compute and the Dashboard allowing for cloud users to
+ manage their own storage needs.
+
+
+ In addition to using simple Linux server storage, it
+ has unified storage support for numerous storage platforms
+ including Ceph, NetApp, Nexenta, SolidFire, and
+ Zadara.
+
+
+ Block storage is appropriate for performance sensitive
+ scenarios such as database storage, expandable file
+ systems, or providing a server with access to raw block
+ level storage.
+
+
+ Snapshot management provides powerful functionality
+ for backing up data stored on block storage volumes.
+ Snapshots can be restored or used to create a new block
+ storage volume.
+
+
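+ For illustration, with the cinder command-line client a user can
+ create a 1 GB volume and snapshot it. Treat this as a sketch; the
+ flag names match the Grizzly-era client as far as I recall, and
+ the volume ID is a placeholder:
+ $cinder create --display-name my-volume 1
+ $cinder snapshot-create --display-name my-snapshot <volume-id>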
+ Networking (Neutron)
+ Today's datacenter networks contain more devices than ever
+ before: servers, network equipment, storage systems and
+ security appliances, many of which are further divided into
+ virtual machines and virtual networks. The number of IP
+ addresses, routing configurations and security rules can
+ quickly grow into the millions. Traditional network management
+ techniques fall short of providing a truly scalable, automated
+ approach to managing these next-generation networks. At the
+ same time, users expect more control and flexibility with
+ quicker provisioning.
+ OpenStack Networking is a pluggable, scalable and
+ API-driven system for managing networks and IP addresses. Like
+ other aspects of the cloud operating system, it can be used by
+ administrators and users to increase the value of existing
+ datacenter assets. OpenStack Networking ensures the network
+ will not be the bottleneck or limiting factor in a cloud
+ deployment and gives users real self-service, even over their
+ network configurations.
+
+ OpenStack Networking (Neutron, formerly Quantum) is a
+ system for managing networks and IP addresses. Like other
+ aspects of the cloud operating system, it can be used by
+ administrators and users to increase the value of existing
+ data center assets. OpenStack Networking ensures the network
+ will not be the bottleneck or limiting factor in a cloud
+ deployment and gives users real self-service, even over their
+ network configurations.
+ OpenStack Neutron provides networking models for different
+ applications or user groups. Standard models include flat
+ networks or VLANs for separation of servers and traffic.
+ OpenStack Networking manages IP addresses, allowing for
+ dedicated static IPs or DHCP. Floating IPs allow traffic to be
+ dynamically rerouted to any of your compute resources, which
+ allows you to redirect traffic during maintenance or in the
+ case of failure. Users can create their own networks, control
+ traffic and connect servers and devices to one or more
+ networks. Administrators can take advantage of
+ software-defined networking (SDN) technology like OpenFlow to
+ allow for high levels of multi-tenancy and massive scale.
+ OpenStack Networking has an extension framework allowing
+ additional network services, such as intrusion detection
+ systems (IDS), load balancing, firewalls and virtual private
+ networks (VPN) to be deployed and managed.
+ Networking Capabilities
+
+
+ OpenStack provides flexible networking models to
+ suit the needs of different applications or user groups.
+ Standard models include flat networks or VLANs for
+ separation of servers and traffic.
+
+
+ OpenStack Networking manages IP addresses, allowing
+ for dedicated static IPs or DHCP. Floating IPs allow
+ traffic to be dynamically rerouted to any of your
+ compute resources, which allows you to redirect traffic
+ during maintenance or in the case of failure.
+
+
+ Users can create their own networks, control traffic
+ and connect servers and devices to one or more
+ networks.
+
+
+ The pluggable backend architecture lets users take
+ advantage of commodity gear or advanced networking
+ services from supported vendors.
+
+
+ Administrators can take advantage of
+ software-defined networking (SDN) technology like
+ OpenFlow to allow for high levels of multi-tenancy and
+ massive scale.
+
+
+ OpenStack Networking has an extension framework
+ allowing additional network services, such as intrusion
+ detection systems (IDS), load balancing, firewalls and
+ virtual private networks (VPN) to be deployed and
+ managed.
+
+
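+ As an illustration of this self-service workflow (a sketch; in a
+ Grizzly-era deployment the client may still be named quantum
+ rather than neutron, and the network names are placeholders), a
+ user can create a private network and subnet like this:
+ $neutron net-create private-net
+ $neutron subnet-create private-net 192.168.1.0/24 --name private-subnet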
+ Dashboard (Horizon)
+ OpenStack Dashboard (Horizon) provides administrators and
+ users a graphical interface to access, provision and automate
+ cloud-based resources. The design allows for third party
+ products and services, such as billing, monitoring and
+ additional management tools. The dashboard is also brandable
+ for service providers and other commercial vendors who want to
+ make use of it.
+ The dashboard is just one way to interact with OpenStack
+ resources. Developers can automate access or build tools to
+ manage their resources using the native OpenStack API or the
+ EC2 compatibility API.
+ Identity Service (Keystone)
+ OpenStack Identity (Keystone) provides a central directory
+ of users mapped to the OpenStack services they can access. It
+ acts as a common authentication system across the cloud
+ operating system and can integrate with existing backend
+ directory services like LDAP. It supports multiple forms of
+ authentication including standard username and password
+ credentials, token-based systems and AWS-style (i.e. Amazon
+ Web Services) logins. Additionally, the catalog provides a
+ queryable list of all of the services deployed in an OpenStack
+ cloud in a single registry. Users and third-party tools can
+ programmatically determine which resources they can
+ access.
+ As an administrator, OpenStack Identity enables you
+ to:
+
+
+ Configure centralized policies across users and
+ systems
+
+
+ Create users and tenants and define permissions for
+ compute, storage and networking resources using role-based
+ access control (RBAC) features
+
+
+ Integrate with an existing directory like LDAP,
+ allowing for a single source of identity authentication
+ across the enterprise.
+
+
+ As a user, OpenStack Identity enables you to:
+
+
+
+
+ Get a list of the services that you can access.
+
+
+ Make API requests
+
+
+ Log into the web dashboard to create resources owned
+ by your account
+
+
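+ For illustration, a few common Identity operations with the
+ keystone command-line client (a sketch; it assumes admin
+ credentials are already exported in the environment):
+ $keystone tenant-list
+ $keystone user-list
+ $keystone token-get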
+ Image Service (Glance)
+ OpenStack Image Service (Glance) provides discovery,
+ registration and delivery services for disk and server images.
+ Stored images can be used as a template. It can also be used
+ to store and catalog an unlimited number of backups. The Image
+ Service can store disk and server images in a variety of
+ back-ends, including OpenStack Object Storage. The Image
+ Service API provides a standard REST interface for querying
+ information about disk images and lets clients stream the
+ images to new servers.
+ Capabilities of the Image Service include:
+
+
+ Administrators can create base templates from which
+ their users can start new compute instances
+
+
+ Users can choose from available images, or create
+ their own from existing servers
+
+
+ Snapshots can also be stored in the Image Service so
+ that virtual machines can be backed up quickly
+
+
+ A multi-format image registry, the image service allows
+ uploads of private and public images in a variety of formats,
+ including:
+
+
+ Raw
+
+
+ Machine (kernel/ramdisk outside of image, a.k.a.
+ AMI)
+
+
+ VHD (Hyper-V)
+
+
+ VDI (VirtualBox)
+
+
+ qcow2 (Qemu/KVM)
+
+
+ VMDK (VMWare)
+
+
+ OVF (VMWare, others)
+
+
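+ To illustrate how images are registered (a hedged sketch; the
+ image name and file are placeholders), a qcow2 image can be
+ uploaded to the Image Service with the glance client:
+ $glance image-create --name "ubuntu-12.04" --disk-format qcow2 --container-format bare --is-public True --file ubuntu-12.04.qcow2
+ $glance image-list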
+ To see the complete list of core and incubated
+ projects under OpenStack, check out OpenStack’s Launchpad
+ project page here: http://goo.gl/ka4SrV
+ Amazon Web Services compatibility
+ OpenStack APIs are compatible with Amazon EC2 and Amazon
+ S3 and thus client applications written for Amazon Web
+ Services can be used with OpenStack with minimal porting
+ effort.
+ Governance
+ OpenStack is governed by a non-profit foundation and its
+ board of directors, a technical committee and a user
+ committee.
+ The foundation's stated mission is to provide shared
+ resources to help achieve the OpenStack mission by protecting,
+ empowering, and promoting OpenStack software and the community
+ around it, including users, developers and the entire
+ ecosystem. It has little to do with the development of
+ the software, which is managed by the technical committee - an
+ elected group that represents the contributors to the project
+ and has oversight of all technical matters.
+
\ No newline at end of file
diff --git a/doc/training-guide/module001-ch004-openstack-architecture.xml b/doc/training-guide/module001-ch004-openstack-architecture.xml
index 396cd188bf..c790a3a709 100644
--- a/doc/training-guide/module001-ch004-openstack-architecture.xml
+++ b/doc/training-guide/module001-ch004-openstack-architecture.xml
@@ -5,5 +5,373 @@
version="5.0"
xml:id="module001-ch004-openstack-architecture">
OpenStack Architecture
- More Content To be Added ...
+ Conceptual Architecture
+ The OpenStack project as a whole is designed to deliver a
+ massively scalable cloud operating system. To achieve this, each
+ of the constituent services are designed to work together to
+ provide a complete Infrastructure as a Service (IaaS). This
+ integration is facilitated through public application
+ programming interfaces (APIs) that each service offers (and in
+ turn can consume). While these APIs allow each of the services
+ to use another service, it also allows an implementer to switch
+ out any service as long as they maintain the API. These are
+ (mostly) the same APIs that are available to end users of the
+ cloud.
+ Conceptually, you can picture the relationships between the
+ services as follows:
+
+
+
+ Dashboard ("Horizon") provides a web front end to the
+ other OpenStack services
+
+
+ Compute ("Nova") stores and retrieves virtual disks
+ ("images") and associated metadata in Image
+ ("Glance")
+
+
+ Network ("Quantum") provides virtual networking for
+ Compute.
+
+
+ Block Storage ("Cinder") provides storage volumes for
+ Compute.
+
+
+ Image ("Glance") can store the actual virtual disk files
+ in the Object Store("Swift")
+
+
+ All the services authenticate with Identity
+ ("Keystone")
+
+
+ This is a stylized and simplified view of the architecture,
+ assuming that the implementer is using all of the services
+ together in the most common configuration. It also only shows
+ the "operator" side of the cloud -- it does not picture how
+ consumers of the cloud may actually use it. For example, many
+ users will access object storage heavily (and directly).
+ Logical Architecture
+ This picture is consistent with the conceptual architecture
+ above:
+
+
+
+ End users can interact through a common web interface
+ (Horizon) or directly to each service through their
+ API
+
+
+ All services authenticate through a common source
+ (facilitated through keystone)
+
+
+ Individual services interact with each other through
+ their public APIs (except where privileged administrator
+ commands are necessary)
+
+
+ In the sections below, we'll delve into the architecture for
+ each of the services.
+ Dashboard
+ Horizon is a modular Django web application that provides
+ an end user and administrator interface to OpenStack
+ services.
+
+ As with most web applications, the architecture is fairly
+ simple:
+
+
+ Horizon is usually deployed via mod_wsgi in Apache.
+ The code itself is separated into a reusable python module
+ with most of the logic (interactions with various
+ OpenStack APIs) and presentation (to make it easily
+ customizable for different sites).
+
+
+ A database (configurable as to which one). As it
+ relies mostly on the other services for data, it stores
+ very little data of its own.
+
+
+ From a network architecture point of view, this service
+ will need to be customer accessible as well as be able to talk
+ to each service's public APIs. If you wish to use the
+ administrator functionality (i.e. for other services), it will
+ also need connectivity to their Admin API endpoints (which
+ should be non-customer accessible).
+ Compute
+ Nova is the most complicated and distributed component of
+ OpenStack. A large number of processes cooperate to turn end
+ user API requests into running virtual machines. Below is a
+ list of these processes and their functions:
+
+
+ nova-api accepts and responds to end user compute API
+ calls. It supports OpenStack Compute API, Amazon's EC2 API
+ and a special Admin API (for privileged users to perform
+ administrative actions). It also initiates most of the
+ orchestration activities (such as running an instance) as
+ well as enforces some policy (mostly quota checks).
+
+
+ The nova-compute process is primarily a worker daemon
+ that creates and terminates virtual machine instances via
+ hypervisor's APIs (XenAPI for XenServer/XCP, libvirt for
+ KVM or QEMU, VMwareAPI for VMware, etc.). The process by
+ which it does so is fairly complex but the basics are
+ simple: accept actions from the queue and then perform a
+ series of system commands (like launching a KVM instance)
+ to carry them out while updating state in the
+ database.
+
+
+ nova-volume manages the creation, attaching and
+ detaching of persistent volumes to compute instances (similar
+ functionality to Amazon’s Elastic Block Storage). It can
+ use volumes from a variety of providers such as iSCSI or
+ Rados Block Device in Ceph. A new OpenStack project,
+ Cinder, will eventually replace nova-volume functionality.
+ In the Folsom release, nova-volume and the Block Storage
+ service will have similar functionality.
+
+
+ The nova-network worker daemon is very similar to
+ nova-compute and nova-volume. It accepts networking tasks
+ from the queue and then performs tasks to manipulate the
+ network (such as setting up bridging interfaces or
+ changing iptables rules). This functionality is being
+ migrated to Quantum, a separate OpenStack service. In the
+ Folsom release, much of the functionality will be
+ duplicated between nova-network and Quantum.
+
+
+ The nova-schedule process is conceptually the simplest
+ piece of code in OpenStack Nova: it takes a virtual machine
+ instance request from the queue and determines where it
+ should run (specifically, which compute server host it
+ should run on).
+
+
+ The queue provides a central hub for passing messages
+ between daemons. This is usually implemented with RabbitMQ
+ today, but could be any AMQP message queue (such as Apache
+ Qpid). New to the Folsom release is support for Zero
+ MQ.
+
+
+ The SQL database stores most of the build-time and
+ runtime state for a cloud infrastructure. This includes
+ the instance types that are available for use, instances
+ in use, networks available and projects. Theoretically,
+ OpenStack Nova can support any database supported by
+ SQL-Alchemy but the only databases currently being widely
+ used are sqlite3 (only appropriate for test and
+ development work), MySQL and PostgreSQL.
+
+
+ Nova also provides console services to allow end users
+ to access their virtual instance's console through a
+ proxy. This involves several daemons (nova-console,
+ nova-novncproxy and nova-consoleauth).
+
+
+ Nova interacts with many other OpenStack services:
+ Keystone for authentication, Glance for images and Horizon for
+ web interface. The Glance interactions are central. The API
+ process can upload and query Glance while nova-compute will
+ download images for use in launching instances.
+ Object Store
+ The swift architecture is very distributed to prevent any
+ single point of failure as well as to scale horizontally. It
+ includes the following components:
+
+
+ Proxy server (swift-proxy-server) accepts incoming
+ requests via the OpenStack Object API or just raw HTTP. It
+ accepts files to upload, modifications to metadata or
+ container creation. In addition, it will also serve files
+ or container listing to web browsers. The proxy server may
+ utilize an optional cache (usually deployed with memcache)
+ to improve performance.
+
+
+ Account servers manage accounts defined with the
+ object storage service.
+
+
+ Container servers manage a mapping of containers (i.e.
+ folders) within the object store service.
+
+
+ Object servers manage actual objects (i.e. files) on
+ the storage nodes.
+
+
+ There are also a number of periodic processes which run
+ to perform housekeeping tasks on the large data store. The
+ most important of these is the replication services, which
+ ensures consistency and availability through the cluster.
+ Other periodic processes include auditors, updaters and
+ reapers.
+
+
+ Authentication is handled through configurable WSGI
+ middleware (which will usually be Keystone).
+ Image Store
+ The Glance architecture has stayed relatively stable since
+ the Cactus release. The biggest architectural change has been
+ the addition of authentication, which was added in the Diablo
+ release. Just as a quick reminder, Glance has four main parts
+ to it:
+
+
+ glance-api accepts Image API calls for image
+ discovery, image retrieval and image storage.
+
+
+ glance-registry stores, processes and retrieves
+ metadata about images (size, type, etc.).
+
+
+ A database to store the image metadata. Like Nova, you
+ can choose your database depending on your preference (but
+ most people use MySQL or SQLite).
+
+
+ A storage repository for the actual image files. In
+ the diagram above, Swift is shown as the image repository,
+ but this is configurable. In addition to Swift, Glance
+ supports normal filesystems, RADOS block devices, Amazon
+ S3 and HTTP. Be aware that some of these choices are
+ limited to read-only usage.
+
+
+ There are also a number of periodic processes that run on
+ Glance to support caching. The most important of these is the
+ replication service, which ensures consistency and
+ availability throughout the cluster. Other periodic processes
+ include auditors, updaters and reapers.
+ As you can see from the diagram in the Conceptual
+ Architecture section, Glance serves a central role to the
+ overall IaaS picture. It accepts API requests for images (or
+ image metadata) from end users or Nova components and can
+ store its disk files in the object storage service,
+ Swift.
+ Identity
+ Keystone provides a single point of integration for
+ OpenStack policy, catalog, token and authentication.
+
+
+ keystone handles API requests as well as providing
+ configurable catalog, policy, token and identity
+ services.
+
+
+ Each Keystone function has a pluggable backend which
+ allows different ways to use the particular service. Most
+ support standard backends like LDAP or SQL, as well as Key
+ Value Stores (KVS).
+
+
+ Most people will use this as a point of customization for
+ their current authentication services.
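+ A quick way to see the catalog, token and identity
+ functions in action is the keystone command-line client; a
+ minimal sketch, assuming admin credentials are already
+ exported in the shell:
+ # List the users and tenants known to the identity service
+ $ keystone user-list
+ $ keystone tenant-list
+ # Show the service catalog (endpoints for Nova, Glance, Swift, ...)
+ $ keystone catalog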
+ Network
+ Quantum provides "network connectivity as a service"
+ between interface devices managed by other OpenStack services
+ (most likely Nova). The service works by allowing users to
+ create their own networks and then attach interfaces to them.
+ Like many of the OpenStack services, Quantum is highly
+ configurable due to its plug-in architecture. These plug-ins
+ accommodate different networking equipment and software. As
+ such, the architecture and deployment can vary dramatically.
+ In the above architecture, a simple Linux networking plug-in
+ is shown.
+
+
+ quantum-server accepts API requests and then routes
+ them to the appropriate quantum plugin for action.
+
+
+ Quantum plugins and agents perform the actual actions
+ such as plugging and unplugging ports, creating networks
+ or subnets and IP addressing. These plugins and agents
+ differ depending on the vendor and technologies used in
+ the particular cloud. Quantum ships with plugins and
+ agents for: Cisco virtual and physical switches, the Nicira
+ NVP product, NEC OpenFlow products, Open vSwitch, Linux
+ bridging and the Ryu Network Operating System.
+
+
+ The common agents are L3 (layer 3), DHCP (dynamic host
+ IP addressing) and the specific plug-in agent.
+
+
+ Most Quantum installations will also make use of a
+ messaging queue to route information between the
+ quantum-server and various agents as well as a database to
+ store networking state for particular plugins.
+
+
+ Quantum will interact mainly with Nova, where it will
+ provide networks and connectivity for its instances.
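+ To make this concrete, the basic create-and-attach
+ workflow looks roughly like the following with the quantum
+ command-line client; the network name and CIDR are
+ illustrative only:
+ # Create a network and a subnet on it
+ $ quantum net-create demo-net
+ $ quantum subnet-create demo-net 10.0.0.0/24
+ # List the ports that the plugins and agents have plugged in
+ $ quantum port-list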
+ Block Storage
+ Cinder separates out the persistent block storage
+ functionality that was previously part of OpenStack Compute
+ (in the form of nova-volume) into its own service. The
+ OpenStack Block Storage API allows for manipulation of
+ volumes, volume types (similar to compute flavors) and volume
+ snapshots.
+
+
+ cinder-api accepts API requests and routes them to
+ cinder-volume for action.
+
+
+ cinder-volume acts upon the requests by reading or
+ writing to the Cinder database to maintain state,
+ interacting with other processes (like cinder-scheduler)
+ through a message queue, and acting directly upon the block
+ storage hardware or software. It can interact with a
+ variety of storage providers through a driver
+ architecture. Currently, there are drivers for IBM,
+ SolidFire, NetApp, Nexenta, Zadara, Linux iSCSI and other
+ storage providers.
+
+
+ Much like nova-scheduler, the cinder-scheduler daemon
+ picks the optimal block storage provider node to create
+ the volume on.
+
+
+ Cinder deployments will also make use of a messaging
+ queue to route information between the cinder processes as
+ well as a database to store volume state.
+
+
+ Like Quantum, Cinder will mainly interact with Nova,
+ providing volumes for its instances.
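+ As a rough sketch of how these pieces fit together, an end
+ user drives the whole chain with a couple of client calls;
+ the volume name and size below are arbitrary examples:
+ # Ask cinder-api for a 1 GB volume; cinder-scheduler picks a
+ # node and cinder-volume creates the volume there
+ $ cinder create --display-name demo-vol 1
+ # Check the volume state recorded in the Cinder database
+ $ cinder list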
diff --git a/doc/training-guide/module001-ch005-vm-provisioning-walk-through.xml b/doc/training-guide/module001-ch005-vm-provisioning-walk-through.xml
index a858100486..7caed3f1b7 100644
--- a/doc/training-guide/module001-ch005-vm-provisioning-walk-through.xml
+++ b/doc/training-guide/module001-ch005-vm-provisioning-walk-through.xml
@@ -6,4 +6,234 @@
xml:id="module001-ch005-vm-provisioning-walk-through">
VM Provisioning Walk ThroughMore Content To be Added ...
-
+ OpenStack Compute gives you a tool to orchestrate a cloud,
+ including running instances, managing networks, and controlling
+ access to the cloud through users and projects. The underlying
+ open source project's name is Nova, and it provides the software
+ that can control an Infrastructure as a Service (IaaS) cloud
+ computing platform. It is similar in scope to Amazon EC2 and
+ Rackspace Cloud Servers. OpenStack Compute does not include any
+ virtualization software; rather it defines drivers that interact
+ with underlying virtualization mechanisms that run on your host
+ operating system, and exposes functionality over a web-based
+ API.
+ Hypervisors
+ OpenStack Compute requires a hypervisor and Compute controls
+ the hypervisors through an API server. The process for selecting
+ a hypervisor usually means prioritizing and making decisions
+ based on budget and resource constraints as well as the
+ inevitable list of supported features and required technical
+ specifications. The majority of development is done with the KVM
+ and Xen-based hypervisors. Refer to
+
+
+ http://goo.gl/n7AXnC
+ for a detailed list of features and support across the hypervisors.
+ With OpenStack Compute, you can orchestrate clouds using
+ multiple hypervisors in different zones. The types of
+ virtualization standards that may be used with Compute
+ include:
+
+
+ KVM - Kernel-based Virtual Machine (visit http://goo.gl/70dvRb)
+
+
+ LXC - Linux Containers (through libvirt) (visit http://goo.gl/Ous3ly)
+
+
+ QEMU - Quick EMUlator (visit http://goo.gl/WWV9lL)
+
+
+ UML - User Mode Linux (visit http://goo.gl/4HAkJj)
+
+
+ VMware vSphere 4.1 update 1 and newer (visit http://goo.gl/0DBeo5)
+
+
+ Xen - Xen, Citrix XenServer and Xen Cloud Platform (XCP)
+ (visit http://goo.gl/yXP9t1)
+
+
+ Bare Metal - Provisions physical hardware via pluggable
+ sub-drivers. (visit http://goo.gl/exfeSg)
+
+
+ Users and Tenants (Projects)
+ The OpenStack Compute system is designed to be used by many
+ different cloud computing consumers or customers, basically
+ tenants on a shared system, using role-based access assignments.
+ Roles control the actions that a user is allowed to perform. In
+ the default configuration, most actions do not require a
+ particular role, but this is configurable by the system
+ administrator editing the appropriate policy.json file that
+ maintains the rules. For example, a rule can be defined so that
+ a user cannot allocate a public IP without the admin role. A
+ user's access to particular images is limited by tenant, but the
+ username and password are assigned per user. Key pairs granting
+ access to an instance are enabled per user, but quotas to
+ control resource consumption across available hardware resources
+ are per tenant.
+ While the original EC2 API supports users, OpenStack Compute
+ adds the concept of tenants. Tenants are isolated resource
+ containers forming the principal organizational structure within
+ the Compute service. They consist of a separate VLAN, volumes,
+ instances, images, keys, and users. A user can specify which
+ tenant he or she wishes to be known as by appending :project_id
+ to his or her access key. If no tenant is specified in the API
+ request, Compute attempts to use a tenant with the same ID as
+ the user.
+ For tenants, quota controls are available to limit
+ the:
+
+
+ Number of volumes which may be created
+
+
+ Total size of all volumes within a project as measured
+ in GB
+
+
+ Number of instances which may be launched
+
+
+ Number of processor cores which may be allocated
+
+
+ Floating IP addresses (assigned to any instance when it
+ launches so the instance has the same publicly accessible IP
+ addresses)
+
+
+ Fixed IP addresses (assigned to the same instance each
+ time it boots, publicly or privately accessible, typically
+ private for management purposes)
+
+
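+ The compute-related limits from this list can also be
+ inspected from the command line; a minimal sketch, assuming the
+ nova client is installed and credentials have been sourced:
+ # Show the compute limits (cores, instances, RAM, floating IPs)
+ # that apply to the current tenant
+ $ nova absolute-limits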
+ Images and Instances
+ This introduction provides a high-level overview of what
+ images and instances are and a description of the life cycle of
+ a typical virtual system within the cloud. There are many ways to
+ configure the details of an OpenStack cloud and many ways to
+ implement a virtual system within that cloud. These
+ configuration details as well as the specific command line
+ utilities and API calls to perform the actions described are
+ presented in the Image Management and Volume
+ Management chapters.
+ Images are disk images which are templates for virtual
+ machine file systems. The image service, Glance, is responsible
+ for the storage and management of images within
+ OpenStack.
+ Instances are the individual virtual machines running on
+ physical compute nodes. The compute service, Nova, manages
+ instances. Any number of instances may be started from the same
+ image. Each instance is run from a copy of the base image so
+ runtime changes made by an instance do not change the image it
+ is based on. Snapshots of running instances may be taken which
+ create a new image based on the current disk state of a
+ particular instance.
+ When starting an instance a set of virtual resources known
+ as a flavor must be selected. Flavors define how many virtual
+ CPUs an instance has and the amount of RAM and size of its
+ ephemeral disks. OpenStack provides a number of predefined
+ flavors which cloud administrators may edit or add to. Users
+ must select from the set of available flavors defined on their
+ cloud.
+ Additional resources such as persistent volume storage and
+ public IP address may be added to and removed from running
+ instances. The examples below show the cinder-volume service,
+ which provides persistent block storage, as opposed to the
+ ephemeral storage provided by the instance flavor.
+ Here is an example of the life cycle of a typical virtual
+ system within an OpenStack cloud to illustrate these
+ concepts.
+ Initial State
+ Images and Instances
+ The following diagram shows the system state prior to
+ launching an instance. The image store fronted by the image
+ service, Glance, has some number of predefined images. In the
+ cloud there is an available compute node with available vCPU,
+ memory and local disk resources. Plus there are a number of
+ predefined volumes in the cinder-volume service.
+ Figure 2.1. Base image state with no running
+ instances
+
+ Launching an instance
+ To launch an instance the user selects an image, a flavor
+ and optionally other attributes. In this case the selected
+ flavor provides a root volume (as all flavors do) labeled vda in
+ the diagram and additional ephemeral storage labeled vdb in the
+ diagram. The user has also opted to map a volume from the
+ cinder-volume store to the third virtual disk, vdc, on this
+ instance.
+ Figure 2.2. Instance creation from image and run time
+ state
+
+ The OpenStack system copies the base image from the image
+ store to the local disk, which is used as the first disk of the
+ instance (vda). Having small images results in faster start
+ up of your instances because less data needs to be copied across
+ the network. The system also creates a new empty disk image to
+ present as the second disk (vdb). Be aware that the second disk
+ is an empty disk with an ephemeral life, as it is destroyed when
+ you delete the instance. The compute node attaches to the
+ requested cinder-volume using iSCSI and maps this to the third
+ disk (vdc) as requested. The vCPU and memory resources are
+ provisioned and the instance is booted from the first drive. The
+ instance runs and changes data on the disks indicated in red in
+ the diagram.
+ There are many possible variations in the details of the
+ scenario, particularly in terms of what the backing storage is
+ and the network protocols used to attach and move storage. One
+ variant worth mentioning here is that the ephemeral storage used
+ for volumes vda and vdb in this example may be backed by network
+ storage rather than local disk. The details are left for later
+ chapters.
+ End State
+ Once the instance has served its purpose and is deleted,
+ all state is reclaimed, except the persistent volume. The
+ ephemeral storage is purged. Memory and vCPU resources are
+ released. And of course the image has remained unchanged
+ throughout.
+ Figure 2.3. End state of image and volume after instance
+ exits
+
+ Once you launch a VM in OpenStack, there is more
+ going on in the background. To understand what's happening
+ behind the dashboard, let's take a deeper dive into OpenStack's
+ VM provisioning. To launch a VM, you can use either the
+ command-line interface or the OpenStack Horizon dashboard.
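+ For example, a minimal command-line launch looks like the
+ following sketch; the image ID, flavor ID and keypair name are
+ placeholders, and the full workflow is covered in the next
+ chapter:
+ # Pick an image and a flavor, then boot an instance named demo-vm
+ $ nova image-list
+ $ nova flavor-list
+ $ nova boot --flavor 1 --image IMAGE_ID --key_name mykey demo-vm
+ # Watch the instance go from BUILD to ACTIVE
+ $ nova list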
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module001-ch006-overview-horizon-cli.xml b/doc/training-guide/module001-ch006-overview-horizon-cli.xml
index 53e69be86b..b68213be1f 100644
--- a/doc/training-guide/module001-ch006-overview-horizon-cli.xml
+++ b/doc/training-guide/module001-ch006-overview-horizon-cli.xml
@@ -5,5 +5,2610 @@
version="5.0"
xml:id="module001-ch006-overview-horizon-cli">
Overview Horizon and OpenStack CLI
- More Content To be Added ...
+ How can I use an OpenStack cloud?
+ As an OpenStack cloud end user, you can provision your own
+ resources within the limits set by administrators. The examples
+ in this guide show you how to complete these tasks by using the
+ OpenStack dashboard and command-line clients. The dashboard,
+ also known as horizon, is a Web-based graphical interface. The
+ command-line clients let you run simple commands to create and
+ manage resources in a cloud and automate tasks by using scripts.
+ Each of the core OpenStack projects has its own command-line
+ client.
+ You can modify these examples for your specific use
+ cases.
+ In addition to these ways of interacting with a cloud, you
+ can access the OpenStack APIs indirectly through cURL commands
+ or open SDKs, or directly through the APIs. You can automate
+ access or build tools to manage resources and services by using
+ the native OpenStack APIs or the EC2 compatibility API.
+ To use the OpenStack APIs, it helps to be familiar with
+ HTTP/1.1, RESTful web services, the OpenStack services, and JSON
+ or XML data serialization formats.
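+ For example, a raw API call that requests a token from the
+ Identity service looks roughly like the following; the
+ endpoint, tenant and credentials are placeholders, and the
+ request body shown is the Identity v2.0 format:
+ # Request a token that the other OpenStack service APIs will accept
+ $ curl -s -X POST http://KEYSTONE_HOST:5000/v2.0/tokens \
+ -H "Content-Type: application/json" \
+ -d '{"auth": {"tenantName": "demo", "passwordCredentials": {"username": "demo", "password": "secret"}}}'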
+ OpenStack dashboard
+ As a cloud end user, the OpenStack dashboard lets you
+ provision your own resources within the limits set by
+ administrators. You can modify these examples to create other
+ types and sizes of server instances.
+ Overview
+ The following requirements must be fulfilled to access the
+ OpenStack dashboard:
+
+
+ The cloud operator has set up an OpenStack
+ cloud.
+
+
+ You have a recent Web browser that supports HTML5. It
+ must have cookies and JavaScript enabled. To use the VNC
+ client for the dashboard, which is based on noVNC, your
+ browser must support HTML5 Canvas and HTML5 WebSockets.
+ For more details and a list of browsers that support
+ noVNC, see https://github.com/kanaka/noVNC/blob/master/README.md
+ and https://github.com/kanaka/noVNC/wiki/Browser-support,
+ respectively.
+
+
+ Learn how to log in to the dashboard and get a short
+ overview of the interface.
+ Log in to the dashboard
+ To log in to the dashboard
+
+
+ Ask your cloud operator for the following
+ information:
+
+
+
+
+ The hostname or public IP address from which you can
+ access the dashboard.
+
+
+ The dashboard is available on the node that has the
+ nova-dashboard server role.
+
+
+ The username and password with which you can log in to
+ the dashboard.
+
+
+
+
+ Open a Web browser that supports HTML5. Make sure that
+ JavaScript and cookies are enabled.
+
+
+ As a URL, enter the host name or IP address that you
+ got from the cloud operator.
+
+
+ https://IP_ADDRESS_OR_HOSTNAME/
+
+
+ On the dashboard log in page, enter your user name and
+ password and click Sign In.
+
+
+ After you log in, the following page appears:
+
+ The top-level row shows the username that you logged in
+ with. You can also access Settings or Sign Out of the Web
+ interface.
+ If you are logged in as an end user rather than an admin
+ user, the main screen shows only the Project tab.
+ OpenStack dashboard – Project tab
+ This tab shows details for the projects of
+ which you are a member.
+ Select a project from the drop-down list on the left-hand
+ side to access the following categories:
+ Overview
+ Shows basic reports on the project.
+ Instances
+ Lists instances and volumes created by users of the
+ project.
+ From here, you can stop, pause, or reboot any instances or
+ connect to them through virtual network computing
+ (VNC).
+ Volumes
+ Lists volumes created by users of the project.
+ From here, you can create or delete volumes.
+ Images &
+ Snapshots
+ Lists images and snapshots created by users of the
+ project, plus any images that are publicly available. Includes
+ volume snapshots. From here, you can create and delete images
+ and snapshots, and launch instances from images and
+ snapshots.
+ Access &
+ Security
+ On the Security
+ Groups tab, you can list, create, and delete security
+ groups and edit rules for security groups.
+ On the Keypairs tab, you
+ can list, create, and import keypairs, and delete keypairs.
+ On the Floating IPs tab,
+ you can allocate an IP address to or release it from a
+ project.
+ On the API Access tab, you
+ can list the API endpoints.
+ Manage images
+ During setup of an OpenStack cloud, the cloud operator sets
+ user permissions to manage images. Image upload and management
+ might be restricted to only cloud administrators or cloud
+ operators. Though you can complete most tasks with the OpenStack
+ dashboard, you can manage images only through the glance and
+ nova clients or the Image Service and Compute APIs.
+ Set up access and security
+ Before you launch a virtual machine, you can add security
+ group rules to enable users to ping and SSH to the instances. To
+ do so, you either add rules to the default security group or add a
+ security group with rules. For information, see the section called "Add security group rules".
+ Keypairs are SSH credentials that are injected into images
+ when they are launched. For this to work, the image must contain
+ the cloud-init package. For information, see the section called "Add keypairs".
+ Add security group rules
+ The following procedure shows you how to add rules to the
+ default security group.
+ To add rules to the default security group
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Access & Security category.
+
+
+ The dashboard shows the security groups that are
+ available for this project.
+
+
+
+
+
+ Select the default security group and click Edit
+ Rules.
+
+
+ The Security Group Rules page appears:
+
+
+
+
+
+ Add a TCP rule
+
+
+ Click Add Rule.
+
+
+ The Add Rule window appears.
+
+
+
+
+ In the IP Protocol list, select TCP.
+
+
+ In the Open list, select Port.
+
+
+ In the Port box, enter 22.
+
+
+ In the Source list, select CIDR.
+
+
+ In the CIDR box, enter 0.0.0.0/0.
+
+
+ Click Add.
+
+
+ Port 22 is now open for requests from any IP
+ address.
+
+
+ If you want to accept requests from a particular range
+ of IP addresses, specify the IP address block in the
+ CIDR box.
+
+
+
+
+ Add an ICMP rule
+
+
+ Click Add Rule.
+
+
+ The Add Rule window appears.
+
+
+
+
+ In the IP Protocol list, select ICMP.
+
+
+ In the Type box, enter -1.
+
+
+ In the Code box, enter -1.
+
+
+ In the Source list, select CIDR.
+
+
+ In the CIDR box, enter 0.0.0.0/0.
+
+
+ Click Add.
+
+
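+ The same two rules can also be added with the nova client
+ instead of the dashboard; a short sketch, where the group name
+ default matches the procedures above:
+ # Open SSH (TCP port 22) and allow ping (all ICMP) from any address
+ $ nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+ $ nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0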
+ Add keypairs
+ Create at least one keypair for each project. If you have
+ generated a keypair with an external tool, you can import it
+ into OpenStack. The keypair can be used for multiple instances
+ that belong to a project.
+ To add a keypair
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Access & Security category.
+
+
+ Click the Keypairs tab. The dashboard shows the
+ keypairs that are available for this project.
+
+
+ To add a keypair
+
+
+ Click Create Keypair.
+
+
+ The Create Keypair window appears.
+
+
+
+
+ In the Keypair Name box, enter a name for your
+ keypair.
+
+
+ Click Create Keypair.
+
+
+ Respond to the prompt to download the keypair.
+
+
+
+
+ To import a keypair
+
+
+ Click Import Keypair.
+
+
+ The Import Keypair window appears.
+
+
+
+
+ In the Keypair Name box, enter the name of your
+ keypair.
+
+
+ In the Public Key box, copy the public key.
+
+
+ Click Import Keypair.
+
+
+
+
+ Save the *.pem file locally and change its permissions
+ so that only you can read and write to the file:
+
+
+ $ chmod 0600 MY_PRIV_KEY.pem
+
+
+ Use the ssh-add command to make the keypair known to
+ SSH:
+
+
+ $ ssh-add MY_PRIV_KEY.pem
+
+
+ The public key of the keypair is registered in the Nova
+ database.
+ The dashboard lists the keypair in the Access &
+ Security category.
+ Launch instances
+ Instances are virtual machines that run inside the cloud.
+ You can launch an instance directly from one of the available
+ OpenStack images or from an image that you have copied to a
+ persistent volume. The OpenStack Image Service provides a pool
+ of images that are accessible to members of different
+ projects.
+ Launch an instance from an image
+ When you launch an instance from an image, OpenStack
+ creates a local copy of the image on the respective compute
+ node where the instance is started.
+ To launch an instance from an image
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Images & Snapshots category.
+
+
+ The dashboard shows the images that have been uploaded
+ to OpenStack Image Service and are available for this
+ project.
+
+
+ Select an image and click Launch.
+
+
+ In the Launch Image window, specify the
+ following:
+
+
+
+
+ Enter an instance name to assign to the virtual
+ machine.
+
+
+ From the Flavor drop-down list, select the size of the
+ virtual machine to launch.
+
+
+ Select a keypair.
+
+
+ In case an image uses a static root password or a
+ static key set (neither is recommended), you do not need
+ to provide a keypair to launch the instance.
+
+
+ In Instance Count, enter the number of virtual
+ machines to launch from this image.
+
+
+ Activate the security groups that you want to assign
+ to the instance.
+
+
+ Security groups are a kind of cloud firewall that
+ define which incoming network traffic should be forwarded to
+ instances. For details, see the section called "Add security group
+ rules”.
+
+
+ If you have not created any specific security groups,
+ you can only assign the instance to the default security
+ group.
+
+
+ If you want to boot from volume, click the respective
+ entry to expand its options. Set the options as described
+ in the section called "Launch an instance from a
+ volume”.
+
+
+
+
+ Click Launch Instance. The instance is started on any
+ of the compute nodes in the cloud.
+
+
+ After you have launched an instance, switch to the
+ Instances category to view the instance name, its (private or
+ public) IP address, size, status, task, and power
+ state.
+ Figure 5. OpenStack dashboard – Instances
+ If you did not provide a keypair, security groups, or
+ rules so far, by default the instance can only be accessed
+ from inside the cloud through VNC at this point. Even pinging
+ the instance is not possible. To access the instance through a
+ VNC console, see the section called "Get a console to an
+ instance”.
+ Launch an instance from a volume
+ You can launch an instance directly from an image that has
+ been copied to a persistent volume.
+ In that case, the instance is booted from the volume,
+ which is provided by nova-volume, through iSCSI.
+ For preparation details, see the section called "Create or delete a
+ volume”.
+ To boot an instance from the volume, especially note the
+ following steps:
+
+
+ To be able to select from which volume to boot, launch
+ an instance from an arbitrary image. The image you select
+ does not boot. It is replaced by the image on the volume
+ that you choose in the next steps.
+
+
+ In case you want to boot a Xen image from a volume,
+ note the following requirement: The image you launch
+ must be the same type, fully virtualized or
+ paravirtualized, as the one on the volume.
+
+
+ Select the volume or volume snapshot to boot
+ from.
+
+
+ Enter a device name. Enter vda for KVM images or xvda
+ for Xen images.
+
+
+
+ To launch an instance from a volume
+ You can launch an instance directly from one of the images
+ available through the OpenStack Image Service or from an image
+ that you have copied to a persistent volume. When you launch
+ an instance from a volume, the procedure is basically the same
+ as when launching an instance from an image in OpenStack Image
+ Service, except for some additional steps.
+
+
+ Create a volume as described in the section called "Create or delete a
+ volume”.
+
+
+ It must be large enough to store an unzipped
+ image.
+
+
+ Create an image.
+
+
+ For details, see Creating images manually in the OpenStack
+ Virtual Machine Image Guide.
+
+
+ Launch an instance.
+
+
+ Attach the volume to the instance as described in the section called "Attach volumes to
+ instances”.
+
+
+ Assuming that the attached volume is mounted as
+ /dev/vdb, use one of the following commands to copy the
+ image to the attached volume:
+
+
+
+
+ For a raw image:
+
+
+ $ cat IMAGE >/dev/vdb
+
+
+ Alternatively, use dd.
+
+
+ For a non-raw image:
+
+
+ $ qemu-img convert -O raw IMAGE /dev/vdb
+
+
+ For a *.tar.bz2 image:
+
+
+ $ tar xfjO IMAGE >/dev/vdb
+
+
+
+
+ Only detached volumes are available for booting.
+ Detach the volume.
+
+
+ To launch an instance from the volume, continue
+ with the section called "Launch an instance from an
+ image”.
+
+
+ You can launch an instance directly from one of the
+ images available through the OpenStack Image Service. When
+ you do that, OpenStack creates a local copy of the image
+ on the respective compute node where the instance is
+ started.
+
+
+ SSH in to your instance
+
+
+ To SSH into your instance, you use the downloaded keypair
+ file.
+ To SSH into your instance
+
+
+ Copy the IP address for your instance.
+
+
+ Use the SSH command to make a secure connection to the
+ instance. For example:
+
+
+ $ ssh -i MyKey.pem ubuntu@10.0.0.2
+
+
+ A prompt asks, "Are you sure you want to continue
+ connecting (yes/no)?" Type yes and you have successfully
+ connected.
+
+
+ Manage instances
+ Create instance snapshots
+
+ To create instance snapshots
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Instances category.
+
+
+ The dashboard lists the instances that are available
+ for this project.
+
+
+ Select the instance of which to create a snapshot.
+ From the Actions drop-down list, select Create
+ Snapshot.
+
+
+ In the Create Snapshot window, enter a name for the
+ snapshot. Click Create Snapshot. The dashboard shows the
+ instance snapshot in the Images &
+ Snapshots category.
+
+
+ To launch an instance from the snapshot, select the
+ snapshot and click Launch. Proceed with the section called "Launch an instance from an
+ image”.
+
+
+ Control the state of an instance
+ To control the state of an instance
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Instances category.
+
+
+ The dashboard lists the instances that are available
+ for this project.
+
+
+ Select the instance for which you want to change the
+ state.
+
+
+ In the More drop-down list in the Actions column,
+ select the state.
+
+
+ Depending on the current state of the instance, you
+ can choose to pause, un-pause, suspend, resume, soft or
+ hard reboot, or terminate an instance.
+
+
+
+ Track usage
+ Use the dashboard's Overview category to track usage of
+ instances for each project.
+
+ You can track costs per month by showing metrics like
+ number of VCPUs, disks, RAM, and uptime of all your
+ instances.
+ To track usage
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Select a month and click Submit to query the instance
+ usage for that month.
+
+
+ Click Download CSV Summary to download a CSV
+ summary.
+
+
+ Manage volumes
+ Volumes are block storage devices that you can attach to
+ instances. They allow for persistent storage as they can be
+ attached to a running instance, or detached and attached to
+ another instance at any time.
+ In contrast to the instance's root disk, the data of volumes
+ is not destroyed when the instance is deleted.
+ Create or delete a volume
+ To create or delete a volume
+
+
+ Log in to the OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Volumes category.
+
+
+ To create a volume
+
+
+
+
+ Click Create Volume.
+
+
+ In the window that opens, enter a name to assign to a
+ volume, a description (optional), and define the size in
+ GBs.
+
+
+ Confirm your changes.
+
+
+ The dashboard shows the volume in the
+ Volumes category.
+
+
+
+
+ To delete one or multiple volumes
+
+
+
+
+ Activate the checkboxes in front of the volumes that
+ you want to delete.
+
+
+ Click Delete Volumes and confirm your choice in the
+ pop-up that appears.
+
+
+ A message indicates whether the action was
+ successful.
+
+
+ After you create one or more volumes, you can attach them
+ to instances.
+ You can attach a volume to one instance at a time.
+ View the status of a volume in the Instances &
+ Volumes category of the dashboard: the volume is either
+ available or In-Use.
+ Attach volumes to instances
+ To attach volumes to instances
+
+
+ Log in to OpenStack dashboard.
+
+
+ If you are a member of multiple projects, select a
+ project from the drop-down list at the top of the
+ Project tab.
+
+
+ Click the Volumes category.
+
+
+ Select the volume to add to an instance and click Edit
+ Attachments.
+
+
+ In the Manage Volume Attachments window, select an
+ instance.
+
+
+ Enter a device name under which the volume should be
+ accessible on the virtual machine.
+
+
+ Click Attach Volume to confirm your changes. The
+ dashboard shows the instance to which the volume has been
+ attached and the volume's device name.
+
+
+ Now you can log in to the instance, mount the disk,
+ format it, and use it.
+
+
+ To detach a volume from an instance
+
+
+
+
+ Select the volume and click Edit Attachments.
+
+
+ Click Detach Volume and confirm your changes.
+
+
+ A message indicates whether the action was
+ successful.
+
+
+ OpenStack command-line clients
+ Overview
+ You can use the OpenStack command-line clients to run
+ simple commands that make API calls and automate tasks by
+ using scripts. Internally, each client command runs cURL
+ commands that embed API requests. The OpenStack APIs are
+ RESTful APIs that use the HTTP protocol, including methods,
+ URIs, media types, and response codes.
+ These open-source Python clients run on Linux or Mac OS X
+ systems and are easy to learn and use. Each OpenStack service
+ has its own command-line client. On some client commands, you
+ can specify a debug parameter to show the underlying API
+ request for the command. This is a good way to become familiar
+ with the OpenStack API calls.
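+ For example, a minimal way to watch the API traffic behind
+ a single command is the client's global debug option:
+ # Print the HTTP requests and responses issued by the client
+ $ nova --debug list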
+ The following command-line clients are available for the
+ respective services' APIs:
+ cinder (python-cinderclient)
+ Client for the Block Storage Service API. Use to create
+ and manage volumes.
+ glance (python-glanceclient)
+ Client for the Image Service API. Use to create and manage
+ images.
+ keystone (python-keystoneclient)
+ Client for the Identity Service API. Use to create and
+ manage users, tenants, roles, endpoints, and
+ credentials.
+ nova (python-novaclient)
+ Client for the Compute API and its extensions. Use to
+ create and manage images, instances, and flavors.
+ neutron (python-neutronclient)
+ Client for the Networking API. Use to configure networks
+ for guest servers. This client was previously known as
+ quantum.
+ swift (python-swiftclient)
+ Client for the Object Storage API. Use to gather
+ statistics, list items, update metadata, upload, download and
+ delete files stored by the object storage service. Provides
+ access to a swift installation for ad hoc processing.
+ heat (python-heatclient)
+ Client for the Orchestration API. Use to launch stacks
+ from templates, view details of running stacks including
+ events and resources, and update and delete stacks.
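+ Once a client is installed and credentials are sourced,
+ each one offers a simple read-only list command that makes a
+ convenient smoke test; a minimal sketch:
+ # One representative command per client
+ $ nova list
+ $ glance image-list
+ $ cinder list
+ $ neutron net-list
+ $ swift list
+ $ heat stack-list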
+ Install the OpenStack command-line clients
+ To install the clients, install the prerequisite software
+ and the Python package for each OpenStack client.
+ Install the clients
+ Use pip to install the OpenStack clients on a Mac OS X
+ or Linux system. It is easy and ensures that you get the
+ latest version of the client from the Python Package
+ Index. Also, pip lets you update or remove a
+ package. After you install the clients, you must source an
+ openrc file to set required environment variables before you
+ can request OpenStack services through the clients or the
+ APIs.
+ To install the clients
+
+
+ You must install each client separately.
+
+
+ Run the following command to install or update a
+ client package:
+
+
+ $ sudo pip install [--upgrade]
+ python-<project>client
+
+
+ Where <project> is the project name and has one
+ of the following values:
+
+
+
+
+ nova. Compute API and extensions.
+
+
+ neutron. Networking API.
+
+
+ keystone. Identity Service API.
+
+
+ glance. Image Service API.
+
+
+ swift. Object Storage API.
+
+
+ cinder. Block Storage Service API.
+
+
+ heat. Orchestration API.
+
+
+
+
+ For example, to install the nova client, run the
+ following command:
+
+
+ $ sudo pip install python-novaclient
+
+
+ To update the nova client, run the following
+ command:
+
+
+ $ sudo pip install --upgrade
+ python-novaclient
+
+
+ To remove the nova client, run the following
+ command:
+
+
+ $ sudo pip uninstall python-novaclient
+
+
+ Before you can issue client commands, you must
+ download and source the openrc file to set environment
+ variables. Proceed to the section called "OpenStack RC file".
+
+
+ Get the version for a client
+ After you install an OpenStack client, you can search for
+ its version number, as follows:
+ $ pip freeze | grep python-
+ python-glanceclient==0.4.0
+ python-keystoneclient==0.1.2
+ -e git+https://github.com/openstack/python-novaclient.git@077cc0bf22e378c4c4b970f2331a695e440a939f#egg=python_novaclient-dev
+ python-neutronclient==0.1.1
+ python-swiftclient==1.1.1
+ You can also use the yolk -l command to see which version of
+ the client is installed:
+ $ yolk -l | grep python-novaclient
+ python-novaclient - 2.6.10.27 - active development
+ (/Users/your.name/src/cloud-servers/src/src/python-novaclient)
+ python-novaclient - 2012.1 - non-active
+ OpenStack RC file
+ To set the required environment variables for the OpenStack
+ command-line clients, you must download and source an
+ environment file, openrc.sh. It is project-specific and contains
+ the credentials used by OpenStack Compute, Image, and Identity
+ services.
+ When you source the file and enter the password, environment
+ variables are set for that shell. They allow the commands to
+ communicate with the OpenStack services that run in the
+ cloud.
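+ The downloaded file is a small shell script; the following
+ is a representative sketch of its contents, with placeholder
+ values that will differ for your cloud:
+ # demo-openrc.sh - credentials used by the command-line clients
+ export OS_USERNAME=demo
+ export OS_TENANT_NAME=demo
+ export OS_AUTH_URL=http://CONTROLLER:5000/v2.0
+ # Prompt for the password instead of storing it in the file
+ echo "Please enter your OpenStack Password:"
+ read -sr OS_PASSWORD_INPUT
+ export OS_PASSWORD=$OS_PASSWORD_INPUT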
+ You can download the file from the OpenStack dashboard as an
+ administrative user or any other user.
+ To download the OpenStack RC file
+
+
+ Log in to the OpenStack dashboard.
+
+
+ On the Projecttab, select the project for which you
+ want to download the OpenStack RC file.
+
+
+ Click Access & Security. Then, click Download
+ OpenStack RC Fileand save the file.
+
+
+ Copy the openrc.sh file to the machine from where you
+ want to run OpenStack commands.
+
+
+ For example, copy the file to the machine from where you
+ want to upload an image with a glance client command.
+
+
+ On any shell from where you want to run OpenStack
+ commands, source the openrc.sh file for the respective
+ project.
+
+
+ In this example, we source the demo-openrc.sh file for
+ the demo project:
+
+
+ $ source demo-openrc.sh
+
+
+ When you are prompted for an OpenStack password, enter
+ the OpenStack password for the user who downloaded the
+ openrc.sh file.
+
+
+ When you run OpenStack client commands, you can override
+ some environment variable settings by using the options that
+ are listed at the end of the nova help output. For example,
+ you can override the OS_PASSWORD setting in the openrc.sh
+ file by specifying a password on a nova command, as
+ follows:
+
+
+ $ nova --password <password> image-list
+
+
+ Where password is your password.
+
+
+ Manage images
+ During setup of an OpenStack cloud, the cloud operator sets
+ user permissions to manage images.
+ Image upload and management might be restricted to only
+ cloud administrators or cloud operators.
+ After you upload an image, it is considered golden and you
+ cannot change it.
+ You can upload images through the glance client or the Image
+ Service API. You can also use the nova client to list images,
+ set and delete image metadata, delete images, and take a
+ snapshot of a running instance to create an image.
+ Manage images with the glance client
+ To list or get details for images
+
+
+ To list the available images:
+
+
+ $ glance image-list
+
+
+ You can use grep to filter the list, as
+ follows:
+
+
+ $ glance image-list | grep 'cirros'
+
+
+ To get image details, by name or ID:
+
+
+ $ glance image-show myCirrosImage
+
+
+ To add an image
+
+
+ The following example uploads a CentOS 6.3 image in
+ qcow2 format and configures it for public access:
+
+
+ $ glance image-create --name centos63-image
+ --disk-format=qcow2 --container-format=bare
+ --is-public=True ./centos63.qcow2
+
+
+ To create an image
+
+
+ Write any buffered data to disk.
+
+
+ For more information, see Taking Snapshots in the OpenStack Operations
+ Guide.
+
+
+ To create the image, list instances to get the server
+ ID:
+
+
+ $ nova list
+
+
+ In this example, the server is named myCirrosServer.
+ Use this server to create a snapshot, as follows:
+
+
+ $ nova image-create myCirrosServer
+ myCirrosImage
+
+
+ The command creates a qemu snapshot and automatically
+ uploads the image to your repository. Only the tenant that
+ creates the image has access to it.
+
+
+ Get details for your image to check its status:
+
+
+ $ nova image-show IMAGE
+
+
+ The image status changes from SAVING to ACTIVE. Only
+ the tenant who creates the image has access to it.
+
+
+ To launch an instance from your image
+
+
+ To launch an instance from your image, include the
+ image ID and flavor ID, as follows:
+
+
+ $ nova boot newServer --image
+ 7e5142af-1253-4634-bcc6-89482c5f2e8a --flavor 3
+
+
+ Troubleshoot image creation
+
+
+ You cannot create a snapshot from an instance that
+ has an attached volume. Detach the volume, create the
+ image, and re-mount the volume.
+
+
+ Make sure the version of qemu you are using is
+ version 0.14 or greater. Older versions of qemu result
+ in an "unknown option -s" error message in the
+ nova-compute.log.
+
+
+ Examine the /var/log/nova-api.log and
+ /var/log/nova-compute.log log files for error
+ messages.
+
+
+ Set up access and security for instances
+ When you launch a virtual machine, you can inject a key
+ pair, which provides SSH access to your instance. For this to
+ work, the image must contain the cloud-init package. Create at
+ least one key pair for each project. If you generate a keypair
+ with an external tool, you can import it into OpenStack. You can
+ use the keypair for multiple instances that belong to that
+ project. In case an image uses a static root password or a
+ static key set – neither is recommended – you must not provide a
+ keypair when you launch the instance.
+ A security group is a named collection of network access
+ rules that you use to limit the types of traffic that have
+ access to instances. When you launch an instance, you can assign
+ one or more security groups to it. If you do not create security
+ groups, new instances are automatically assigned to the default
+ security group, unless you explicitly specify a different
+ security group. The associated rules in each security group
+ control the traffic to instances in the group. Any incoming
+ traffic that is not matched by a rule is denied access by
+ default. You can add rules to or remove rules from a security
+ group. You can modify rules for the default and any other
+ security group.
+ You must modify the rules for the default security group
+ because users cannot access instances that use the default group
+ from any IP address outside the cloud.
+ You can modify the rules in a security group to allow access
+ to instances through different ports and protocols. For example,
+ you can modify rules to allow access to instances through SSH,
+ to ping them, or to allow UDP traffic – for example, for a DNS
+ server running on an instance. You specify the following
+ parameters for rules:
+
+
+ Source of traffic. Enable traffic to instances from
+ either IP addresses inside the cloud from other group
+ members or from all IP addresses.
+
+
+ Protocol. Choose TCP for SSH, ICMP for pings, or
+ UDP.
+
+
+ Destination port on virtual machine. Defines a port
+ range. To open a single port only, enter the same value
+ twice. ICMP does not support ports: Enter values to define
+ the codes and types of ICMP traffic to be allowed.
+
+
+ Rules are automatically enforced as soon as you create or
+ modify them.
+ You can also assign a floating IP address to a running
+ instance to make it accessible from outside the cloud. You
+ assign a floating IP address to an instance and attach a block
+ storage device, or volume, for persistent storage.
+ Add or import keypairs
+ To add a key
+ You can generate a keypair or upload an existing public
+ key.
+
+
+ To generate a keypair, run the following
+ command:
+
+
+ $ nova keypair-add KEY_NAME > MY_KEY.pem
+
+
+ The command generates a keypair named KEY_NAME, writes
+ the private key to the MY_KEY.pem file, and registers the
+ public key at the Nova database.
+
+
+ To set the permissions of the MY_KEY.pem file, run the
+ following command:
+
+
+ $ chmod 600 MY_KEY.pem
+
+
+ The command changes the permissions of the MY_KEY.pem
+ file so that only you can read and write to it.
+
+
+ To import a key
+
+
+ If you have already generated a keypair with the
+ public key located at ~/.ssh/id_rsa.pub, run the following
+ command to upload the public key:
+
+
+ $ nova keypair-add --pub_key ~/.ssh/id_rsa.pub
+ KEY_NAME
+
+
+ The command registers the public key at the Nova
+ database and names the keypair KEY_NAME.
+
+
+ List keypairs to make sure that the uploaded keypair
+ appears in the list:
+
+
+ $ nova keypair-list
+
+
+ Configure security groups and rules
+ To configure security groups
+
+
+ To list all security groups
+
+
+ To list security groups for the current project,
+ including descriptions, enter the following
+ command:
+
+
+ $ nova secgroup-list
+
+
+ To create a security group
+
+
+ To create a security group with a specified name and
+ description, enter the following command:
+
+
+ $ nova secgroup-create SEC_GROUP_NAME
+ GROUP_DESCRIPTION
+
+
+ To delete a security group
+
+
+ To delete a specified group, enter the following
+ command:
+
+
+ $ nova secgroup-delete SEC_GROUP_NAME
+
+
+ To configure security group rules
+ Modify security group rules with the nova
+ secgroup-*-rule commands.
+
+
+ On a shell, source the OpenStack RC file. For details,
+ see the section called "OpenStack RC file".
+
+
+ To list the rules for a security group
+
+
+ $ nova secgroup-list-rules SEC_GROUP_NAME
+
+
+ To allow SSH access to the instances
+
+
+ Choose one of the following sub-steps:
+
+
+
+
+ Add rule for all IPs
+
+
+ Either from all IP addresses (specified as IP subnet
+ in CIDR notation as 0.0.0.0/0):
+
+
+ $ nova secgroup-add-rule SEC_GROUP_NAME tcp 22 22
+ 0.0.0.0/0
+
+
+
+
+ Add rule for security groups
+
+
+ Alternatively, you can allow only IP addresses from
+ other security groups (source groups) to access the
+ specified port:
+
+
+ $ nova secgroup-add-group-rule --ip_proto tcp
+ --from_port 22 \ --to_port 22 SEC_GROUP_NAME
+ SOURCE_GROUP_NAME
+
+
+
+
+ To allow pinging the instances
+
+
+ Choose one of the following sub-steps:
+
+
+
+
+ To allow pinging from IPs
+
+
+ Specify all IP addresses as IP subnet in CIDR
+ notation: 0.0.0.0/0. This command allows access to all
+ codes and all types of ICMP traffic, respectively:
+
+
+ $ nova secgroup-add-rule SEC_GROUP_NAME icmp -1 -1
+ 0.0.0.0/0
+
+
+ To allow pinging from other security groups
+
+
+ To allow only members of other security groups (source
+ groups) to ping instances:
+
+
+ $ nova secgroup-add-group-rule --ip_proto icmp
+ --from_port -1 \ --to_port -1 SEC_GROUP_NAME
+ SOURCE_GROUP_NAME
+
+
+
+
+ To allow access through UDP port
+
+
+ To allow access through a UDP port, such as allowing
+ access to a DNS server that runs on a VM, complete one of
+ the following sub-steps:
+
+
+
+
+ To allow UDP access from IPs
+
+
+ Specify all IP addresses as IP subnet in CIDR
+ notation: 0.0.0.0/0.
+
+
+ $ nova secgroup-add-rule SEC_GROUP_NAME udp 53 53
+ 0.0.0.0/0
+
+
+ To allow UDP access
+
+
+ To allow only IP addresses from other security groups
+ (source groups) to access the specified port:
+
+
+ $ nova secgroup-add-group-rule --ip_proto udp
+ --from_port 53 \ --to_port 53 SEC_GROUP_NAME
+ SOURCE_GROUP_NAME
+
+
+
+
+ To delete a security group rule, specify the same
+ arguments that you used to create the rule.
+
+
+ To delete the security rule that you created in Step 3.a:
+
+
+ $ nova secgroup-delete-rule SEC_GROUP_NAME tcp 22 22
+ 0.0.0.0/0
+
+
+ To delete the security rule that you created in Step 3.b:
+
+
+ $ nova secgroup-delete-group-rule --ip_proto tcp
+ --from_port 22 \ --to_port 22 SEC_GROUP_NAME
+ SOURCE_GROUP_NAME
+
+
+ Launch instances
+ Instances are virtual machines that run inside the
+ cloud.
+ Before you can launch an instance, you must gather
+ parameters such as the image and flavor from which you want to
+ launch your instance.
+ You can launch an instance directly from one of the
+ available OpenStack images or from an image that you have copied
+ to a persistent volume. The OpenStack Image Service provides a
+ pool of images that are accessible to members of different
+ projects.
+ Gather parameters to launch an instance
+ To launch an instance, you must specify the following
+ parameters:
+
+
+ The instance source, which is an image or snapshot.
+ Alternatively, you can boot from a volume, which is block
+ storage, to which you've copied an image or
+ snapshot.
+
+
+ The image or snapshot, which represents the operating
+ system.
+
+
+ A name for your instance.
+
+
+ The flavor for your instance, which defines the
+ compute, memory, and storage capacity of nova computing
+ instances. A flavor is an available hardware configuration
+ for a server. It defines the "size" of a virtual server
+ that can be launched. For more details and a list of
+ default flavors available, see Section 1.5, "Managing
+ Flavors," (⇽ User Guide for Administrators ).
+
+
+ User Data is a special key in the metadata service
+ which holds a file that cloud-aware applications within
+ the guest instance can access. For example, the cloud-init
+ system is an open source package from
+ Ubuntu that handles early initialization of a cloud
+ instance and makes use of this user data.
+
+
+ Access and security credentials, which include one or
+ both of the following credentials:
+
+
+
+
+ A key-pair for your instance, which are SSH
+ credentials that are injected into images when they are
+ launched. For this to work, the image must contain the
+ cloud-init package. Create at least one keypair for each
+ project. If you already have generated a key-pair with an
+ external tool, you can import it into OpenStack. You can
+ use the keypair for multiple instances that belong to that
+ project. For details, refer to Section 1.5.1, Creating or
+ Importing Keys.
+
+
+ A security group, which defines which incoming network
+ traffic is forwarded to instances. Security groups hold a
+ set of firewall policies, known as security group rules.
+ For details, see xx.
+
+
+
+
+ If needed, you can assign a floating (public) IP
+ address to a running instance and attach a block storage
+ device, or volume, for persistent storage. For details,
+ see Section 1.5.3, Managing IP Addresses and Section 1.7,
+ Managing Volumes.
+
+
+ After you gather the parameters you need to launch an
+ instance, you can launch it from an image or a volume.
+ To gather the parameters to launch an instance
+
+
+ On a shell, source the OpenStack RC file.
+
+
+ List the available flavors:
+
+
+ $ nova flavor-list
+
+
+ Note the ID of the flavor that you want to use for
+ your instance.
+
+
+ List the available images:
+
+
+ $ nova image-list
+
+
+ You can also filter the image list by using grep to
+ find a specific image, like this:
+
+
+ $ nova image-list | grep 'kernel'
+
+
+ Note the ID of the image that you want to boot your
+ instance from.
+
+
+ List the available security groups:
+
+
+ $ nova secgroup-list --all-tenants
+
+
+ If you have not created any security groups, you can
+ assign the instance to only the default security
+ group.
+
+
+ You can also list rules for a specified security
+ group:
+
+
+ $ nova secgroup-list-rules default
+
+
+ In this example, the default security group has been
+ modified to allow HTTP traffic on the instance by
+ permitting TCP traffic on Port 80.
+
+
+ List the available keypairs.
+
+
+ $ nova keypair-list
+
+
+ Note the name of the keypair that you use for SSH
+ access.
+
+
+ Launch an instance from an image
+ Use this procedure to launch an instance from an
+ image.
+ To launch an instance from an image
+
+
+ Now that you have all the parameters required to launch an
+ instance, run the following command and specify the server
+ name, flavor ID, and image ID. Optionally, you can provide
+ a key name for access control and a security group for
+ security. You can also include metadata key and value
+ pairs. For example, you can add a description for your
+ server by providing the --meta description="My
+ Server" parameter.
+
+
+ You can pass user data in a file on your local system
+ and pass it at instance launch by using the flag
+ --user-data <user-data-file>.
+
+
+ $ nova boot --flavor FLAVOR_ID --image IMAGE_ID
+ --key_name KEY_NAME --user-data mydata.file \
+ --security_group SEC_GROUP NAME_FOR_INSTANCE --meta
+ KEY=VALUE --meta KEY=VALUE
+
+
+ The command returns a list of server properties,
+ depending on which parameters you provide.
+
+
+ A status of BUILD indicates that the instance has
+ started, but is not yet online.
+
+
+ A status of ACTIVE indicates that your server is
+ active.
+
+
+ Copy the server ID value from the id field in the
+ output. You use this ID to get details for or delete your
+ server.
+
+
+ Copy the administrative password value from the
+ adminPass field. You use this value to log into your
+ server.
+
+
+ Check if the instance is online:
+
+
+ $ nova list
+
+
+ This command lists all instances of the project you
+ belong to, including their ID, their name, their status,
+ and their private (and if assigned, their public) IP
+ addresses.
+
+
+ If the status for the instance is ACTIVE, the instance
+ is online.
+
+
+ To view the available options for the nova
+ list command, run the following command:
+
+
+ $ nova help list
+
+
+ If you did not provide a keypair, security groups, or
+ rules, you can only access the instance from inside the
+ cloud through VNC. Even pinging the instance is not
+ possible.
+
+
+ Launch an instance from a volume
+ After you create a bootable volume, you launch an instance from the volume.
+ To launch an instance from a volume
+
+
+ To create a bootable volume
+
+
+ To create a volume from an image, run the following
+ command:
+
+
+ # cinder create --image-id
+ 397e713c-b95b-4186-ad46-6126863ea0a9 --display-name
+ my-bootable-vol 8
+
+
+ Optionally, to configure your volume, see the
+ Configuring Image Service and Storage for Compute chapter
+ in the OpenStack Configuration Reference.
+
+
+ To list volumes
+
+
+ Enter the following command:
+
+
+ $ nova volume-list
+
+
+ Copy the value in the ID field for your volume.
+
+
+
+
+ To launch an instance
+
+
+ Enter the nova boot command with the
+ --block_device_mapping parameter, as follows:
+
+
+ $ nova boot --flavor <flavor>
+ --block_device_mapping
+ <dev_name>=<id>:<type>:<size>:<delete_on_terminate>
+ <name>
+
+
+ The command arguments are:
+
+
+ --flavor flavor
+
+
+ The flavor ID.
+
+
+ --block_device_mapping dev-
+ name=id:type:size:delete-on-terminate
+
+
+
+
+ dev-name. A device name where the volume is attached
+ in the system at /dev/dev_name. This value is typically
+ vda.
+
+
+ id. The ID of the volume to boot from, as shown in the
+ output of nova volume-list.
+
+
+ type. Either snap or any other value, including a
+ blank string. snap means that the volume was created from
+ a snapshot.
+
+
+ size. The size of the volume, in GBs. It is safe to
+ leave this blank and have the Compute service infer the
+ size.
+
+
+ delete-on-terminate. A boolean that indicates whether
+ the volume should be deleted when the instance is
+ terminated. You can specify
+
+
+
+
+ True or 1
+
+
+ False or 0
+
+
+ name
+
+
+ The name for the server.
+
+
+ For example, you might enter the following command to
+ boot from a volume with ID
+ bd7cf584-45de-44e3-bf7f-f7b50bf235e3. The volume is not
+ deleted when the instance is terminated:
+
+
+ $ nova boot --flavor 2 --image
+ 397e713c-b95b-4186-ad46-6126863ea0a9
+ --block_device_mapping
+ vda=bd7cf584-45de-44e3-bf7f-f7b50bf235e3:::0
+ myInstanceFromVolume
+
+
+ Now when you list volumes, you can see that the volume
+ is attached to a server:
+
+
+ $ nova volume-list
+
+
+ Additionally, when you list servers, you see the
+ server that you booted from a volume:
+
+
+ $ nova list
+
+
+ Manage instances and hosts
+ Instances are virtual machines that run inside the
+ cloud.
+ Manage IP addresses
+ Each instance can have a private, or fixed, IP address and
+ a public, or floating, one.
+ Private IP addresses are used for communication between
+ instances, and public ones are used for communication with the
+ outside world.
+ When you launch an instance, it is automatically assigned
+ a private IP address that stays the same until you explicitly
+ terminate the instance. Rebooting an instance has no effect on
+ the private IP address.
+ A pool of floating IPs, configured by the cloud operator,
+ is available in OpenStack Compute.
+ You can allocate a certain number of these to a project:
+ The maximum number of floating IP addresses per project is
+ defined by the quota.
+ You can add a floating IP address from this set to an
+ instance of the project. Floating IP addresses can be
+ dynamically disassociated and associated with other instances
+ of the same project at any time.
+ Before you can assign a floating IP address to an
+ instance, you first must allocate floating IPs to a project.
+ After floating IP addresses have been allocated to the current
+ project, you can assign them to running instances.
+ One floating IP address can be assigned to only one
+ instance at a time. Floating IP addresses can be managed
+ with the nova floating-ip-* commands, provided by the
+ python-novaclient package.
+ To list pools with floating IP addresses
+
+
+ To list all pools that provide floating IP
+ addresses:
+
+
+ $ nova floating-ip-pool-list
+
+
+ To allocate a floating IP address to the current
+ project
+
+
+ The output of the following command shows the freshly
+ allocated IP address:
+
+
+ $ nova floating-ip-create
+
+
+ If more than one pool of IP addresses is available,
+ you can also specify the pool from which to allocate the
+ IP address:
+
+
+ $ nova floating-ip-create POOL_NAME
+
+
+ To list floating IP addresses allocated to the current
+ project
+
+
+ If an IP is already associated with an instance, the
+ output also shows the IP for the instance, the fixed IP
+ address for the instance, and the name of the pool that
+ provides the floating IP address.
+
+
+ $ nova floating-ip-list
+
+
+ To release a floating IP address from the current
+ project
+
+
+ The IP address is returned to the pool of IP addresses
+ that are available for all projects. If an IP address is
+ currently assigned to a running instance, it is
+ automatically disassociated from the instance.
+
+
+ $ nova floating-ip-delete FLOATING_IP
+
+
+ To assign a floating IP address to an instance
+
+
+ To associate an IP address with an instance, one or
+ multiple floating IP addresses must be allocated to the
+ current project. Check this with:
+
+
+ $ nova floating-ip-list
+
+
+ In addition, you must know the instance's name (or
+ ID). To look up the instances that belong to the current
+ project, use the nova list command.
+
+
+ $ nova add-floating-ip INSTANCE_NAME_OR_ID
+ FLOATING_IP
+
+
+ After you assign the IP with nova add-floating-ip and
+ configure security group rules for the instance, the
+ instance is publicly available at the floating IP
+ address.
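+
+
+ For example, assuming a floating IP address of
+ 172.24.4.225 has been allocated to the current project and
+ the instance is named myCirrosServer (both values are
+ illustrative):
+
+
+ $ nova add-floating-ip myCirrosServer 172.24.4.225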
+
+
+ To remove a floating IP address from an instance
+
+
+ To remove a floating IP address from an instance, you
+ must specify the same arguments that you used to assign
+ the IP.
+
+
+ $ nova remove-floating-ip INSTANCE_NAME_OR_ID
+ FLOATING_IP
+
+
+ Change the size of your
+ server
+ You change the size of a server by changing its
+ flavor.
+ To change the size of your server
+
+
+ List the available flavors:
+
+
+ $ nova flavor-list
+
+
+ Show information about your server, including its
+ size:
+
+
+ $ nova show myCirrosServer
+
+
+ The size of the server is m1.small (2).
+
+
+ To resize the server, pass the server ID and the
+ desired flavor to the nova resize command. Include the
+ --poll parameter to report the resize progress.
+
+
+ $ nova resize myCirrosServer 4 --poll
+
+
+ Instance resizing... 100% complete
+ Finished
+
+
+ Show the status for your server:
+
+
+ $ nova list
+
+
+ When the resize completes, the status becomes
+ VERIFY_RESIZE. To confirm the resize:
+
+
+ $ nova resize-confirm
+ 6beefcf7-9de6-48b3-9ba9-e11b343189b3
+
+
+ The server status becomes ACTIVE.
+
+
+ If the resize fails or does not work as expected, you
+ can revert the resize:
+
+
+ $ nova resize-revert
+ 6beefcf7-9de6-48b3-9ba9-e11b343189b3
+
+
+ The server status becomes ACTIVE.
+
+
+ Stop and start an instance
+ Use one of the following methods to stop and start an
+ instance.
+ Pause and un-pause an instance
+ To pause and un-pause a server
+
+
+ To pause a server, run the following command:
+
+
+ $ nova pause SERVER
+
+
+ This command stores the state of the VM in RAM. A
+ paused instance continues to run in a frozen
+ state.
+
+
+ To un-pause the server, run the following
+ command:
+
+
+ $ nova unpause SERVER
+
+
+ Suspend and resume an instance
+ To suspend and resume a server
+ Administrative users might want to suspend an
+ infrequently used instance or to perform system
+ maintenance.
+
+
+ When you suspend an instance, its VM state is stored
+ on disk, all memory is written to disk, and the virtual
+ machine is stopped. Suspending an instance is similar to
+ placing a device in hibernation; memory and vCPUs become
+ available.
+
+
+ To initiate a hypervisor-level suspend operation,
+ run the following command:
+
+
+ $ nova suspend SERVER
+
+
+ To resume a suspended server:
+
+
+ $ nova resume SERVER
+
+
+ Reboot an instance
+ You can perform a soft or hard reboot of a running
+ instance. A soft reboot attempts a graceful shutdown and
+ restart of the instance. A hard reboot power cycles the
+ instance.
+ To reboot a server
+
+
+ By default, when you reboot a server, it is a soft
+ reboot.
+
+
+ $ nova reboot SERVER
+
+
+ To perform a hard reboot, pass the --hard parameter, as
+ follows:
+ $ nova reboot --hard SERVER
+ Evacuate instances
+ If a cloud compute node fails due to a hardware
+ malfunction or another reason, you can evacuate instances to
+ make them available again.
+ You can choose evacuation parameters for your use
+ case.
+ To preserve user data on server disk, you must configure
+ shared storage on the target host. Also, you must validate
+ that the current VM host is down. Otherwise the evacuation
+ fails with an error.
+ To evacuate your server
+
+
+ To find a different host for the evacuated instance,
+ run the following command to list hosts:
+
+
+ $ nova host-list
+
+
+ You can pass the instance password to the command by
+ using the --password <pwd> option. If you do not
+ specify a password, one is generated and printed after the
+ command finishes successfully. The following command
+ evacuates a server without shared storage:
+
+
+ $ nova evacuate evacuated_server_name host_b
+
+
+ The command evacuates an instance from a down host to
+ a specified host. The instance is booted from a new disk,
+ but preserves its configuration including its ID, name,
+ uid, IP address, and so on. The command returns a
+ password:
+
+
+ To preserve the user disk data on the evacuated
+ server, deploy OpenStack Compute with a shared file
+ system.
+
+
+ $ nova evacuate evacuated_server_name host_b
+ --on-shared-storage
+
+
+ Delete an instance
+ When you no longer need an instance, you can delete
+ it.
+ To delete an instance
+
+
+ List all instances:
+
+
+ $ nova list
+
+
+ Use the following command to delete the newServer
+ instance, which is in the ERROR state:
+
+
+ $ nova delete newServer
+
+
+ The command does not notify you that your server was
+ deleted.
+
+
+ Instead, run the nova list command:
+
+
+ $ nova list
+
+
+ The deleted instance does not appear in the
+ list.
+
+
+ Get a console to an instance
+ To get a console to an instance
+ To get a VNC console to an instance, run the following
+ command:
+ $ nova get-vnc-console myCirrosServer xvpvnc
+ The command returns a URL from which you can access your
+ instance:
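+ The exact URL depends on your deployment, but for the
+ xvpvnc console type it generally takes the form shown
+ below, where the token authorizes the console
+ session:
+ http://IP_ADDRESS:PORT/console?token=TOKEN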
+ Manage bare metal nodes
+ If you use the bare metal driver, you must create a bare
+ metal node and add a network interface to it. You then launch
+ an instance from a bare metal image. You can list and delete
+ bare metal nodes. When you delete a node, any associated
+ network interfaces are removed. You can list and remove
+ network interfaces that are associated with a bare metal
+ node.
+ Commands
+
+
+ baremetal-interface-add
+
+
+ Adds a network interface to a bare metal node.
+
+
+ baremetal-interface-list
+
+
+ Lists network interfaces associated with a bare metal
+ node.
+
+
+ baremetal-interface-remove
+
+
+ Removes a network interface from a bare metal
+ node.
+
+
+ baremetal-node-create
+
+
+ Creates a bare metal node.
+
+
+ baremetal-node-delete
+
+
+ Removes a bare metal node and any associated
+ interfaces.
+
+
+ baremetal-node-list
+
+
+ Lists available bare metal nodes.
+
+
+ baremetal-node-show
+
+
+ Shows information about a bare metal node.
+
+
+ To manage bare metal nodes
+
+
+ Create a bare metal node.
+
+
+ $ nova baremetal-node-create --pm_address=1.2.3.4
+ --pm_user=ipmi --pm_password=ipmi $(hostname -f) 1 512 10
+ aa:bb:cc:dd:ee:ff
+
+
+ Add network interface information to the node:
+
+
+ $ nova baremetal-interface-add 1
+ aa:bb:cc:dd:ee:ff
+
+
+ Launch an instance from a bare metal image:
+
+
+ $ nova boot --image my-baremetal-image --flavor
+ my-baremetal-flavor test
+
+
+ ... wait for instance to become active ...
+
+
+ You can list bare metal nodes and interfaces. When a
+ node is in use, its status includes the UUID of the
+ instance that runs on it:
+
+
+ $ nova baremetal-node-list
+
+
+ Show details about a bare metal node:
+
+
+ $ nova baremetal-node-show 1
+
+
+ Show usage statistics for hosts and instances
+ You can show basic statistics on resource usage for hosts
+ and instances.
+ To show host usage statistics
+
+
+ List the hosts and the nova-related services that run
+ on them:
+
+
+ $ nova host-list
+
+
+ Get a summary of resource usage of all of the
+ instances running on the host.
+
+
+ $ nova host-describe devstack-grizzly
+
+
+ The cpu column shows the sum of the virtual CPUs for
+ instances running on the host.
+
+
+ The memory_mb column shows the sum of the memory (in
+ MB) allocated to the instances that run on the
+ hosts.
+
+
+ The disk_gb column shows the sum of the root and
+ ephemeral disk sizes (in GB) of the instances that run on
+ the hosts.
+
+
+ To show instance usage statistics
+
+
+ Get CPU, memory, I/O, and network statistics for an
+ instance.
+
+
+ First, list instances:
+
+
+ $ nova list
+
+
+ Then, get diagnostic statistics:
+
+
+ $ nova diagnostics myCirrosServer
+
+
+ Get summary statistics for each tenant:
+
+
+ $ nova usage-list
+
+
+ Usage from 2013-06-25 to 2013-07-24:
+
+
+ Create and manage networks
+ Before you run commands, set the following environment
+ variables:
+ export OS_USERNAME=admin
+ export OS_PASSWORD=password
+ export OS_TENANT_NAME=admin
+ export OS_AUTH_URL=http://localhost:5000/v2.0
+ To create and manage networks
+
+
+ List the extensions of the system:
+
+
+ $ neutron ext-list -c alias -c name
+
+
+ Create a network:
+
+
+ $ neutron net-create net1
+
+
+ Created a new network:
+
+
+ Create a network with specified provider network
+ type:
+
+
+ $ neutron net-create net2 --provider:network-type
+ local
+
+
+ Created a new network:
+
+
+ As shown above, the unknown option
+ --provider:network-type is used to create a local provider
+ network.
+
+
+ Create a subnet:
+
+
+ $ neutron subnet-create net1 192.168.2.0/24 --name
+ subnet1
+
+
+ Created a new subnet:
+
+
+ In the previous command, net1 is the network name and
+ 192.168.2.0/24 is the subnet's CIDR; both are positional
+ arguments. --name subnet1 is an unknown option, which
+ specifies the subnet's name.
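+
+
+ Additional options can be passed in the same way. For
+ example, a gateway address for the subnet might be set
+ like this (the address is illustrative):
+
+
+ $ neutron subnet-create net1 192.168.2.0/24 --name
+ subnet1 --gateway 192.168.2.1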
+
+
+ Create a port with specified IP address:
+
+
+ $ neutron port-create net1 --fixed-ip
+ ip_address=192.168.2.40
+
+
+ Created a new port:
+
+
+ In the previous command, net1 is the network name, which
+ is a positional argument. --fixed-ip ip_address=192.168.2.40
+ is an option, which specifies the fixed IP address we want
+ for the port.
+
+
+ Create a port without specified IP address:
+
+
+ $ neutron port-create net1
+
+
+ Created a new port:
+
+
+ As you can see, the system allocates an IP address
+ automatically if you do not specify one on the command line.
+
+
+ Query ports with specified fixed IP addresses:
+
+
+ $ neutron port-list --fixed-ips ip_address=192.168.2.2
+ ip_address=192.168.2.40
+
+
+ Here, --fixed-ips ip_address=192.168.2.2
+ ip_address=192.168.2.40 is a single unknown option.
+
+
+ How do you find unknown options? The unknown options can be
+ easily found by watching the output of a create_xxx or
+ show_xxx command. For example, in the port creation output,
+ you can see the fixed_ips field, which can be used as an unknown
+ option.
+
+
+ Create and manage stacks
+ To create a stack from an example template file
+
+
+ To create a stack from an example template file, run the
+ following command:
+
+
+ $ heat stack-create mystack
+ --template-file=/path/to/heat/templates/WordPress_Single_Instance.template
+ --parameters="InstanceType=m1.large;DBUsername=wp;DBPassword=verybadpassword;KeyName=heat_key;LinuxDistribution=F17"
+
+
+ The --parameters values that you specify depend on which
+ parameters are defined in the template. If the template file
+ is hosted on a website, you can specify the URL with
+ --template-url parameter instead of the --template-file
+ parameter.
+
+
+ The command returns the following output:
+
+
+ You can also use the stack-create command to validate a
+ template file without creating a stack from it.
+
+
+ To do so, run the following command:
+
+
+ $ heat stack-create mystack
+ --template-file=/path/to/heat/templates/WordPress_Single_Instance.template
+
+
+ If validation fails, the response returns an error
+ message.
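+
+
+ As noted earlier, if the template file is hosted on a
+ website you can point the client at its URL instead of a
+ local path. A sketch of such an invocation, with an
+ illustrative URL, might be:
+
+
+ $ heat stack-create mystack
+ --template-url=http://example.com/templates/WordPress_Single_Instance.template
+ --parameters="InstanceType=m1.large;DBUsername=wp;DBPassword=verybadpassword;KeyName=heat_key;LinuxDistribution=F17"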
+
+
+ To list stacks
+
+
+ To see which stacks are visible to the current user, run
+ the following command:
+
+
+ $ heat stack-list
+
+
+ To view stack details
+ To explore the state and history of a particular stack, you
+ can run a number of commands.
+
+
+ To show the details of a stack, run the following
+ command:
+
+
+ $ heat stack-show mystack
+
+
+ A stack consists of a collection of resources. To list
+ the resources, including their status, in a stack, run the
+ following command:
+
+
+ $ heat resource-list mystack
+
+
+ To show the details for the specified resource in a
+ stack, run the following command:
+
+
+ $ heat resource-show mystack WikiDatabase
+
+
+ Some resources have associated metadata which can change
+ throughout the life-cycle of a resource:
+
+
+ $ heat resource-metadata mystack WikiDatabase
+
+
+ A series of events is generated during the life-cycle of
+ a stack. To display those events, run the following command:
+
+
+ $ heat event-list mystack
+
+
+ To show the details for a particular event, run the
+ following command:
+
+
+ $ heat event-show WikiDatabase 1
+
+
+ To update a stack
+
+
+ To update an existing stack from a modified template
+ file, run a command like the following command:
+
+
+ $ heat stack-update mystack
+ --template-file=/path/to/heat/templates/WordPress_Single_Instance_v2.template
+ --parameters="InstanceType=m1.large;DBUsername=wp;DBPassword=verybadpassword;KeyName=heat_key;LinuxDistribution=F17"
+
+
+ Some resources are updated in-place, while others are
+ replaced with new resources.
+
+
diff --git a/doc/training-guide/module001-ch007-keystone-arch.xml b/doc/training-guide/module001-ch007-keystone-arch.xml
index 3d71f7e74c..5a5cfa2638 100644
--- a/doc/training-guide/module001-ch007-keystone-arch.xml
+++ b/doc/training-guide/module001-ch007-keystone-arch.xml
@@ -6,4 +6,193 @@
xml:id="module001-ch007-keystone-arch">
Ketstone ArchitectureMore Content To be Added ...
+
+ Identity Service Concepts
+ The Identity service performs the following
+ functions:
+
+
+ User management. Tracks users and their
+ permissions.
+
+
+ Service catalog. Provides a catalog of available
+ services with their API endpoints.
+
+
+ To understand the Identity Service, you must understand the
+ following concepts:
+
+ User
+ Digital representation of a person, system, or service who
+ uses OpenStack cloud services. Identity authentication
+ services validate that incoming requests are being made by
+ the user who claims to be making the call. Users have a login
+ and may be assigned tokens to access resources. Users may be
+ directly assigned to a particular tenant and behave as if they
+ are contained in that tenant.
+
+
+ Credentials
+ Data that is known only by a user that proves who they
+ are. In the Identity Service, examples are:
+
+
+ Username and password
+
+
+ Username and API key
+
+
+ An authentication token provided by the Identity
+ Service
+
+
+
+
+ Authentication
+ The act of confirming the identity of a user. The Identity
+ Service confirms an incoming request by validating a set of
+ credentials supplied by the user. These credentials are
+ initially a username and password or a username and API key.
+ In response to these credentials, the Identity Service issues
+ the user an authentication token, which the user provides in
+ subsequent requests.
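+ For example, with the legacy keystone command-line
+ client you can request such a token directly, assuming the
+ usual OS_* environment variables are set:
+ $ keystone token-get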
+
+
+ Token
+ An arbitrary bit of text that is used to access resources.
+ Each token has a scope that describes which resources are
+ accessible with it. A token may be revoked at any time and is
+ valid for a finite duration.
+ While the Identity Service supports token-based
+ authentication in this release, the intention is for it to
+ support additional protocols in the future. The intent is for
+ it to be an integration service foremost, and not aspire to be
+ a full-fledged identity store and management solution.
+
+
+ Tenant
+ A container used to group or isolate resources and/or
+ identity objects. Depending on the service operator, a tenant
+ may map to a customer, account, organization, or
+ project.
+
+
+ Service
+ An OpenStack service, such as Compute (Nova), Object
+ Storage (Swift), or Image Service (Glance). Provides one or
+ more endpoints through which users can access resources and
+ perform operations.
+
+
+ Endpoint
+ A network-accessible address, usually described by a URL,
+ from which you access a service. If using an extension for
+ templates, you can create an endpoint template, which
+ represents the templates of all the consumable services that
+ are available across the regions.
+
+
+ Role
+ A personality that a user assumes that enables them to
+ perform a specific set of operations. A role includes a set of
+ rights and privileges. A user assuming that role inherits
+ those rights and privileges.
+ In the Identity Service, a token that is issued to a user
+ includes the list of roles that user can assume. Services that
+ are being called by that user determine how they interpret the
+ set of roles a user has and which operations or resources each
+ role grants access to.
+
+
+
+
+ User management
+ The main components of Identity user management are:
+
+
+ Users
+
+
+ Tenants
+
+
+ Roles
+
+
+ A user represents a human user, and has associated
+ information such as username, password, and email. This example
+ creates a user named "alice":
+ $ keystone user-create --name=alice --pass=mypassword123
+ --email=alice@example.com
+ A tenant can be a project, group, or organization. Whenever
+ you make requests to OpenStack services, you must specify a
+ tenant. For example, if you query the Compute service for a list
+ of running instances, you will receive a list of all of the
+ running instances in the tenant you specified in your query.
+ This example creates a tenant named "acme":
+ $ keystone tenant-create --name=acme
+ A role captures what operations a user is permitted to
+ perform in a given tenant. This example creates a role named
+ "compute-user":
+ $ keystone role-create --name=compute-user
+ The Identity service associates a user with a tenant and a
+ role. To continue with our previous examples, we may wish to
+ assign the "alice" user the "compute-user" role in the "acme"
+ tenant:
+ $ keystone user-list
+ $ keystone user-role-add --user=892585 --role=9a764e
+ --tenant-id=6b8fd2
+ A user can be assigned different roles in different tenants:
+ for example, Alice may also have the "admin" role in the
+ "Cyberdyne" tenant. A user can also be assigned multiple roles
+ in the same tenant.
+ The /etc/[SERVICE_CODENAME]/policy.json file controls what
+ users are allowed to do for a given service. For example,
+ /etc/nova/policy.json specifies the access policy for the
+ Compute service, /etc/glance/policy.json specifies the access
+ policy for the Image service, and /etc/keystone/policy.json
+ specifies the access policy for the Identity service.
+ The default policy.json files in the Compute, Identity, and
+ Image services recognize only the admin role: all operations that
+ do not require the admin role are accessible by any user
+ that has any role in a tenant.
+ If you wish to restrict users from performing operations in,
+ say, the Compute service, you need to create a role in the
+ Identity service and then modify /etc/nova/policy.json so that
+ this role is required for Compute operations.
+ For example, this line in /etc/nova/policy.json specifies
+ that there are no restrictions on which users can create
+ volumes: if the user has any role in a tenant, they will be able
+ to create volumes in that tenant.
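+ For illustration only, such a rule appears in
+ /etc/nova/policy.json as a key mapped to an empty rule
+ list; depending on your release it might look like
+ this:
+ "volume:create": [],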
+
+
+
+
+
+ Service
+ management
+ The Identity Service provides the following service
+ management functions:
+
+
+ Services
+
+
+ Endpoints
+
+
+ The Identity Service also maintains a user that corresponds
+ to each service (such as a user named nova for the Compute
+ service) and a special service tenant, which is called
+ service.
+ The commands for creating services and endpoints are
+ described in a later section.
+
diff --git a/doc/training-guide/module001-ch008-queues-messaging.xml b/doc/training-guide/module001-ch008-queues-messaging.xml
index 75aef982ce..10285cb1bf 100644
--- a/doc/training-guide/module001-ch008-queues-messaging.xml
+++ b/doc/training-guide/module001-ch008-queues-messaging.xml
@@ -1,9 +1,440 @@
-
+OpenStack Messaging and Queues
- More Content To be Added ...
+
+ AMQP is the messaging technology chosen by the OpenStack
+ cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any
+ two Nova components and allows them to communicate in a loosely
+ coupled fashion. More precisely, Nova components (the compute
+ fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter)
+ to communicate to one another; however such a paradigm is built
+ atop the publish/subscribe paradigm so that the following benefits
+ can be achieved:
+
+
+ Decoupling between client and servant (the client
+ does not need to know where the servant’s reference
+ is).
+
+
+ Full asynchronism between client and servant (the
+ client does not need the servant to be running at the time
+ of the remote call).
+
+
+ Random balancing of remote calls (if several servants
+ are up and running, one-way calls are transparently dispatched
+ to the first available servant).
+
+
+ Nova uses direct, fanout, and topic-based exchanges. The
+ architecture looks like the one depicted in the figure
+ below:
+
+ Nova implements RPC (both request+response, and one-way,
+ respectively nicknamed ‘rpc.call’ and ‘rpc.cast’) over AMQP by
+ providing an adapter class which takes care of marshaling and
+ unmarshaling of messages into function calls. Each Nova service
+ (for example Compute, Scheduler, etc.) creates two queues at
+ initialization time, one which accepts messages with routing keys
+ ‘NODE-TYPE.NODE-ID’ (for example compute.hostname) and another,
+ which accepts messages with routing keys as generic ‘NODE-TYPE’
+ (for example compute). The former is used specifically when
+ Nova-API needs to redirect commands to a specific node like
+ ‘euca-terminate instance’. In this case, only the compute node
+ whose host’s hypervisor is running the virtual machine can kill
+ the instance. The API acts as a consumer when RPC calls are
+ request/response, otherwise it acts as a publisher only.
+ Nova RPC Mappings
+ The figure below shows the internals of a message broker
+ node (referred to as a RabbitMQ node in the diagrams) when a
+ single instance is deployed and shared in an OpenStack cloud.
+ Every Nova component connects to the message broker and,
+ depending on its personality (for example a compute node or a
+ network node), may use the queue either as an Invoker (such as
+ API or Scheduler) or a Worker (such as Compute or Network).
+ Invokers and Workers do not actually exist in the Nova object
+ model, but we are going to use them as an abstraction for the
+ sake of clarity. An Invoker is a component that sends messages in
+ the queuing system via two operations: (1) rpc.call and (2)
+ rpc.cast; a Worker is a component that receives messages from the
+ queuing system and replies accordingly to rpc.call operations.
+ Figure 2 shows the following internal elements:
+
+
+ Topic Publisher: a Topic
+ Publisher comes to life when an rpc.call or an rpc.cast
+ operation is executed; this object is instantiated and used to
+ push a message to the queuing system. Every publisher always
+ connects to the same topic-based exchange; its life-cycle is
+ limited to the message delivery.
+
+
+ Direct Consumer: a
+ Direct Consumer comes to life if (and only if) an rpc.call
+ operation is executed; this object is instantiated and used to
+ receive a response message from the queuing system; every
+ consumer connects to a unique direct-based exchange via a
+ unique exclusive queue; its life-cycle is limited to the
+ message delivery; the exchange and queue identifiers are
+ determined by a UUID generator, and are marshaled in the
+ message sent by the Topic Publisher (only rpc.call
+ operations).
+
+
+ Topic Consumer: a Topic
+ Consumer comes to life as soon as a Worker is instantiated and
+ exists throughout its life-cycle; this object is used to
+ receive messages from the queue and it invokes the appropriate
+ action as defined by the Worker role. A Topic Consumer
+ connects to the same topic-based exchange either via a shared
+ queue or via a unique exclusive queue. Every Worker has two
+ topic consumers, one that is addressed only during rpc.cast
+ operations (and it connects to a shared queue whose exchange
+ key is ‘topic’) and the other that is addressed only during
+ rpc.call operations (and it connects to a unique queue whose
+ exchange key is ‘topic.host’).
+
+
+ Direct Publisher: a
+ Direct Publisher comes to life only during rpc.call operations
+ and it is instantiated to return the message required by the
+ request/response operation. The object connects to a
+ direct-based exchange whose identity is dictated by the
+ incoming message.
+
+
+ Topic Exchange: The
+ Exchange is a routing table that exists in the context of a
+ virtual host (the multi-tenancy mechanism provided by Qpid or
+ RabbitMQ); its type (such as topic vs. direct) determines the
+ routing policy; a message broker node will have only one
+ topic-based exchange for every topic in Nova.
+
+
+ Direct Exchange: this is
+ a routing table that is created during rpc.call operations;
+ there are many instances of this kind of exchange throughout
+ the life-cycle of a message broker node, one for each rpc.call
+ invoked.
+
+
+ Queue Element: a Queue
+ is a message bucket. Messages are kept in the queue until a
+ Consumer (either Topic or Direct Consumer) connects to the
+ queue and fetches them. Queues can be shared or can be exclusive.
+ Queues whose routing key is ‘topic’ are shared amongst Workers
+ of the same personality.
+
+
+
+ RPC Calls
+ The diagram below shows the message flow during an
+ rpc.call operation:
+
+
+ a Topic Publisher is instantiated to send the message
+ request to the queuing system; immediately before the
+ publishing operation, a Direct Consumer is instantiated to
+ wait for the response message.
+
+
+ once the message is dispatched by the exchange, it is
+ fetched by the Topic Consumer dictated by the routing key
+ (such as ‘topic.host’) and passed to the Worker in charge of
+ the task.
+
+
+ once the task is completed, a Direct Publisher is
+ allocated to send the response message to the queuing
+ system.
+
+
+ once the message is dispatched by the exchange, it is
+ fetched by the Direct Consumer dictated by the routing key
+ (such as ‘msg_id’) and passed to the Invoker.
+
+
+
+ RPC Casts
+ The diagram below shows the message flow during an
+ rpc.cast operation:
+
+
+ A Topic Publisher is instantiated to send the message
+ request to the queuing system.
+
+
+ Once the message is dispatched by the exchange, it is
+ fetched by the Topic Consumer dictated by the routing key
+ (such as ‘topic’) and passed to the Worker in charge of the
+ task.
+
+
+
+ AMQP Broker Load
+ At any given time the load of a message broker node running
+ either Qpid or RabbitMQ is a function of the following
+ parameters:
+
+
+ Throughput of API calls: the number of API calls (more
+ precisely rpc.call ops) being served by the OpenStack cloud
+ dictates the number of direct-based exchanges, related
+ queues and direct consumers connected to them.
+
+
+ Number of Workers: there is one queue shared amongst
+ workers with the same personality; however there are as many
+ exclusive queues as the number of workers; the number of
+ workers dictates also the number of routing keys within the
+ topic-based exchange, which is shared amongst all
+ workers.
+
+
+ The figure below shows the status of a RabbitMQ node after
+ Nova components’ bootstrap in a test environment. Exchanges and
+ queues being created by Nova components are:
+
+
+ Exchanges
+
+
+
+
+ nova (topic exchange)
+
+
+
+
+ Queues
+
+
+
+
+ compute.phantom (phantom is hostname)
+
+
+ compute
+
+
+ network.phantom (phantom is hostname)
+
+
+ network
+
+
+ scheduler.phantom (phantom is hostname)
+
+
+ scheduler
+
+
+ RabbitMQ Gotchas
+ Nova uses Kombu to connect to the RabbitMQ environment.
+ Kombu is a Python library that in turn uses AMQPLib, a library
+ that implements the standard AMQP 0.8 at the time of writing.
+ When using Kombu, Invokers and Workers need the following
+ parameters in order to instantiate a Connection object that
+ connects to the RabbitMQ server (please note that most of the
+ following material can be also found in the Kombu documentation;
+ it has been summarized and revised here for sake of
+ clarity):
+
+
+ Hostname: The hostname
+ of the AMQP server.
+
+
+ Userid: A valid
+ username used to authenticate to the server.
+
+
+ Password: The password
+ used to authenticate to the server.
+
+
+ Virtual_host: The name
+ of the virtual host to work with. This virtual host must exist
+ on the server, and the user must have access to it. Default is
+ “/”.
+
+
+ Port: The port of the
+ AMQP server. Default is 5672 (amqp).
+
+
+ The following parameters are default:
+
+
+ Insist: insist on
+ connecting to a server. In a configuration with multiple
+ load-sharing servers, the Insist option tells the server that
+ the client is insisting on a connection to the specified
+ server. Default is False.
+
+
+ Connect_timeout: the
+ timeout in seconds before the client gives up connecting to
+ the server. The default is no timeout.
+
+
+ SSL: use SSL to connect
+ to the server. The default is False.
+
+
+ More precisely Consumers need the following
+ parameters:
+
+
+ Connection: the above
+ mentioned Connection object.
+
+
+ Queue: name of the
+ queue.
+
+
+ Exchange: name of the
+ exchange the queue binds to.
+
+
+ Routing_key: the
+ interpretation of the routing key depends on the value of the
+ exchange_type attribute.
+
+
+
+
+ Direct exchange: if the
+ routing key property of the message and the routing_key
+ attribute of the queue are identical, then the message is
+ forwarded to the queue.
+
+
+ Fanout exchange:
+ messages are forwarded to the queues bound to
+ the exchange, even if the binding does not have a key.
+
+
+ Topic exchange: if the
+ routing key property of the message matches the routing key of
+ the queue according to a primitive pattern matching scheme, then
+ the message is forwarded to the queue. The message routing key
+ then consists of words separated by dots (“.”, like domain
+ names), and two special characters are available: star (“*”)
+ and hash (“#”). The star matches any word, and the hash
+ matches zero or more words. For example “*.stock.#” matches the
+ routing keys “usd.stock” and “eur.stock.db” but not
+ “stock.nasdaq”.
+
+
+
+
+ Durable: this flag
+ determines the durability of both exchanges and queues;
+ durable exchanges and queues remain active when a RabbitMQ
+ server restarts. Non-durable exchanges/queues (transient
+ exchanges/queues) are purged when a server restarts. It is
+ worth noting that AMQP specifies that durable queues cannot
+ bind to transient exchanges. Default is True.
+
+
+ Auto_delete: if set, the
+ exchange is deleted when all queues have finished using it.
+ Default is False.
+
+
+ Exclusive: exclusive
+ queues (that is, non-shared) may only be consumed from by the
+ current connection. When exclusive is on, this also implies
+ auto_delete. Default is False.
+
+
+ Exchange_type: AMQP
+ defines several default exchange types (routing algorithms)
+ that cover most of the common messaging use cases.
+
+
+ Auto_ack: acknowledgement is handled automatically
+ once messages are received. By default auto_ack is set to
+ False, and the receiver is required to manually handle
+ acknowledgment.
+
+
+ No_ack: disables
+ acknowledgement on the server side. This is different from
+ auto_ack in that acknowledgement is turned off altogether.
+ This functionality increases performance but at the cost of
+ reliability. Messages can get lost if a client dies before it
+ can deliver them to the application.
+
+
+ Auto_declare: if this is
+ True and the exchange name is set, the exchange will be
+ automatically declared at instantiation. Auto declare is on by
+ default. Publishers specify most of the parameters of Consumers
+ (though they do not specify a queue name), but they can also
+ specify the following:
+
+
+ Delivery_mode: the
+ default delivery mode used for messages. The value is an
+ integer. The following delivery modes are supported by
+ RabbitMQ:
+
+
+
+
+ 1 or “transient”: the
+ message is transient, which means it is stored in memory only
+ and is lost if the server dies or restarts.
+
+
+ 2 or “persistent”: the
+ message is persistent, which means the message is stored both
+ in memory and on disk, and therefore is preserved if the server
+ dies or restarts.
+
+
+ The default value is 2 (persistent). During a send
+ operation, Publishers can override the delivery mode of messages
+ so that, for example, transient messages can be sent over a
+ durable queue.
diff --git a/doc/training-guide/module001-ch009-vm-placement.xml b/doc/training-guide/module001-ch009-vm-placement.xml
index e0f5163667..6d0a49bae6 100644
--- a/doc/training-guide/module001-ch009-vm-placement.xml
+++ b/doc/training-guide/module001-ch009-vm-placement.xml
@@ -5,5 +5,203 @@
version="5.0"
xml:id="module001-ch009-vm-placement">
VM Placement
- To be Added
+ Compute uses the nova-scheduler service to determine how to
+ dispatch compute and volume requests. For example, the
+ nova-scheduler service determines which host a VM should launch
+ on. The term host in the context of filters means a physical node
+ that has a nova-compute service running on it. You can configure
+ the scheduler through a variety of options.
+
+ As shown in the figure above, nova-scheduler interacts with
+ other components through the queue and the central database. For
+ scheduling, the queue is the essential communication hub.
+ All compute nodes (also known as hosts in terms of OpenStack)
+ periodically publish their status, resources available and
+ hardware capabilities to nova-scheduler through the queue.
+ nova-scheduler then collects this data and uses it to make
+ decisions when a request comes in.
+ By default, the compute scheduler is configured as a filter
+ scheduler, as described in the next section. In the default
+ configuration, this scheduler considers hosts that meet all the
+ following criteria:
+
+
+ Are in the requested availability zone
+ (AvailabilityZoneFilter).
+
+
+ Have sufficient RAM available (RamFilter).
+
+
+ Are capable of servicing the request
+ (ComputeFilter).
+
+
+
+
+ Filter Scheduler
+ The Filter Scheduler supports filtering and weighting to
+ make informed decisions on where a new instance should be created.
+ This Scheduler supports only working with Compute Nodes.
+
+ Filtering
+
+ During its work, the Filter Scheduler first makes a
+ dictionary of unfiltered hosts, then filters them using filter
+ properties, and finally chooses hosts for the requested number of
+ instances (each time it chooses the most weighted host and
+ appends it to the list of selected hosts).
+ If it turns out that it cannot find candidates for the next
+ instance, it means that there are no more appropriate hosts
+ where the instance could be scheduled.
+ Filtering and weighting are quite flexible in the Filter
+ Scheduler. There are many filtering strategies for the
+ Scheduler to support, and you can even implement your own
+ filtering algorithm.
+ There are some standard filter classes to use
+ (nova.scheduler.filters):
+
+
+ AllHostsFilter - frankly speaking, this filter does no
+ operation. It passes all the available hosts.
+
+
+ ImagePropertiesFilter - filters hosts based on
+ properties defined on the instance’s image. It passes
+ hosts that can support the specified image properties
+ contained in the instance.
+
+
+ AvailabilityZoneFilter - filters hosts by availability
+ zone. It passes hosts matching the availability zone
+ specified in the instance properties.
+
+
+ ComputeCapabilitiesFilter - checks that the
+ capabilities provided by the host compute service satisfy
+ any extra specifications associated with the instance
+ type. It passes hosts that can create the specified
+ instance type.
+
+
+ The extra specifications can have a scope at the
+ beginning of the key string of a key/value pair. The scope
+ format is scope:key and can be nested, i.e. key_string :=
+ scope:key_string. For example, capabilities:cpu_info:features
+ is a valid scope format. A key string without any : is
+ non-scope format. Each filter defines its valid scope,
+ and not all filters accept non-scope format.
+
+
+ The extra specifications can have an operator at the
+ beginning of the value string of a key/value pair. If
+ there is no operator specified, then a default operator of
+ s== is used. Valid operators are:
+
+
+ * = (equal to or greater than as a number; same as vcpus case)
+ * == (equal to as a number)
+ * != (not equal to as a number)
+ * >= (greater than or equal to as a number)
+ * <= (less than or equal to as a number)
+ * s== (equal to as a string)
+ * s!= (not equal to as a string)
+ * s>= (greater than or equal to as a string)
+ * s> (greater than as a string)
+ * s<= (less than or equal to as a string)
+ * s< (less than as a string)
+ * <in> (substring)
+ * <or> (find one of these)
+ Examples are: ">= 5", "s== 2.1.0", "<in> gcc",
+ and "<or> fpu <or> gpu"
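+ As a concrete illustration, here is (approximately) the
+ RamFilter that ships with the Compute scheduler: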
+ class RamFilter(filters.BaseHostFilter):
+ """Ram Filter with over subscription flag"""
+
+ def host_passes(self, host_state, filter_properties):
+ """Only return hosts with sufficient available RAM."""
+
+ instance_type = filter_properties.get('instance_type')
+ requested_ram = instance_type['memory_mb']
+ free_ram_mb = host_state.free_ram_mb
+ total_usable_ram_mb = host_state.total_usable_ram_mb
+ used_ram_mb = total_usable_ram_mb - free_ram_mb
+ return total_usable_ram_mb * FLAGS.ram_allocation_ratio - used_ram_mb >= requested_ram
+ Here ram_allocation_ratio means the virtual RAM to
+ physical RAM allocation ratio (it is 1.5 by default). Really,
+ nice and simple.
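+ If you need a different ratio, it can be set in
+ nova.conf; a minimal sketch, with the value shown being
+ simply the documented default:
+ ram_allocation_ratio=1.5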
+ The next standard filter to describe is the
+ AvailabilityZoneFilter, and it is not difficult either. This
+ filter simply compares the availability zone of the compute node
+ with the availability zone from the properties of the request.
+ Each compute service has its own availability zone, so deployment
+ engineers have the option to run the scheduler with availability
+ zone support and can configure availability zones on each compute
+ host. This class's host_passes method returns True if the
+ availability zone mentioned in the request is the same as the one
+ on the current compute host.
+ The ImagePropertiesFilter filters hosts based on the
+ architecture, hypervisor type, and virtual machine mode
+ specified in the instance. For example, an instance might require
+ a host that supports the arm architecture on a qemu compute
+ host. The ImagePropertiesFilter will only pass hosts that can
+ satisfy this request. These instance properties are populated
+ from properties defined on the instance’s image, for example by
+ decorating an image with glance image-update img-uuid --property
+ architecture=arm --property hypervisor_type=qemu. Only hosts that
+ satisfy these requirements will pass the
+ ImagePropertiesFilter.
+ ComputeCapabilitiesFilter checks if the host satisfies any
+ extra_specs specified on the instance type. The extra_specs
+ can contain key/value pairs. The key for the filter is either
+ non-scope format (i.e. no : contained), or scope format in
+ capabilities scope (i.e. capabilities:xxx:yyy). One example of
+ capabilities scope is capabilities:cpu_info:features, which
+ will match host’s cpu features capabilities. The
+ ComputeCapabilitiesFilter will only pass hosts whose
+ capabilities satisfy the requested specifications. All hosts
+ are passed if no extra_specs are specified.
+ ComputeFilter is quite simple and passes any host whose
+ compute service is enabled and operational.
+ Next is the IsolatedHostsFilter. There can be some
+ special hosts reserved for specific images. These hosts are
+ called isolated, and the images allowed to run on the isolated
+ hosts are also called isolated. This filter checks whether the
+ image_isolated flag named in the instance specifications matches
+ the one that the host has.
+
+
+
+ Weights
+ The Filter Scheduler uses so-called weights during its
+ work.
+ The Filter Scheduler weights hosts based on the config
+ option scheduler_weight_classes, which defaults to
+ nova.scheduler.weights.all_weighers; this selects the only
+ weigher available – the RamWeigher. Hosts are then weighted and
+ sorted with the largest weight winning.
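+ A minimal nova.conf sketch of the corresponding
+ setting; because this is the stated default, you normally
+ do not need to set it explicitly:
+ scheduler_weight_classes=nova.scheduler.weights.all_weighers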
+ The Filter Scheduler finds a local list of acceptable hosts
+ by repeated filtering and weighing. Each time it chooses a host,
+ it virtually consumes resources on it, so subsequent selections
+ can adjust accordingly. This is useful when a customer asks for
+ a large number of instances, because a weight is computed for
+ each instance requested.
+
+ In the end, the Filter Scheduler sorts the selected hosts by
+ their weight and provisions instances on them.
diff --git a/doc/training-guide/module001-ch010-vm-provisioning-indepth.xml b/doc/training-guide/module001-ch010-vm-provisioning-indepth.xml
index 55446ac572..dd5dd57303 100644
--- a/doc/training-guide/module001-ch010-vm-provisioning-indepth.xml
+++ b/doc/training-guide/module001-ch010-vm-provisioning-indepth.xml
@@ -6,4 +6,117 @@
xml:id="module001-ch010-vm-provisioning-indepth">
VM Provisioning IndepthMore Content To be Added ...
+ The request flow for provisioning an Instance goes like
+ this:
+
+
+ The Dashboard or CLI gets the user credentials and
+ authenticates with Keystone via the REST API.
+
+
+ Keystone authenticates the credentials and generates and
+ sends back an auth token, which is used for sending requests to
+ other components through REST calls.
+
+
+ The Dashboard or CLI converts the new instance request
+ specified in the ‘launch instance’ or ‘nova boot’ form into a
+ REST API request and sends it to nova-api.
+
+
+ nova-api receives the request and sends a request to
+ Keystone to validate the auth token and access
+ permissions.
+
+
+ Keystone validates the token and sends updated auth headers
+ with roles and permissions.
+
+
+ nova-api interacts with nova-database.
+
+
+ Creates initial db entry for new instance.
+
+
+ nova-api sends the rpc.call request to nova-scheduler,
+ expecting to get an updated instance entry with the host ID
+ specified.
+
+
+ nova-scheduler picks the request from the queue.
+
+
+ nova-scheduler interacts with nova-database to find an
+ appropriate host via filtering and weighing.
+
+
+ Returns the updated instance entry with the appropriate host
+ ID after filtering and weighing.
+ nova-scheduler sends the rpc.cast request to nova-compute to
+ launch the instance on the appropriate host.
+
+
+ nova-compute picks the request from the queue.
+
+
+ nova-compute sends the rpc.call request to nova-conductor
+ to fetch the instance information such as host ID and flavor
+ (RAM, CPU, disk).
+
+
+ nova-conductor picks the request from the queue.
+
+
+ nova-conductor interacts with nova-database.
+
+
+ Returns the instance information.
+ nova-compute picks the instance information from the
+ queue.
+
+
+ nova-compute does the REST call by passing the auth token to
+ glance-api to get the image URI by image ID from Glance and
+ retrieves the image from the image store.
+
+
+ glance-api validates the auth-token with keystone.
+
+
+ nova-compute gets the image metadata.
+ nova-compute does the REST call by passing the auth token to
+ the Network API to allocate and configure the network so that
+ the instance gets an IP address.
+
+
+ quantum-server validates the auth-token with
+ keystone.
+
+
+ nova-compute gets the network info.
+
+
+ nova-compute does the REST call by passing auth-token to
+ Volume API to attach volumes to instance.
+
+
+ cinder-api validates the auth-token with keystone.
+
+
+ nova-compute gets the block storage info.
+
+
+ nova-compute generates data for the hypervisor driver and
+ executes the request on the hypervisor (via libvirt or the
+ hypervisor API).
+
+
+
diff --git a/doc/training-guide/module001-ch011-block-storage.xml b/doc/training-guide/module001-ch011-block-storage.xml
index b08f384c6b..2c1aaa9eda 100644
--- a/doc/training-guide/module001-ch011-block-storage.xml
+++ b/doc/training-guide/module001-ch011-block-storage.xml
@@ -1,9 +1,251 @@
OpenStack Block Storage
- To be Added
-
+ Block Storage and OpenStack
+ Compute
+ OpenStack provides two classes of block storage: "ephemeral"
+ storage and persistent "volumes". Ephemeral storage exists only
+ for the life of an instance; it persists across reboots of the
+ guest operating system, but when the instance is deleted, so is
+ the associated storage. All instances have some ephemeral storage.
+ Volumes are persistent virtualized block devices independent of
+ any particular instance. Volumes may be attached to a single
+ instance at a time, but may be detached or reattached to a
+ different instance while retaining all data, much like a USB
+ drive.
+ Ephemeral Storage
+ Ephemeral storage is associated with a single unique instance.
+ Its size is defined by the flavor of the instance.
+ Data on ephemeral storage ceases to exist when the instance it
+ is associated with is terminated. Rebooting the VM or restarting
+ the host server, however, will not destroy ephemeral data. In the
+ typical use case an instance's root filesystem is stored on
+ ephemeral storage. This is often an unpleasant surprise for people
+ unfamiliar with the cloud model of computing.
+ In addition to the ephemeral root volume all flavors except
+ the smallest, m1.tiny, provide an additional ephemeral block
+ device varying from 20G for the m1.small through 160G for the
+ m1.xlarge by default - these sizes are configurable. This is
+ presented as a raw block device with no partition table or
+ filesystem. Cloud aware operating system images may discover,
+ format, and mount this device. For example the cloud-init package
+ included in Ubuntu's stock cloud images will format this space as
+ an ext3 filesystem and mount it on /mnt. It is important to note
+ that this is a feature of the guest operating system. OpenStack
+ only provisions the raw storage.
+ Volume Storage
+ Volume storage is independent of any particular instance and
+ is persistent. Volumes are user created and within quota and
+ availability limits may be of any arbitrary size.
+ When first created volumes are raw block devices with no
+ partition table and no filesystem. They must be attached to an
+ instance to be partitioned and/or formatted. Once this is done
+ they may be used much like an external disk drive. Volumes may
+ be attached to only one instance at a time, but may be detached
+ and reattached to either the same or different instances.
+ It is possible to configure a volume so that it is bootable
+ and provides a persistent virtual instance similar to traditional
+ non-cloud based virtualization systems. In this use case the
+ resulting instance may still have ephemeral storage depending on
+ the flavor selected, but the root filesystem (and possibly others)
+ will be on the persistent volume and thus state will be maintained
+ even if the instance is shut down. Details of this configuration
+ are discussed in the OpenStack Clients Guide.
+ Volumes do not provide concurrent access from multiple
+ instances. For that you need either a traditional network
+ filesystem like NFS or CIFS or a cluster filesystem such as
+ GlusterFS. These may be built within an OpenStack cluster or
+ provisioned outside of it, but are not features provided by the
+ OpenStack software.
+ The OpenStack Block Storage service works via the interaction
+ of a series of daemon processes named cinder-* that reside
+ persistently on the host machine or machines. The binaries can all
+ be run from a single node, or spread across multiple nodes. They
+ can also be run on the same node as other OpenStack
+ services.
+ The current services available in OpenStack Block
+ Storage are:
+
+
+ cinder-api - The
+ cinder-api service is a WSGI app that authenticates and routes
+ requests throughout the Block Storage system. It supports the
+ OpenStack API's only, although there is a translation that can
+ be done via Nova's EC2 interface which calls in to the
+ cinderclient.
+
+
+
+
+ cinder-scheduler - The
+ cinder-scheduler is responsible for scheduling/routing
+ requests to the appropriate volume service. As of Grizzly,
+ depending upon your configuration this may be simple
+ round-robin scheduling to the running volume services, or it
+ can be more sophisticated through the use of the Filter
+ Scheduler. The Filter Scheduler is the default in Grizzly and
+ enables filtering on things like Capacity, Availability Zone,
+ Volume Types, and Capabilities as well as custom
+ filters.
+
+
+
+
+ cinder-volume - The
+ cinder-volume service is responsible for managing Block
+ Storage devices, specifically the back-end devices
+ themselves.
+
+
+
+
+ cinder-backup - The
+ cinder-backup service provides a means to back up a Cinder
+ Volume to OpenStack Object Store (SWIFT).
+
+
+ Introduction to OpenStack Block
+ Storage
+ OpenStack Block Storage provides persistent High Performance
+ Block Storage resources that can be consumed by OpenStack Compute
+ instances. This includes secondary attached storage similar to
+ Amazon's Elastic Block Storage (EBS). In addition, images can be
+ written to a Block Storage device and specified for OpenStack
+ Compute to use as a bootable persistent instance.
+ There are some differences from Amazon's EBS that one should
+ be aware of. OpenStack Block Storage is not a shared storage
+ solution like NFS, but currently is designed so that the device is
+ attached and in use by a single instance at a time.
+ Backend Storage Devices
+ OpenStack Block Storage requires some form of back-end storage
+ that the service is built on. The default implementation is to use
+ LVM on a local Volume Group named "cinder-volumes". In addition to
+ the base driver implementation, OpenStack Block Storage also
+ provides the means to add support for other storage devices to be
+ utilized such as external Raid Arrays or other Storage
+ appliances.
+ Users and Tenants (Projects)
+ The OpenStack Block Storage system is designed to be used by
+ many different cloud computing consumers or customers, basically
+ tenants on a shared system, using role-based access assignments.
+ Roles control the actions that a user is allowed to perform. In
+ the default configuration, most actions do not require a
+ particular role, but this is configurable by the system
+ administrator editing the appropriate policy.json file that
+ maintains the rules. A user's access to particular volumes is
+ limited by tenant, but the username and password are assigned per
+ user. Key pairs granting access to a volume are enabled per user,
+ but quotas to control resource consumption across available
+ hardware resources are per tenant.
+ For tenants, quota controls are available to limit
+ the:
+
+
+ Number of volumes which may be created
+
+
+ Number of snapshots which may be created
+
+
+ Total number of gigabytes allowed per tenant (shared
+ between snapshots and volumes)
+
+
+ Volumes Snapshots and Backups
+ This introduction provides a high level overview of the two
+ basic resources offered by the OpenStack Block Storage service.
+ The first is Volumes and the second is Snapshots which are derived
+ from Volumes.
+ Volumes
+ Volumes are allocated block storage resources that can be
+ attached to instances as secondary storage or they can be used as
+ the root store to boot instances. Volumes are persistent R/W Block
+ Storage devices most commonly attached to the Compute node via
+ iSCSI.
+ Snapshots
+ A Snapshot in OpenStack Block Storage is a read-only point in
+ time copy of a Volume. The Snapshot can be created from a Volume
+ that is currently in use (via the use of '--force True') or in an
+ available state. The Snapshot can then be used to create a new
+ volume via create from snapshot.
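+ For example, with the cinder client a snapshot of an
+ in-use volume and a new volume from that snapshot might be
+ created like this (IDs and names are illustrative):
+ $ cinder snapshot-create --force True --display-name
+ mysnap VOLUME_ID
+ $ cinder create --snapshot-id SNAPSHOT_ID --display-name
+ vol-from-snap 8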
+ Backups
+ A Backup is an archived copy of a Volume currently stored in
+ Object Storage (Swift).
+ Managing Volumes
+ Cinder is the OpenStack service that allows you to give extra
+ block level storage to your OpenStack Compute instances. You may
+ recognize this as a similar offering from Amazon EC2 known as
+ Elastic Block Storage (EBS). The default Cinder implementation is
+ an iSCSI solution that employs the use of Logical Volume Manager
+ (LVM) for Linux. Note that a volume may only be attached to one
+ instance at a time. This is not a ‘shared storage’ solution like
+ a SAN or NFS to which multiple servers can attach. It is also
+ important to note that Cinder includes a number of drivers to
+ allow you to use other vendors' back-end storage
+ devices in addition to or instead of the base LVM
+ implementation.
+ Here is a brief walk-through of a simple create/attach
+ sequence; example commands follow the list. Keep in mind this
+ requires proper configuration of both OpenStack Compute via
+ nova.conf and OpenStack Block Storage via cinder.conf.
+
+
+ The volume is created via cinder create, which creates
+ an LV in the volume group (VG) "cinder-volumes"
+
+
+ The volume is attached to an instance via nova
+ volume-attach, which creates a unique iSCSI IQN that is
+ exposed to the compute node
+
+
+ The compute node that runs the instance in question now
+ has an active iSCSI session and new local storage
+ (usually a /dev/sdX disk)
+
+
+ libvirt uses that local storage as storage for the
+ instance; the instance gets a new disk (usually a /dev/vdX
+ disk)
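+
+
+ For example, the create and attach steps above might be
+ performed with commands like these (the volume name, size,
+ instance ID, and device name are illustrative):
+
+
+ $ cinder create --display-name myvol 10
+
+
+ $ nova volume-attach INSTANCE_ID VOLUME_ID /dev/vdb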
+
+
+ Block Storage Capabilities
+
+
+ OpenStack provides persistent block level storage
+ devices for use with OpenStack compute instances.
+
+
+            The block storage system manages the creation, attaching,
+            and detaching of block devices to servers. Block storage
+            volumes are fully integrated into OpenStack Compute and the
+            Dashboard, allowing cloud users to manage their own
+            storage needs.
+
+
+ In addition to using simple Linux server storage, it has
+ unified storage support for numerous storage platforms
+ including Ceph, NetApp, Nexenta, SolidFire, and
+ Zadara.
+
+
+ Block storage is appropriate for performance sensitive
+ scenarios such as database storage, expandable file systems,
+ or providing a server with access to raw block level
+ storage.
+
+
+ Snapshot management provides powerful functionality for
+ backing up data stored on block storage volumes. Snapshots
+ can be restored or used to create a new block storage
+ volume.
+
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module001-ch011-vm-provisioning-indepth.xml b/doc/training-guide/module001-ch011-vm-provisioning-indepth.xml
deleted file mode 100644
index adbbb96f09..0000000000
--- a/doc/training-guide/module001-ch011-vm-provisioning-indepth.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
- VM Provisioning Indepth
- More Content TO be Added ...
-
diff --git a/doc/training-guide/module001-intro-openstack.xml b/doc/training-guide/module001-intro-openstack.xml
index 4d02ac04aa..44dc14bf59 100644
--- a/doc/training-guide/module001-intro-openstack.xml
+++ b/doc/training-guide/module001-intro-openstack.xml
@@ -14,4 +14,4 @@
-
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch000-openstack-networking.xml b/doc/training-guide/module002-ch000-openstack-networking.xml
new file mode 100644
index 0000000000..27af862877
--- /dev/null
+++ b/doc/training-guide/module002-ch000-openstack-networking.xml
@@ -0,0 +1,15 @@
+
+
+ Networking in OpenStack
+
+
+
+
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch001-networking-in-openstack.xml b/doc/training-guide/module002-ch001-networking-in-openstack.xml
new file mode 100644
index 0000000000..cc5a222de2
--- /dev/null
+++ b/doc/training-guide/module002-ch001-networking-in-openstack.xml
@@ -0,0 +1,269 @@
+
+
+ Networking in OpenStack
+ Networking in OpenStack
+ OpenStack Networking provides a rich tenant-facing API
+ for defining network connectivity and addressing in the
+ cloud. The OpenStack Networking project gives operators
+ the ability to leverage different networking technologies
+ to power their cloud networking. It is a virtual network
+ service that provides a powerful API to define the network
+ connectivity and addressing used by devices from other
+          services, such as OpenStack Compute. The API consists of the
+          following components:
+
+
+ Network: An
+ isolated L2 segment, analogous to VLAN in the physical
+ networking world.
+
+
+ Subnet: A block
+ of v4 or v6 IP addresses and associated configuration
+ state.
+
+
+ Port: A
+ connection point for attaching a single device, such
+ as the NIC of a virtual server, to a virtual network.
+ Also describes the associated network configuration,
+ such as the MAC and IP addresses to be used on that
+ port.
+
+
+ You can configure rich network topologies by creating
+ and configuring networks and subnets, and then instructing
+ other OpenStack services like OpenStack Compute to attach
+ virtual devices to ports on these networks. In
+ particular, OpenStack Networking supports each tenant
+ having multiple private networks, and allows tenants to
+ choose their own IP addressing scheme, even if those IP
+ addresses overlap with those used by other tenants. This
+ enables very advanced cloud networking use cases, such as
+ building multi-tiered web applications and allowing
+ applications to be migrated to the cloud without changing
+ IP addresses.
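+          As a rough illustration of that workflow with the quantum
+          client of this era (names, image, and UUIDs are
+          placeholders; later releases expose the same calls through
+          the neutron client):
+
+          $ quantum net-create private-net
+          $ quantum subnet-create private-net 10.0.0.0/24 --name private-subnet
+          $ nova boot --image <image-id> --flavor m1.small --nic net-id=<net-uuid> vm1
+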
+ Plugin Architecture: Flexibility to Choose
+ Different Network Technologies
+ Enhancing traditional networking solutions to provide rich
+          cloud networking is challenging. Traditional networking is
+          not designed to scale to cloud proportions or to be
+          configured automatically.
+ The original OpenStack Compute network implementation
+ assumed a very basic model of performing all isolation through
+ Linux VLANs and IP tables. OpenStack Networking introduces the
+ concept of a plugin, which is a pluggable back-end
+ implementation of the OpenStack Networking API. A plugin can
+ use a variety of technologies to implement the logical API
+ requests. Some OpenStack Networking plugins might use basic
+ Linux VLANs and IP tables, while others might use more
+ advanced technologies, such as L2-in-L3 tunneling or OpenFlow,
+ to provide similar benefits.
+          The current set of plugins includes:
+
+
+ Open vSwitch:
+ Documentation included in this guide.
+
+
+ Cisco: Documented
+ externally at: http://wiki.openstack.org/cisco-quantum
+
+
+ Linux Bridge:
+ Documentation included in this guide and http://wiki.openstack.org/Quantum-Linux-Bridge-Plugin
+
+
+
+ Nicira NVP:
+            Documentation included in this guide, the NVP Product
+            Overview, and NVP Product Support.
+
+
+ Ryu:
+ https://github.com/osrg/ryu/wiki/OpenStack
+
+
+ NEC OpenFlow:
+ http://wiki.openstack.org/Quantum-NEC-OpenFlow-Plugin
+
+
+ Big Switch, Floodlight REST
+ Proxy:
+ http://www.openflowhub.org/display/floodlightcontroller/Quantum+REST+Proxy+Plugin
+
+
+ PLUMgrid:
+ https://wiki.openstack.org/wiki/Plumgrid-quantum
+
+
+ Hyper-V
+ Plugin
+
+
+ Brocade
+ Plugin
+
+
+ Midonet
+ Plugin
+
+
+ Plugins can have different properties in terms of hardware
+ requirements, features, performance, scale, operator tools,
+ etc. Supporting many plugins enables the cloud administrator
+ to weigh different options and decide which networking
+ technology is right for the deployment.
+ Components of OpenStack Networking
+ To deploy OpenStack Networking, it is useful to understand
+ the different components that make up the solution and how
+ those components interact with each other and with other
+ OpenStack services.
+ OpenStack Networking is a standalone service, just like
+ other OpenStack services such as OpenStack Compute, OpenStack
+ Image service, OpenStack Identity service, and the OpenStack
+ Dashboard. Like those services, a deployment of OpenStack
+ Networking often involves deploying several processes on a
+ variety of hosts.
+ The main process of the OpenStack Networking server is
+ quantum-server, which is a Python daemon that exposes the
+ OpenStack Networking API and passes user requests to the
+ configured OpenStack Networking plugin for additional
+ processing. Typically, the plugin requires access to a
+ database for persistent storage, similar to other OpenStack
+ services.
+ If your deployment uses a controller host to run centralized
+ OpenStack Compute components, you can deploy the OpenStack
+ Networking server on that same host. However, OpenStack
+ Networking is entirely standalone and can be deployed on its
+ own server as well. OpenStack Networking also includes
+ additional agents that might be required depending on your
+ deployment:
+
+
+            plugin agent
+            (quantum-*-agent): Runs on each
+            hypervisor to perform local vSwitch configuration. The
+            agent to run depends on which plugin you are using, as
+            some plugins do not require an agent.
+
+
+ dhcp agent
+            (quantum-dhcp-agent): Provides DHCP
+ services to tenant networks. This agent is the same
+ across all plugins.
+
+
+ l3 agent
+            (quantum-l3-agent): Provides L3/NAT
+            forwarding to give VMs on tenant networks access to
+            external networks. This agent is the same across all
+            plugins.
+
+
+ These agents interact with the main quantum-server process
+ in the following ways:
+
+
+            Through RPC (for example, RabbitMQ or Qpid).
+
+
+ Through the standard OpenStack Networking
+ API.
+
+
+          OpenStack Networking relies on the OpenStack Identity
+          project (Keystone) for authentication and authorization of
+          all API requests.
+ OpenStack Compute interacts with OpenStack Networking
+ through calls to its standard API. As part of creating a VM,
+ nova-compute communicates with the OpenStack Networking API to
+ plug each virtual NIC on the VM into a particular
+ network.
+          The OpenStack Dashboard (Horizon) integrates with the
+          OpenStack Networking API, allowing administrators and tenant
+          users to create and manage network services through the
+          Horizon GUI.
+ Place Services on Physical
+ Hosts
+ Like other OpenStack services, OpenStack Networking provides
+ cloud administrators with significant flexibility in deciding
+ which individual services should run on which physical
+ devices. On one extreme, all service daemons can be run on a
+          single physical host for evaluation purposes. On the other,
+          each service could have its own physical host, and in some
+          cases be replicated across multiple hosts for redundancy.
+ In this guide, we focus primarily on a standard architecture
+ that includes a “cloud controller” host, a “network gateway”
+ host, and a set of hypervisors for running VMs. The "cloud
+ controller" and "network gateway" can be combined in simple
+ deployments, though if you expect VMs to send significant
+ amounts of traffic to or from the Internet, a dedicated
+ network gateway host is suggested to avoid potential CPU
+ contention between packet forwarding performed by the
+ quantum-l3-agent and other OpenStack services.
+ Network Connectivity for Physical
+ Hosts
+
+ A standard OpenStack Networking setup has up to four
+ distinct physical data center networks:
+
+
+            Management
+            network: Used for internal communication
+            between OpenStack components. The IP addresses on this
+            network should be reachable only within the data
+            center.
+
+
+            Data network: Used
+            for VM data communication within the cloud deployment.
+ The IP addressing requirements of this network depend
+ on the OpenStack Networking plugin in use.
+
+
+            External
+            network: Used to provide VMs with Internet
+ access in some deployment scenarios. The IP addresses
+ on this network should be reachable by anyone on the
+ Internet.
+
+
+            API network: Exposes
+            all OpenStack APIs, including the OpenStack Networking
+            API, to tenants. The IP addresses on this network
+            should be reachable by anyone on the Internet. This
+            may be the same network as the external network,
+            because it is possible to create an external-network
+            subnet whose allocation ranges use less than the full
+            range of IP addresses in an IP block.
+
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch002-openstack-networking-concepts.xml b/doc/training-guide/module002-ch002-openstack-networking-concepts.xml
new file mode 100644
index 0000000000..b71fdac597
--- /dev/null
+++ b/doc/training-guide/module002-ch002-openstack-networking-concepts.xml
@@ -0,0 +1,64 @@
+
+
+ OpenStack Networking Concepts
+ Network Types
+ The OpenStack Networking configuration provided by the
+ Rackspace Private Cloud cookbooks allows you to choose between
+ VLAN or GRE isolated networks, both provider- and
+ tenant-specific. From the provider side, an administrator can
+ also create a flat network.
+ The type of network that is used for private tenant networks
+ is determined by the network_type attribute, which can be
+ edited in the Chef override_attributes. This attribute sets
+ both the default provider network type and the only type of
+ network that tenants are able to create. Administrators can
+ always create flat and VLAN networks. GRE networks of any type
+ require the network_type to be set to gre.
+ Namespaces
+ For each network you create, the Network node (or Controller
+ node, if combined) will have a unique network namespace
+ (netns) created by the DHCP and Metadata agents. The netns
+ hosts an interface and IP addresses for dnsmasq and the
+          quantum-ns-metadata-proxy. You can view the namespaces with
+          ip netns list, and interact with them using
+          ip netns exec <namespace> <command>.
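+          For example, on the network node (the qdhcp-<network-id> and
+          qrouter-<router-id> namespace names are typical but depend
+          on the agents in use):
+
+          # ip netns list
+          # ip netns exec qdhcp-<network-id> ip addr show
+          # ip netns exec qrouter-<router-id> ip route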
+ Metadata
+ Not all networks or VMs need metadata access. Rackspace
+ recommends that you use metadata if you are using a single
+ network. If you need metadata, you may also need a default
+ route. (If you don't need a default route, no-gateway will
+ do.)
+ To communicate with the metadata IP address inside the
+ namespace, instances need a route for the metadata network
+ that points to the dnsmasq IP address on the same namespaced
+ interface. OpenStack Networking only injects a route when you
+ do not specify a gateway-ip in the subnet.
+ If you need to use a default route and provide instances
+ with access to the metadata route, create the subnet without
+ specifying a gateway IP and with a static route from 0.0.0.0/0
+ to your gateway IP address. Adjust the DHCP allocation pool so
+ that it will not assign the gateway IP. With this
+ configuration, dnsmasq will pass both routes to instances.
+ This way, metadata will be routed correctly without any
+ changes on the external gateway.
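+          A sketch of such a subnet, assuming a quantum/neutron client
+          that supports --no-gateway, --allocation-pool, and
+          --host-route (flag spellings vary between client versions;
+          the addresses are placeholders):
+
+          $ quantum subnet-create private-net 10.0.0.0/24 --name private-subnet \
+              --no-gateway \
+              --allocation-pool start=10.0.0.10,end=10.0.0.200 \
+              --host-route destination=0.0.0.0/0,nexthop=10.0.0.1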
+ OVS Bridges
+ An OVS bridge for provider traffic is created and configured
+ on the nodes where single-network-node and single-compute are
+ applied. Bridges are created, but physical interfaces are not
+ added. An OVS bridge is not created on a Controller-only
+ node.
+ When creating networks, you can specify the type and
+ properties, such as Flat vs. VLAN, Shared vs. Tenant, or
+ Provider vs. Overlay. These properties identify and determine
+ the behavior and resources of instances attached to the
+ network. The cookbooks will create bridges for the
+ configuration that you specify, although they do not add
+ physical interfaces to provider bridges. For example, if you
+ specify a network type of GRE, a br-tun tunnel bridge will be
+ created to handle overlay traffic.
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch003-neutron-use-cases.xml b/doc/training-guide/module002-ch003-neutron-use-cases.xml
new file mode 100644
index 0000000000..22fb912175
--- /dev/null
+++ b/doc/training-guide/module002-ch003-neutron-use-cases.xml
@@ -0,0 +1,131 @@
+
+
+ Neutron Use Cases
+      Now that you have seen what OpenStack Networking offers, here
+      are some common ways these features are put to use.
+ Use Case: Single Flat
+ Network
+ In the simplest use case, a single OpenStack Networking
+ network exists. This is a "shared" network, meaning it is
+ visible to all tenants via the OpenStack Networking API.
+ Tenant VMs have a single NIC, and receive a fixed IP
+ address from the subnet(s) associated with that network.
+ This essentially maps to the FlatManager and
+ FlatDHCPManager models provided by OpenStack Compute.
+ Floating IPs are not supported.
+ It is common that such an OpenStack Networking network
+ is a "provider network", meaning it was created by the
+ OpenStack administrator to map directly to an existing
+ physical network in the data center. This allows the
+ provider to use a physical router on that data center
+ network as the gateway for VMs to reach the outside world.
+ For each subnet on an external network, the gateway
+ configuration on the physical router must be manually
+ configured outside of OpenStack.
+
+ Use Case: Multiple Flat
+          Networks
+ This use case is very similar to the above Single Flat
+ Network use case, except that tenants see multiple shared
+ networks via the OpenStack Networking API and can choose
+ which network (or networks) to plug into.
+
+ Use Case: Mixed Flat and Private
+ Network
+ This use case is an extension of the above flat network
+ use cases, in which tenants also optionally have access to
+ private per-tenant networks. In addition to seeing one or
+ more shared networks via the OpenStack Networking API,
+ tenants can create additional networks that are only
+ visible to users of that tenant. When creating VMs, those
+ VMs can have NICs on any of the shared networks and/or any
+ of the private networks belonging to the tenant. This
+ enables the creation of "multi-tier" topologies using VMs
+ with multiple NICs. It also supports a model where a VM
+ acting as a gateway can provide services such as routing,
+ NAT, or load balancing.
+
+ Use Case: Provider Router with Private
+ Networks
+          This use case provides each tenant with one or more private
+ networks, which connect to the outside world via an
+ OpenStack Networking router. The case where each tenant
+ gets exactly one network in this form maps to the same
+ logical topology as the VlanManager in OpenStack Compute
+ (of course, OpenStack Networking doesn't require VLANs).
+ Using the OpenStack Networking API, the tenant would only
+ see a network for each private network assigned to that
+ tenant. The router object in the API is created and owned
+ by the cloud admin.
+ This model supports giving VMs public addresses using
+ "floating IPs", in which the router maps public addresses
+ from the external network to fixed IPs on private
+ networks. Hosts without floating IPs can still create
+ outbound connections to the external network, as the
+ provider router performs SNAT to the router's external IP.
+ The IP address of the physical router is used as the
+ gateway_ip of the external network subnet, so the provider
+ has a default router for Internet traffic.
+ The router provides L3 connectivity between private
+ networks, meaning that different tenants can reach each
+          other's instances unless additional filtering (e.g.,
+ security groups) is used. Because there is only a single
+ router, tenant networks cannot use overlapping IPs. Thus,
+ it is likely that the admin would create the private
+ networks on behalf of tenants.
+
+ Use Case: Per-tenant Routers with Private
+ Networks
+ A more advanced router scenario in which each tenant
+ gets at least one router, and potentially has access to
+ the OpenStack Networking API to create additional routers.
+ The tenant can create their own networks, potentially
+ uplinking those networks to a router. This model enables
+ tenant-defined multi-tier applications, with each tier
+ being a separate network behind the router. Since there
+          are multiple routers, tenant subnets can overlap without
+          conflicting, because access to external networks always
+          happens via SNAT or floating IPs. Each router uplink and
+          floating IP is allocated from the external network
+          subnet.
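+          A rough sketch of how a tenant might build such a topology
+          with the quantum client (names and IDs are placeholders):
+
+          $ quantum router-create tenant-router
+          $ quantum router-gateway-set tenant-router <external-net-id>
+          $ quantum net-create web-tier
+          $ quantum subnet-create web-tier 192.168.1.0/24 --name web-subnet
+          $ quantum router-interface-add tenant-router <web-subnet-id>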
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch004-security-in-neutron.xml b/doc/training-guide/module002-ch004-security-in-neutron.xml
new file mode 100644
index 0000000000..52a3b5403f
--- /dev/null
+++ b/doc/training-guide/module002-ch004-security-in-neutron.xml
@@ -0,0 +1,140 @@
+
+
+ Security in Neutron
+ Security Groups
+          Security groups and security group rules allow
+          administrators and tenants to specify the type of traffic
+          and direction (ingress/egress) that is allowed to pass
+          through a port. A security group is a container
+          for security group rules.
+          When a port is created in OpenStack Networking, it is
+          associated with a security group. If a security group is
+          not specified, the port is associated with a 'default'
+          security group. By default, this group drops all
+          ingress traffic and allows all egress traffic. Rules can be
+          added to this group to change the behaviour.
+          To use the OpenStack Compute security group APIs, or to
+          have OpenStack Compute orchestrate the creation of new
+          ports for instances on specific security groups, additional
+          configuration is needed: set the option
+          security_group_api=neutron in /etc/nova/nova.conf on every
+          node running nova-compute and nova-api, then restart
+          nova-api and nova-compute to pick up the change. Once this
+          is done, you can use both the OpenStack Compute and
+          OpenStack Networking security group APIs at the same
+          time.
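+          For example, the relevant nova.conf fragment would look like
+          the following; only the security_group_api option comes from
+          the text above, the section header is standard nova.conf
+          layout, and the restart commands depend on your
+          distribution:
+
+          # /etc/nova/nova.conf on every nova-api and nova-compute node
+          [DEFAULT]
+          security_group_api = neutron
+
+          # then, for example:
+          # service nova-api restart
+          # service nova-compute restart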
+ Authentication and Authorization
+          OpenStack Networking uses the OpenStack Identity service
+          (project name keystone) as the default authentication
+          service. When OpenStack Identity is enabled, users
+          submitting requests to the OpenStack Networking service
+          must provide an authentication token in the X-Auth-Token
+          request header. The token should have been obtained by
+          authenticating with the OpenStack Identity endpoint. For
+          more information about authentication with OpenStack
+          Identity, refer to the OpenStack Identity documentation.
+          When OpenStack Identity is enabled, it is not mandatory to
+          specify tenant_id for resources in create requests, because
+          the tenant identifier is derived from the authentication
+          token. Note that the default authorization settings only
+          allow administrative users to create resources on behalf of
+          a different tenant. OpenStack Networking uses information
+          received from OpenStack Identity to authorize user
+          requests. OpenStack Networking handles two kinds of
+          authorization policies:
+
+
+ Operation-based:
+ policies specify access criteria for specific
+ operations, possibly with fine-grained control over
+ specific attributes;
+
+
+            Resource-based: whether access to a specific
+            resource is granted or not according to the
+            permissions configured for the resource (currently
+            available only for the network resource). The actual
+            authorization policies enforced in OpenStack
+            Networking might vary from deployment to
+            deployment.
+
+
+ The policy engine reads entries from the policy.json
+ file. The actual location of this file might vary from
+ distribution to distribution. Entries can be updated while
+ the system is running, and no service restart is required.
+ That is to say, every time the policy file is updated, the
+ policies will be automatically reloaded. Currently the
+ only way of updating such policies is to edit the policy
+          file. Note that in this section we use both the terms
+          "policy" and "rule" to refer to objects that are specified
+          in the same way in the policy file; in other words, there
+          are no syntax differences between a rule and a policy. We
+          define a policy as something that is matched directly by
+          the OpenStack Networking policy engine, whereas a rule is
+          an element of such a policy that is then evaluated. For
+          instance, in create_subnet: [["admin_or_network_owner"]],
+          create_subnet is regarded as a policy, whereas
+          admin_or_network_owner is regarded as a rule.
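+          An illustrative policy.json fragment in this style, following
+          the notation used above (the rule bodies are examples, not
+          necessarily the shipped defaults, and real policy files may
+          spell rule references slightly differently):
+
+          {
+              "admin_only": [["role:admin"]],
+              "admin_or_owner": [["role:admin"], ["tenant_id:%(tenant_id)s"]],
+              "admin_or_network_owner": [["role:admin"], ["tenant_id:%(network_tenant_id)s"]],
+              "create_subnet": [["admin_or_network_owner"]],
+              "create_network:shared": [["admin_only"]]
+          }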
+ Policies are triggered by the OpenStack Networking
+ policy engine whenever one of them matches an OpenStack
+ Networking API operation or a specific attribute being
+ used in a given operation. For instance the create_subnet
+ policy is triggered every time a POST /v2.0/subnets
+ request is sent to the OpenStack Networking server; on the
+ other hand create_network:shared is triggered every time
+ the shared attribute is explicitly specified (and set to a
+ value different from its default) in a POST /v2.0/networks
+          request. It is also worth mentioning that policies can also
+          be related to specific API extensions; for instance,
+          extension:provider_network:set will be triggered if the
+          attributes defined by the Provider Network extension are
+          specified in an API request.
+          An authorization policy can be composed of one or more
+          rules. If several rules are specified, evaluation of the
+          policy succeeds if any of the rules evaluates successfully;
+          if an API operation matches multiple policies, then all of
+          those policies must evaluate successfully. Authorization
+          rules are also recursive: once a rule is matched, it can
+          resolve to another rule, until a terminal rule is
+          reached.
+ The OpenStack Networking policy engine currently defines
+ the following kinds of terminal rules:
+
+
+ Role-based
+ rules: evaluate successfully if the
+ user submitting the request has the specified role.
+            For instance, "role:admin" is successful if the user
+            submitting the request is an administrator.
+
+
+ Field-based
+ rules: evaluate successfully if a field
+ of the resource specified in the current request
+ matches a specific value. For instance
+ "field:networks:shared=True" is successful if the
+ attribute shared of the network resource is set to
+ true.
+
+
+            Generic
+            rules: compare an attribute in the resource
+            with an attribute extracted from the user's security
+            credentials and evaluate successfully if the
+            comparison is successful. For instance
+ "tenant_id:%(tenant_id)s" is successful if the tenant
+ identifier in the resource is equal to the tenant
+ identifier of the user submitting the request.
+
+
+
\ No newline at end of file
diff --git a/doc/training-guide/module002-ch005-floating-ips.xml b/doc/training-guide/module002-ch005-floating-ips.xml
new file mode 100644
index 0000000000..7f7f5e63e6
--- /dev/null
+++ b/doc/training-guide/module002-ch005-floating-ips.xml
@@ -0,0 +1,72 @@
+
+
+ Floating IP Addresses And Security Rules
+ OpenStack Networking has the concept of Fixed IPs and
+ Floating IPs. Fixed IPs are assigned to an instance on
+ creation and stay the same until the instance is explicitly
+      terminated. Floating IPs are IP addresses that can be
+      dynamically associated with an instance; a floating IP can be
+      disassociated and associated with another instance at any
+      time.
+      Floating IPs currently support the following tasks (example
+      client commands follow the list):
+
+
+          create IP ranges under a certain group (available only to
+          the admin role)
+
+
+          allocate a floating IP to a certain tenant (available only
+          to the admin role)
+
+
+          deallocate a floating IP from a certain tenant
+
+
+          associate a floating IP with a given instance
+
+
+          disassociate a floating IP from a given instance
+
+
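+      For instance, with the nova client these tasks map roughly to
+      the following commands (server and IP values are placeholders):
+
+      $ nova floating-ip-create                         # allocate to the current tenant
+      $ nova add-floating-ip <server> <floating-ip>     # associate
+      $ nova remove-floating-ip <server> <floating-ip>  # disassociate
+      $ nova floating-ip-delete <floating-ip>           # deallocate
+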
+        As shown in the figure above, nova-network-api supports
+        the nova client floating IP commands. nova-network-api
+        invokes the quantum client library to interact with the
+        quantum server via its API. Data about floating IPs is
+        stored in the quantum database, and the Quantum agent
+        running on the compute host enforces the floating IPs.
+ Multiple Floating
+ IP Pools
+ The L3 API in OpenStack Networking supports multiple
+ floating IP pools. In OpenStack Networking, a floating
+ IP pool is represented as an external network and a
+ floating IP is allocated from a subnet associated with
+        associated with at most one external network, we need
+        to run multiple L3 agents to define multiple floating IP
+        pools. The 'gateway_external_network_id' option in the L3
+        agent configuration file indicates the external network
+        that the agent handles. You can run multiple L3 agent
+        instances on one host.
+        In addition, when you run multiple L3 agents, make
+        sure that handle_internal_only_routers is set to True for
+        only one L3 agent in an OpenStack Networking deployment
+        and set to False for all other L3 agents. Since the
+        default value of this parameter is True, you need to
+        configure it carefully.
+        Before starting the L3 agents, you need to create the
+        routers and external networks, update the configuration
+        files with the UUIDs of the external networks, and then
+        start the agents. For the first agent, invoke it with an
+        l3_agent.ini in which handle_internal_only_routers is
+        True, along the lines of the sketch below.
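+        A hedged sketch of such a file; only
+        handle_internal_only_routers and gateway_external_network_id
+        are discussed above, and the remaining options and the UUID
+        placeholder are illustrative:
+
+        # l3_agent.ini for the first L3 agent
+        [DEFAULT]
+        interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
+        external_network_bridge = br-ex
+        handle_internal_only_routers = True
+        gateway_external_network_id = <UUID-of-first-external-network>
+
+        Additional agents would set handle_internal_only_routers =
+        False and point gateway_external_network_id at their own
+        external network.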
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch000-openstack-objstore.xml b/doc/training-guide/module003-ch000-openstack-objstore.xml
new file mode 100644
index 0000000000..f133967de0
--- /dev/null
+++ b/doc/training-guide/module003-ch000-openstack-objstore.xml
@@ -0,0 +1,19 @@
+
+
+ OpenStack Object Storage
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/training-guide/module003-ch001-intro-objstore.xml b/doc/training-guide/module003-ch001-intro-objstore.xml
new file mode 100644
index 0000000000..70c10649e4
--- /dev/null
+++ b/doc/training-guide/module003-ch001-intro-objstore.xml
@@ -0,0 +1,32 @@
+
+
+ Introduction to Object Storage
+ OpenStack Object Storage (code-named Swift) is open source
+ software for creating redundant, scalable data storage using
+ clusters of standardized servers to store petabytes of
+ accessible data. It is a long-term storage system for large
+ amounts of static data that can be retrieved, leveraged, and
+ updated. Object Storage uses a distributed architecture with
+ no central point of control, providing greater scalability,
+ redundancy and permanence. Objects are written to multiple
+ hardware devices, with the OpenStack software responsible for
+ ensuring data replication and integrity across the cluster.
+ Storage clusters scale horizontally by adding new nodes.
+ Should a node fail, OpenStack works to replicate its content
+ from other active nodes. Because OpenStack uses software logic
+ to ensure data replication and distribution across different
+ devices, inexpensive commodity hard drives and servers can be
+ used in lieu of more expensive equipment.
+ Object Storage is ideal for cost effective, scale-out
+ storage. It provides a fully distributed, API-accessible
+ storage platform that can be integrated directly into
+ applications or used for backup, archiving and data retention.
+ Block Storage allows block devices to be exposed and connected
+ to compute instances for expanded storage, better performance
+ and integration with enterprise storage platforms, such as
+ NetApp, Nexenta and SolidFire.
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch002-features-benifits.xml b/doc/training-guide/module003-ch002-features-benifits.xml
new file mode 100644
index 0000000000..bfb09239e0
--- /dev/null
+++ b/doc/training-guide/module003-ch002-features-benifits.xml
@@ -0,0 +1,204 @@
+
+
+        Features and Benefits
+
+        Feature: Leverages commodity hardware
+        Benefit: No lock-in, lower price/GB
+
+        Feature: HDD/node failure agnostic
+        Benefit: Self healing; reliability; data redundancy protecting
+        from failures
+
+        Feature: Unlimited storage
+        Benefit: Huge & flat namespace; highly scalable read/write
+        access; ability to serve content directly from the storage
+        system
+
+        Feature: Multi-dimensional scalability (scale-out architecture:
+        scale vertically and horizontally-distributed storage)
+        Benefit: Backup and archive large amounts of data with linear
+        performance
+
+        Feature: Account/Container/Object structure (no nesting, not a
+        traditional file system)
+        Benefit: Optimized for scale; scales to multiple petabytes and
+        billions of objects
+
+        Feature: Built-in replication (3x+ data redundancy compared to
+        2x on RAID)
+        Benefit: Configurable number of account, container, and object
+        copies for high availability
+
+        Feature: Easily add capacity (unlike RAID resize)
+        Benefit: Elastic data scaling with ease
+
+        Feature: No central database
+        Benefit: Higher performance, no bottlenecks
+
+        Feature: RAID not required
+        Benefit: Handles lots of small, random reads and writes
+        efficiently; detects drive failures, preempting data corruption
+
+        Feature: Expiring objects
+        Benefit: Users can set an expiration time or a TTL on an object
+        to control access
+
+        Feature: Direct object access
+        Benefit: Enable direct browser access to content, such as for a
+        control panel
+
+        Feature: Realtime visibility into client requests
+        Benefit: Know what users are requesting
+
+        Feature: Supports S3 API
+        Benefit: Utilize tools that were designed for the popular S3
+        API
+
+        Feature: Restrict containers per account
+        Benefit: Limit access to control usage by user
+
+        Feature: Support for NetApp, Nexenta, SolidFire
+        Benefit: Unified support for block volumes using a variety of
+        storage systems
+
+        Feature: Snapshot and backup API for block volumes
+        Benefit: Data protection and recovery for VM data
+
+        Feature: Standalone volume API available
+        Benefit: Separate endpoint and API for integration with other
+        compute systems
+
+        Feature: Integration with Compute
+        Benefit: Fully integrated with Compute for attaching block
+        volumes and reporting on usage
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch003-obj-store-capabilities.xml b/doc/training-guide/module003-ch003-obj-store-capabilities.xml
new file mode 100644
index 0000000000..2785805504
--- /dev/null
+++ b/doc/training-guide/module003-ch003-obj-store-capabilities.xml
@@ -0,0 +1,100 @@
+
+
+ Object Storage Capabilities
+
+
+ OpenStack provides redundant, scalable object
+ storage using clusters of standardized servers capable
+ of storing petabytes of data
+
+
+ Object Storage is not a traditional file system, but
+ rather a distributed storage system for static data
+ such as virtual machine images, photo storage, email
+ storage, backups and archives. Having no central
+ "brain" or master point of control provides greater
+ scalability, redundancy and durability.
+
+
+ Objects and files are written to multiple disk
+ drives spread throughout servers in the data center,
+ with the OpenStack software responsible for ensuring
+ data replication and integrity across the
+ cluster.
+
+
+ Storage clusters scale horizontally simply by adding
+ new servers. Should a server or hard drive fail,
+ OpenStack replicates its content from other active
+ nodes to new locations in the cluster. Because
+ OpenStack uses software logic to ensure data
+ replication and distribution across different devices,
+ inexpensive commodity hard drives and servers can be
+ used in lieu of more expensive equipment.
+
+
+ Swift Characteristics
+ The key characteristics of Swift include:
+
+
+ All objects stored in Swift have a URL
+
+
+ All objects stored are replicated 3x in
+ as-unique-as-possible zones, which can be defined as a
+ group of drives, a node, a rack etc.
+
+
+ All objects have their own metadata
+
+
+ Developers interact with the object storage system
+ through a RESTful HTTP API
+
+
+ Object data can be located anywhere in the
+ cluster
+
+
+ The cluster scales by adding additional nodes --
+ without sacrificing performance, which allows a more
+ cost-effective linear storage expansion vs. fork-lift
+ upgrades
+
+
+ Data doesn’t have to be migrated to an entirely new
+ storage system
+
+
+ New nodes can be added to the cluster without
+ downtime
+
+
+ Failed nodes and disks can be swapped out with no
+ downtime
+
+
+ Runs on industry-standard hardware, such as Dell,
+ HP, Supermicro etc.
+
+
+
+ Developers can either write directly to the Swift API or use
+ one of the many client libraries that exist for all popular
+ programming languages, such as Java, Python, Ruby and C#.
+ Amazon S3 and RackSpace Cloud Files users should feel very
+ familiar with Swift. For users who have not used an object
+ storage system before, it will require a different approach
+ and mindset than using a traditional filesystem.
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch004-swift-building-blocks.xml b/doc/training-guide/module003-ch004-swift-building-blocks.xml
new file mode 100644
index 0000000000..745b380568
--- /dev/null
+++ b/doc/training-guide/module003-ch004-swift-building-blocks.xml
@@ -0,0 +1,295 @@
+
+
+ Building Blocks of Swift
+ The components that enable Swift to deliver high
+ availability, high durability and high concurrency
+ are:
+
+
+          Proxy
+          Servers: Handle all incoming API
+          requests.
+
+
+          Rings: Map
+          logical names of data to locations on particular
+          disks.
+
+
+          Zones: Each Zone
+          isolates data from other Zones. A failure in one Zone
+          doesn’t impact the rest of the cluster because data is
+          replicated across the Zones.
+
+
+          Accounts &
+          Containers: Each Account and Container
+          is an individual database that is distributed across
+          the cluster. An Account database contains the list of
+          Containers in that Account. A Container database
+          contains the list of Objects in that Container.
+
+
+          Objects: The
+          data itself.
+
+
+          Partitions: A
+          Partition stores Objects, Account databases, and
+          Container databases. It’s an intermediate 'bucket'
+          that helps manage the locations where data lives in the
+          cluster.
+
+
+
+ Proxy Servers
+ The Proxy Servers are the public face of Swift and
+      handle all incoming API requests. Once a Proxy Server
+      receives a request, it determines the storage node
+      based on the URL of the object, e.g.
+      https://swift.example.com/v1/account/container/object. The
+      Proxy Server also coordinates responses, handles failures,
+      and coordinates timestamps.
+ Proxy servers use a shared-nothing architecture and can
+ be scaled as needed based on projected workloads. A
+ minimum of two Proxy Servers should be deployed for
+ redundancy. Should one proxy server fail, the others will
+ take over.
+ The Ring
+ A ring represents a mapping between the names of entities
+ stored on disk and their physical location. There are separate
+ rings for accounts, containers, and objects. When other
+ components need to perform any operation on an object,
+ container, or account, they need to interact with the
+ appropriate ring to determine its location in the
+ cluster.
+ The Ring maintains this mapping using zones, devices,
+ partitions, and replicas. Each partition in the ring is
+ replicated, by default, 3 times across the cluster, and the
+ locations for a partition are stored in the mapping maintained
+ by the ring. The ring is also responsible for determining
+ which devices are used for hand off in failure
+ scenarios.
+ Data can be isolated with the concept of zones in the
+ ring. Each replica of a partition is guaranteed to reside
+ in a different zone. A zone could represent a drive, a
+ server, a cabinet, a switch, or even a data center.
+ The partitions of the ring are equally divided among all
+ the devices in the OpenStack Object Storage installation.
+ When partitions need to be moved around (for example if a
+ device is added to the cluster), the ring ensures that a
+ minimum number of partitions are moved at a time, and only
+ one replica of a partition is moved at a time.
+ Weights can be used to balance the distribution of
+ partitions on drives across the cluster. This can be
+ useful, for example, when different sized drives are used
+ in a cluster.
+ The ring is used by the Proxy server and several
+ background processes (like replication).
+ The rings determine where data should reside in the
+ cluster. There is a separate ring for account databases,
+ container databases, and individual objects but each ring
+      works in the same way. These rings are externally managed,
+      in that the server processes themselves do not modify the
+      rings; they are instead given new rings modified by other
+      tools.
+ The ring uses a configurable number of bits from a
+ path’s MD5 hash as a partition index that designates a
+ device. The number of bits kept from the hash is known as
+ the partition power, and 2 to the partition power
+ indicates the partition count. Partitioning the full MD5
+      hash ring allows other parts of the cluster to work in
+      batches of items at once, which ends up being either more
+      efficient or at least less complex than working with each
+      item separately or with the entire cluster all at once.
+ Another configurable value is the replica count, which
+ indicates how many of the partition->device assignments
+ comprise a single ring. For a given partition number, each
+ replica’s device will not be in the same zone as any other
+ replica's device. Zones can be used to group devices based on
+ physical locations, power separations, network separations, or
+ any other attribute that would lessen multiple replicas being
+ unavailable at the same time.
+ Zones: Failure Boundaries
+ Swift allows zones to be configured to isolate
+ failure boundaries. Each replica of the data resides
+ in a separate zone, if possible. At the smallest
+ level, a zone could be a single drive or a grouping of
+ a few drives. If there were five object storage
+ servers, then each server would represent its own
+ zone. Larger deployments would have an entire rack (or
+ multiple racks) of object servers, each representing a
+ zone. The goal of zones is to allow the cluster to
+ tolerate significant outages of storage servers
+ without losing all replicas of the data.
+ As we learned earlier, everything in Swift is
+ stored, by default, three times. Swift will place each
+ replica "as-uniquely-as-possible" to ensure both high
+ availability and high durability. This means that when
+        choosing a replica location, Swift will choose a server
+ in an unused zone before an unused server in a zone
+ that already has a replica of the data.
+
+ When a disk fails, replica data is automatically
+ distributed to the other zones to ensure there are
+ three copies of the data
+ Accounts &
+ Containers
+ Each account and container is an individual SQLite
+ database that is distributed across the cluster. An
+ account database contains the list of containers in
+ that account. A container database contains the list
+ of objects in that container.
+
+ To keep track of object data location, each account
+ in the system has a database that references all its
+ containers, and each container database references
+ each object
+ Partitions
+ A Partition is a collection of stored data,
+ including Account databases, Container databases, and
+ objects. Partitions are core to the replication
+ system.
+ Think of a Partition as a bin moving throughout a
+ fulfillment center warehouse. Individual orders get
+ thrown into the bin. The system treats that bin as a
+ cohesive entity as it moves throughout the system. A
+ bin full of things is easier to deal with than lots of
+ little things. It makes for fewer moving parts
+ throughout the system.
+ The system replicators and object uploads/downloads
+ operate on Partitions. As the system scales up,
+ behavior continues to be predictable as the number of
+ Partitions is a fixed number.
+ The implementation of a Partition is conceptually
+ simple -- a partition is just a directory sitting on a
+ disk with a corresponding hash table of what it
+ contains.
+
+ *Swift partitions contain all data in the
+ system.
+ Replication
+ In order to ensure that there are three copies of
+ the data everywhere, replicators continuously examine
+ each Partition. For each local Partition, the
+ replicator compares it against the replicated copies
+ in the other Zones to see if there are any
+ differences.
+ How does the replicator know if replication needs to
+ take place? It does this by examining hashes. A hash
+ file is created for each Partition, which contains
+ hashes of each directory in the Partition. Each of the
+ three hash files is compared. For a given Partition,
+ the hash files for each of the Partition's copies are
+ compared. If the hashes are different, then it is time
+ to replicate and the directory that needs to be
+ replicated is copied over.
+ This is where the Partitions come in handy. With
+ fewer "things" in the system, larger chunks of data
+ are transferred around (rather than lots of little TCP
+ connections, which is inefficient) and there are a
+        connections, which is inefficient) and there is a
+        consistent number of hashes to compare.
+ the newest data wins.
+
+ *If a zone goes down, one of the nodes containing a
+ replica notices and proactively copies data to a
+ handoff location.
+ To describe how these pieces all come together, let's walk
+ through a few scenarios and introduce the components.
+      Bird's-eye View
+ Upload
+
+      A client uses the REST API to make an HTTP request to PUT
+ an object into an existing Container. The cluster receives
+ the request. First, the system must figure out where the
+ data is going to go. To do this, the Account name,
+ Container name and Object name are all used to determine
+ the Partition where this object should live.
+ Then a lookup in the Ring figures out which storage
+ nodes contain the Partitions in question.
+ The data then is sent to each storage node where it is
+ placed in the appropriate Partition. A quorum is required
+ -- at least two of the three writes must be successful
+ before the client is notified that the upload was
+ successful.
+ Next, the Container database is updated asynchronously
+ to reflect that there is a new object in it.
+
+ Download
+ A request comes in for an Account/Container/object.
+ Using the same consistent hashing, the Partition name is
+ generated. A lookup in the Ring reveals which storage
+ nodes contain that Partition. A request is made to one of
+ the storage nodes to fetch the object and if that fails,
+ requests are made to the other nodes.
+
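+      A minimal illustration of these two paths with curl, assuming
+      a valid token in $TOKEN and the example URL format shown
+      earlier (the object name is a placeholder):
+
+      $ curl -X PUT -T ./photo.jpg -H "X-Auth-Token: $TOKEN" \
+          https://swift.example.com/v1/account/container/photo.jpg
+      $ curl -H "X-Auth-Token: $TOKEN" \
+          https://swift.example.com/v1/account/container/photo.jpg -o photo.jpg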
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch005-the-ring.xml b/doc/training-guide/module003-ch005-the-ring.xml
new file mode 100644
index 0000000000..8972b1abca
--- /dev/null
+++ b/doc/training-guide/module003-ch005-the-ring.xml
@@ -0,0 +1,146 @@
+
+
+ Ring Builder
+ The rings are built and managed manually by a utility called
+ the ring-builder. The ring-builder assigns partitions to
+ devices and writes an optimized Python structure to a gzipped,
+ serialized file on disk for shipping out to the servers. The
+ server processes just check the modification time of the file
+ occasionally and reload their in-memory copies of the ring
+ structure as needed. Because of how the ring-builder manages
+ changes to the ring, using a slightly older ring usually just
+ means one of the three replicas for a subset of the partitions
+ will be incorrect, which can be easily worked around.
+ The ring-builder also keeps its own builder file with the
+ ring information and additional data required to build future
+ rings. It is very important to keep multiple backup copies of
+ these builder files. One option is to copy the builder files
+ out to every server while copying the ring files themselves.
+ Another is to upload the builder files into the cluster
+ itself. Complete loss of a builder file will mean creating a
+ new ring from scratch, nearly all partitions will end up
+ assigned to different devices, and therefore nearly all data
+ stored will have to be replicated to new locations. So,
+ recovery from a builder file loss is possible, but data will
+ definitely be unreachable for an extended time.
+ Ring Data Structure
+ The ring data structure consists of three top level
+ fields: a list of devices in the cluster, a list of lists
+ of device ids indicating partition to device assignments,
+ and an integer indicating the number of bits to shift an
+ MD5 hash to calculate the partition for the hash.
+ Partition Assignment
+ List
+          This is a list of array(‘H’) arrays of device ids. The
+ outermost list contains an array(‘H’) for each
+ replica. Each array(‘H’) has a length equal to the
+ partition count for the ring. Each integer in the
+ array(‘H’) is an index into the above list of devices.
+ The partition list is known internally to the Ring
+ class as _replica2part2dev_id.
+ So, to create a list of device dictionaries assigned
+ to a partition, the Python code would look like:
+ devices = [self.devs[part2dev_id[partition]] for
+ part2dev_id in self._replica2part2dev_id]
+ That code is a little simplistic, as it does not
+ account for the removal of duplicate devices. If a
+ ring has more replicas than devices, then a partition
+ will have more than one replica on one device; that’s
+ simply the pigeonhole principle at work.
+ array(‘H’) is used for memory conservation as there
+ may be millions of partitions.
+ Fractional Replicas
+ A ring is not restricted to having an integer number
+ of replicas. In order to support the gradual changing
+ of replica counts, the ring is able to have a real
+ number of replicas.
+ When the number of replicas is not an integer, then
+ the last element of _replica2part2dev_id will have a
+ length that is less than the partition count for the
+ ring. This means that some partitions will have more
+ replicas than others. For example, if a ring has 3.25
+ replicas, then 25% of its partitions will have four
+ replicas, while the remaining 75% will have just
+ three.
+ Partition Shift Value
+ The partition shift value is known internally to the
+          Ring class as _part_shift. This value is used to shift an
+          MD5 hash to calculate the partition on which the data
+          for that hash should reside. Only the top four bytes
+          of the hash are used in this process. For example, to
+ compute the partition for the path
+ /account/container/object the Python code might look
+ like: partition = unpack_from('>I',
+ md5('/account/container/object').digest())[0] >>
+ self._part_shift
+ For a ring generated with part_power P, the
+ partition shift value is 32 - P.
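+          The snippet above can be made self-contained as follows;
+          the partition power of 18 is only an example value:
+
+          from hashlib import md5
+          from struct import unpack_from
+
+          part_power = 18                # example; chosen when the ring is built
+          part_shift = 32 - part_power   # the partition shift value described above
+
+          path = '/account/container/object'
+          # take the top four bytes of the MD5 hash and shift them right
+          partition = unpack_from('>I', md5(path.encode()).digest())[0] >> part_shift
+          print(partition)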
+ Building the Ring
+ The initial building of the ring first calculates the
+ number of partitions that should ideally be assigned to
+      number of partitions that should ideally be assigned to
+      each device, based on the device’s weight. For example, given
+ partitions. If there are 1,000 devices of equal weight
+ they will each desire 1,048.576 partitions. The devices
+ are then sorted by the number of partitions they desire
+ and kept in order throughout the initialization
+ process.
+ Note: each device is also assigned a random tiebreaker
+ value that is used when two devices desire the same number
+ of partitions. This tiebreaker is not stored on disk
+ anywhere, and so two different rings created with the same
+ parameters will have different partition assignments. For
+ repeatable partition assignments, RingBuilder.rebalance()
+ takes an optional seed value that will be used to seed
+ Python’s pseudo-random number generator.
+ Then, the ring builder assigns each replica of each
+ partition to the device that desires the most partitions
+ at that point while keeping it as far away as possible
+ from other replicas. The ring builder prefers to assign a
+      replica to a device in a region that has no replicas
+ already; should there be no such region available, the
+ ring builder will try to find a device in a different
+ zone; if not possible, it will look on a different server;
+ failing that, it will just look for a device that has no
+ replicas; finally, if all other options are exhausted, the
+ ring builder will assign the replica to the device that
+ has the fewest replicas already assigned. Note that
+ assignment of multiple replicas to one device will only
+ happen if the ring has fewer devices than it has
+ replicas.
+ When building a new ring based on an old ring, the
+ desired number of partitions each device wants is
+ recalculated. Next the partitions to be reassigned are
+ gathered up. Any removed devices have all their assigned
+ partitions unassigned and added to the gathered list. Any
+ partition replicas that (due to the addition of new
+ devices) can be spread out for better durability are
+ unassigned and added to the gathered list. Any devices
+ that have more partitions than they now desire have random
+ partitions unassigned from them and added to the gathered
+ list. Lastly, the gathered partitions are then reassigned
+ to devices using a similar method as in the initial
+ assignment described above.
+ Whenever a partition has a replica reassigned, the time
+ of the reassignment is recorded. This is taken into
+ account when gathering partitions to reassign so that no
+ partition is moved twice in a configurable amount of time.
+ This configurable amount of time is known internally to
+ the RingBuilder class as min_part_hours. This restriction
+ is ignored for replicas of partitions on devices that have
+ been removed, as removing a device only happens on device
+ failure and there’s no choice but to make a
+ reassignment.
+ The above processes don’t always perfectly rebalance a
+ ring due to the random nature of gathering partitions for
+ reassignment. To help reach a more balanced ring, the
+      rebalance process is repeated until nearly perfect (less than 1%
+ off) or when the balance doesn’t improve by at least 1%
+ (indicating we probably can’t get perfect balance due to
+ wildly imbalanced zones or too many partitions recently
+ moved).
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch006-more-concepts.xml b/doc/training-guide/module003-ch006-more-concepts.xml
new file mode 100644
index 0000000000..7ff6d4a843
--- /dev/null
+++ b/doc/training-guide/module003-ch006-more-concepts.xml
@@ -0,0 +1,220 @@
+
+
+ A Bit More On Swift
+ Containers and Objects
+ A container is a storage compartment for your data and
+ provides a way for you to organize your data. You can
+ think of a container as a folder in Windows or a
+ directory in UNIX. The primary difference between a
+ container and these other file system concepts is that
+ containers cannot be nested. You can, however, create an
+ unlimited number of containers within your account. Data
+ must be stored in a container so you must have at least
+ one container defined in your account prior to uploading
+ data.
+      The only restrictions on container names are that they
+      cannot contain a forward slash (/) or an ASCII null (%00)
+      and that they must be less than 257 bytes in length. Please note
+ that the length restriction applies to the name after it
+ has been URL encoded. For example, a container name of
+ Course Docs would be URL encoded as Course%20Docs and
+ therefore be 13 bytes in length rather than the expected
+ 11.
+      An object is the basic storage entity, plus any optional
+      metadata, representing a file that you store in the
+      OpenStack Object Storage system. When you upload data to
+      OpenStack Object Storage, the data is stored as-is (no
+      compression or encryption) and consists of a location
+      (container), the object's name, and any metadata
+      consisting of key/value pairs. For instance, you may choose
+      to store a backup of your digital photos and organize them
+      into albums. In this case, each object could be tagged
+      with metadata such as Album : Caribbean Cruise or Album :
+      Aspen Ski Trip.
+ The only restriction on object names is that they must
+ be less than 1024 bytes in length after URL encoding. For
+ example, an object name of C++final(v2).txt should be URL
+ encoded as C%2B%2Bfinal%28v2%29.txt and therefore be 24
+ bytes in length rather than the expected 16.
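+      The length arithmetic above can be checked with a few lines of
+      Python; urllib's quote is used here purely to illustrate the
+      URL encoding:
+
+      from urllib.parse import quote
+
+      print(quote('Course Docs'), len(quote('Course Docs')))
+      # Course%20Docs 13
+      print(quote('C++final(v2).txt'), len(quote('C++final(v2).txt')))
+      # C%2B%2Bfinal%28v2%29.txt 24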
+ The maximum allowable size for a storage object upon
+ upload is 5 gigabytes (GB) and the minimum is zero bytes.
+ You can use the built-in large object support and the
+ swift utility to retrieve objects larger than 5 GB.
+ For metadata, you should not exceed 90 individual
+ key/value pairs for any one object and the total byte
+ length of all key/value pairs should not exceed 4KB (4096
+ bytes).
+ Language-Specific API
+ Bindings
+ A set of supported API bindings in several popular
+ languages are available from the Rackspace Cloud Files
+ product, which uses OpenStack Object Storage code for its
+ implementation. These bindings provide a layer of
+ abstraction on top of the base REST API, allowing
+ programmers to work with a container and object model
+ instead of working directly with HTTP requests and
+ responses. These bindings are free (as in beer and as in
+ speech) to download, use, and modify. They are all
+ licensed under the MIT License as described in the COPYING
+ file packaged with each binding. If you do make any
+ improvements to an API, you are encouraged (but not
+ required) to submit those changes back to us.
+ The API bindings for Rackspace Cloud Files are hosted
+      at http://github.com/rackspace. Feel free to
+ coordinate your changes through github or, if you prefer,
+ send your changes to cloudfiles@rackspacecloud.com. Just
+ make sure to indicate which language and version you
+ modified and send a unified diff.
+ Each binding includes its own documentation (either
+ HTML, PDF, or CHM). They also include code snippets and
+ examples to help you get started. The currently supported
+      API bindings for OpenStack Object Storage are:
+
+
+ PHP (requires 5.x and the modules: cURL,
+ FileInfo, mbstring)
+
+
+ Python (requires 2.4 or newer)
+
+
+ Java (requires JRE v1.5 or newer)
+
+
+ C#/.NET (requires .NET Framework v3.5)
+
+
+ Ruby (requires 1.8 or newer and mime-tools
+ module)
+
+
+ There are no other supported language-specific bindings
+ at this time. You are welcome to create your own language
+ API bindings and we can help answer any questions during
+ development, host your code if you like, and give you full
+ credit for your work.
+ Proxy Server
+ The Proxy Server is responsible for tying together
+ the rest of the OpenStack Object Storage architecture.
+ For each request, it will look up the location of the
+ account, container, or object in the ring (see below)
+ and route the request accordingly. The public API is
+ also exposed through the Proxy Server.
+ A large number of failures are also handled in the
+ Proxy Server. For example, if a server is unavailable
+ for an object PUT, it will ask the ring for a hand-off
+ server and route there instead.
+ When objects are streamed to or from an object
+ server, they are streamed directly through the proxy
+ server to or from the user – the proxy server does not
+ spool them.
+ You can use a proxy server with account management
+ enabled by configuring it in the proxy server
+ configuration file.
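+ As a minimal sketch, assuming the stock proxy pipeline,
+ account management is enabled with a single option in the
+ [app:proxy-server] section of proxy-server.conf:
+
+     # /etc/swift/proxy-server.conf (excerpt)
+     [app:proxy-server]
+     use = egg:swift#proxy
+     # Allow account PUT and DELETE requests through this proxy
+     allow_account_management = true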
+ Object Server
+ The Object Server is a very simple blob storage
+ server that can store, retrieve and delete objects
+ stored on local devices. Objects are stored as binary
+ files on the filesystem with metadata stored in the
+ file’s extended attributes (xattrs). This requires
+ that the filesystem chosen for object servers
+ support xattrs on files. Some filesystems,
+ like ext3, have xattrs turned off by default.
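+ One quick way to confirm that a candidate filesystem
+ supports xattrs is to write and read back a test attribute;
+ a sketch using Python's os module on Linux (the mount point
+ below is a placeholder):
+
+     import os
+
+     path = '/srv/node/sdb1/xattr-test'   # placeholder file on the candidate filesystem
+     open(path, 'w').close()
+     try:
+         os.setxattr(path, 'user.swift.test', b'1')
+         assert os.getxattr(path, 'user.swift.test') == b'1'
+         print('xattrs supported')
+     except OSError:
+         # e.g. ext3 mounted without the user_xattr option
+         print('xattrs not supported; remount with user_xattr or use XFS')
+     finally:
+         os.remove(path)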
+ Each object is stored using a path derived from the
+ object name’s hash and the operation’s timestamp. Last
+ write always wins, and ensures that the latest object
+ version will be served. A deletion is also treated as
+ a version of the file (a 0 byte file ending with
+ “.ts”, which stands for tombstone). This ensures that
+ deleted files are replicated correctly and older
+ versions don’t magically reappear due to failure
+ scenarios.
+ Container Server
+ The Container Server’s primary job is to handle
+ listings of objects. It does not know where those
+ objects are, just which objects are in a specific
+ container. The listings are stored as sqlite database
+ files and replicated across the cluster much like
+ objects are. Statistics are also tracked, including
+ the total number of objects and the total storage
+ usage for that container.
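+ Conceptually, each container database is just a small
+ sqlite file holding one row per object plus running
+ statistics. The sketch below is purely illustrative and
+ does not reproduce Swift's actual schema:
+
+     import sqlite3
+
+     # Purely illustrative -- not the real Swift container schema.
+     db = sqlite3.connect(':memory:')
+     db.execute('CREATE TABLE object (name TEXT PRIMARY KEY, size INTEGER)')
+     db.executemany('INSERT INTO object VALUES (?, ?)',
+                    [('photo1.jpg', 2048576), ('photo2.jpg', 1048576)])
+
+     # A listing request returns names; statistics are simple aggregates.
+     listing = [row[0] for row in db.execute('SELECT name FROM object ORDER BY name')]
+     count, total_bytes = db.execute('SELECT COUNT(*), SUM(size) FROM object').fetchone()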
+ Account Server
+ The Account Server is very similar to the Container
+ Server, except that it is responsible for listings
+ of containers rather than objects.
+ Replication
+ Replication is designed to keep the system in a
+ consistent state in the face of temporary error
+ conditions like network outages or drive
+ failures.
+ The replication processes compare local data with
+ each remote copy to ensure they all contain the latest
+ version. Object replication uses a hash list to
+ quickly compare subsections of each partition, and
+ container and account replication use a combination of
+ hashes and shared high water marks.
+ Replication updates are push based. For object
+ replication, updating is just a matter of rsyncing
+ files to the peer. Account and container replication
+ push missing records over HTTP or rsync whole database
+ files.
+ The replicator also ensures that data is removed
+ from the system. When an item (object, container, or
+ account) is deleted, a tombstone is set as the latest
+ version of the item. The replicator will see the
+ tombstone and ensure that the item is removed from the
+ entire system.
+ To separate the cluster-internal replication traffic
+ from client traffic, separate replication servers can
+ be used. These replication servers are based on the
+ standard storage servers, but they listen on the
+ replication IP and only respond to REPLICATE requests.
+ Storage servers can serve REPLICATE requests, so an
+ operator can transition to using a separate
+ replication network with no cluster downtime.
+ Replication IP and port information is stored in the
+ ring on a per-node basis. These parameters will be
+ used if they are present, but they are not required.
+ If this information does not exist or is empty for a
+ particular node, the node's standard IP and port will
+ be used for replication.
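+ For example, a device can be added to the ring with a
+ separate replication endpoint using swift-ring-builder; the
+ IPs, ports, and device name below are placeholders, and the
+ exact syntax should be checked against your Swift release:
+
+     # The part after "R" is the replication IP:port for this device.
+     swift-ring-builder object.builder add \
+         r1z1-10.0.0.10:6000R10.1.0.10:6000/sdb1 100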
+ Updaters
+ There are times when container or account data
+ cannot be immediately updated. This usually occurs
+ during failure scenarios or periods of high load. If an
+ update fails, the update is queued locally on the file
+ system, and the updater will process the failed
+ updates. This is where an eventual consistency window
+ will most likely come into play. For example, suppose
+ a container server is under load and a new object is
+ put into the system. The object will be immediately
+ available for reads as soon as the proxy server
+ responds to the client with success. However, the
+ container server has not yet updated the object listing,
+ so the update is queued for later. Container
+ listings, therefore, may not immediately
+ contain the object.
+ In practice, the consistency window is only as long
+ as the interval at which the updater runs, and it may not
+ even be noticed because the proxy server will route listing
+ requests to the first container server that responds.
+ The server under load may not be the one that serves
+ subsequent listing requests – one of the other two
+ replicas may handle the listing.
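+ How often the updater sweeps its local queue is set in the
+ storage server configuration; a minimal sketch for the
+ object updater (the values shown are illustrative):
+
+     # /etc/swift/object-server.conf (excerpt)
+     [object-updater]
+     # Seconds between sweeps of the locally queued, failed updates
+     interval = 300
+     concurrency = 1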
+ Auditors
+ Auditors crawl the local server checking the
+ integrity of the objects, containers, and accounts. If
+ corruption is found (in the case of bit rot, for
+ example), the file is quarantined, and replication
+ will replace the bad file from another replica. If
+ other errors are found, they are logged (for example,
+ an object’s listing cannot be found on any container
+ server where it should be).
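+ Auditor pacing can be throttled in the storage server
+ configuration so that integrity scans do not starve client
+ I/O; a sketch for the object auditor (values are
+ illustrative):
+
+     # /etc/swift/object-server.conf (excerpt)
+     [object-auditor]
+     files_per_second = 20
+     bytes_per_second = 10000000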
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch007-swift-cluster-architecture.xml b/doc/training-guide/module003-ch007-swift-cluster-architecture.xml
new file mode 100644
index 0000000000..7030aa6a9c
--- /dev/null
+++ b/doc/training-guide/module003-ch007-swift-cluster-architecture.xml
@@ -0,0 +1,89 @@
+
+
+ Cluster Architecture
+ Access Tier
+
+ Large-scale deployments segment off an "Access Tier".
+ This tier is the “Grand Central” of the Object Storage
+ system. It fields incoming API requests from clients and
+ moves data in and out of the system. This tier is composed
+ of front-end load balancers, SSL terminators, and
+ authentication services; it also runs the (distributed)
+ brain of the object storage system — the proxy server
+ processes.
+ Having the access servers in their own tier enables
+ read/write access to be scaled out independently of
+ storage capacity. For example, if the cluster is on the
+ public Internet, requires SSL termination, and has high
+ demand for data access, many access servers can be
+ provisioned. However, if the cluster is on a private
+ network and it is being used primarily for archival
+ purposes, fewer access servers are needed.
+ As this is an HTTP addressable storage service, a load
+ balancer can be incorporated into the access tier.
+ Typically, this tier comprises a collection of 1U
+ servers. These machines use a moderate amount of RAM and
+ are network I/O intensive. As these systems field each
+ incoming API request, it is wise to provision them with
+ two high-throughput (10GbE) interfaces. One interface is
+ used for 'front-end' incoming requests and the other for
+ 'back-end' access to the object storage nodes to put and
+ fetch data.
+ Factors to Consider
+ For most publicly facing deployments as well as
+ private deployments available across a wide-reaching
+ corporate network, SSL will be used to encrypt traffic
+ to the client. SSL adds significant processing load when
+ establishing sessions with clients, so more capacity must
+ be provisioned in the access layer. SSL may
+ not be required for private deployments on trusted
+ networks.
+ Storage Nodes
+
+ The next component is the storage servers themselves.
+ Generally, most configurations should give each of the
+ five zones an equal amount of storage capacity.
+ Storage nodes use a reasonable amount of memory and CPU.
+ Metadata needs to be readily available to quickly return
+ objects. The object stores run services not only to field
+ incoming requests from the Access Tier, but to also run
+ replicators, auditors, and reapers. Object stores can be
+ provisioned with a single 1 GbE or a 10 GbE network
+ interface, depending on the expected workload and desired
+ performance.
+ Currently 2TB or 3TB SATA disks deliver good
+ price/performance value. Desktop-grade drives can be used
+ where there are responsive remote hands in the datacenter,
+ and enterprise-grade drives can be used where this is not
+ the case.
+ Factors to Consider
+ Desired I/O performance for single-threaded requests
+ should be kept in mind. This system does not use RAID,
+ so each request for an object is handled by a single
+ disk. Disk performance impacts single-threaded
+ response rates.
+ To achieve higher aggregate throughput, the object
+ storage system is designed with concurrent
+ uploads/downloads in mind. The network I/O capacity
+ (1GbE, bonded 1GbE pair, or 10GbE) should match your
+ desired concurrent throughput needs for reads and
+ writes.
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch008-account-reaper.xml b/doc/training-guide/module003-ch008-account-reaper.xml
new file mode 100644
index 0000000000..971fa4c771
--- /dev/null
+++ b/doc/training-guide/module003-ch008-account-reaper.xml
@@ -0,0 +1,58 @@
+
+
+ Account Reaper
+ The Account Reaper removes data from deleted accounts in the
+ background.
+ An account is marked for deletion by a reseller issuing a
+ DELETE request on the account’s storage URL. This simply puts
+ the value DELETED into the status column of the account_stat
+ table in the account database (and replicas), indicating the
+ data for the account should be deleted later.
+ There is normally no set retention time and no undelete; it
+ is assumed the reseller will implement such features and only
+ call DELETE on the account once it is truly desired the
+ account’s data be removed. However, in order to protect the
+ Swift cluster accounts from an improper or mistaken delete
+ request, you can set a delay_reaping value in the
+ [account-reaper] section of the account-server.conf to delay
+ the actual deletion of data. At this time, there is no utility
+ to undelete an account; one would have to update the account
+ database replicas directly, setting the status column to an
+ empty string and updating the put_timestamp to be greater than
+ the delete_timestamp. (On the TODO list is writing a utility
+ to perform this task, preferably through a REST call.)
+ The account reaper runs on each account server and scans the
+ server occasionally for account databases marked for deletion.
+ It only acts on accounts for which that server is the primary
+ node, so that multiple account servers aren’t all trying
+ to do the same work at the same time. Using multiple servers
+ to delete one account might improve deletion speed, but
+ requires coordination so they aren’t duplicating effort. Speed
+ really isn’t as much of a concern with data deletion and large
+ accounts aren’t deleted that often.
+ The deletion process for an account itself is pretty
+ straightforward. For each container in the account, each
+ object is deleted and then the container is deleted. Any
+ deletion requests that fail won’t stop the overall process,
+ but will cause the overall process to fail eventually (for
+ example, if an object delete times out, the container won’t be
+ able to be deleted later and therefore the account won’t be
+ deleted either). The overall process continues even on a
+ failure so that it doesn’t get hung up reclaiming cluster
+ space because of one troublesome spot. The account reaper will
+ keep trying to delete an account until it eventually becomes
+ empty, at which point the database reclaim process within the
+ db_replicator will eventually remove the database
+ files.
+ Sometimes a persistent error state can prevent some object
+ or container from being deleted. If this happens, you will see
+ a message such as “Account <name> has not been reaped
+ since <date>” in the log. You can control when this is
+ logged with the reap_warn_after value in the [account-reaper]
+ section of the account-server.conf file. By default this is 30
+ days.
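+ Both options mentioned above live in the [account-reaper]
+ section; a minimal sketch (the values shown are the
+ defaults):
+
+     # /etc/swift/account-server.conf (excerpt)
+     [account-reaper]
+     # Seconds to wait after an account is marked DELETED before reaping it
+     delay_reaping = 0
+     # Warn in the log if an account still has not been reaped after this long
+     reap_warn_after = 2592000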
+
\ No newline at end of file
diff --git a/doc/training-guide/module003-ch009-replication.xml b/doc/training-guide/module003-ch009-replication.xml
new file mode 100644
index 0000000000..99956f25b7
--- /dev/null
+++ b/doc/training-guide/module003-ch009-replication.xml
@@ -0,0 +1,101 @@
+
+
+ Replication
+ Because each replica in swift functions independently, and
+ clients generally require only a simple majority of nodes
+ responding to consider an operation successful, transient
+ failures like network partitions can quickly cause replicas to
+ diverge. These differences are eventually reconciled by
+ asynchronous, peer-to-peer replicator processes. The
+ replicator processes traverse their local filesystems,
+ concurrently performing operations in a manner that balances
+ load across physical disks.
+ Replication uses a push model, with records and files
+ generally only being copied from local to remote replicas.
+ This is important because data on the node may not belong
+ there (as in the case of handoffs and ring changes), and a
+ replicator can’t know what data exists elsewhere in the
+ cluster that it should pull in. It’s the duty of any node that
+ contains data to ensure that data gets to where it belongs.
+ Replica placement is handled by the ring.
+ Every deleted record or file in the system is marked by a
+ tombstone, so that deletions can be replicated alongside
+ creations. The replication process cleans up tombstones after
+ a time period known as the consistency window. The consistency
+ window encompasses replication duration and how long a
+ transient failure can remove a node from the cluster. Tombstone cleanup
+ must be tied to replication to reach replica
+ convergence.
+ If a replicator detects that a remote drive has failed, the
+ replicator uses the get_more_nodes interface for the ring to
+ choose an alternate node with which to synchronize. The
+ replicator can maintain desired levels of replication in the
+ face of disk failures, though some replicas may not be in an
+ immediately usable location. Note that the replicator doesn’t
+ maintain desired levels of replication when other failures,
+ such as entire node failures, occur because most failures are
+ transient.
+ Replication is an area of active development, and likely
+ rife with potential improvements to speed and
+ correctness.
+ There are two major classes of replicator: the db
+ replicator, which replicates accounts and containers, and the
+ object replicator, which replicates object data.
+ DB Replication
+ The first step performed by db replication is a low-cost
+ hash comparison to determine whether two replicas already
+ match. Under normal operation, this check is able to
+ verify that most databases in the system are already
+ synchronized very quickly. If the hashes differ, the
+ replicator brings the databases in sync by sharing records
+ added since the last sync point.
+ This sync point is a high water mark noting the last
+ record at which two databases were known to be in sync,
+ and is stored in each database as a tuple of the remote
+ database id and record id. Database ids are unique amongst
+ all replicas of the database, and record ids are
+ monotonically increasing integers. After all new records
+ have been pushed to the remote database, the entire sync
+ table of the local database is pushed, so the remote
+ database can guarantee that it is in sync with everything
+ with which the local database has previously
+ synchronized.
+ If a replica is found to be missing entirely, the whole
+ local database file is transmitted to the peer using
+ rsync(1) and vested with a new unique id.
+ In practice, DB replication can process hundreds of
+ databases per concurrency setting per second (up to the
+ number of available CPUs or disks) and is bound by the
+ number of DB transactions that must be performed.
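+ As a rough illustration of the sync-point bookkeeping (a
+ conceptual model only, not Swift's actual code), replication
+ can be thought of as pushing every record newer than the
+ last point known to be on the peer:
+
+     # Conceptual model of db replication sync points -- not Swift's real code.
+     local_records = {1: 'row-a', 2: 'row-b', 3: 'row-c'}   # record id -> row
+     sync_points = {'remote-db-id': 1}   # last record id known to be on that peer
+
+     def records_to_push(peer_db_id):
+         last_synced = sync_points.get(peer_db_id, -1)
+         return {rid: row for rid, row in local_records.items() if rid > last_synced}
+
+     missing = records_to_push('remote-db-id')   # {2: 'row-b', 3: 'row-c'}
+     # ...push the missing records over HTTP, then advance the sync point...
+     sync_points['remote-db-id'] = max(local_records)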
+ Object Replication
+ The initial implementation of object replication simply
+ performed an rsync to push data from a local partition to
+ all remote servers it was expected to exist on. While this
+ performed adequately at small scale, replication times
+ skyrocketed once directory structures could no longer be
+ held in RAM. We now use a modification of this scheme in
+ which a hash of the contents for each suffix directory is
+ saved to a per-partition hashes file. The hash for a
+ suffix directory is invalidated when the contents of that
+ suffix directory are modified.
+ The object replication process reads in these hash
+ files, calculating any invalidated hashes. It then
+ transmits the hashes to each remote server that should
+ hold the partition, and only suffix directories with
+ differing hashes on the remote server are rsynced. After
+ pushing files to the remote server, the replication
+ process notifies it to recalculate hashes for the rsynced
+ suffix directories.
+ Performance of object replication is generally bound by
+ the number of uncached directories it has to traverse,
+ usually as a result of invalidated suffix directory
+ hashes. Using write volume and partition counts from our
+ running systems, it was designed so that around 2% of the
+ hash space on a normal node will be invalidated per day,
+ which has experimentally given us acceptable replication
+ speeds.
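+ A simplified sketch of the suffix-hash comparison follows;
+ the hashing and directory layout here are illustrative and
+ differ from Swift's real implementation:
+
+     import hashlib
+     import os
+
+     def suffix_hash(suffix_dir):
+         # Hash the file names in one suffix directory (illustrative).
+         md5 = hashlib.md5()
+         for name in sorted(os.listdir(suffix_dir)):
+             md5.update(name.encode('utf-8'))
+         return md5.hexdigest()
+
+     def suffixes_to_rsync(partition_dir, remote_hashes):
+         # Only suffix directories whose hashes differ need to be rsynced.
+         return [s for s in sorted(os.listdir(partition_dir))
+                 if remote_hashes.get(s) != suffix_hash(os.path.join(partition_dir, s))]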
+
\ No newline at end of file
diff --git a/doc/training-guide/st-training-guides.xml b/doc/training-guide/st-training-guides.xml
index 0ce5ed1c31..4fe661c6f8 100644
--- a/doc/training-guide/st-training-guides.xml
+++ b/doc/training-guide/st-training-guides.xml
@@ -73,4 +73,8 @@
+
+
+
+