From 2479ff50accf55bce9c73d6bda4380edfa4252f8 Mon Sep 17 00:00:00 2001 From: Alexandra Settle Date: Fri, 14 Jul 2017 14:29:34 +0100 Subject: [PATCH] Removing the ops-guide from openstack-manuals The ops guide is intended to be moved to the os wiki. This document can be sourced to migrate in the before-migration tag in the repo. Change-Id: I1ca840909438521d0d1da3469f7fa3f2927a6225 TODO: Add landing page detailing the above --- doc-tools-check-languages.conf | 3 +- doc/common/app-support.rst | 2 - doc/ops-guide/setup.cfg | 27 - doc/ops-guide/setup.py | 30 - doc/ops-guide/source/acknowledgements.rst | 51 - doc/ops-guide/source/app-crypt.rst | 536 - doc/ops-guide/source/app-resources.rst | 62 - doc/ops-guide/source/app-roadmaps.rst | 435 - doc/ops-guide/source/app-usecases.rst | 192 - doc/ops-guide/source/appendix.rst | 12 - doc/ops-guide/source/common | 1 - doc/ops-guide/source/conf.py | 297 - .../source/figures/Check_mark_23x20_02.png | Bin 3064 -> 0 bytes .../source/figures/Check_mark_23x20_02.svg | 60 - .../source/figures/create_project.png | Bin 43038 -> 0 bytes .../source/figures/edit_project_member.png | Bin 58307 -> 0 bytes .../source/figures/network_packet_ping.svg | 3 - .../source/figures/neutron_packet_ping.svg | 1734 -- doc/ops-guide/source/figures/os-ref-arch.svg | 3 - .../source/figures/os_physical_network.svg | 3 - doc/ops-guide/source/figures/osog_00in01.png | Bin 530616 -> 0 bytes doc/ops-guide/source/figures/osog_0201.png | Bin 42897 -> 0 bytes doc/ops-guide/source/figures/osog_1201.png | Bin 44524 -> 0 bytes doc/ops-guide/source/figures/osog_1202.png | Bin 185870 -> 0 bytes doc/ops-guide/source/figures/osog_ac01.png | Bin 74091 -> 0 bytes .../figures/provision-an-instance.graffle | Bin 11758 -> 0 bytes .../source/figures/provision-an-instance.png | Bin 1162270 -> 0 bytes .../source/figures/provision-an-instance.svg | 3 - .../figures/releasecyclegrizzlydiagram.png | Bin 60277 -> 0 bytes doc/ops-guide/source/index.rst | 55 - .../source/locale/ja/LC_MESSAGES/ops-guide.po | 13128 ---------------- .../source/ops-advanced-configuration.rst | 151 - doc/ops-guide/source/ops-backup-recovery.rst | 219 - .../source/ops-capacity-planning-scaling.rst | 423 - .../source/ops-customize-compute.rst | 309 - .../source/ops-customize-conclusion.rst | 9 - .../source/ops-customize-dashboard.rst | 8 - .../source/ops-customize-development.rst | 11 - .../source/ops-customize-objectstorage.rst | 341 - .../ops-customize-provision-instance.rst | 12 - doc/ops-guide/source/ops-customize.rst | 45 - .../source/ops-deployment-factors.rst | 299 - doc/ops-guide/source/ops-lay-of-the-land.rst | 602 - .../source/ops-logging-monitoring-summary.rst | 10 - .../source/ops-logging-monitoring.rst | 15 - doc/ops-guide/source/ops-logging-rsyslog.rst | 105 - doc/ops-guide/source/ops-logging.rst | 257 - .../source/ops-maintenance-complete.rst | 50 - .../source/ops-maintenance-compute.rst | 638 - .../source/ops-maintenance-configuration.rst | 29 - .../source/ops-maintenance-controller.rst | 96 - .../source/ops-maintenance-database.rst | 51 - .../source/ops-maintenance-determine.rst | 92 - .../source/ops-maintenance-hardware.rst | 64 - .../source/ops-maintenance-hdmwy.rst | 54 - .../source/ops-maintenance-rabbitmq.rst | 148 - doc/ops-guide/source/ops-maintenance-slow.rst | 92 - .../source/ops-maintenance-storage.rst | 91 - doc/ops-guide/source/ops-maintenance.rst | 23 - doc/ops-guide/source/ops-monitoring.rst | 437 - .../source/ops-network-troubleshooting.rst | 1095 -- doc/ops-guide/source/ops-planning.rst | 252 - 
.../source/ops-projects-users-summary.rst | 11 - doc/ops-guide/source/ops-projects-users.rst | 33 - doc/ops-guide/source/ops-projects.rst | 44 - doc/ops-guide/source/ops-quotas.rst | 451 - doc/ops-guide/source/ops-uninstall.rst | 18 - doc/ops-guide/source/ops-upgrades.rst | 553 - .../source/ops-user-facing-operations.rst | 2295 --- doc/ops-guide/source/ops-users.rst | 253 - doc/ops-guide/source/preface.rst | 410 - tools/build-all-rst.sh | 7 +- tools/publishdocs.sh | 1 - www/.htaccess | 3 + 74 files changed, 7 insertions(+), 26737 deletions(-) delete mode 100644 doc/ops-guide/setup.cfg delete mode 100644 doc/ops-guide/setup.py delete mode 100644 doc/ops-guide/source/acknowledgements.rst delete mode 100644 doc/ops-guide/source/app-crypt.rst delete mode 100644 doc/ops-guide/source/app-resources.rst delete mode 100644 doc/ops-guide/source/app-roadmaps.rst delete mode 100644 doc/ops-guide/source/app-usecases.rst delete mode 100644 doc/ops-guide/source/appendix.rst delete mode 120000 doc/ops-guide/source/common delete mode 100644 doc/ops-guide/source/conf.py delete mode 100644 doc/ops-guide/source/figures/Check_mark_23x20_02.png delete mode 100644 doc/ops-guide/source/figures/Check_mark_23x20_02.svg delete mode 100644 doc/ops-guide/source/figures/create_project.png delete mode 100644 doc/ops-guide/source/figures/edit_project_member.png delete mode 100644 doc/ops-guide/source/figures/network_packet_ping.svg delete mode 100644 doc/ops-guide/source/figures/neutron_packet_ping.svg delete mode 100644 doc/ops-guide/source/figures/os-ref-arch.svg delete mode 100644 doc/ops-guide/source/figures/os_physical_network.svg delete mode 100644 doc/ops-guide/source/figures/osog_00in01.png delete mode 100644 doc/ops-guide/source/figures/osog_0201.png delete mode 100644 doc/ops-guide/source/figures/osog_1201.png delete mode 100644 doc/ops-guide/source/figures/osog_1202.png delete mode 100644 doc/ops-guide/source/figures/osog_ac01.png delete mode 100644 doc/ops-guide/source/figures/provision-an-instance.graffle delete mode 100644 doc/ops-guide/source/figures/provision-an-instance.png delete mode 100644 doc/ops-guide/source/figures/provision-an-instance.svg delete mode 100644 doc/ops-guide/source/figures/releasecyclegrizzlydiagram.png delete mode 100644 doc/ops-guide/source/index.rst delete mode 100644 doc/ops-guide/source/locale/ja/LC_MESSAGES/ops-guide.po delete mode 100644 doc/ops-guide/source/ops-advanced-configuration.rst delete mode 100644 doc/ops-guide/source/ops-backup-recovery.rst delete mode 100644 doc/ops-guide/source/ops-capacity-planning-scaling.rst delete mode 100644 doc/ops-guide/source/ops-customize-compute.rst delete mode 100644 doc/ops-guide/source/ops-customize-conclusion.rst delete mode 100644 doc/ops-guide/source/ops-customize-dashboard.rst delete mode 100644 doc/ops-guide/source/ops-customize-development.rst delete mode 100644 doc/ops-guide/source/ops-customize-objectstorage.rst delete mode 100644 doc/ops-guide/source/ops-customize-provision-instance.rst delete mode 100644 doc/ops-guide/source/ops-customize.rst delete mode 100644 doc/ops-guide/source/ops-deployment-factors.rst delete mode 100644 doc/ops-guide/source/ops-lay-of-the-land.rst delete mode 100644 doc/ops-guide/source/ops-logging-monitoring-summary.rst delete mode 100644 doc/ops-guide/source/ops-logging-monitoring.rst delete mode 100644 doc/ops-guide/source/ops-logging-rsyslog.rst delete mode 100644 doc/ops-guide/source/ops-logging.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-complete.rst delete mode 100644 
doc/ops-guide/source/ops-maintenance-compute.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-configuration.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-controller.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-database.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-determine.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-hardware.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-hdmwy.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-rabbitmq.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-slow.rst delete mode 100644 doc/ops-guide/source/ops-maintenance-storage.rst delete mode 100644 doc/ops-guide/source/ops-maintenance.rst delete mode 100644 doc/ops-guide/source/ops-monitoring.rst delete mode 100644 doc/ops-guide/source/ops-network-troubleshooting.rst delete mode 100644 doc/ops-guide/source/ops-planning.rst delete mode 100644 doc/ops-guide/source/ops-projects-users-summary.rst delete mode 100644 doc/ops-guide/source/ops-projects-users.rst delete mode 100644 doc/ops-guide/source/ops-projects.rst delete mode 100644 doc/ops-guide/source/ops-quotas.rst delete mode 100644 doc/ops-guide/source/ops-uninstall.rst delete mode 100644 doc/ops-guide/source/ops-upgrades.rst delete mode 100644 doc/ops-guide/source/ops-user-facing-operations.rst delete mode 100644 doc/ops-guide/source/ops-users.rst delete mode 100644 doc/ops-guide/source/preface.rst diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf index 59fc1861fc..852ecb2983 100644 --- a/doc-tools-check-languages.conf +++ b/doc-tools-check-languages.conf @@ -8,7 +8,7 @@ declare -A BOOKS=( ["de"]="install-guide" ["fr"]="install-guide" ["id"]="image-guide install-guide" - ["ja"]="ha-guide image-guide install-guide ops-guide" + ["ja"]="ha-guide image-guide install-guide" ["ko_KR"]="install-guide" ["ru"]="install-guide" ["tr_TR"]="image-guide install-guide arch-design" @@ -47,7 +47,6 @@ declare -A SPECIAL_BOOKS=( ["image-guide"]="RST" ["install-guide"]="RST" ["networking-guide"]="RST" - ["ops-guide"]="RST" # Do not translate for now, we need to fix our scripts first to # generate the content properly. 
["install-guide-debconf"]="skip" diff --git a/doc/common/app-support.rst b/doc/common/app-support.rst index dc58f8f3af..61492745b2 100644 --- a/doc/common/app-support.rst +++ b/doc/common/app-support.rst @@ -50,8 +50,6 @@ The following books explain how to configure and run an OpenStack cloud: * `Configuration Reference `_ -* `Operations Guide `_ - * `Networking Guide `_ * `High Availability Guide `_ diff --git a/doc/ops-guide/setup.cfg b/doc/ops-guide/setup.cfg deleted file mode 100644 index 6747b30b6c..0000000000 --- a/doc/ops-guide/setup.cfg +++ /dev/null @@ -1,27 +0,0 @@ -[metadata] -name = openstackopsguide -summary = OpenStack Operations Guide -author = OpenStack -author-email = openstack-docs@lists.openstack.org -home-page = https://docs.openstack.org/ -classifier = -Environment :: OpenStack -Intended Audience :: Information Technology -Intended Audience :: System Administrators -License :: OSI Approved :: Apache Software License -Operating System :: POSIX :: Linux -Topic :: Documentation - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[files] - -[build_sphinx] -warning-is-error = 1 -build-dir = build -source-dir = source - -[wheel] -universal = 1 diff --git a/doc/ops-guide/setup.py b/doc/ops-guide/setup.py deleted file mode 100644 index 736375744d..0000000000 --- a/doc/ops-guide/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/doc/ops-guide/source/acknowledgements.rst b/doc/ops-guide/source/acknowledgements.rst deleted file mode 100644 index ad027b7809..0000000000 --- a/doc/ops-guide/source/acknowledgements.rst +++ /dev/null @@ -1,51 +0,0 @@ -================ -Acknowledgements -================ - -The OpenStack Foundation supported the creation of this book with plane -tickets to Austin, lodging (including one adventurous evening without -power after a windstorm), and delicious food. For about USD $10,000, we -could collaborate intensively for a week in the same room at the -Rackspace Austin office. The authors are all members of the OpenStack -Foundation, which you can join. Go to the `Foundation web -site `_. - -We want to acknowledge our excellent host Rackers at Rackspace in -Austin: - -- Emma Richards of Rackspace Guest Relations took excellent care of our - lunch orders and even set aside a pile of sticky notes that had - fallen off the walls. - -- Betsy Hagemeier, a Fanatical Executive Assistant, took care of a room - reshuffle and helped us settle in for the week. 
- -- The Real Estate team at Rackspace in Austin, also known as "The - Victors," were super responsive. - -- Adam Powell in Racker IT supplied us with bandwidth each day and - second monitors for those of us needing more screens. - -- On Wednesday night we had a fun happy hour with the Austin OpenStack - Meetup group and Racker Katie Schmidt took great care of our group. - -We also had some excellent input from outside of the room: - -- Tim Bell from CERN gave us feedback on the outline before we started - and reviewed it mid-week. - -- Sébastien Han has written excellent blogs and generously gave his - permission for re-use. - -- Oisin Feeley read it, made some edits, and provided emailed feedback - right when we asked. - -Inside the book sprint room with us each day was our book sprint -facilitator Adam Hyde. Without his tireless support and encouragement, -we would have thought a book of this scope was impossible in five days. -Adam has proven the book sprint method effectively again and again. He -creates both tools and faith in collaborative authoring at -`www.booksprints.net `_. - -We couldn't have pulled it off without so much supportive help and -encouragement. diff --git a/doc/ops-guide/source/app-crypt.rst b/doc/ops-guide/source/app-crypt.rst deleted file mode 100644 index 35480419d1..0000000000 --- a/doc/ops-guide/source/app-crypt.rst +++ /dev/null @@ -1,536 +0,0 @@ -================================= -Tales From the Cryp^H^H^H^H Cloud -================================= - -Herein lies a selection of tales from OpenStack cloud operators. Read, -and learn from their wisdom. - -Double VLAN -~~~~~~~~~~~ - -I was on-site in Kelowna, British Columbia, Canada setting up a new -OpenStack cloud. The deployment was fully automated: Cobbler deployed -the OS on the bare metal, bootstrapped it, and Puppet took over from -there. I had run the deployment scenario so many times in practice and -took for granted that everything was working. - -On my last day in Kelowna, I was in a conference call from my hotel. In -the background, I was fooling around on the new cloud. I launched an -instance and logged in. Everything looked fine. Out of boredom, I ran -:command:`ps aux` and all of the sudden the instance locked up. - -Thinking it was just a one-off issue, I terminated the instance and -launched a new one. By then, the conference call ended and I was off to -the data center. - -At the data center, I was finishing up some tasks and remembered the -lock-up. I logged into the new instance and ran :command:`ps aux` again. -It worked. Phew. I decided to run it one more time. It locked up. - -After reproducing the problem several times, I came to the unfortunate -conclusion that this cloud did indeed have a problem. Even worse, my -time was up in Kelowna and I had to return back to Calgary. - -Where do you even begin troubleshooting something like this? An instance -that just randomly locks up when a command is issued. Is it the image? -Nope—it happens on all images. Is it the compute node? Nope—all nodes. -Is the instance locked up? No! New SSH connections work just fine! - -We reached out for help. A networking engineer suggested it was an MTU -issue. Great! MTU! Something to go on! What's MTU and why would it cause -a problem? - -MTU is maximum transmission unit. It specifies the maximum number of -bytes that the interface accepts for each packet. If two interfaces have -two different MTUs, bytes might get chopped off and weird things -happen—such as random session lockups. - -.. 
note:: - - Not all packets have a size of 1500. Running the :command:`ls` command over - SSH might only create a single packets less than 1500 bytes. - However, running a command with heavy output, such as :command:`ps aux` - requires several packets of 1500 bytes. - -OK, so where is the MTU issue coming from? Why haven't we seen this in -any other deployment? What's new in this situation? Well, new data -center, new uplink, new switches, new model of switches, new servers, -first time using this model of servers… so, basically everything was -new. Wonderful. We toyed around with raising the MTU at various areas: -the switches, the NICs on the compute nodes, the virtual NICs in the -instances, we even had the data center raise the MTU for our uplink -interface. Some changes worked, some didn't. This line of -troubleshooting didn't feel right, though. We shouldn't have to be -changing the MTU in these areas. - -As a last resort, our network admin (Alvaro) and myself sat down with -four terminal windows, a pencil, and a piece of paper. In one window, we -ran ping. In the second window, we ran ``tcpdump`` on the cloud -controller. In the third, ``tcpdump`` on the compute node. And the forth -had ``tcpdump`` on the instance. For background, this cloud was a -multi-node, non-multi-host setup. - -One cloud controller acted as a gateway to all compute nodes. -VlanManager was used for the network config. This means that the cloud -controller and all compute nodes had a different VLAN for each OpenStack -project. We used the ``-s`` option of ``ping`` to change the packet -size. We watched as sometimes packets would fully return, sometimes they'd -only make it out and never back in, and sometimes the packets would stop at a -random point. We changed ``tcpdump`` to start displaying the hex dump of -the packet. We pinged between every combination of outside, controller, -compute, and instance. - -Finally, Alvaro noticed something. When a packet from the outside hits -the cloud controller, it should not be configured with a VLAN. We -verified this as true. When the packet went from the cloud controller to -the compute node, it should only have a VLAN if it was destined for an -instance. This was still true. When the ping reply was sent from the -instance, it should be in a VLAN. True. When it came back to the cloud -controller and on its way out to the Internet, it should no longer have -a VLAN. False. Uh oh. It looked as though the VLAN part of the packet -was not being removed. - -That made no sense. - -While bouncing this idea around in our heads, I was randomly typing -commands on the compute node: - -.. code-block:: console - - $ ip a - … - 10: vlan100@vlan20: mtu 1500 qdisc noqueue master br100 state UP - … - -"Hey Alvaro, can you run a VLAN on top of a VLAN?" - -"If you did, you'd add an extra 4 bytes to the packet…" - -Then it all made sense… - -.. code-block:: console - - $ grep vlan_interface /etc/nova/nova.conf - vlan_interface=vlan20 - -In ``nova.conf``, ``vlan_interface`` specifies what interface OpenStack -should attach all VLANs to. The correct setting should have been: - -.. code-block:: ini - - vlan_interface=bond0 - -As this would be the server's bonded NIC. - -vlan20 is the VLAN that the data center gave us for outgoing Internet -access. It's a correct VLAN and is also attached to bond0. - -By mistake, I configured OpenStack to attach all tenant VLANs to vlan20 -instead of bond0 thereby stacking one VLAN on top of another. 
This added -an extra 4 bytes to each packet and caused a packet of 1504 bytes to be -sent out which would cause problems when it arrived at an interface that -only accepted 1500. - -As soon as this setting was fixed, everything worked. - -"The Issue" -~~~~~~~~~~~ - -At the end of August 2012, a post-secondary school in Alberta, Canada -migrated its infrastructure to an OpenStack cloud. As luck would have -it, within the first day or two of it running, one of their servers just -disappeared from the network. Blip. Gone. - -After restarting the instance, everything was back up and running. We -reviewed the logs and saw that at some point, network communication -stopped and then everything went idle. We chalked this up to a random -occurrence. - -A few nights later, it happened again. - -We reviewed both sets of logs. The one thing that stood out the most was -DHCP. At the time, OpenStack, by default, set DHCP leases for one minute -(it's now two minutes). This means that every instance contacts the -cloud controller (DHCP server) to renew its fixed IP. For some reason, -this instance could not renew its IP. We correlated the instance's logs -with the logs on the cloud controller and put together a conversation: - -#. Instance tries to renew IP. - -#. Cloud controller receives the renewal request and sends a response. - -#. Instance "ignores" the response and re-sends the renewal request. - -#. Cloud controller receives the second request and sends a new - response. - -#. Instance begins sending a renewal request to ``255.255.255.255`` - since it hasn't heard back from the cloud controller. - -#. The cloud controller receives the ``255.255.255.255`` request and - sends a third response. - -#. The instance finally gives up. - -With this information in hand, we were sure that the problem had to do -with DHCP. We thought that for some reason, the instance wasn't getting -a new IP address and with no IP, it shut itself off from the network. - -A quick Google search turned up this: `DHCP lease errors in VLAN -mode `_ -which further supported our DHCP theory. - -An initial idea was to just increase the lease time. If the instance -only renewed once every week, the chances of this problem happening -would be tremendously smaller than every minute. This didn't solve the -problem, though. It was just covering the problem up. - -We decided to have ``tcpdump`` run on this instance and see if we could -catch it in action again. Sure enough, we did. - -The ``tcpdump`` looked very, very weird. In short, it looked as though -network communication stopped before the instance tried to renew its IP. -Since there is so much DHCP chatter from a one minute lease, it's very -hard to confirm it, but even with only milliseconds difference between -packets, if one packet arrives first, it arrived first, and if that -packet reported network issues, then it had to have happened before -DHCP. - -Additionally, this instance in question was responsible for a very, very -large backup job each night. While "The Issue" (as we were now calling -it) didn't happen exactly when the backup happened, it was close enough -(a few hours) that we couldn't ignore it. - -Further days go by and we catch The Issue in action more and more. We -find that dhclient is not running after The Issue happens. Now we're -back to thinking it's a DHCP issue. Running ``/etc/init.d/networking`` -restart brings everything back up and running. - -Ever have one of those days where all of the sudden you get the Google -results you were looking for? 
Well, that's what happened here. I was -looking for information on dhclient and why it dies when it can't renew -its lease and all of the sudden I found a bunch of OpenStack and dnsmasq -discussions that were identical to the problem we were seeing! - -`Problem with Heavy Network IO and -Dnsmasq `_. - -`instances losing IP address while running, due to No -DHCPOFFER `_. - -Seriously, Google. - -This bug report was the key to everything: `KVM images lose connectivity -with bridged -network `_. - -It was funny to read the report. It was full of people who had some -strange network problem but didn't quite explain it in the same way. - -So it was a qemu/kvm bug. - -At the same time of finding the bug report, a co-worker was able to -successfully reproduce The Issue! How? He used ``iperf`` to spew a ton -of bandwidth at an instance. Within 30 minutes, the instance just -disappeared from the network. - -Armed with a patched qemu and a way to reproduce, we set out to see if -we've finally solved The Issue. After 48 hours straight of hammering the -instance with bandwidth, we were confident. The rest is history. You can -search the bug report for "joe" to find my comments and actual tests. - -Disappearing Images -~~~~~~~~~~~~~~~~~~~ - -At the end of 2012, Cybera (a nonprofit with a mandate to oversee the -development of cyberinfrastructure in Alberta, Canada) deployed an -updated OpenStack cloud for their `DAIR -project `_. A few days into -production, a compute node locks up. Upon rebooting the node, I checked -to see what instances were hosted on that node so I could boot them on -behalf of the customer. Luckily, only one instance. - -The :command:`nova reboot` command wasn't working, so I used :command:`virsh`, -but it immediately came back with an error saying it was unable to find the -backing disk. In this case, the backing disk is the Glance image that is -copied to ``/var/lib/nova/instances/_base`` when the image is used for -the first time. Why couldn't it find it? I checked the directory and -sure enough it was gone. - -I reviewed the ``nova`` database and saw the instance's entry in the -``nova.instances`` table. The image that the instance was using matched -what virsh was reporting, so no inconsistency there. - -I checked Glance and noticed that this image was a snapshot that the -user created. At least that was good news—this user would have been the -only user affected. - -Finally, I checked StackTach and reviewed the user's events. They had -created and deleted several snapshots—most likely experimenting. -Although the timestamps didn't match up, my conclusion was that they -launched their instance and then deleted the snapshot and it was somehow -removed from ``/var/lib/nova/instances/_base``. None of that made sense, -but it was the best I could come up with. - -It turns out the reason that this compute node locked up was a hardware -issue. We removed it from the DAIR cloud and called Dell to have it -serviced. Dell arrived and began working. Somehow or another (or a fat -finger), a different compute node was bumped and rebooted. Great. - -When this node fully booted, I ran through the same scenario of seeing -what instances were running so I could turn them back on. There were a -total of four. Three booted and one gave an error. It was the same error -as before: unable to find the backing disk. Seriously, what? - -Again, it turns out that the image was a snapshot. The three other -instances that successfully started were standard cloud images. Was it a -problem with snapshots? 
That didn't make sense. - -A note about DAIR's architecture: ``/var/lib/nova/instances`` is a -shared NFS mount. This means that all compute nodes have access to it, -which includes the ``_base`` directory. Another centralized area is -``/var/log/rsyslog`` on the cloud controller. This directory collects -all OpenStack logs from all compute nodes. I wondered if there were any -entries for the file that :command:`virsh` is reporting: - -.. code-block:: console - - dair-ua-c03/nova.log:Dec 19 12:10:59 dair-ua-c03 - 2012-12-19 12:10:59 INFO nova.virt.libvirt.imagecache - [-] Removing base file: - /var/lib/nova/instances/_base/7b4783508212f5d242cbf9ff56fb8d33b4ce6166_10 - -Ah-hah! So OpenStack was deleting it. But why? - -A feature was introduced in Essex to periodically check and see if there -were any ``_base`` files not in use. If there were, OpenStack Compute -would delete them. This idea sounds innocent enough and has some good -qualities to it. But how did this feature end up turned on? It was -disabled by default in Essex. As it should be. It was `decided to be -turned on in Folsom `_. -I cannot emphasize enough that: - -*Actions which delete things should not be enabled by default.* - -Disk space is cheap these days. Data recovery is not. - -Secondly, DAIR's shared ``/var/lib/nova/instances`` directory -contributed to the problem. Since all compute nodes have access to this -directory, all compute nodes periodically review the \_base directory. -If there is only one instance using an image, and the node that the -instance is on is down for a few minutes, it won't be able to mark the -image as still in use. Therefore, the image seems like it's not in use -and is deleted. When the compute node comes back online, the instance -hosted on that node is unable to start. - -The Valentine's Day Compute Node Massacre -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although the title of this story is much more dramatic than the actual -event, I don't think, or hope, that I'll have the opportunity to use -"Valentine's Day Massacre" again in a title. - -This past Valentine's Day, I received an alert that a compute node was -no longer available in the cloud—meaning, - -.. code-block:: console - - $ openstack compute service list - -showed this particular node in a down state. - -I logged into the cloud controller and was able to both ``ping`` and SSH -into the problematic compute node which seemed very odd. Usually if I -receive this type of alert, the compute node has totally locked up and -would be inaccessible. - -After a few minutes of troubleshooting, I saw the following details: - -- A user recently tried launching a CentOS instance on that node - -- This user was the only user on the node (new node) - -- The load shot up to 8 right before I received the alert - -- The bonded 10gb network device (bond0) was in a DOWN state - -- The 1gb NIC was still alive and active - -I looked at the status of both NICs in the bonded pair and saw that -neither was able to communicate with the switch port. Seeing as how each -NIC in the bond is connected to a separate switch, I thought that the -chance of a switch port dying on each switch at the same time was quite -improbable. I concluded that the 10gb dual port NIC had died and needed -replaced. I created a ticket for the hardware support department at the -data center where the node was hosted. I felt lucky that this was a new -node and no one else was hosted on it yet. - -An hour later I received the same alert, but for another compute node. -Crap. 
OK, now there's definitely a problem going on. Just like the -original node, I was able to log in by SSH. The bond0 NIC was DOWN but -the 1gb NIC was active. - -And the best part: the same user had just tried creating a CentOS -instance. What? - -I was totally confused at this point, so I texted our network admin to -see if he was available to help. He logged in to both switches and -immediately saw the problem: the switches detected spanning tree packets -coming from the two compute nodes and immediately shut the ports down to -prevent spanning tree loops: - -.. code-block:: console - - Feb 15 01:40:18 SW-1 Stp: %SPANTREE-4-BLOCK_BPDUGUARD: Received BPDU packet on Port-Channel35 with BPDU guard enabled. Disabling interface. (source mac fa:16:3e:24:e7:22) - Feb 15 01:40:18 SW-1 Ebra: %ETH-4-ERRDISABLE: bpduguard error detected on Port-Channel35. - Feb 15 01:40:18 SW-1 Mlag: %MLAG-4-INTF_INACTIVE_LOCAL: Local interface Port-Channel35 is link down. MLAG 35 is inactive. - Feb 15 01:40:18 SW-1 Ebra: %LINEPROTO-5-UPDOWN: Line protocol on Interface Port-Channel35 (Server35), changed state to down - Feb 15 01:40:19 SW-1 Stp: %SPANTREE-6-INTERFACE_DEL: Interface Port-Channel35 has been removed from instance MST0 - Feb 15 01:40:19 SW-1 Ebra: %LINEPROTO-5-UPDOWN: Line protocol on Interface Ethernet35 (Server35), changed state to down - -He re-enabled the switch ports and the two compute nodes immediately -came back to life. - -Unfortunately, this story has an open ending... we're still looking into -why the CentOS image was sending out spanning tree packets. Further, -we're researching a proper way on how to mitigate this from happening. -It's a bigger issue than one might think. While it's extremely important -for switches to prevent spanning tree loops, it's very problematic to -have an entire compute node be cut from the network when this happens. -If a compute node is hosting 100 instances and one of them sends a -spanning tree packet, that instance has effectively DDOS'd the other 99 -instances. - -This is an ongoing and hot topic in networking circles —especially with -the raise of virtualization and virtual switches. - -Down the Rabbit Hole -~~~~~~~~~~~~~~~~~~~~ - -Users being able to retrieve console logs from running instances is a -boon for support—many times they can figure out what's going on inside -their instance and fix what's going on without bothering you. -Unfortunately, sometimes overzealous logging of failures can cause -problems of its own. - -A report came in: VMs were launching slowly, or not at all. Cue the -standard checks—nothing on the Nagios, but there was a spike in network -towards the current master of our RabbitMQ cluster. Investigation -started, but soon the other parts of the queue cluster were leaking -memory like a sieve. Then the alert came in—the master Rabbit server -went down and connections failed over to the slave. - -At that time, our control services were hosted by another team and we -didn't have much debugging information to determine what was going on -with the master, and we could not reboot it. That team noted that it -failed without alert, but managed to reboot it. After an hour, the -cluster had returned to its normal state and we went home for the day. - -Continuing the diagnosis the next morning was kick started by another -identical failure. We quickly got the message queue running again, and -tried to work out why Rabbit was suffering from so much network traffic. -Enabling debug logging on nova-api quickly brought understanding. 
A -``tail -f /var/log/nova/nova-api.log`` was scrolling by faster -than we'd ever seen before. CTRL+C on that and we could plainly see the -contents of a system log spewing failures over and over again - a system -log from one of our users' instances. - -After finding the instance ID we headed over to -``/var/lib/nova/instances`` to find the ``console.log``: - -.. code-block:: console - - adm@cc12:/var/lib/nova/instances/instance-00000e05# wc -l console.log - 92890453 console.log - adm@cc12:/var/lib/nova/instances/instance-00000e05# ls -sh console.log - 5.5G console.log - -Sure enough, the user had been periodically refreshing the console log -page on the dashboard and the 5G file was traversing the Rabbit cluster -to get to the dashboard. - -We called them and asked them to stop for a while, and they were happy -to abandon the horribly broken VM. After that, we started monitoring the -size of console logs. - -To this day, `the issue `__ -doesn't have a permanent resolution, but we look forward to the discussion -at the next summit. - -Havana Haunted by the Dead -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Felix Lee of Academia Sinica Grid Computing Centre in Taiwan contributed -this story. - -I just upgraded OpenStack from Grizzly to Havana 2013.2-2 using the RDO -repository and everything was running pretty well—except the EC2 API. - -I noticed that the API would suffer from a heavy load and respond slowly -to particular EC2 requests such as ``RunInstances``. - -Output from ``/var/log/nova/nova-api.log`` on :term:`Havana`: - -.. code-block:: console - - 2014-01-10 09:11:45.072 129745 INFO nova.ec2.wsgi.server - [req-84d16d16-3808-426b-b7af-3b90a11b83b0 - 0c6e7dba03c24c6a9bce299747499e8a 7052bd6714e7460caeb16242e68124f9] - 117.103.103.29 "GET - /services/Cloud?AWSAccessKeyId=[something]&Action=RunInstances&ClientToken=[something]&ImageId=ami-00000001&InstanceInitiatedShutdownBehavior=terminate... - HTTP/1.1" status: 200 len: 1109 time: 138.5970151 - -This request took over two minutes to process, but executed quickly on -another co-existing Grizzly deployment using the same hardware and -system configuration. - -Output from ``/var/log/nova/nova-api.log`` on :term:`Grizzly`: - -.. code-block:: console - - 2014-01-08 11:15:15.704 INFO nova.ec2.wsgi.server - [req-ccac9790-3357-4aa8-84bd-cdaab1aa394e - ebbd729575cb404081a45c9ada0849b7 8175953c209044358ab5e0ec19d52c37] - 117.103.103.29 "GET - /services/Cloud?AWSAccessKeyId=[something]&Action=RunInstances&ClientToken=[something]&ImageId=ami-00000007&InstanceInitiatedShutdownBehavior=terminate... - HTTP/1.1" status: 200 len: 931 time: 3.9426181 - -While monitoring system resources, I noticed a significant increase in -memory consumption while the EC2 API processed this request. I thought -it wasn't handling memory properly—possibly not releasing memory. If the -API received several of these requests, memory consumption quickly grew -until the system ran out of RAM and began using swap. Each node has 48 -GB of RAM and the "nova-api" process would consume all of it within -minutes. Once this happened, the entire system would become unusably -slow until I restarted the nova-api service. - -So, I found myself wondering what changed in the EC2 API on Havana that -might cause this to happen. Was it a bug or a normal behavior that I now -need to work around? - -After digging into the nova (OpenStack Compute) code, I noticed two -areas in ``api/ec2/cloud.py`` potentially impacting my system: - -.. 
code-block:: python - - instances = self.compute_api.get_all(context, - search_opts=search_opts, - sort_dir='asc') - - sys_metas = self.compute_api.get_all_system_metadata( - context, search_filts=[{'key': ['EC2_client_token']}, - {'value': [client_token]}]) - -Since my database contained many records—over 1 million metadata records -and over 300,000 instance records in "deleted" or "errored" states—each -search took a long time. I decided to clean up the database by first -archiving a copy for backup and then performing some deletions using the -MySQL client. For example, I ran the following SQL command to remove -rows of instances deleted for over a year: - -.. code-block:: console - - mysql> delete from nova.instances where deleted=1 and terminated_at < (NOW() - INTERVAL 1 YEAR); - -Performance increased greatly after deleting the old records and my new -deployment continues to behave well. diff --git a/doc/ops-guide/source/app-resources.rst b/doc/ops-guide/source/app-resources.rst deleted file mode 100644 index 1c998987a7..0000000000 --- a/doc/ops-guide/source/app-resources.rst +++ /dev/null @@ -1,62 +0,0 @@ -========= -Resources -========= - -OpenStack -~~~~~~~~~ - -- `OpenStack Installation Tutorial for openSUSE and SUSE Linux Enterprise - Server `_ - -- `OpenStack Installation Tutorial for Red Hat Enterprise Linux and CentOS - `_ - -- `OpenStack Installation Tutorial for Ubuntu - Server `_ - -- `OpenStack Administrator Guide `_ - -- `OpenStack Cloud Computing Cookbook (Packt - Publishing) `_ - -Cloud (General) -~~~~~~~~~~~~~~~ - -- `The NIST Definition of Cloud - Computing `_ - -Python -~~~~~~ - -- `Dive Into Python (Apress) `_ - -Networking -~~~~~~~~~~ - -- `TCP/IP Illustrated, Volume 1: The Protocols, 2/E - (Pearson) `_ - -- `The TCP/IP Guide (No Starch - Press) `_ - -- `A tcpdump Tutorial and - Primer `_ - -Systems Administration -~~~~~~~~~~~~~~~~~~~~~~ - -- `UNIX and Linux Systems Administration Handbook (Prentice - Hall) `_ - -Virtualization -~~~~~~~~~~~~~~ - -- `The Book of Xen (No Starch - Press) `_ - -Configuration Management -~~~~~~~~~~~~~~~~~~~~~~~~ - -- `Puppet Labs Documentation `_ - -- `Pro Puppet (Apress) `_ diff --git a/doc/ops-guide/source/app-roadmaps.rst b/doc/ops-guide/source/app-roadmaps.rst deleted file mode 100644 index 48d28e574f..0000000000 --- a/doc/ops-guide/source/app-roadmaps.rst +++ /dev/null @@ -1,435 +0,0 @@ -===================== -Working with Roadmaps -===================== - -The good news: OpenStack has unprecedented transparency when it comes to -providing information about what's coming up. The bad news: each release -moves very quickly. The purpose of this appendix is to highlight some of -the useful pages to track, and take an educated guess at what is coming -up in the next release and perhaps further afield. - -OpenStack follows a six month release cycle, typically releasing in -April/May and October/November each year. At the start of each cycle, -the community gathers in a single location for a design summit. At the -summit, the features for the coming releases are discussed, prioritized, -and planned. The below figure shows an example release cycle, with dates -showing milestone releases, code freeze, and string freeze dates, along -with an example of when the summit occurs. Milestones are interim releases -within the cycle that are available as packages for download and -testing. Code freeze is putting a stop to adding new features to the -release. String freeze is putting a stop to changing any strings within -the source code. - -.. 
image:: figures/osog_ac01.png - :width: 100% - - -Information Available to You -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are several good sources of information available that you can use -to track your OpenStack development desires. - -Release notes are maintained on the OpenStack wiki, and also shown here: - -.. list-table:: - :widths: 25 25 25 25 - :header-rows: 1 - - * - Series - - Status - - Releases - - Date - * - Liberty - - `Under Development - `_ - - 2015.2 - - Oct, 2015 - * - Kilo - - `Current stable release, security-supported - `_ - - `2015.1 `_ - - Apr 30, 2015 - * - Juno - - `Security-supported - `_ - - `2014.2 `_ - - Oct 16, 2014 - * - Icehouse - - `End-of-life - `_ - - `2014.1 `_ - - Apr 17, 2014 - * - - - - - `2014.1.1 `_ - - Jun 9, 2014 - * - - - - - `2014.1.2 `_ - - Aug 8, 2014 - * - - - - - `2014.1.3 `_ - - Oct 2, 2014 - * - Havana - - End-of-life - - `2013.2 `_ - - Apr 4, 2013 - * - - - - - `2013.2.1 `_ - - Dec 16, 2013 - * - - - - - `2013.2.2 `_ - - Feb 13, 2014 - * - - - - - `2013.2.3 `_ - - Apr 3, 2014 - * - - - - - `2013.2.4 `_ - - Sep 22, 2014 - * - - - - - `2013.2.1 `_ - - Dec 16, 2013 - * - Grizzly - - End-of-life - - `2013.1 `_ - - Apr 4, 2013 - * - - - - - `2013.1.1 `_ - - May 9, 2013 - * - - - - - `2013.1.2 `_ - - Jun 6, 2013 - * - - - - - `2013.1.3 `_ - - Aug 8, 2013 - * - - - - - `2013.1.4 `_ - - Oct 17, 2013 - * - - - - - `2013.1.5 `_ - - Mar 20, 2015 - * - Folsom - - End-of-life - - `2012.2 `_ - - Sep 27, 2012 - * - - - - - `2012.2.1 `_ - - Nov 29, 2012 - * - - - - - `2012.2.2 `_ - - Dec 13, 2012 - * - - - - - `2012.2.3 `_ - - Jan 31, 2013 - * - - - - - `2012.2.4 `_ - - Apr 11, 2013 - * - Essex - - End-of-life - - `2012.1 `_ - - Apr 5, 2012 - * - - - - - `2012.1.1 `_ - - Jun 22, 2012 - * - - - - - `2012.1.2 `_ - - Aug 10, 2012 - * - - - - - `2012.1.3 `_ - - Oct 12, 2012 - * - Diablo - - Deprecated - - `2011.3 `_ - - Sep 22, 2011 - * - - - - - `2011.3.1 `_ - - Jan 19, 2012 - * - Cactus - - Deprecated - - `2011.2 `_ - - Apr 15, 2011 - * - Bexar - - Deprecated - - `2011.1 `_ - - Feb 3, 2011 - * - Austin - - Deprecated - - `2010.1 `_ - - Oct 21, 2010 - -Here are some other resources: - -- `A breakdown of current features under development, with their target - milestone `_ - -- `A list of all features, including those not yet under - development `_ - -- `Rough-draft design discussions ("etherpads") from the last design - summit `_ - -- `List of individual code changes under - review `_ - -Influencing the Roadmap -~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack truly welcomes your ideas (and contributions) and highly -values feedback from real-world users of the software. By learning a -little about the process that drives feature development, you can -participate and perhaps get the additions you desire. - -Feature requests typically start their life in Etherpad, a collaborative -editing tool, which is used to take coordinating notes at a design -summit session specific to the feature. This then leads to the creation -of a blueprint on the Launchpad site for the particular project, which -is used to describe the feature more formally. Blueprints are then -approved by project team members, and development can begin. - -Therefore, the fastest way to get your feature request up for -consideration is to create an Etherpad with your ideas and propose a -session to the design summit. If the design summit has already passed, -you may also create a blueprint directly. 
Read this `blog post about how -to work with blueprints -`_ -the perspective of Victoria Martínez, a developer intern. - -The roadmap for the next release as it is developed can be seen at -`Releases `_. - -To determine the potential features going in to future releases, or to -look at features implemented previously, take a look at the existing -blueprints such as `OpenStack Compute (nova) -Blueprints `_, `OpenStack -Identity (keystone) -Blueprints `_, and release -notes. - -Aside from the direct-to-blueprint pathway, there is another very -well-regarded mechanism to influence the development roadmap: -the user survey. Found at `OpenStack User Survey -`_, -it allows you to provide details of your deployments and needs, anonymously by -default. Each cycle, the user committee analyzes the results and produces a -report, including providing specific information to the technical -committee and project team leads. - -Aspects to Watch -~~~~~~~~~~~~~~~~ - -You want to keep an eye on the areas improving within OpenStack. The -best way to "watch" roadmaps for each project is to look at the -blueprints that are being approved for work on milestone releases. You -can also learn from PTL webinars that follow the OpenStack summits twice -a year. - -Driver Quality Improvements ---------------------------- - -A major quality push has occurred across drivers and plug-ins in Block -Storage, Compute, and Networking. Particularly, developers of Compute -and Networking drivers that require proprietary or hardware products are -now required to provide an automated external testing system for use -during the development process. - -Easier Upgrades ---------------- - -One of the most requested features since OpenStack began (for components -other than Object Storage, which tends to "just work"): easier upgrades. -In all recent releases internal messaging communication is versioned, -meaning services can theoretically drop back to backward-compatible -behavior. This allows you to run later versions of some components, -while keeping older versions of others. - -In addition, database migrations are now tested with the Turbo Hipster -tool. This tool tests database migration performance on copies of -real-world user databases. - -These changes have facilitated the first proper OpenStack upgrade guide, -found in :doc:`ops-upgrades`, and will continue to improve in the next -release. - -Deprecation of Nova Network ---------------------------- - -With the introduction of the full software-defined networking stack -provided by OpenStack Networking (neutron) in the Folsom release, -development effort on the initial networking code that remains part of -the Compute component has gradually lessened. While many still use -``nova-network`` in production, there has been a long-term plan to -remove the code in favor of the more flexible and full-featured -OpenStack Networking. - -An attempt was made to deprecate ``nova-network`` during the Havana -release, which was aborted due to the lack of equivalent functionality -(such as the FlatDHCP multi-host high-availability mode mentioned in -this guide), lack of a migration path between versions, insufficient -testing, and simplicity when used for the more straightforward use cases -``nova-network`` traditionally supported. Though significant effort has -been made to address these concerns, ``nova-network`` was not be -deprecated in the Juno release. 
In addition, to a limited degree, -patches to ``nova-network`` have again begin to be accepted, such as -adding a per-network settings feature and SR-IOV support in Juno. - -This leaves you with an important point of decision when designing your -cloud. OpenStack Networking is robust enough to use with a small number -of limitations (performance issues in some scenarios, only basic high -availability of layer 3 systems) and provides many more features than -``nova-network``. However, if you do not have the more complex use cases -that can benefit from fuller software-defined networking capabilities, -or are uncomfortable with the new concepts introduced, ``nova-network`` -may continue to be a viable option for the next 12 months. - -Similarly, if you have an existing cloud and are looking to upgrade from -``nova-network`` to OpenStack Networking, you should have the option to -delay the upgrade for this period of time. However, each release of -OpenStack brings significant new innovation, and regardless of your use -of networking methodology, it is likely best to begin planning for an -upgrade within a reasonable timeframe of each release. - -As mentioned, there's currently no way to cleanly migrate from -``nova-network`` to neutron. We recommend that you keep a migration in -mind and what that process might involve for when a proper migration -path is released. - -Distributed Virtual Router -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One of the long-time complaints surrounding OpenStack Networking was the -lack of high availability for the layer 3 components. The Juno release -introduced Distributed Virtual Router (DVR), which aims to solve this -problem. - -Early indications are that it does do this well for a base set of -scenarios, such as using the ML2 plug-in with Open vSwitch, one flat -external network and VXLAN tenant networks. However, it does appear that -there are problems with the use of VLANs, IPv6, Floating IPs, high -north-south traffic scenarios and large numbers of compute nodes. It is -expected these will improve significantly with the next release, but bug -reports on specific issues are highly desirable. - -Replacement of Open vSwitch Plug-in with Modular Layer 2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Modular Layer 2 plug-in is a framework allowing OpenStack Networking -to simultaneously utilize the variety of layer-2 networking technologies -found in complex real-world data centers. It currently works with the -existing Open vSwitch, Linux Bridge, and Hyper-V L2 agents and is -intended to replace and deprecate the monolithic plug-ins associated -with those L2 agents. - -New API Versions -~~~~~~~~~~~~~~~~ - -The third version of the Compute API was broadly discussed and worked on -during the Havana and Icehouse release cycles. Current discussions -indicate that the V2 API will remain for many releases, and the next -iteration of the API will be denoted v2.1 and have similar properties to -the existing v2.0, rather than an entirely new v3 API. This is a great -time to evaluate all API and provide comments while the next generation -APIs are being defined. A new working group was formed specifically to -`improve OpenStack APIs `_ -and create design guidelines, which you are welcome to join. - -OpenStack on OpenStack (TripleO) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This project continues to improve and you may consider using it for -greenfield deployments, though according to the latest user survey -results it remains to see widespread uptake. 
- -Data processing service for OpenStack (sahara) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A much-requested answer to big data problems, a dedicated team has been -making solid progress on a Hadoop-as-a-Service project. - -Bare metal Deployment (ironic) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The bare-metal deployment has been widely lauded, and development -continues. The Juno release brought the OpenStack Bare metal drive into -the Compute project, and it was aimed to deprecate the existing -bare-metal driver in Kilo. If you are a current user of the bare metal -driver, a particular blueprint to follow is `Deprecate the bare metal -driver -`_ - -Database as a Service (trove) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The OpenStack community has had a database-as-a-service tool in -development for some time, and we saw the first integrated release of it -in Icehouse. From its release it was able to deploy database servers out -of the box in a highly available way, initially supporting only MySQL. -Juno introduced support for Mongo (including clustering), PostgreSQL and -Couchbase, in addition to replication functionality for MySQL. In Kilo, -more advanced clustering capability was delivered, in addition to better -integration with other OpenStack components such as Networking. - -Message Service (zaqar) -~~~~~~~~~~~~~~~~~~~~~~~ - -A service to provide queues of messages and notifications was released. - -DNS service (designate) -~~~~~~~~~~~~~~~~~~~~~~~ - -A long requested service, to provide the ability to manipulate DNS -entries associated with OpenStack resources has gathered a following. -The designate project was also released. - -Scheduler Improvements -~~~~~~~~~~~~~~~~~~~~~~ - -Both Compute and Block Storage rely on schedulers to determine where to -place virtual machines or volumes. In Havana, the Compute scheduler -underwent significant improvement, while in Icehouse it was the -scheduler in Block Storage that received a boost. Further down the -track, an effort started this cycle that aims to create a holistic -scheduler covering both will come to fruition. Some of the work that was -done in Kilo can be found under the `Gantt -project `_. - -Block Storage Improvements --------------------------- - -Block Storage is considered a stable project, with wide uptake and a -long track record of quality drivers. The team has discussed many areas -of work at the summits, including better error reporting, automated -discovery, and thin provisioning features. - -Toward a Python SDK -------------------- - -Though many successfully use the various python-\*client code as an -effective SDK for interacting with OpenStack, consistency between the -projects and documentation availability waxes and wanes. To combat this, -an `effort to improve the -experience `_ has -started. Cross-project development efforts in OpenStack have a checkered -history, such as the `unified client -project `_ having -several false starts. However, the early signs for the SDK project are -promising, and we expect to see results during the Juno cycle. diff --git a/doc/ops-guide/source/app-usecases.rst b/doc/ops-guide/source/app-usecases.rst deleted file mode 100644 index 595a8ea917..0000000000 --- a/doc/ops-guide/source/app-usecases.rst +++ /dev/null @@ -1,192 +0,0 @@ -========= -Use Cases -========= - -This appendix contains a small selection of use cases from the -community, with more technical detail than usual. Further examples can -be found on the `OpenStack website `_. 
- -NeCTAR -~~~~~~ - -Who uses it: researchers from the Australian publicly funded research -sector. Use is across a wide variety of disciplines, with the purpose of -instances ranging from running simple web servers to using hundreds of -cores for high-throughput computing. - -Deployment ----------- - -Using OpenStack Compute cells, the NeCTAR Research Cloud spans eight -sites with approximately 4,000 cores per site. - -Each site runs a different configuration, as a resource cells in an -OpenStack Compute cells setup. Some sites span multiple data centers, -some use off compute node storage with a shared file system, and some -use on compute node storage with a non-shared file system. Each site -deploys the Image service with an Object Storage back end. A central -Identity, dashboard, and Compute API service are used. A login to the -dashboard triggers a SAML login with Shibboleth, which creates an -account in the Identity service with an SQL back end. An Object Storage -Global Cluster is used across several sites. - -Compute nodes have 24 to 48 cores, with at least 4 GB of RAM per core -and approximately 40 GB of ephemeral storage per core. - -All sites are based on Ubuntu 14.04, with KVM as the hypervisor. The -OpenStack version in use is typically the current stable version, with 5 -to 10 percent back-ported code from trunk and modifications. - -Resources ---------- - -- `OpenStack.org case - study `_ - -- `NeCTAR-RC GitHub `_ - -- `NeCTAR website `_ - -MIT CSAIL -~~~~~~~~~ - -Who uses it: researchers from the MIT Computer Science and Artificial -Intelligence Lab. - -Deployment ----------- - -The CSAIL cloud is currently 64 physical nodes with a total of 768 -physical cores and 3,456 GB of RAM. Persistent data storage is largely -outside the cloud on NFS, with cloud resources focused on compute -resources. There are more than 130 users in more than 40 projects, -typically running 2,000–2,500 vCPUs in 300 to 400 instances. - -We initially deployed on Ubuntu 12.04 with the Essex release of -OpenStack using FlatDHCP multi-host networking. - -The software stack is still Ubuntu 12.04 LTS, but now with OpenStack -Havana from the Ubuntu Cloud Archive. KVM is the hypervisor, deployed -using `FAI `_ and Puppet for configuration -management. The FAI and Puppet combination is used lab-wide, not only -for OpenStack. There is a single cloud controller node, which also acts -as network controller, with the remainder of the server hardware -dedicated to compute nodes. - -Host aggregates and instance-type extra specs are used to provide two -different resource allocation ratios. The default resource allocation -ratios we use are 4:1 CPU and 1.5:1 RAM. Compute-intensive workloads use -instance types that require non-oversubscribed hosts where ``cpu_ratio`` -and ``ram_ratio`` are both set to 1.0. Since we have hyper-threading -enabled on our compute nodes, this provides one vCPU per CPU thread, or -two vCPUs per physical core. - -With our upgrade to Grizzly in August 2013, we moved to OpenStack -Networking, neutron (quantum at the time). Compute nodes have -two-gigabit network interfaces and a separate management card for IPMI -management. One network interface is used for node-to-node -communications. The other is used as a trunk port for OpenStack managed -VLANs. The controller node uses two bonded 10g network interfaces for -its public IP communications. 
Big pipes are used here because images are -served over this port, and it is also used to connect to iSCSI storage, -back-ending the image storage and database. The controller node also has -a gigabit interface that is used in trunk mode for OpenStack-managed -VLAN traffic. This port handles traffic to the dhcp-agent and -metadata-proxy. - -We approximate the older ``nova-network`` multi-host HA setup by using -"provider VLAN networks" that connect instances directly to existing -publicly addressable networks and use existing physical routers as their -default gateway. This means that if our network controller goes down, -running instances still have their network available, and no single -Linux host becomes a traffic bottleneck. We are able to do this because -we have a sufficient supply of IPv4 addresses to cover all of our -instances and thus don't need NAT and don't use floating IP addresses. -We provide a single generic public network to all projects and -additional existing VLANs on a project-by-project basis as needed. -Individual projects are also allowed to create their own private -GRE-based networks. - -Resources --------- - -- `CSAIL homepage `_ - -DAIR -~~~~ - -Who uses it: DAIR is an integrated virtual environment that leverages -the CANARIE network to develop and test new information and -communications technology (ICT) and other digital technologies. It -combines such digital infrastructure as advanced networking and cloud -computing and storage to create an environment for developing and -testing innovative ICT applications, protocols, and services; performing -at-scale experimentation for deployment; and facilitating a faster time -to market. - -Deployment ---------- - -DAIR is hosted at two different data centers across Canada: one in -Alberta and the other in Quebec. It consists of a cloud controller at -each location, although one is designated the "master" controller that -is in charge of central authentication and quotas. This is done through -custom scripts and light modifications to OpenStack. DAIR is currently -running Havana. - -For Object Storage, each region has a swift environment. - -A NetApp appliance is used in each region for both block storage and -instance storage. There are future plans to move the instances off the -NetApp appliance and onto a distributed file system such as :term:`Ceph` or -GlusterFS. - -VlanManager is used extensively for network management. All servers have -two bonded 10GbE NICs that are connected to two redundant switches. DAIR -is set up to use single-node networking where the cloud controller is -the gateway for all instances on all compute nodes. Internal OpenStack -traffic (for example, storage traffic) does not go through the cloud -controller. - -Resources --------- - -- `DAIR homepage `__ - -CERN -~~~~ - -Who uses it: researchers at CERN (European Organization for Nuclear -Research) conducting high-energy physics research. - -Deployment ---------- - -The environment is largely based on Scientific Linux 6, which is Red Hat -compatible. We use KVM as our primary hypervisor, although tests are -ongoing with Hyper-V on Windows Server 2008. - -We use the Puppet Labs OpenStack modules to configure Compute, Image -service, Identity, and dashboard. Puppet is used widely for instance -configuration, and Foreman is used as a GUI for reporting and instance -provisioning. - -Users and groups are managed through Active Directory and imported into -the Identity service using LDAP. CLIs are available for nova and -Euca2ools to do this.
- -There are three clouds currently running at CERN, totaling about 4,700 -compute nodes, with approximately 120,000 cores. The CERN IT cloud aims -to expand to 300,000 cores by 2015. - -Resources ---------- - -- `OpenStack in Production: A tale of 3 OpenStack - Clouds `_ - -- `Review of CERN Data Centre - Infrastructure `_ - -- `CERN Cloud Infrastructure User - Guide `_ diff --git a/doc/ops-guide/source/appendix.rst b/doc/ops-guide/source/appendix.rst deleted file mode 100644 index dc27aa0f51..0000000000 --- a/doc/ops-guide/source/appendix.rst +++ /dev/null @@ -1,12 +0,0 @@ -Appendix -~~~~~~~~ - -.. toctree:: - :maxdepth: 1 - - app-usecases.rst - app-crypt.rst - app-roadmaps.rst - app-resources.rst - common/app-support.rst - common/glossary.rst diff --git a/doc/ops-guide/source/common b/doc/ops-guide/source/common deleted file mode 120000 index dc879abe93..0000000000 --- a/doc/ops-guide/source/common +++ /dev/null @@ -1 +0,0 @@ -../../common \ No newline at end of file diff --git a/doc/ops-guide/source/conf.py b/doc/ops-guide/source/conf.py deleted file mode 100644 index afc3df57f1..0000000000 --- a/doc/ops-guide/source/conf.py +++ /dev/null @@ -1,297 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -# import sys - -import openstackdocstheme - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['openstackdocstheme'] - -# Add any paths that contain templates here, relative to this directory. -# templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -repository_name = "openstack/openstack-manuals" -bug_project = 'openstack-manuals' -project = u'Operations Guide' -bug_tag = u'ops-guide' -copyright = u'2016-2017, OpenStack contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '15.0' -# The full version, including alpha/beta/rc tags. 
-release = '15.0.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['common/cli*', 'common/nova*', - 'common/appendix.rst', - 'common/get-started*', 'common/dashboard*'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [openstackdocstheme.get_html_theme_path()] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# So that we can enable "log-a-bug" links from each output HTML page, this -# variable must be set to a format that includes year, month, day, hours and -# minutes. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. 
-# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -html_use_index = False - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'ops-guide' - -# If true, publish source files -html_copy_source = False - -# -- Options for LaTeX output --------------------------------------------- -pdf_theme_path = openstackdocstheme.get_pdf_theme_path() -openstack_logo = openstackdocstheme.get_openstack_logo_path() - -latex_custom_template = r""" -\newcommand{\openstacklogo}{%s} -\usepackage{%s} -""" % (openstack_logo, pdf_theme_path) - -latex_engine = 'xelatex' - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - - # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '11pt', - - #Default figure align - 'figure_align': 'H', - - # Not to generate blank page after chapter - 'classoptions': ',openany', - - # Additional stuff for the LaTeX preamble. - 'preamble': latex_custom_template, -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'OpsGuide.tex', u'Operations Guide', - u'OpenStack contributors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'opsguide', u'Operations Guide', - [u'OpenStack contributors'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'OpsGuide', u'Operations Guide', - u'OpenStack contributors', 'OpsGuide', - 'This book provides information about designing and operating ' - 'OpenStack clouds.', 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# -- Options for Internationalization output ------------------------------ -locale_dirs = ['locale/'] diff --git a/doc/ops-guide/source/figures/Check_mark_23x20_02.png b/doc/ops-guide/source/figures/Check_mark_23x20_02.png deleted file mode 100644 index e6e5d5a72bac6ae7b181acae33e5029093081163..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3064 [base85-encoded binary image data omitted]
diff --git a/doc/ops-guide/source/figures/Check_mark_23x20_02.svg b/doc/ops-guide/source/figures/Check_mark_23x20_02.svg deleted file mode 100644 index 3051a2f937..0000000000 --- a/doc/ops-guide/source/figures/Check_mark_23x20_02.svg +++ /dev/null @@ -1,60 +0,0 @@ [SVG markup omitted] diff --git a/doc/ops-guide/source/figures/create_project.png b/doc/ops-guide/source/figures/create_project.png deleted file mode 100644 index 8906bcac35769912c11cdb7ea954cafe61135619..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 43038 [base85-encoded binary image data omitted]