Initial fork out of Nova.
This commit is contained in:
commit
c53d8e343e
24
.gitignore
vendored
Normal file
24
.gitignore
vendored
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
*.pyc
|
||||||
|
*.DS_Store
|
||||||
|
local_settings.py
|
||||||
|
CA/
|
||||||
|
keeper
|
||||||
|
instances
|
||||||
|
keys
|
||||||
|
build/*
|
||||||
|
build-stamp
|
||||||
|
cinder.egg-info
|
||||||
|
nova.egg-info
|
||||||
|
.cinder-venv
|
||||||
|
.nova-venv
|
||||||
|
.venv
|
||||||
|
.tox
|
||||||
|
*.sqlite
|
||||||
|
*.log
|
||||||
|
*.mo
|
||||||
|
tools/conf/cinder.conf*
|
||||||
|
tools/conf/nova.conf*
|
||||||
|
cover/*
|
||||||
|
dist/*
|
||||||
|
.coverage
|
||||||
|
covhtml
|
4
.gitreview
Normal file
4
.gitreview
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
[gerrit]
|
||||||
|
host=review.openstack.org
|
||||||
|
port=29418
|
||||||
|
project=openstack/cinder.git
|
81
.mailmap
Normal file
81
.mailmap
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
# Format is:
|
||||||
|
# <preferred e-mail> <other e-mail 1>
|
||||||
|
# <preferred e-mail> <other e-mail 2>
|
||||||
|
<aaron.lee@rackspace.com> <wwkeyboard@gmail.com>
|
||||||
|
<anotherjesse@gmail.com> <jesse@dancelamb>
|
||||||
|
<anotherjesse@gmail.com> <jesse@gigantor.local>
|
||||||
|
<anotherjesse@gmail.com> <jesse@ubuntu>
|
||||||
|
<anotherjesse@gmail.com> <jesse@aire.local>
|
||||||
|
<ant@openstack.org> <amesserl@rackspace.com>
|
||||||
|
<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
|
||||||
|
<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
|
||||||
|
<brian.waldon@rackspace.com> <bcwaldon@gmail.com>
|
||||||
|
<bschott@isi.edu> <bfschott@gmail.com>
|
||||||
|
<cbehrens@codestud.com> <chris.behrens@rackspace.com>
|
||||||
|
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
|
||||||
|
<code@term.ie> <github@anarkystic.com>
|
||||||
|
<code@term.ie> <termie@preciousroy.local>
|
||||||
|
<corywright@gmail.com> <cory.wright@rackspace.com>
|
||||||
|
<dan@nicira.com> <danwent@dan-xs3-cs>
|
||||||
|
<dan@nicira.com> <danwent@gmail.com>
|
||||||
|
<dtroyer@gmail.com> <dt-github@xr7.org>
|
||||||
|
<Dave.Walker@canonical.com> <DaveWalker@ubuntu.com>
|
||||||
|
<derekh@redhat.com> <higginsd@gmail.com>
|
||||||
|
<devin.carlen@gmail.com> <devcamcar@illian.local>
|
||||||
|
<doug.hellmann@dreamhost.com> <doug.hellmann@gmail.com>
|
||||||
|
<dprince@redhat.com> <dan.prince@rackspace.com>
|
||||||
|
<edouard1.thuleau@orange.com> <thuleau@gmail.com>
|
||||||
|
<ewan.mellor@citrix.com> <emellor@silver>
|
||||||
|
<ghe@debian.org> <ghe.rivero@gmail.com>
|
||||||
|
<itoumsn@nttdata.co.jp> <itoumsn@shayol>
|
||||||
|
<jake@ansolabs.com> <jake@markupisart.com>
|
||||||
|
<jake@ansolabs.com> <admin@jakedahn.com>
|
||||||
|
<jaypipes@gmail.com> <jpipes@serialcoder>
|
||||||
|
<jeblair@hp.com> <james.blair@rackspace.com>
|
||||||
|
<jeblair@hp.com> <corvus@inaugust.com>
|
||||||
|
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
|
||||||
|
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
|
||||||
|
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
|
||||||
|
<johannes.erdfelt@rackspace.com> <johannes@compute3.221.st>
|
||||||
|
<john.griffith@solidfire.com> <john.griffith8@gmail.com>
|
||||||
|
<josh@jk0.org> <josh.kearney@rackspace.com>
|
||||||
|
<justin@fathomdb.com> <justinsb@justinsb-desktop>
|
||||||
|
<justin@fathomdb.com> <superstack@superstack.org>
|
||||||
|
<lorin@nimbisservices.com> <lorin@isi.edu>
|
||||||
|
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
|
||||||
|
<masumotok@nttdata.co.jp> <root@openstack2-api>
|
||||||
|
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
|
||||||
|
<matt.dietz@rackspace.com> <mdietz@openstack>
|
||||||
|
<mikal@stillhq.com> <michael.still@canonical.com>
|
||||||
|
<mordred@inaugust.com> <mordred@hudson>
|
||||||
|
<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
|
||||||
|
<rnirmal@gmail.com> <nirmal.ranganathan@rackspace.com>
|
||||||
|
<rnirmal@gmail.com> <nirmal.ranganathan@rackspace.coom>
|
||||||
|
<paul@openstack.org> <paul.voccio@rackspace.com>
|
||||||
|
<paul@openstack.org> <pvoccio@castor.local>
|
||||||
|
<paul@openstack.org> <paul@substation9.com>
|
||||||
|
<rconradharris@gmail.com> <rick.harris@rackspace.com>
|
||||||
|
<rlane@wikimedia.org> <laner@controller>
|
||||||
|
<sandy.walsh@rackspace.com> <sandy@sandywalsh.com>
|
||||||
|
<sleepsonthefloor@gmail.com> <root@tonbuntu>
|
||||||
|
<soren.hansen@rackspace.com> <soren@linux2go.dk>
|
||||||
|
<throughnothing@gmail.com> <will.wolf@rackspace.com>
|
||||||
|
<tim.simpson@rackspace.com> <tim.simpson4@gmail.com>
|
||||||
|
<todd@ansolabs.com> <todd@lapex>
|
||||||
|
<todd@ansolabs.com> <todd@rubidine.com>
|
||||||
|
<todd@ansolabs.com> <xtoddx@gmail.com>
|
||||||
|
<trey.morris@rackspace.com> <treyemorris@gmail.com>
|
||||||
|
<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
|
||||||
|
<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
|
||||||
|
<ueno.nachi@lab.ntt.co.jp> <nova@u4>
|
||||||
|
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
|
||||||
|
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
|
||||||
|
<vishvananda@gmail.com> <root@ubuntu>
|
||||||
|
<vishvananda@gmail.com> <vishvananda@yahoo.com>
|
||||||
|
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
|
||||||
|
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
|
||||||
|
<reldan@oscloud.ru> <enugaev@griddynamics.com>
|
||||||
|
<kshileev@gmail.com> <kshileev@griddynamics.com>
|
||||||
|
<nsokolov@griddynamics.com> <nsokolov@griddynamics.net>
|
||||||
|
<troy.toman@rackspace.com> <ttcl@mac.com>
|
||||||
|
<zulcss@ubuntu.com> <chuck.short@canonical.com>
|
211
Authors
Normal file
211
Authors
Normal file
@ -0,0 +1,211 @@
|
|||||||
|
Aaron Lee <aaron.lee@rackspace.com>
|
||||||
|
Adam Gandelman <adamg@canonical.com>
|
||||||
|
Adam Johnson <adjohn@gmail.com>
|
||||||
|
Adrian Smith <adrian_f_smith@dell.com>
|
||||||
|
Ahmad Hassan <ahmad.hassan@hp.com>
|
||||||
|
Alex Meade <alex.meade@rackspace.com>
|
||||||
|
Alexander Sakhnov <asakhnov@mirantis.com>
|
||||||
|
Alexander Kovalev <akovalev@mirantis.com>
|
||||||
|
Alvaro Lopez Garcia <aloga@ifca.unican.es>
|
||||||
|
Andrew Bogott <abogott@wikimedia.org>
|
||||||
|
Andrew Clay Shafer <acs@parvuscaptus.com>
|
||||||
|
Andrey Brindeyev <abrindeyev@griddynamics.com>
|
||||||
|
Andy Smith <code@term.ie>
|
||||||
|
Andy Southgate <andy.southgate@citrix.com>
|
||||||
|
Anne Gentle <anne@openstack.org>
|
||||||
|
Ante Karamatić <ivoks@ubuntu.com>
|
||||||
|
Anthony Young <sleepsonthefloor@gmail.com>
|
||||||
|
Antony Messerli <ant@openstack.org>
|
||||||
|
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
|
||||||
|
Arvind Somya <asomya@cisco.com>
|
||||||
|
Asbjørn Sannes <asbjorn.sannes@interhost.no>
|
||||||
|
Ben McGraw <ben@pistoncloud.com>
|
||||||
|
Ben Swartzlander <bswartz@netapp.com>
|
||||||
|
Bilal Akhtar <bilalakhtar@ubuntu.com>
|
||||||
|
Brad Hall <brad@nicira.com>
|
||||||
|
Brad McConnell <bmcconne@rackspace.com>
|
||||||
|
Brendan Maguire <B_Maguire@Dell.com>
|
||||||
|
Brian Elliott <brian.elliott@rackspace.com>
|
||||||
|
Brian Lamar <brian.lamar@rackspace.com>
|
||||||
|
Brian Schott <bschott@isi.edu>
|
||||||
|
Brian Waldon <brian.waldon@rackspace.com>
|
||||||
|
Chiradeep Vittal <chiradeep@cloud.com>
|
||||||
|
Chmouel Boudjnah <chmouel@chmouel.com>
|
||||||
|
Chris Behrens <cbehrens@codestud.com>
|
||||||
|
Christian Berendt <berendt@b1-systems.de>
|
||||||
|
Chris Fattarsi <chris.fattarsi@pistoncloud.com>
|
||||||
|
Christopher MacGown <chris@pistoncloud.com>
|
||||||
|
Chuck Short <zulcss@ubuntu.com>
|
||||||
|
Cole Robinson <crobinso@redhat.com>
|
||||||
|
Cor Cornelisse <cor@hyves.nl>
|
||||||
|
Cory Wright <corywright@gmail.com>
|
||||||
|
Dan Prince <dprince@redhat.com>
|
||||||
|
Dan Wendlandt <dan@nicira.com>
|
||||||
|
Daniel P. Berrange <berrange@redhat.com>
|
||||||
|
Dave Lapsley <dlapsley@nicira.com>
|
||||||
|
Dave Walker <Dave.Walker@canonical.com>
|
||||||
|
David Pravec <David.Pravec@danix.org>
|
||||||
|
David Subiros <david.perez5@hp.com>
|
||||||
|
Dean Troyer <dtroyer@gmail.com>
|
||||||
|
Deepak Garg <deepak.garg@citrix.com>
|
||||||
|
Derek Higgins <derekh@redhat.com>
|
||||||
|
Devdeep Singh <devdeep.singh@citrix.com>
|
||||||
|
Devendra Modium <dmodium@isi.edu>
|
||||||
|
Devin Carlen <devin.carlen@gmail.com>
|
||||||
|
Dina Belova <dbelova@mirantis.com>
|
||||||
|
Donal Lafferty <donal.lafferty@citrix.com>
|
||||||
|
Dong-In David Kang <dkang@isi.edu>
|
||||||
|
Doug Hellmann <doug.hellmann@dreamhost.com>
|
||||||
|
Duncan McGreggor <duncan@dreamhost.com>
|
||||||
|
Ed Leafe <ed@leafe.com>
|
||||||
|
Edouard Thuleau <edouard1.thuleau@orange.com>
|
||||||
|
Eldar Nugaev <reldan@oscloud.ru>
|
||||||
|
Eoghan Glynn <eglynn@redhat.com>
|
||||||
|
Eric Day <eday@oddments.org>
|
||||||
|
Eric Windisch <eric@cloudscaling.com>
|
||||||
|
Evan Callicoat <diopter@gmail.com>
|
||||||
|
Ewan Mellor <ewan.mellor@citrix.com>
|
||||||
|
François Charlier <francois.charlier@ecindernce.com>
|
||||||
|
Gabe Westmaas <gabe.westmaas@rackspace.com>
|
||||||
|
Gabriel Hurley <gabriel@strikeawe.com>
|
||||||
|
Gary Kotton <garyk@radware.com>
|
||||||
|
Gaurav Gupta <gaurav@denali-systems.com>
|
||||||
|
Greg Althaus <galthaus@austin.rr.com>
|
||||||
|
Hengqing Hu <hudayou@hotmail.com>
|
||||||
|
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
|
||||||
|
Hisaki Ohara <hisaki.ohara@intel.com>
|
||||||
|
Ilya Alekseyev <ilyaalekseyev@acm.org>
|
||||||
|
Ionuț Arțăriși <iartarisi@suse.cz>
|
||||||
|
Isaku Yamahata <yamahata@valinux.co.jp>
|
||||||
|
Ivan Kolodyazhny <e0ne@e0ne.info>
|
||||||
|
J. Daniel Schmidt <jdsn@suse.de>
|
||||||
|
Jake Dahn <jake@ansolabs.com>
|
||||||
|
James E. Blair <jeblair@hp.com>
|
||||||
|
Jason Cannavale <jason.cannavale@rackspace.com>
|
||||||
|
Jason Koelker <jason@koelker.net>
|
||||||
|
Jay Pipes <jaypipes@gmail.com>
|
||||||
|
JC Martin <jcmartin@ebaysf.com>
|
||||||
|
Jesse Andrews <anotherjesse@gmail.com>
|
||||||
|
Jimmy Bergman <jimmy@sigint.se>
|
||||||
|
Joe Gordon <jogo@cloudscaling.com>
|
||||||
|
Joe Heck <heckj@mac.com>
|
||||||
|
Joel Moore <joelbm24@gmail.com>
|
||||||
|
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
|
||||||
|
John Dewey <john@dewey.ws>
|
||||||
|
John Garbutt <john.garbutt@citrix.com>
|
||||||
|
John Griffith <john.griffith@solidfire.com>
|
||||||
|
John Kennedy <john.m.kennedy@intel.com>
|
||||||
|
John Tran <jtran@attinteractive.com>
|
||||||
|
Jonathan Bryce <jbryce@jbryce.com>
|
||||||
|
Jordan Rinke <jordan@openstack.org>
|
||||||
|
Joseph Suh <jsuh@isi.edu>
|
||||||
|
Joseph W. Breu <breu@breu.org>
|
||||||
|
Josh Durgin <joshd@hq.newdream.net>
|
||||||
|
Josh Kearney <josh@jk0.org>
|
||||||
|
Josh Kleinpeter <josh@kleinpeter.org>
|
||||||
|
Joshua Harlow <harlowja@yahoo-inc.com>
|
||||||
|
Joshua McKenty <jmckenty@gmail.com>
|
||||||
|
Juan G. Hernando Rivero <ghe@debian.org>
|
||||||
|
Julien Danjou <julien.danjou@ecindernce.com>
|
||||||
|
Justin Santa Barbara <justin@fathomdb.com>
|
||||||
|
Justin Shepherd <jshepher@rackspace.com>
|
||||||
|
Kei Masumoto <masumotok@nttdata.co.jp>
|
||||||
|
Keisuke Tagami <tagami.keisuke@lab.ntt.co.jp>
|
||||||
|
masumoto<masumotok@nttdata.co.jp>
|
||||||
|
masukotm<masukotm@nttdata.co.jp>
|
||||||
|
Ken Pepple <ken.pepple@gmail.com>
|
||||||
|
Kevin Bringard <kbringard@attinteractive.com>
|
||||||
|
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
|
||||||
|
Kiall Mac Innes <kiall@managedit.ie>
|
||||||
|
Kirill Shileev <kshileev@gmail.com>
|
||||||
|
Koji Iida <iida.koji@lab.ntt.co.jp>
|
||||||
|
Liam Kelleher <liam.kelleher@hp.com>
|
||||||
|
Likitha Shetty <likitha.shetty@citrix.com>
|
||||||
|
Loganathan Parthipan <parthipan@hp.com>
|
||||||
|
Lorin Hochstein <lorin@nimbisservices.com>
|
||||||
|
Lvov Maxim <usrleon@gmail.com>
|
||||||
|
Mandar Vaze <mandar.vaze@vertex.co.in>
|
||||||
|
Mandell Degerness <mdegerne@gmail.com>
|
||||||
|
Mark McClain <mark.mcclain@dreamhost.com>
|
||||||
|
Mark McLoughlin <markmc@redhat.com>
|
||||||
|
Mark Washenberger <mark.washenberger@rackspace.com>
|
||||||
|
Maru Newby <mnewby@internap.com>
|
||||||
|
Masanori Itoh <itoumsn@nttdata.co.jp>
|
||||||
|
Matt Dietz <matt.dietz@rackspace.com>
|
||||||
|
Matt Stephenson <mattstep@mattstep.net>
|
||||||
|
Matthew Hooker <matt@cloudscaling.com>
|
||||||
|
Michael Basnight <mbasnigh@rackspace.com>
|
||||||
|
Michael Gundlach <michael.gundlach@rackspace.com>
|
||||||
|
Michael Still <mikal@stillhq.com>
|
||||||
|
Mike Lundy <mike@pistoncloud.com>
|
||||||
|
Mike Milner <mike.milner@canonical.com>
|
||||||
|
Mike Pittaro <mikeyp@lahondaresearch.org>
|
||||||
|
Mike Scherbakov <mihgen@gmail.com>
|
||||||
|
Mikyung Kang <mkkang@isi.edu>
|
||||||
|
Mohammed Naser <mnaser@vexxhost.com>
|
||||||
|
Monsyne Dragon <mdragon@rackspace.com>
|
||||||
|
Monty Taylor <mordred@inaugust.com>
|
||||||
|
MORITA Kazutaka <morita.kazutaka@gmail.com>
|
||||||
|
MotoKen <motokentsai@gmail.com>
|
||||||
|
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
|
||||||
|
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
|
||||||
|
Naveed Massjouni <naveedm9@gmail.com>
|
||||||
|
Nick Bartos <nick@pistoncloud.com>
|
||||||
|
Nikhil Komawar <nikhil.komawar@rackspace.com>
|
||||||
|
Nikolay Sokolov <nsokolov@griddynamics.com>
|
||||||
|
Nirmal Ranganathan <rnirmal@gmail.com>
|
||||||
|
Ollie Leahy <oliver.leahy@hp.com>
|
||||||
|
Pádraig Brady <pbrady@redhat.com>
|
||||||
|
Paul McMillan <paul.mcmillan@nebula.com>
|
||||||
|
Paul Voccio <paul@openstack.org>
|
||||||
|
Peng Yong <ppyy@pubyun.com>
|
||||||
|
Philip Knouff <philip.knouff@mailtrust.com>
|
||||||
|
Renier Morales <renierm@us.ibm.com>
|
||||||
|
Renuka Apte <renuka.apte@citrix.com>
|
||||||
|
Ricardo Carrillo Cruz <emaildericky@gmail.com>
|
||||||
|
Rick Clark <rick@openstack.org>
|
||||||
|
Rick Harris <rconradharris@gmail.com>
|
||||||
|
Rob Kost <kost@isi.edu>
|
||||||
|
Robert Esker <esker@netapp.com>
|
||||||
|
Russell Bryant <rbryant@redhat.com>
|
||||||
|
Russell Sim <russell.sim@gmail.com>
|
||||||
|
Ryan Lane <rlane@wikimedia.org>
|
||||||
|
Ryan Lucio <rlucio@internap.com>
|
||||||
|
Ryu Ishimoto <ryu@midokura.jp>
|
||||||
|
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
|
||||||
|
Sandy Walsh <sandy.walsh@rackspace.com>
|
||||||
|
Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
|
||||||
|
Scott Moser <smoser@ubuntu.com>
|
||||||
|
Sean Dague <sdague@linux.vnet.ibm.com>
|
||||||
|
Soren Hansen <soren.hansen@rackspace.com>
|
||||||
|
Stanislaw Pitucha <stanislaw.pitucha@hp.com>
|
||||||
|
Stephanie Reese <reese.sm@gmail.com>
|
||||||
|
Sumit Naiksatam <snaiksat@cisco.com>
|
||||||
|
Thierry Carrez <thierry@openstack.org>
|
||||||
|
Tim Simpson <tim.simpson@rackspace.com>
|
||||||
|
Todd Willey <todd@ansolabs.com>
|
||||||
|
Tomoe Sugihara <tomoe@midokura.com>
|
||||||
|
Tomoya Masuko<masukotm@nttdata.co.jp>
|
||||||
|
Thorsten Tarrach <thorsten@atomia.com>
|
||||||
|
Trey Morris <trey.morris@rackspace.com>
|
||||||
|
Troy Toman <troy.toman@rackspace.com>
|
||||||
|
Tushar Patil <tushar.vitthal.patil@gmail.com>
|
||||||
|
Unmesh Gurjar <unmesh.gurjar@vertex.co.in>
|
||||||
|
Vasiliy Shlykov <vash@vasiliyshlykov.org>
|
||||||
|
Vishvananda Ishaya <vishvananda@gmail.com>
|
||||||
|
Vivek Y S <vivek.ys@gmail.com>
|
||||||
|
Vladimir Popovski <vladimir@zadarastorage.com>
|
||||||
|
Vaddi kiran <vaddi_kiran@persistent.co.in>
|
||||||
|
William Henry <whenry@redhat.com>
|
||||||
|
William Kelly <william.kelly@rackspace.com>
|
||||||
|
William Wolf <throughnothing@gmail.com>
|
||||||
|
Yaguang Tang <heut2008@gmail.com>
|
||||||
|
Yoshiaki Tamura <yoshi@midokura.jp>
|
||||||
|
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
|
||||||
|
Yun Mao <yunmao@gmail.com>
|
||||||
|
Yun Shen <Yun.Shen@hp.com>
|
||||||
|
Yuriy Taraday <yorik.sar@gmail.com>
|
||||||
|
Zed Shaw <zedshaw@zedshaw.com>
|
||||||
|
Zhixue Wu <Zhixue.Wu@citrix.com>
|
||||||
|
Zhongyue Luo <lzyeval@gmail.com>
|
||||||
|
Ziad Sawalha <github@highbridgellc.com>
|
213
HACKING.rst
Normal file
213
HACKING.rst
Normal file
@ -0,0 +1,213 @@
|
|||||||
|
Cinder Style Commandments
|
||||||
|
=======================
|
||||||
|
|
||||||
|
- Step 1: Read http://www.python.org/dev/peps/pep-0008/
|
||||||
|
- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
|
||||||
|
- Step 3: Read on
|
||||||
|
|
||||||
|
|
||||||
|
General
|
||||||
|
-------
|
||||||
|
- Put two newlines between top-level code (funcs, classes, etc)
|
||||||
|
- Put one newline between methods in classes and anywhere else
|
||||||
|
- Do not write "except:", use "except Exception:" at the very least
|
||||||
|
- Include your name with TODOs as in "#TODO(termie)"
|
||||||
|
- Do not shadow a built-in or reserved word. Example::
|
||||||
|
|
||||||
|
def list():
|
||||||
|
return [1, 2, 3]
|
||||||
|
|
||||||
|
mylist = list() # BAD, shadows `list` built-in
|
||||||
|
|
||||||
|
class Foo(object):
|
||||||
|
def list(self):
|
||||||
|
return [1, 2, 3]
|
||||||
|
|
||||||
|
mylist = Foo().list() # OKAY, does not shadow built-in
|
||||||
|
|
||||||
|
|
||||||
|
Imports
|
||||||
|
-------
|
||||||
|
- Do not import objects, only modules (*)
|
||||||
|
- Do not import more than one module per line (*)
|
||||||
|
- Do not make relative imports
|
||||||
|
- Order your imports by the full module path
|
||||||
|
- Organize your imports according to the following template
|
||||||
|
|
||||||
|
(*) exceptions are:
|
||||||
|
|
||||||
|
- imports from ``migrate`` package
|
||||||
|
- imports from ``sqlalchemy`` package
|
||||||
|
- imports from ``cinder.db.sqlalchemy.session`` module
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
{{stdlib imports in human alphabetical order}}
|
||||||
|
\n
|
||||||
|
{{third-party lib imports in human alphabetical order}}
|
||||||
|
\n
|
||||||
|
{{cinder imports in human alphabetical order}}
|
||||||
|
\n
|
||||||
|
\n
|
||||||
|
{{begin your code}}
|
||||||
|
|
||||||
|
|
||||||
|
Human Alphabetical Order Examples
|
||||||
|
---------------------------------
|
||||||
|
Example::
|
||||||
|
|
||||||
|
import httplib
|
||||||
|
import logging
|
||||||
|
import random
|
||||||
|
import StringIO
|
||||||
|
import time
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
import cinder.api.ec2
|
||||||
|
from cinder.api import openstack
|
||||||
|
from cinder.auth import users
|
||||||
|
from cinder.endpoint import cloud
|
||||||
|
import cinder.flags
|
||||||
|
from cinder import test
|
||||||
|
|
||||||
|
|
||||||
|
Docstrings
|
||||||
|
----------
|
||||||
|
Example::
|
||||||
|
|
||||||
|
"""A one line docstring looks like this and ends in a period."""
|
||||||
|
|
||||||
|
|
||||||
|
"""A multi line docstring has a one-line summary, less than 80 characters.
|
||||||
|
|
||||||
|
Then a new paragraph after a newline that explains in more detail any
|
||||||
|
general information about the function, class or method. Example usages
|
||||||
|
are also great to have here if it is a complex class for function.
|
||||||
|
|
||||||
|
When writing the docstring for a class, an extra line should be placed
|
||||||
|
after the closing quotations. For more in-depth explanations for these
|
||||||
|
decisions see http://www.python.org/dev/peps/pep-0257/
|
||||||
|
|
||||||
|
If you are going to describe parameters and return values, use Sphinx, the
|
||||||
|
appropriate syntax is as follows.
|
||||||
|
|
||||||
|
:param foo: the foo parameter
|
||||||
|
:param bar: the bar parameter
|
||||||
|
:returns: return_type -- description of the return value
|
||||||
|
:returns: description of the return value
|
||||||
|
:raises: AttributeError, KeyError
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
Dictionaries/Lists
|
||||||
|
------------------
|
||||||
|
If a dictionary (dict) or list object is longer than 80 characters, its items
|
||||||
|
should be split with newlines. Embedded iterables should have their items
|
||||||
|
indented. Additionally, the last item in the dictionary should have a trailing
|
||||||
|
comma. This increases readability and simplifies future diffs.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
my_dictionary = {
|
||||||
|
"image": {
|
||||||
|
"name": "Just a Snapshot",
|
||||||
|
"size": 2749573,
|
||||||
|
"properties": {
|
||||||
|
"user_id": 12,
|
||||||
|
"arch": "x86_64",
|
||||||
|
},
|
||||||
|
"things": [
|
||||||
|
"thing_one",
|
||||||
|
"thing_two",
|
||||||
|
],
|
||||||
|
"status": "ACTIVE",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Calling Methods
|
||||||
|
---------------
|
||||||
|
Calls to methods 80 characters or longer should format each argument with
|
||||||
|
newlines. This is not a requirement, but a guideline::
|
||||||
|
|
||||||
|
unnecessarily_long_function_name('string one',
|
||||||
|
'string two',
|
||||||
|
kwarg1=constants.ACTIVE,
|
||||||
|
kwarg2=['a', 'b', 'c'])
|
||||||
|
|
||||||
|
|
||||||
|
Rather than constructing parameters inline, it is better to break things up::
|
||||||
|
|
||||||
|
list_of_strings = [
|
||||||
|
'what_a_long_string',
|
||||||
|
'not as long',
|
||||||
|
]
|
||||||
|
|
||||||
|
dict_of_numbers = {
|
||||||
|
'one': 1,
|
||||||
|
'two': 2,
|
||||||
|
'twenty four': 24,
|
||||||
|
}
|
||||||
|
|
||||||
|
object_one.call_a_method('string three',
|
||||||
|
'string four',
|
||||||
|
kwarg1=list_of_strings,
|
||||||
|
kwarg2=dict_of_numbers)
|
||||||
|
|
||||||
|
|
||||||
|
Internationalization (i18n) Strings
|
||||||
|
-----------------------------------
|
||||||
|
In order to support multiple languages, we have a mechanism to support
|
||||||
|
automatic translations of exception and log strings.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
msg = _("An error occurred")
|
||||||
|
raise HTTPBadRequest(explanation=msg)
|
||||||
|
|
||||||
|
If you have a variable to place within the string, first internationalize the
|
||||||
|
template string then do the replacement.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
msg = _("Missing parameter: %s") % ("flavor",)
|
||||||
|
LOG.error(msg)
|
||||||
|
|
||||||
|
If you have multiple variables to place in the string, use keyword parameters.
|
||||||
|
This helps our translators reorder parameters when needed.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
msg = _("The server with id %(s_id)s has no key %(m_key)s")
|
||||||
|
LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
|
||||||
|
|
||||||
|
|
||||||
|
Creating Unit Tests
|
||||||
|
-------------------
|
||||||
|
For every new feature, unit tests should be created that both test and
|
||||||
|
(implicitly) document the usage of said feature. If submitting a patch for a
|
||||||
|
bug that had no unit test, a new passing unit test should be added. If a
|
||||||
|
submitted bug fix does have a unit test, be sure to add a new one that fails
|
||||||
|
without the patch and passes with the patch.
|
||||||
|
|
||||||
|
For more information on creating unit tests and utilizing the testing
|
||||||
|
infrastructure in OpenStack Cinder, please read cinder/testing/README.rst.
|
||||||
|
|
||||||
|
|
||||||
|
openstack-common
|
||||||
|
----------------
|
||||||
|
|
||||||
|
A number of modules from openstack-common are imported into the project.
|
||||||
|
|
||||||
|
These modules are "incubating" in openstack-common and are kept in sync
|
||||||
|
with the help of openstack-common's update.py script. See:
|
||||||
|
|
||||||
|
http://wiki.openstack.org/CommonLibrary#Incubation
|
||||||
|
|
||||||
|
The copy of the code should never be directly modified here. Please
|
||||||
|
always update openstack-common first and then run the script to copy
|
||||||
|
the changes across.
|
176
LICENSE
Normal file
176
LICENSE
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
37
MANIFEST.in
Normal file
37
MANIFEST.in
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
include HACKING.rst
|
||||||
|
include LICENSE run_tests.sh
|
||||||
|
include README.rst
|
||||||
|
include MANIFEST.in pylintrc Authors
|
||||||
|
include openstack-common.conf
|
||||||
|
include babel.cfg tox.ini
|
||||||
|
graft cinder/CA
|
||||||
|
graft doc
|
||||||
|
graft smoketests
|
||||||
|
graft tools
|
||||||
|
graft etc
|
||||||
|
graft contrib
|
||||||
|
graft plugins
|
||||||
|
graft cinder/api/openstack/*/schemas
|
||||||
|
include cinder/auth/*.schema
|
||||||
|
include cinder/auth/cinderrc.template
|
||||||
|
include cinder/auth/opendj.sh
|
||||||
|
include cinder/auth/slap.sh
|
||||||
|
include cinder/db/sqlalchemy/migrate_repo/migrate.cfg
|
||||||
|
include cinder/db/sqlalchemy/migrate_repo/README
|
||||||
|
include cinder/db/sqlalchemy/migrate_repo/versions/*.sql
|
||||||
|
include cinder/openstack/common/README
|
||||||
|
include cinder/virt/interfaces.template
|
||||||
|
include cinder/virt/libvirt*.xml.template
|
||||||
|
include cinder/virt/cpuinfo.xml.template
|
||||||
|
include cinder/testing/README.rst
|
||||||
|
include cinder/tests/db/cinder.austin.sqlite
|
||||||
|
include cinder/tests/image/*.tar.gz
|
||||||
|
include cinder/tests/policy.json
|
||||||
|
include cinder/tests/test_migrations.conf
|
||||||
|
include cinder/tests/xenapi/vm_rrd.xml
|
||||||
|
include plugins/xenapi/README
|
||||||
|
include plugins/xenapi/etc/xapi.d/plugins/objectstore
|
||||||
|
include plugins/xenapi/etc/xapi.d/plugins/pluginlib_cinder.py
|
||||||
|
global-exclude *.pyc
|
||||||
|
|
||||||
|
recursive-include cinder/locale *
|
21
README.rst
Normal file
21
README.rst
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
The Choose Your Own Adventure README for Cinder
|
||||||
|
===============================================
|
||||||
|
|
||||||
|
You have come across a storage service for an open cloud computing service.
|
||||||
|
It has identified itself as "Cinder." It was abstracted from the Nova project.
|
||||||
|
|
||||||
|
To monitor it from a distance: follow `@openstack <http://twitter.com/openstack>`_ on twitter.
|
||||||
|
|
||||||
|
To tame it for use in your own cloud: read http://docs.openstack.org
|
||||||
|
|
||||||
|
To study its anatomy: read http://cinder.openstack.org
|
||||||
|
|
||||||
|
To dissect it in detail: visit http://github.com/openstack/cinder
|
||||||
|
|
||||||
|
To taunt it with its weaknesses: use http://bugs.launchpad.net/cinder
|
||||||
|
|
||||||
|
To watch it: http://jenkins.openstack.org
|
||||||
|
|
||||||
|
To hack at it: read HACKING
|
||||||
|
|
||||||
|
To cry over its pylint problems: http://jenkins.openstack.org/job/cinder-pylint/violations
|
70
bin/cinder-all
Executable file
70
bin/cinder-all
Executable file
@ -0,0 +1,70 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack, LLC
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Starter script for All cinder services.
|
||||||
|
|
||||||
|
This script attempts to start all the cinder services in one process. Each
|
||||||
|
service is started in its own greenthread. Please note that exceptions and
|
||||||
|
sys.exit() on the starting of a service are logged and the script will
|
||||||
|
continue attempting to launch the rest of the services.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
eventlet.monkey_patch()
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
|
||||||
|
sys.argv[0]), os.pardir, os.pardir))
|
||||||
|
if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
|
||||||
|
sys.path.insert(0, possible_topdir)
|
||||||
|
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import service
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger('cinder.all')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
|
utils.monkey_patch()
|
||||||
|
servers = []
|
||||||
|
# cinder-api
|
||||||
|
for api in flags.FLAGS.enabled_apis:
|
||||||
|
try:
|
||||||
|
servers.append(service.WSGIService(api))
|
||||||
|
except (Exception, SystemExit):
|
||||||
|
logging.exception(_('Failed to load %s') % '%s-api' % api)
|
||||||
|
|
||||||
|
for binary in ['cinder-volume', 'cinder-scheduler']:
|
||||||
|
try:
|
||||||
|
servers.append(service.Service.create(binary=binary))
|
||||||
|
except (Exception, SystemExit):
|
||||||
|
LOG.exception(_('Failed to load %s'), binary)
|
||||||
|
service.serve(*servers)
|
||||||
|
service.wait()
|
47
bin/cinder-api
Executable file
47
bin/cinder-api
Executable file
@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Starter script for Cinder OS API."""
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
eventlet.monkey_patch()
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
|
||||||
|
sys.argv[0]), os.pardir, os.pardir))
|
||||||
|
if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
|
||||||
|
sys.path.insert(0, possible_topdir)
|
||||||
|
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import service
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
utils.default_flagfile()
|
||||||
|
flags.FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
|
utils.monkey_patch()
|
||||||
|
server = service.WSGIService('osapi_volume')
|
||||||
|
service.serve(server)
|
||||||
|
service.wait()
|
635
bin/cinder-manage
Executable file
635
bin/cinder-manage
Executable file
@ -0,0 +1,635 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
# Interactive shell based on Django:
|
||||||
|
#
|
||||||
|
# Copyright (c) 2005, the Lawrence Journal-World
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are met:
|
||||||
|
#
|
||||||
|
# 1. Redistributions of source code must retain the above copyright notice,
|
||||||
|
# this list of conditions and the following disclaimer.
|
||||||
|
#
|
||||||
|
# 2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in the
|
||||||
|
# documentation and/or other materials provided with the distribution.
|
||||||
|
#
|
||||||
|
# 3. Neither the name of Django nor the names of its contributors may be
|
||||||
|
# used to endorse or promote products derived from this software without
|
||||||
|
# specific prior written permission.
|
||||||
|
#
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
CLI interface for cinder management.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import ast
|
||||||
|
import errno
|
||||||
|
import gettext
|
||||||
|
import json
|
||||||
|
import math
|
||||||
|
import netaddr
|
||||||
|
import optparse
|
||||||
|
import os
|
||||||
|
import StringIO
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
# If ../cinder/__init__.py exists, add ../ to Python search path, so that
|
||||||
|
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||||
|
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||||
|
os.pardir,
|
||||||
|
os.pardir))
|
||||||
|
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
|
||||||
|
sys.path.insert(0, POSSIBLE_TOPDIR)
|
||||||
|
|
||||||
|
gettext.install('cinder', unicode=1)
|
||||||
|
|
||||||
|
from cinder.compat import flagfile
|
||||||
|
from cinder import context
|
||||||
|
from cinder import db
|
||||||
|
from cinder.db import migration
|
||||||
|
from cinder import exception
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import importutils
|
||||||
|
from cinder import quota
|
||||||
|
from cinder import rpc
|
||||||
|
from cinder import utils
|
||||||
|
from cinder import version
|
||||||
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
# Decorators for actions
|
||||||
|
def args(*args, **kwargs):
|
||||||
|
def _decorator(func):
|
||||||
|
func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
|
||||||
|
return func
|
||||||
|
return _decorator
|
||||||
|
|
||||||
|
|
||||||
|
def param2id(object_id):
|
||||||
|
"""Helper function to convert various id types to internal id.
|
||||||
|
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
|
||||||
|
"""
|
||||||
|
if '-' in object_id:
|
||||||
|
# FIXME(ja): mapping occurs in nova?
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
return int(object_id)
|
||||||
|
|
||||||
|
|
||||||
|
class ShellCommands(object):
|
||||||
|
def bpython(self):
|
||||||
|
"""Runs a bpython shell.
|
||||||
|
|
||||||
|
Falls back to Ipython/python shell if unavailable"""
|
||||||
|
self.run('bpython')
|
||||||
|
|
||||||
|
def ipython(self):
|
||||||
|
"""Runs an Ipython shell.
|
||||||
|
|
||||||
|
Falls back to Python shell if unavailable"""
|
||||||
|
self.run('ipython')
|
||||||
|
|
||||||
|
def python(self):
|
||||||
|
"""Runs a python shell.
|
||||||
|
|
||||||
|
Falls back to Python shell if unavailable"""
|
||||||
|
self.run('python')
|
||||||
|
|
||||||
|
@args('--shell', dest="shell", metavar='<bpython|ipython|python >',
|
||||||
|
help='Python shell')
|
||||||
|
def run(self, shell=None):
|
||||||
|
"""Runs a Python interactive interpreter."""
|
||||||
|
if not shell:
|
||||||
|
shell = 'bpython'
|
||||||
|
|
||||||
|
if shell == 'bpython':
|
||||||
|
try:
|
||||||
|
import bpython
|
||||||
|
bpython.embed()
|
||||||
|
except ImportError:
|
||||||
|
shell = 'ipython'
|
||||||
|
if shell == 'ipython':
|
||||||
|
try:
|
||||||
|
import IPython
|
||||||
|
# Explicitly pass an empty list as arguments, because
|
||||||
|
# otherwise IPython would use sys.argv from this script.
|
||||||
|
shell = IPython.Shell.IPShell(argv=[])
|
||||||
|
shell.mainloop()
|
||||||
|
except ImportError:
|
||||||
|
shell = 'python'
|
||||||
|
|
||||||
|
if shell == 'python':
|
||||||
|
import code
|
||||||
|
try:
|
||||||
|
# Try activating rlcompleter, because it's handy.
|
||||||
|
import readline
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# We don't have to wrap the following import in a 'try',
|
||||||
|
# because we already know 'readline' was imported successfully.
|
||||||
|
import rlcompleter
|
||||||
|
readline.parse_and_bind("tab:complete")
|
||||||
|
code.interact()
|
||||||
|
|
||||||
|
@args('--path', dest='path', metavar='<path>', help='Script path')
|
||||||
|
def script(self, path):
|
||||||
|
"""Runs the script from the specifed path with flags set properly.
|
||||||
|
arguments: path"""
|
||||||
|
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
|
||||||
|
|
||||||
|
|
||||||
|
def _db_error(caught_exception):
|
||||||
|
print caught_exception
|
||||||
|
print _("The above error may show that the database has not "
|
||||||
|
"been created.\nPlease create a database using "
|
||||||
|
"'cinder-manage db sync' before running this command.")
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
class HostCommands(object):
|
||||||
|
"""List hosts"""
|
||||||
|
|
||||||
|
def list(self, zone=None):
|
||||||
|
"""Show a list of all physical hosts. Filter by zone.
|
||||||
|
args: [zone]"""
|
||||||
|
print "%-25s\t%-15s" % (_('host'),
|
||||||
|
_('zone'))
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
now = utils.utcnow()
|
||||||
|
services = db.service_get_all(ctxt)
|
||||||
|
if zone:
|
||||||
|
services = [s for s in services if s['availability_zone'] == zone]
|
||||||
|
hosts = []
|
||||||
|
for srv in services:
|
||||||
|
if not [h for h in hosts if h['host'] == srv['host']]:
|
||||||
|
hosts.append(srv)
|
||||||
|
|
||||||
|
for h in hosts:
|
||||||
|
print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
|
||||||
|
|
||||||
|
|
||||||
|
class DbCommands(object):
|
||||||
|
"""Class for managing the database."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
@args('--version', dest='version', metavar='<version>',
|
||||||
|
help='Database version')
|
||||||
|
def sync(self, version=None):
|
||||||
|
"""Sync the database up to the most recent version."""
|
||||||
|
return migration.db_sync(version)
|
||||||
|
|
||||||
|
def version(self):
|
||||||
|
"""Print the current database version."""
|
||||||
|
print migration.db_version()
|
||||||
|
|
||||||
|
|
||||||
|
class VersionCommands(object):
|
||||||
|
"""Class for exposing the codebase version."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def list(self):
|
||||||
|
print _("%(version)s (%(vcs)s)") % \
|
||||||
|
{'version': version.version_string(),
|
||||||
|
'vcs': version.version_string_with_vcs()}
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
self.list()
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeCommands(object):
|
||||||
|
"""Methods for dealing with a cloud in an odd state"""
|
||||||
|
|
||||||
|
@args('--volume', dest='volume_id', metavar='<volume id>',
|
||||||
|
help='Volume ID')
|
||||||
|
def delete(self, volume_id):
|
||||||
|
"""Delete a volume, bypassing the check that it
|
||||||
|
must be available."""
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
volume = db.volume_get(ctxt, param2id(volume_id))
|
||||||
|
host = volume['host']
|
||||||
|
|
||||||
|
if not host:
|
||||||
|
print "Volume not yet assigned to host."
|
||||||
|
print "Deleting volume from database and skipping rpc."
|
||||||
|
db.volume_destroy(ctxt, param2id(volume_id))
|
||||||
|
return
|
||||||
|
|
||||||
|
if volume['status'] == 'in-use':
|
||||||
|
print "Volume is in-use."
|
||||||
|
print "Detach volume from instance and then try again."
|
||||||
|
return
|
||||||
|
|
||||||
|
rpc.cast(ctxt,
|
||||||
|
db.queue_get_for(ctxt, FLAGS.volume_topic, host),
|
||||||
|
{"method": "delete_volume",
|
||||||
|
"args": {"volume_id": volume['id']}})
|
||||||
|
|
||||||
|
@args('--volume', dest='volume_id', metavar='<volume id>',
|
||||||
|
help='Volume ID')
|
||||||
|
def reattach(self, volume_id):
|
||||||
|
"""Re-attach a volume that has previously been attached
|
||||||
|
to an instance. Typically called after a compute host
|
||||||
|
has been rebooted."""
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
volume = db.volume_get(ctxt, param2id(volume_id))
|
||||||
|
if not volume['instance_id']:
|
||||||
|
print "volume is not attached to an instance"
|
||||||
|
return
|
||||||
|
instance = db.instance_get(ctxt, volume['instance_id'])
|
||||||
|
host = instance['host']
|
||||||
|
rpc.cast(ctxt,
|
||||||
|
db.queue_get_for(ctxt, FLAGS.compute_topic, host),
|
||||||
|
{"method": "attach_volume",
|
||||||
|
"args": {"instance_id": instance['id'],
|
||||||
|
"volume_id": volume['id'],
|
||||||
|
"mountpoint": volume['mountpoint']}})
|
||||||
|
|
||||||
|
|
||||||
|
class StorageManagerCommands(object):
|
||||||
|
"""Class for mangaging Storage Backends and Flavors"""
|
||||||
|
|
||||||
|
def flavor_list(self, flavor=None):
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
|
||||||
|
try:
|
||||||
|
if flavor is None:
|
||||||
|
flavors = db.sm_flavor_get_all(ctxt)
|
||||||
|
else:
|
||||||
|
flavors = db.sm_flavor_get(ctxt, flavor)
|
||||||
|
except exception.NotFound as ex:
|
||||||
|
print "error: %s" % ex
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
print "%-18s\t%-20s\t%s" % (_('id'),
|
||||||
|
_('Label'),
|
||||||
|
_('Description'))
|
||||||
|
|
||||||
|
for flav in flavors:
|
||||||
|
print "%-18s\t%-20s\t%s" % (
|
||||||
|
flav['id'],
|
||||||
|
flav['label'],
|
||||||
|
flav['description'])
|
||||||
|
|
||||||
|
def flavor_create(self, label, desc):
|
||||||
|
# TODO(renukaapte) flavor name must be unique
|
||||||
|
try:
|
||||||
|
db.sm_flavor_create(context.get_admin_context(),
|
||||||
|
dict(label=label,
|
||||||
|
description=desc))
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
|
||||||
|
def flavor_delete(self, label):
|
||||||
|
try:
|
||||||
|
db.sm_flavor_delete(context.get_admin_context(), label)
|
||||||
|
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
|
||||||
|
def _splitfun(self, item):
|
||||||
|
i = item.split("=")
|
||||||
|
return i[0:2]
|
||||||
|
|
||||||
|
def backend_list(self, backend_conf_id=None):
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
|
||||||
|
try:
|
||||||
|
if backend_conf_id is None:
|
||||||
|
backends = db.sm_backend_conf_get_all(ctxt)
|
||||||
|
else:
|
||||||
|
backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
|
||||||
|
|
||||||
|
except exception.NotFound as ex:
|
||||||
|
print "error: %s" % ex
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
|
||||||
|
_('Flavor id'),
|
||||||
|
_('SR UUID'),
|
||||||
|
_('SR Type'),
|
||||||
|
_('Config Parameters'),)
|
||||||
|
|
||||||
|
for b in backends:
|
||||||
|
print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
|
||||||
|
b['flavor_id'],
|
||||||
|
b['sr_uuid'],
|
||||||
|
b['sr_type'],
|
||||||
|
b['config_params'],)
|
||||||
|
|
||||||
|
def backend_add(self, flavor_label, sr_type, *args):
|
||||||
|
# TODO(renukaapte) Add backend_introduce.
|
||||||
|
ctxt = context.get_admin_context()
|
||||||
|
params = dict(map(self._splitfun, args))
|
||||||
|
sr_uuid = utils.gen_uuid()
|
||||||
|
|
||||||
|
if flavor_label is None:
|
||||||
|
print "error: backend needs to be associated with flavor"
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
try:
|
||||||
|
flavors = db.sm_flavor_get(ctxt, flavor_label)
|
||||||
|
|
||||||
|
except exception.NotFound as ex:
|
||||||
|
print "error: %s" % ex
|
||||||
|
sys.exit(2)
|
||||||
|
|
||||||
|
config_params = " ".join(['%s=%s' %
|
||||||
|
(key, params[key]) for key in params])
|
||||||
|
|
||||||
|
if 'sr_uuid' in params:
|
||||||
|
sr_uuid = params['sr_uuid']
|
||||||
|
try:
|
||||||
|
backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid)
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
|
||||||
|
if backend:
|
||||||
|
print 'Backend config found. Would you like to recreate this?'
|
||||||
|
print '(WARNING:Recreating will destroy all VDIs on backend!!)'
|
||||||
|
c = raw_input('Proceed? (y/n) ')
|
||||||
|
if c == 'y' or c == 'Y':
|
||||||
|
try:
|
||||||
|
db.sm_backend_conf_update(ctxt, backend['id'],
|
||||||
|
dict(created=False,
|
||||||
|
flavor_id=flavors['id'],
|
||||||
|
sr_type=sr_type,
|
||||||
|
config_params=config_params))
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
return
|
||||||
|
|
||||||
|
else:
|
||||||
|
print 'Backend config not found. Would you like to create it?'
|
||||||
|
|
||||||
|
print '(WARNING: Creating will destroy all data on backend!!!)'
|
||||||
|
c = raw_input('Proceed? (y/n) ')
|
||||||
|
if c == 'y' or c == 'Y':
|
||||||
|
try:
|
||||||
|
db.sm_backend_conf_create(ctxt,
|
||||||
|
dict(flavor_id=flavors['id'],
|
||||||
|
sr_uuid=sr_uuid,
|
||||||
|
sr_type=sr_type,
|
||||||
|
config_params=config_params))
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
|
||||||
|
def backend_remove(self, backend_conf_id):
|
||||||
|
try:
|
||||||
|
db.sm_backend_conf_delete(context.get_admin_context(),
|
||||||
|
backend_conf_id)
|
||||||
|
|
||||||
|
except exception.DBError, e:
|
||||||
|
_db_error(e)
|
||||||
|
|
||||||
|
|
||||||
|
class ConfigCommands(object):
|
||||||
|
"""Class for exposing the flags defined by flag_file(s)."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def list(self):
|
||||||
|
for key, value in FLAGS.iteritems():
|
||||||
|
if value is not None:
|
||||||
|
print '%s = %s' % (key, value)
|
||||||
|
|
||||||
|
@args('--infile', dest='infile', metavar='<path>',
|
||||||
|
help='old-style flagfile to convert to config')
|
||||||
|
@args('--outfile', dest='outfile', metavar='<path>',
|
||||||
|
help='path for output file. Writes config'
|
||||||
|
'to stdout if not specified.')
|
||||||
|
def convert(self, infile, outfile=None):
|
||||||
|
"""Converts a flagfile and prints results to stdout."""
|
||||||
|
arg = '--flagfile=%s' % infile
|
||||||
|
with flagfile.handle_flagfiles_managed([arg]) as newargs:
|
||||||
|
with open(newargs[0].split('=')[1]) as configfile:
|
||||||
|
config = configfile.read()
|
||||||
|
if outfile:
|
||||||
|
with open(outfile, 'w') as configfile:
|
||||||
|
configfile.write(config)
|
||||||
|
else:
|
||||||
|
print config,
|
||||||
|
|
||||||
|
|
||||||
|
class GetLogCommands(object):
|
||||||
|
"""Get logging information"""
|
||||||
|
|
||||||
|
def errors(self):
|
||||||
|
"""Get all of the errors from the log files"""
|
||||||
|
error_found = 0
|
||||||
|
if FLAGS.logdir:
|
||||||
|
logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
|
||||||
|
for file in logs:
|
||||||
|
log_file = os.path.join(FLAGS.logdir, file)
|
||||||
|
lines = [line.strip() for line in open(log_file, "r")]
|
||||||
|
lines.reverse()
|
||||||
|
print_name = 0
|
||||||
|
for index, line in enumerate(lines):
|
||||||
|
if line.find(" ERROR ") > 0:
|
||||||
|
error_found += 1
|
||||||
|
if print_name == 0:
|
||||||
|
print log_file + ":-"
|
||||||
|
print_name = 1
|
||||||
|
print "Line %d : %s" % (len(lines) - index, line)
|
||||||
|
if error_found == 0:
|
||||||
|
print "No errors in logfiles!"
|
||||||
|
|
||||||
|
def syslog(self, num_entries=10):
|
||||||
|
"""Get <num_entries> of the cinder syslog events"""
|
||||||
|
entries = int(num_entries)
|
||||||
|
count = 0
|
||||||
|
log_file = ''
|
||||||
|
if os.path.exists('/var/log/syslog'):
|
||||||
|
log_file = '/var/log/syslog'
|
||||||
|
elif os.path.exists('/var/log/messages'):
|
||||||
|
log_file = '/var/log/messages'
|
||||||
|
else:
|
||||||
|
print "Unable to find system log file!"
|
||||||
|
sys.exit(1)
|
||||||
|
lines = [line.strip() for line in open(log_file, "r")]
|
||||||
|
lines.reverse()
|
||||||
|
print "Last %s cinder syslog entries:-" % (entries)
|
||||||
|
for line in lines:
|
||||||
|
if line.find("cinder") > 0:
|
||||||
|
count += 1
|
||||||
|
print "%s" % (line)
|
||||||
|
if count == entries:
|
||||||
|
break
|
||||||
|
|
||||||
|
if count == 0:
|
||||||
|
print "No cinder entries in syslog!"
|
||||||
|
|
||||||
|
|
||||||
|
CATEGORIES = [
|
||||||
|
('config', ConfigCommands),
|
||||||
|
('db', DbCommands),
|
||||||
|
('host', HostCommands),
|
||||||
|
('logs', GetLogCommands),
|
||||||
|
('shell', ShellCommands),
|
||||||
|
('sm', StorageManagerCommands),
|
||||||
|
('version', VersionCommands),
|
||||||
|
('volume', VolumeCommands),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def lazy_match(name, key_value_tuples):
|
||||||
|
"""Finds all objects that have a key that case insensitively contains
|
||||||
|
[name] key_value_tuples is a list of tuples of the form (key, value)
|
||||||
|
returns a list of tuples of the form (key, value)"""
|
||||||
|
result = []
|
||||||
|
for (k, v) in key_value_tuples:
|
||||||
|
if k.lower().find(name.lower()) == 0:
|
||||||
|
result.append((k, v))
|
||||||
|
if len(result) == 0:
|
||||||
|
print "%s does not match any options:" % name
|
||||||
|
for k, _v in key_value_tuples:
|
||||||
|
print "\t%s" % k
|
||||||
|
sys.exit(2)
|
||||||
|
if len(result) > 1:
|
||||||
|
print "%s matched multiple options:" % name
|
||||||
|
for k, _v in result:
|
||||||
|
print "\t%s" % k
|
||||||
|
sys.exit(2)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def methods_of(obj):
|
||||||
|
"""Get all callable methods of an object that don't start with underscore
|
||||||
|
returns a list of tuples of the form (method_name, method)"""
|
||||||
|
result = []
|
||||||
|
for i in dir(obj):
|
||||||
|
if callable(getattr(obj, i)) and not i.startswith('_'):
|
||||||
|
result.append((i, getattr(obj, i)))
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
"""Parse options and call the appropriate class/method."""
|
||||||
|
flagfile = utils.default_flagfile()
|
||||||
|
|
||||||
|
if flagfile and not os.access(flagfile, os.R_OK):
|
||||||
|
st = os.stat(flagfile)
|
||||||
|
print "Could not read %s. Re-running with sudo" % flagfile
|
||||||
|
try:
|
||||||
|
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
|
||||||
|
except Exception:
|
||||||
|
print 'sudo failed, continuing as if nothing happened'
|
||||||
|
|
||||||
|
rpc.register_opts(FLAGS)
|
||||||
|
|
||||||
|
try:
|
||||||
|
argv = FLAGS(sys.argv)
|
||||||
|
logging.setup()
|
||||||
|
except IOError, e:
|
||||||
|
if e.errno == errno.EACCES:
|
||||||
|
print _('Please re-run cinder-manage as root.')
|
||||||
|
sys.exit(2)
|
||||||
|
raise
|
||||||
|
script_name = argv.pop(0)
|
||||||
|
if len(argv) < 1:
|
||||||
|
print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
|
||||||
|
{'version': version.version_string(),
|
||||||
|
'vcs': version.version_string_with_vcs()}
|
||||||
|
print script_name + " category action [<args>]"
|
||||||
|
print _("Available categories:")
|
||||||
|
for k, _v in CATEGORIES:
|
||||||
|
print "\t%s" % k
|
||||||
|
sys.exit(2)
|
||||||
|
category = argv.pop(0)
|
||||||
|
matches = lazy_match(category, CATEGORIES)
|
||||||
|
# instantiate the command group object
|
||||||
|
category, fn = matches[0]
|
||||||
|
command_object = fn()
|
||||||
|
actions = methods_of(command_object)
|
||||||
|
if len(argv) < 1:
|
||||||
|
if hasattr(command_object, '__call__'):
|
||||||
|
action = ''
|
||||||
|
fn = command_object.__call__
|
||||||
|
else:
|
||||||
|
print script_name + " category action [<args>]"
|
||||||
|
print _("Available actions for %s category:") % category
|
||||||
|
for k, _v in actions:
|
||||||
|
print "\t%s" % k
|
||||||
|
sys.exit(2)
|
||||||
|
else:
|
||||||
|
action = argv.pop(0)
|
||||||
|
matches = lazy_match(action, actions)
|
||||||
|
action, fn = matches[0]
|
||||||
|
|
||||||
|
# For not decorated methods
|
||||||
|
options = getattr(fn, 'options', [])
|
||||||
|
|
||||||
|
usage = "%%prog %s %s <args> [options]" % (category, action)
|
||||||
|
parser = optparse.OptionParser(usage=usage)
|
||||||
|
for ar, kw in options:
|
||||||
|
parser.add_option(*ar, **kw)
|
||||||
|
(opts, fn_args) = parser.parse_args(argv)
|
||||||
|
fn_kwargs = vars(opts)
|
||||||
|
|
||||||
|
for k, v in fn_kwargs.items():
|
||||||
|
if v is None:
|
||||||
|
del fn_kwargs[k]
|
||||||
|
elif isinstance(v, basestring):
|
||||||
|
fn_kwargs[k] = v.decode('utf-8')
|
||||||
|
else:
|
||||||
|
fn_kwargs[k] = v
|
||||||
|
|
||||||
|
fn_args = [arg.decode('utf-8') for arg in fn_args]
|
||||||
|
|
||||||
|
# call the action with the remaining arguments
|
||||||
|
try:
|
||||||
|
fn(*fn_args, **fn_kwargs)
|
||||||
|
rpc.cleanup()
|
||||||
|
sys.exit(0)
|
||||||
|
except TypeError:
|
||||||
|
print _("Possible wrong number of arguments supplied")
|
||||||
|
print fn.__doc__
|
||||||
|
parser.print_help()
|
||||||
|
raise
|
||||||
|
except Exception:
|
||||||
|
print _("Command failed, please check log for more info")
|
||||||
|
raise
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
74
bin/cinder-rootwrap
Executable file
74
bin/cinder-rootwrap
Executable file
@ -0,0 +1,74 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Openstack, LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Root wrapper for Cinder
|
||||||
|
|
||||||
|
Uses modules in cinder.rootwrap containing filters for commands
|
||||||
|
that cinder is allowed to run as another user.
|
||||||
|
|
||||||
|
To switch to using this, you should:
|
||||||
|
* Set "--root_helper=sudo cinder-rootwrap" in cinder.conf
|
||||||
|
* Allow cinder to run cinder-rootwrap as root in cinder_sudoers:
|
||||||
|
cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap
|
||||||
|
(all other commands can be removed from this file)
|
||||||
|
|
||||||
|
To make allowed commands node-specific, your packaging should only
|
||||||
|
install cinder/rootwrap/{compute,network,volume}.py respectively on
|
||||||
|
compute, network and volume nodes (i.e. cinder-api nodes should not
|
||||||
|
have any of those files installed).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
RC_UNAUTHORIZED = 99
|
||||||
|
RC_NOCOMMAND = 98
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Split arguments, require at least a command
    execname = sys.argv.pop(0)
    if len(sys.argv) == 0:
        print "%s: %s" % (execname, "No command specified")
        sys.exit(RC_NOCOMMAND)

    # Everything after the executable name is the command to authorize.
    userargs = sys.argv[:]

    # Add ../ to sys.path to allow running from branch
    possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
                                                    os.pardir, os.pardir))
    if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")):
        sys.path.insert(0, possible_topdir)

    # Imported after the sys.path adjustment so a branch checkout wins
    # over an installed copy.
    from cinder.rootwrap import wrapper

    # Execute command if it matches any of the loaded filters
    filters = wrapper.load_filters()
    filtermatch = wrapper.match_filter(filters, userargs)
    if filtermatch:
        # Run the filter-approved command with this process's stdio so it
        # behaves like a direct invocation, and propagate its exit status.
        obj = subprocess.Popen(filtermatch.get_command(userargs),
                               stdin=sys.stdin,
                               stdout=sys.stdout,
                               stderr=sys.stderr,
                               env=filtermatch.get_environment(userargs))
        obj.wait()
        sys.exit(obj.returncode)

    # No filter allowed the command: refuse to run it.
    print "Unauthorized command: %s" % ' '.join(userargs)
    sys.exit(RC_UNAUTHORIZED)
|
51
bin/cinder-scheduler
Executable file
51
bin/cinder-scheduler
Executable file
@ -0,0 +1,51 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Starter script for Cinder Scheduler."""
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
eventlet.monkey_patch()
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# If ../cinder/__init__.py exists, add ../ to Python search path, so that
|
||||||
|
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||||
|
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||||
|
os.pardir,
|
||||||
|
os.pardir))
|
||||||
|
if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')):
|
||||||
|
sys.path.insert(0, possible_topdir)
|
||||||
|
|
||||||
|
gettext.install('cinder', unicode=1)
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import service
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Pick up the default flag file before parsing command-line flags.
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    # Apply any configured monkey-patching before the service starts.
    utils.monkey_patch()
    # Create the scheduler service and block until it is stopped.
    server = service.Service.create(binary='cinder-scheduler')
    service.serve(server)
    service.wait()
|
49
bin/cinder-volume
Executable file
49
bin/cinder-volume
Executable file
@ -0,0 +1,49 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Starter script for Cinder Volume."""
|
||||||
|
|
||||||
|
import eventlet
|
||||||
|
eventlet.monkey_patch()
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# If ../cinder/__init__.py exists, add ../ to Python search path, so that
|
||||||
|
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||||
|
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||||
|
os.pardir,
|
||||||
|
os.pardir))
|
||||||
|
if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')):
|
||||||
|
sys.path.insert(0, possible_topdir)
|
||||||
|
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import service
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Pick up the default flag file before parsing command-line flags.
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    # Apply any configured monkey-patching before the service starts.
    utils.monkey_patch()
    # Create the volume service and block until it is stopped.
    server = service.Service.create(binary='cinder-volume')
    service.serve(server)
    service.wait()
|
80
bin/clear_rabbit_queues
Executable file
80
bin/clear_rabbit_queues
Executable file
@ -0,0 +1,80 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Openstack, LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Admin/debug script to wipe rabbitMQ (AMQP) queues cinder uses.
|
||||||
|
This can be used if you need to change durable options on queues,
|
||||||
|
or to wipe all messages in the queue system if things are in a
|
||||||
|
serious bad way.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import gettext
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
# If ../cinder/__init__.py exists, add ../ to Python search path, so that
|
||||||
|
# it will override what happens to be installed in /usr/(local/)lib/python...
|
||||||
|
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
|
||||||
|
os.pardir,
|
||||||
|
os.pardir))
|
||||||
|
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
|
||||||
|
sys.path.insert(0, POSSIBLE_TOPDIR)
|
||||||
|
|
||||||
|
gettext.install('cinder', unicode=1)
|
||||||
|
|
||||||
|
|
||||||
|
from cinder import context
|
||||||
|
from cinder import exception
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import cfg
|
||||||
|
from cinder import rpc
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
delete_exchange_opt = \
|
||||||
|
cfg.BoolOpt('delete_exchange',
|
||||||
|
default=False,
|
||||||
|
help='delete cinder exchange too.')
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
FLAGS.register_cli_opt(delete_exchange_opt)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_exchange(exch):
    """Delete the named AMQP exchange over a fresh RPC connection."""
    channel = rpc.create_connection().get_channel()
    channel.exchange_delete(exch)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_queues(queues):
    """Delete each named AMQP queue over a fresh RPC connection."""
    channel = rpc.create_connection().get_channel()
    for queue_name in queues:
        channel.queue_delete(queue_name)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    utils.default_flagfile()
    # flags.FLAGS() returns the positional (non-flag) arguments;
    # args[0] is the script name, the rest are queue names to delete.
    args = flags.FLAGS(sys.argv)
    logging.setup()
    rpc.register_opts(flags.FLAGS)
    delete_queues(args[1:])
    # Optionally wipe the whole exchange too (--delete_exchange flag).
    if FLAGS.delete_exchange:
        delete_exchange(FLAGS.control_exchange)
|
42
cinder/__init__.py
Normal file
42
cinder/__init__.py
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
:mod:`cinder` -- Cloud IaaS Platform
|
||||||
|
===================================
|
||||||
|
|
||||||
|
.. automodule:: cinder
|
||||||
|
:platform: Unix
|
||||||
|
:synopsis: Infrastructure-as-a-Service Cloud platform.
|
||||||
|
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
|
||||||
|
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
|
||||||
|
.. moduleauthor:: Vishvananda Ishaya <vishvananda@gmail.com>
|
||||||
|
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
|
||||||
|
.. moduleauthor:: Manish Singh <yosh@gimp.org>
|
||||||
|
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
|
||||||
|
"""
|
||||||
|
|
||||||
|
import gettext
|
||||||
|
import logging
|
||||||
|
|
||||||
|
|
||||||
|
gettext.install('cinder', unicode=1)
|
||||||
|
# NOTE(jkoelker) This configures the root logger if it is not already
|
||||||
|
# configured so messages from logging setup can be written
|
||||||
|
# to the console
|
||||||
|
logging.basicConfig(format='%(message)s')
|
17
cinder/api/__init__.py
Normal file
17
cinder/api/__init__.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
103
cinder/api/auth.py
Normal file
103
cinder/api/auth.py
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 OpenStack, LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""
|
||||||
|
Common Auth Middleware.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from cinder import context
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import cfg
|
||||||
|
from cinder import wsgi
|
||||||
|
|
||||||
|
|
||||||
|
use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
|
||||||
|
default=False,
|
||||||
|
help='Treat X-Forwarded-For as the canonical remote address. '
|
||||||
|
'Only enable this if you have a sanitizing proxy.')
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
FLAGS.register_opt(use_forwarded_for_opt)
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def pipeline_factory(loader, global_conf, **local_conf):
    """Paste pipeline replica that selects the pipeline by auth_strategy.

    Looks up the space-separated pipeline named after the configured auth
    strategy, preferring the '<strategy>_nolimit' variant when API rate
    limiting is disabled, then wraps the terminal app in each filter.
    """
    spec = local_conf[FLAGS.auth_strategy]
    if not FLAGS.api_rate_limit:
        nolimit_name = FLAGS.auth_strategy + '_nolimit'
        spec = local_conf.get(nolimit_name, spec)
    names = spec.split()
    # All names but the last are filters; the last is the app itself.
    factories = [loader.get_filter(name) for name in names[:-1]]
    app = loader.get_app(names[-1])
    # Apply right-to-left so the first listed filter ends up outermost.
    for factory in reversed(factories):
        app = factory(app)
    return app
|
||||||
|
|
||||||
|
|
||||||
|
class InjectContext(wsgi.Middleware):
    """Middleware that injects a fixed context as 'cinder.context'."""

    def __init__(self, context, *args, **kwargs):
        # Remember the context to inject; everything else goes to the
        # base Middleware initializer.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Every request gets the same pre-built context.
        req.environ['cinder.context'] = self.context
        return self.application
|
||||||
|
|
||||||
|
|
||||||
|
class CinderKeystoneContext(wsgi.Middleware):
    """Build a cinder request context from Keystone auth headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        headers = req.headers

        # X_USER_ID is the current header; fall back to legacy X_USER.
        user_id = headers.get('X_USER_ID', headers.get('X_USER'))
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        # Roles arrive as a comma-separated list.
        roles = [r.strip() for r in headers.get('X_ROLE', '').split(',')]

        if 'X_TENANT_ID' in headers:
            # This is the new header since Keystone went to ID/Name
            project_id = headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = headers['X_TENANT']

        # The auth token may arrive under either header name.
        auth_token = headers.get('X_AUTH_TOKEN',
                                 headers.get('X_STORAGE_TOKEN'))

        # Honor X-Forwarded-For only when explicitly configured (the
        # proxy must be trusted to sanitize it).
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = headers.get('X-Forwarded-For', remote_address)

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address)

        req.environ['cinder.context'] = ctx
        return self.application
|
143
cinder/api/openstack/__init__.py
Normal file
143
cinder/api/openstack/__init__.py
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
WSGI middleware for OpenStack API controllers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import routes
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import wsgi as base_wsgi
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class FaultWrapper(base_wsgi.Middleware):
    """Top-of-stack middleware turning unhandled exceptions into faults.

    Any exception escaping the wrapped application is logged and
    converted into a 500 fault instead of propagating to the server.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as err:
            LOG.exception(_("Caught error: %s"), unicode(err))
            LOG.info(_("%(url)s returned with HTTP %(status)d")
                     % dict(url=req.url, status=500))
            # NOTE(johannes): We leave the explanation empty here on
            # purpose. It could possibly have sensitive information
            # that should not be returned back to the user. See
            # bugs 868360 and 874472
            return wsgi.Fault(webob.exc.HTTPInternalServerError())
|
||||||
|
|
||||||
|
|
||||||
|
class APIMapper(routes.Mapper):
    """Mapper that also matches the empty-string URL.

    routes.Mapper.routematch mishandles '' directly, so it is
    special-cased here by calling the internal matcher.
    """

    def routematch(self, url=None, environ=None):
        """Match *url* against registered routes; tolerate url == ''.

        BUG FIX: the original compared with ``url is ""`` — an identity
        comparison against a string literal that only worked by accident
        of CPython string interning. Use equality instead.
        """
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)
|
||||||
|
|
||||||
|
|
||||||
|
class ProjectMapper(APIMapper):
    """Mapper that prefixes every resource route with the project id."""

    def resource(self, member_name, collection_name, **kwargs):
        """Register a resource rooted under '{project_id}/'.

        Nested resources (those passing 'parent_resource') are rooted
        under their parent's collection and member id instead.
        """
        if 'parent_resource' in kwargs:
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (
                parent['collection_name'], parent['member_name'])
        else:
            kwargs['path_prefix'] = '{project_id}/'
        routes.Mapper.resource(self, member_name, collection_name, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class APIRouter(base_wsgi.Router):
    """Router mapping OpenStack API requests to controllers and methods."""

    # Subclasses must override this with their ExtensionManager class.
    ExtensionManager = None

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have"""
        return cls()

    def __init__(self, ext_mgr=None):
        if ext_mgr is None:
            if not self.ExtensionManager:
                raise Exception(_("Must specify an ExtensionManager class"))
            ext_mgr = self.ExtensionManager()

        mapper = ProjectMapper()
        self.resources = {}
        # Core routes first, then extension resources, then controller
        # extensions that piggyback on already-registered resources.
        self._setup_routes(mapper)
        self._setup_ext_routes(mapper, ext_mgr)
        self._setup_extensions(ext_mgr)
        super(APIRouter, self).__init__(mapper)

    def _setup_ext_routes(self, mapper, ext_mgr):
        """Register a wsgi resource and route for each extension resource."""
        for resource in ext_mgr.get_resources():
            LOG.debug(_('Extended resource: %s'), resource.collection)

            wsgi_resource = wsgi.Resource(resource.controller)
            self.resources[resource.collection] = wsgi_resource

            kargs = {'controller': wsgi_resource,
                     'collection': resource.collection_actions,
                     'member': resource.member_actions}
            if resource.parent:
                kargs['parent_resource'] = resource.parent

            mapper.resource(resource.collection, resource.collection, **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions to their target resources."""
        for extension in ext_mgr.get_controller_extensions():
            # NOTE: ext_name and collection feed the %(...)s log formats
            # below via locals(); keep the names in sync.
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(_('Extension %(ext_name)s: Cannot extend '
                              'resource %(collection)s: No such resource') %
                            locals())
                continue

            LOG.debug(_('Extension %(ext_name)s extending resource: '
                        '%(collection)s') % locals())

            target = self.resources[collection]
            target.register_actions(controller)
            target.register_extensions(controller)

    def _setup_routes(self, mapper):
        """Subclass hook: register the core API routes on *mapper*."""
        raise NotImplementedError
|
65
cinder/api/openstack/auth.py
Normal file
65
cinder/api/openstack/auth.py
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder import context
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import wsgi as base_wsgi
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
flags.DECLARE('use_forwarded_for', 'cinder.api.auth')
|
||||||
|
|
||||||
|
|
||||||
|
class NoAuthMiddleware(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'X-Auth-Token' not in req.headers:
            # Hand back a fake token built from the (optional)
            # X-Auth-User / X-Auth-Project-Id headers.
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1),
            #             whereas keystone uses 2.0 auth. We should
            #             probably allow 2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os.path.join(req.url,
                                                                  project_id)
            res.content_type = 'text/plain'
            res.status = '204'
            return res

        # Token is '<user>:<project>'; project defaults to the user id.
        user_id, _sep, project_id = req.headers['X-Auth-Token'].partition(':')
        project_id = project_id or user_id

        remote_address = getattr(req, 'remote_address', '127.0.0.1')
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)

        req.environ['cinder.context'] = ctx
        return self.application
|
380
cinder/api/openstack/common.py
Normal file
380
cinder/api/openstack/common.py
Normal file
@ -0,0 +1,380 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import urlparse
|
||||||
|
|
||||||
|
import webob
|
||||||
|
from xml.dom import minidom
|
||||||
|
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import quota
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1'
|
||||||
|
|
||||||
|
|
||||||
|
def get_pagination_params(request):
    """Return a dict of any 'marker' and 'limit' pagination parameters.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. Invalid values raise
                    exc.HTTPBadRequest via the helper parsers.
    """
    get_vars = request.GET
    params = {}
    if 'limit' in get_vars:
        params['limit'] = _get_limit_param(request)
    if 'marker' in get_vars:
        params['marker'] = _get_marker_param(request)
    return params
|
||||||
|
|
||||||
|
|
||||||
|
def _get_limit_param(request):
|
||||||
|
"""Extract integer limit from request or fail"""
|
||||||
|
try:
|
||||||
|
limit = int(request.GET['limit'])
|
||||||
|
except ValueError:
|
||||||
|
msg = _('limit param must be an integer')
|
||||||
|
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||||
|
if limit < 0:
|
||||||
|
msg = _('limit param must be positive')
|
||||||
|
raise webob.exc.HTTPBadRequest(explanation=msg)
|
||||||
|
return limit
|
||||||
|
|
||||||
|
|
||||||
|
def _get_marker_param(request):
|
||||||
|
"""Extract marker id from request or fail"""
|
||||||
|
return request.GET['marker']
|
||||||
|
|
||||||
|
|
||||||
|
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
                    GET variables. 'offset' is where to start in the list,
                    and 'limit' is the maximum number of items to return. If
                    'limit' is not specified, 0, or > max_limit, we default
                    to max_limit. Negative values for either offset or limit
                    will cause exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be an integer'))

    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be an integer'))

    if limit < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be positive'))
    if offset < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be positive'))

    # A limit of 0 (or anything above max_limit) means "use max_limit".
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]
|
||||||
|
|
||||||
|
|
||||||
|
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
    """Return a slice of items according to the requested marker and limit.

    :param items: sequence of dict-like resources, each identified by
                  'flavorid', 'id' or 'uuid'
    :param request: ``wsgi.Request`` carrying optional 'marker'/'limit'
    :kwarg max_limit: hard upper bound on the number of items returned
    :raises webob.exc.HTTPBadRequest: when the marker matches no item
    """
    params = get_pagination_params(request)

    limit = params.get('limit', max_limit)
    marker = params.get('marker')

    # NOTE(review): unlike limited(), a limit of 0 is NOT promoted to
    # max_limit here — an explicit limit=0 yields an empty page; confirm
    # that asymmetry is intended.
    limit = min(max_limit, limit)
    start_index = 0
    if marker:
        # -1 flags "marker not found" until the scan below locates it.
        start_index = -1
        for i, item in enumerate(items):
            # Flavors are matched by 'flavorid'; everything else by
            # 'id' or 'uuid'. The page starts AFTER the marker item.
            if 'flavorid' in item:
                if item['flavorid'] == marker:
                    start_index = i + 1
                    break
            elif item['id'] == marker or item.get('uuid') == marker:
                start_index = i + 1
                break
        if start_index < 0:
            msg = _('marker [%s] not found') % marker
            raise webob.exc.HTTPBadRequest(explanation=msg)
    range_end = start_index + limit
    return items[start_index:range_end]
|
||||||
|
|
||||||
|
|
||||||
|
def get_id_from_href(href):
    """Return the id or uuid portion of a url.

    Given: 'http://www.foo.com/bar/123?q=4'
    Returns: '123'

    Given: 'http://www.foo.com/bar/abc123?q=4'
    Returns: 'abc123'
    """
    path = urlparse.urlsplit("%s" % href).path
    return path.rsplit('/', 1)[-1]
|
||||||
|
|
||||||
|
|
||||||
|
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.cinder.com/v1.1/123'
    Returns: 'http://www.cinder.com/123'

    Given: 'http://www.cinder.com/v1.1'
    Returns: 'http://www.cinder.com'

    :raises ValueError: if *href* does not carry a leading version segment
    """
    parsed_url = urlparse.urlsplit(href)
    # Split at most twice so url_parts is ['', '<first-seg>', '<rest>'];
    # assumes the path starts with '/' so url_parts[1] exists — TODO confirm
    # for hrefs with an empty path.
    url_parts = parsed_url.path.split('/', 2)

    # NOTE: this should match vX.X or vX
    expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    if expression.match(url_parts[1]):
        del url_parts[1]

    new_path = '/'.join(url_parts)

    # If nothing was deleted, the href had no version — refuse rather than
    # return the input unchanged.
    if new_path == parsed_url.path:
        msg = _('href %s does not contain version') % href
        LOG.debug(msg)
        raise ValueError(msg)

    # SplitResult is immutable; rebuild as a list to swap in the new path
    # (index 2 is the path component).
    parsed_url = list(parsed_url)
    parsed_url[2] = new_path
    return urlparse.urlunsplit(parsed_url)
|
||||||
|
|
||||||
|
|
||||||
|
def get_version_from_href(href):
    """Returns the api version in the href.

    If no version is found, '2' is returned.

    Given: 'http://www.cinder.com/123'
    Returns: '2'

    Given: 'http://www.cinder.com/v1.1'
    Returns: '1.1'
    """
    pattern = r'/v([0-9]+|[0-9]+\.[0-9]+)(/|$)'
    match = re.search(pattern, href)
    if match is None:
        return '2'
    return match.group(1)
|
||||||
|
|
||||||
|
|
||||||
|
def dict_to_query_str(params):
    """Serialize *params* into a 'k=v&k2=v2' query string (no URL-encoding).

    :param params: mapping of keys to values; both sides are str()-ified
    :returns: the joined query string, '' for an empty mapping
    """
    # TODO(throughnothing): we should just use urllib.urlencode instead of this
    # But currently we don't work with urlencoded url's
    # Join once instead of repeated string concatenation (which is quadratic);
    # items() also works on both Python 2 and 3, unlike iteritems().
    return '&'.join('='.join([str(key), str(val)])
                    for key, val in params.items())
|
||||||
|
|
||||||
|
|
||||||
|
def raise_http_conflict_for_instance_invalid_state(exc, action):
    """Return a webob.exc.HTTPConflict instance containing a message
    appropriate to return via the API based on the original
    InstanceInvalidState exception.

    :param exc: exception carrying 'attr'/'state' in its kwargs dict
    :param action: name of the rejected action, interpolated into the message
    :raises webob.exc.HTTPConflict: always
    """
    attr = exc.kwargs.get('attr')
    state = exc.kwargs.get('state')
    if attr and state:
        msg = _("Cannot '%(action)s' while instance is in %(attr)s %(state)s")
    else:
        # At least give some meaningful message
        msg = _("Instance is in an invalid state for '%(action)s'")
    # NOTE: the messages are expanded with locals() so they pick up
    # 'action', 'attr' and 'state' from this frame — renaming any of these
    # local variables would silently break the interpolation.
    raise webob.exc.HTTPConflict(explanation=msg % locals())
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializer for full <metadata> request documents."""

    def deserialize(self, text):
        """Parse *text* into {'body': {'metadata': {...}}}."""
        document = minidom.parseString(text)
        node = self.find_first_child_named(document, "metadata")
        return {'body': {'metadata': self.extract_metadata(node)}}
|
||||||
|
|
||||||
|
|
||||||
|
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserializer for a single <meta> item request document."""

    def deserialize(self, text):
        """Parse *text* into {'body': {'meta': {...}}}."""
        document = minidom.parseString(text)
        return {'body': {'meta': self.extract_metadata(document)}}
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
    """XML deserializer for metadata create/update request bodies."""

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request"""
        metadata = {}
        if metadata_node is None:
            return metadata
        for meta_node in self.find_children_named(metadata_node, "meta"):
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata

    def _extract_metadata_container(self, datastring):
        """Parse a <metadata> document into the common body shape."""
        document = minidom.parseString(datastring)
        metadata_node = self.find_first_child_named(document, "metadata")
        return {'body': {'metadata': self.extract_metadata(metadata_node)}}

    def create(self, datastring):
        """Deserialize a metadata-create request body."""
        return self._extract_metadata_container(datastring)

    def update_all(self, datastring):
        """Deserialize a replace-all-metadata request body."""
        return self._extract_metadata_container(datastring)

    def update(self, datastring):
        """Deserialize a single <meta> item update body."""
        document = minidom.parseString(datastring)
        return {'body': {'meta': self.extract_metadata(document)}}
|
||||||
|
|
||||||
|
|
||||||
|
# Default XML namespace map used by the metadata serializer templates below.
metadata_nsmap = {None: xmlutil.XMLNS_V11}
|
||||||
|
|
||||||
|
|
||||||
|
class MetaItemTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single <meta> key/value item."""

    def construct(self):
        """Build the master template for one metadata item."""
        selector = xmlutil.Selector('meta', xmlutil.get_items, 0)
        meta_elem = xmlutil.TemplateElement('meta', selector=selector)
        meta_elem.set('key', 0)
        meta_elem.text = 1
        return xmlutil.MasterTemplate(meta_elem, 1, nsmap=metadata_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataTemplateElement(xmlutil.TemplateElement):
    """Template element that is always rendered, even for empty metadata."""

    def will_render(self, datum):
        """Render unconditionally, regardless of *datum*."""
        return True
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a full <metadata> collection."""

    def construct(self):
        """Build the master template for a metadata dictionary."""
        root = MetadataTemplateElement('metadata', selector='metadata')
        meta_elem = xmlutil.SubTemplateElement(root, 'meta',
                                               selector=xmlutil.get_items)
        meta_elem.set('key', 0)
        meta_elem.text = 1
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
def check_snapshots_enabled(f):
    """Decorator rejecting requests while instance snapshots are disabled.

    When FLAGS.allow_instance_snapshots is False, the wrapped call is
    refused with HTTPBadRequest; otherwise the call proceeds untouched.
    """
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if FLAGS.allow_instance_snapshots:
            return f(*args, **kwargs)
        LOG.warn(_('Rejecting snapshot request, snapshots currently'
                   ' disabled'))
        msg = _("Instance snapshots are not permitted at this time.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return inner
|
||||||
|
|
||||||
|
|
||||||
|
class ViewBuilder(object):
    """Model API responses as dictionaries."""

    # Subclasses set this to the URL path segment of their resource
    # collection (e.g. 'volumes').
    _collection_name = None

    def _get_links(self, request, identifier):
        """Return the standard link pair for one resource.

        'self' is the canonical versioned href; 'bookmark' is the same
        href with the API version stripped.
        """
        return [{
            "rel": "self",
            "href": self._get_href_link(request, identifier),
        },
        {
            "rel": "bookmark",
            "href": self._get_bookmark_link(request, identifier),
        }]

    def _get_next_link(self, request, identifier):
        """Return href string with proper limit and marker params."""
        # Carry over the caller's query parameters, replacing the marker
        # so the next page starts after *identifier*.
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_link_prefix(request.application_url,
                                          FLAGS.osapi_compute_link_prefix)
        # NOTE(review): os.path.join is used for URL assembly throughout
        # this class; that only works on POSIX path separators — confirm
        # this never runs where os.sep != '/'.
        url = os.path.join(prefix,
                           request.environ["cinder.context"].project_id,
                           self._collection_name)
        return "%s?%s" % (url, dict_to_query_str(params))

    def _get_href_link(self, request, identifier):
        """Return an href string pointing to this object."""
        prefix = self._update_link_prefix(request.application_url,
                                          FLAGS.osapi_compute_link_prefix)
        return os.path.join(prefix,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))

    def _get_bookmark_link(self, request, identifier):
        """Create a URL that refers to a specific resource."""
        # Bookmarks are version-less: strip the /vX.Y segment first.
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_link_prefix(base_url,
                                            FLAGS.osapi_compute_link_prefix)
        return os.path.join(base_url,
                            request.environ["cinder.context"].project_id,
                            self._collection_name,
                            str(identifier))

    def _get_collection_links(self, request, items, id_key="uuid"):
        """Retrieve 'next' link, if applicable."""
        links = []
        limit = int(request.params.get("limit", 0))
        # A full page (len == limit) suggests more results may follow, so
        # emit a 'next' link anchored at the last item's id.
        if limit and limit == len(items):
            last_item = items[-1]
            if id_key in last_item:
                last_item_id = last_item[id_key]
            else:
                last_item_id = last_item["id"]
            links.append({
                "rel": "next",
                "href": self._get_next_link(request, last_item_id),
            })
        return links

    def _update_link_prefix(self, orig_url, prefix):
        """Swap orig_url's scheme and netloc for those of *prefix*."""
        if not prefix:
            return orig_url
        url_parts = list(urlparse.urlsplit(orig_url))
        prefix_parts = list(urlparse.urlsplit(prefix))
        # Replace scheme (index 0) and netloc (index 1) only; the path,
        # query and fragment of the original URL are preserved.
        url_parts[0:2] = prefix_parts[0:2]
        return urlparse.urlunsplit(url_parts)
|
23
cinder/api/openstack/compute/__init__.py
Normal file
23
cinder/api/openstack/compute/__init__.py
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
WSGI middleware for OpenStack Compute API.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinder.api.openstack.compute import versions
|
141
cinder/api/openstack/compute/schemas/atom-link.rng
Normal file
141
cinder/api/openstack/compute/schemas/atom-link.rng
Normal file
@ -0,0 +1,141 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!--
|
||||||
|
-*- rnc -*-
|
||||||
|
RELAX NG Compact Syntax Grammar for the
|
||||||
|
Atom Format Specification Version 11
|
||||||
|
-->
|
||||||
|
<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
|
||||||
|
<start>
|
||||||
|
<choice>
|
||||||
|
<ref name="atomLink"/>
|
||||||
|
</choice>
|
||||||
|
</start>
|
||||||
|
<!-- Common attributes -->
|
||||||
|
<define name="atomCommonAttributes">
|
||||||
|
<optional>
|
||||||
|
<attribute name="xml:base">
|
||||||
|
<ref name="atomUri"/>
|
||||||
|
</attribute>
|
||||||
|
</optional>
|
||||||
|
<optional>
|
||||||
|
<attribute name="xml:lang">
|
||||||
|
<ref name="atomLanguageTag"/>
|
||||||
|
</attribute>
|
||||||
|
</optional>
|
||||||
|
<zeroOrMore>
|
||||||
|
<ref name="undefinedAttribute"/>
|
||||||
|
</zeroOrMore>
|
||||||
|
</define>
|
||||||
|
<!-- atom:link -->
|
||||||
|
<define name="atomLink">
|
||||||
|
<element name="atom:link">
|
||||||
|
<ref name="atomCommonAttributes"/>
|
||||||
|
<attribute name="href">
|
||||||
|
<ref name="atomUri"/>
|
||||||
|
</attribute>
|
||||||
|
<optional>
|
||||||
|
<attribute name="rel">
|
||||||
|
<choice>
|
||||||
|
<ref name="atomNCName"/>
|
||||||
|
<ref name="atomUri"/>
|
||||||
|
</choice>
|
||||||
|
</attribute>
|
||||||
|
</optional>
|
||||||
|
<optional>
|
||||||
|
<attribute name="type">
|
||||||
|
<ref name="atomMediaType"/>
|
||||||
|
</attribute>
|
||||||
|
</optional>
|
||||||
|
<optional>
|
||||||
|
<attribute name="hreflang">
|
||||||
|
<ref name="atomLanguageTag"/>
|
||||||
|
</attribute>
|
||||||
|
</optional>
|
||||||
|
<optional>
|
||||||
|
<attribute name="title"/>
|
||||||
|
</optional>
|
||||||
|
<optional>
|
||||||
|
<attribute name="length"/>
|
||||||
|
</optional>
|
||||||
|
<ref name="undefinedContent"/>
|
||||||
|
</element>
|
||||||
|
</define>
|
||||||
|
<!-- Low-level simple types -->
|
||||||
|
<define name="atomNCName">
|
||||||
|
<data type="string">
|
||||||
|
<param name="minLength">1</param>
|
||||||
|
<param name="pattern">[^:]*</param>
|
||||||
|
</data>
|
||||||
|
</define>
|
||||||
|
<!-- Whatever a media type is, it contains at least one slash -->
|
||||||
|
<define name="atomMediaType">
|
||||||
|
<data type="string">
|
||||||
|
<param name="pattern">.+/.+</param>
|
||||||
|
</data>
|
||||||
|
</define>
|
||||||
|
<!-- As defined in RFC 3066 -->
|
||||||
|
<define name="atomLanguageTag">
|
||||||
|
<data type="string">
|
||||||
|
<param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param>
|
||||||
|
</data>
|
||||||
|
</define>
|
||||||
|
<!--
|
||||||
|
Unconstrained; it's not entirely clear how IRI fit into
|
||||||
|
xsd:anyURI so let's not try to constrain it here
|
||||||
|
-->
|
||||||
|
<define name="atomUri">
|
||||||
|
<text/>
|
||||||
|
</define>
|
||||||
|
<!-- Other Extensibility -->
|
||||||
|
<define name="undefinedAttribute">
|
||||||
|
<attribute>
|
||||||
|
<anyName>
|
||||||
|
<except>
|
||||||
|
<name>xml:base</name>
|
||||||
|
<name>xml:lang</name>
|
||||||
|
<nsName ns=""/>
|
||||||
|
</except>
|
||||||
|
</anyName>
|
||||||
|
</attribute>
|
||||||
|
</define>
|
||||||
|
<define name="undefinedContent">
|
||||||
|
<zeroOrMore>
|
||||||
|
<choice>
|
||||||
|
<text/>
|
||||||
|
<ref name="anyForeignElement"/>
|
||||||
|
</choice>
|
||||||
|
</zeroOrMore>
|
||||||
|
</define>
|
||||||
|
<define name="anyElement">
|
||||||
|
<element>
|
||||||
|
<anyName/>
|
||||||
|
<zeroOrMore>
|
||||||
|
<choice>
|
||||||
|
<attribute>
|
||||||
|
<anyName/>
|
||||||
|
</attribute>
|
||||||
|
<text/>
|
||||||
|
<ref name="anyElement"/>
|
||||||
|
</choice>
|
||||||
|
</zeroOrMore>
|
||||||
|
</element>
|
||||||
|
</define>
|
||||||
|
<define name="anyForeignElement">
|
||||||
|
<element>
|
||||||
|
<anyName>
|
||||||
|
<except>
|
||||||
|
<nsName ns="http://www.w3.org/2005/Atom"/>
|
||||||
|
</except>
|
||||||
|
</anyName>
|
||||||
|
<zeroOrMore>
|
||||||
|
<choice>
|
||||||
|
<attribute>
|
||||||
|
<anyName/>
|
||||||
|
</attribute>
|
||||||
|
<text/>
|
||||||
|
<ref name="anyElement"/>
|
||||||
|
</choice>
|
||||||
|
</zeroOrMore>
|
||||||
|
</element>
|
||||||
|
</define>
|
||||||
|
</grammar>
|
11
cinder/api/openstack/compute/schemas/v1.1/extension.rng
Normal file
11
cinder/api/openstack/compute/schemas/v1.1/extension.rng
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
<element name="extension" ns="http://docs.openstack.org/common/api/v1.0"
|
||||||
|
xmlns="http://relaxng.org/ns/structure/1.0">
|
||||||
|
<attribute name="alias"> <text/> </attribute>
|
||||||
|
<attribute name="name"> <text/> </attribute>
|
||||||
|
<attribute name="namespace"> <text/> </attribute>
|
||||||
|
<attribute name="updated"> <text/> </attribute>
|
||||||
|
<element name="description"> <text/> </element>
|
||||||
|
<zeroOrMore>
|
||||||
|
<externalRef href="../atom-link.rng"/>
|
||||||
|
</zeroOrMore>
|
||||||
|
</element>
|
6
cinder/api/openstack/compute/schemas/v1.1/extensions.rng
Normal file
6
cinder/api/openstack/compute/schemas/v1.1/extensions.rng
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
<element name="extensions" xmlns="http://relaxng.org/ns/structure/1.0"
|
||||||
|
ns="http://docs.openstack.org/common/api/v1.0">
|
||||||
|
<zeroOrMore>
|
||||||
|
<externalRef href="extension.rng"/>
|
||||||
|
</zeroOrMore>
|
||||||
|
</element>
|
9
cinder/api/openstack/compute/schemas/v1.1/metadata.rng
Normal file
9
cinder/api/openstack/compute/schemas/v1.1/metadata.rng
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
<element name="metadata" ns="http://docs.openstack.org/compute/api/v1.1"
|
||||||
|
xmlns="http://relaxng.org/ns/structure/1.0">
|
||||||
|
<zeroOrMore>
|
||||||
|
<element name="meta">
|
||||||
|
<attribute name="key"> <text/> </attribute>
|
||||||
|
<text/>
|
||||||
|
</element>
|
||||||
|
</zeroOrMore>
|
||||||
|
</element>
|
244
cinder/api/openstack/compute/versions.py
Normal file
244
cinder/api/openstack/compute/versions.py
Normal file
@ -0,0 +1,244 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
|
||||||
|
from cinder.api.openstack.compute.views import versions as views_versions
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
|
||||||
|
|
||||||
|
LINKS = {
|
||||||
|
'v2.0': {
|
||||||
|
'pdf': 'http://docs.openstack.org/'
|
||||||
|
'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
|
||||||
|
'wadl': 'http://docs.openstack.org/'
|
||||||
|
'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
VERSIONS = {
|
||||||
|
"v2.0": {
|
||||||
|
"id": "v2.0",
|
||||||
|
"status": "CURRENT",
|
||||||
|
"updated": "2011-01-21T11:33:21Z",
|
||||||
|
"links": [
|
||||||
|
{
|
||||||
|
"rel": "describedby",
|
||||||
|
"type": "application/pdf",
|
||||||
|
"href": LINKS['v2.0']['pdf'],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rel": "describedby",
|
||||||
|
"type": "application/vnd.sun.wadl+xml",
|
||||||
|
"href": LINKS['v2.0']['wadl'],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
"media-types": [
|
||||||
|
{
|
||||||
|
"base": "application/xml",
|
||||||
|
"type": "application/vnd.openstack.compute+xml;version=2",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"base": "application/json",
|
||||||
|
"type": "application/vnd.openstack.compute+json;version=2",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class MediaTypesTemplateElement(xmlutil.TemplateElement):
    """Template element rendered only when the datum carries media types."""

    def will_render(self, datum):
        """Render iff *datum* contains a 'media-types' entry."""
        return 'media-types' in datum
|
||||||
|
|
||||||
|
|
||||||
|
def make_version(elem):
    """Attach the standard version attributes and children to *elem*."""
    for attr in ('id', 'status', 'updated'):
        elem.set(attr)

    media_types = MediaTypesTemplateElement('media-types')
    elem.append(media_types)

    media_type = xmlutil.SubTemplateElement(media_types, 'media-type',
                                            selector='media-types')
    media_type.set('base')
    media_type.set('type')

    xmlutil.make_links(elem, 'links')
|
||||||
|
|
||||||
|
|
||||||
|
version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
|
||||||
|
|
||||||
|
|
||||||
|
class VersionTemplate(xmlutil.TemplateBuilder):
    """XML template for a single version document."""

    def construct(self):
        """Build the master template rooted at <version>."""
        root = xmlutil.TemplateElement('version', selector='version')
        make_version(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class VersionsTemplate(xmlutil.TemplateBuilder):
    """XML template for the full version index."""

    def construct(self):
        """Build the master template rooted at <versions>."""
        root = xmlutil.TemplateElement('versions')
        child = xmlutil.SubTemplateElement(root, 'version',
                                           selector='versions')
        make_version(child)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class ChoicesTemplate(xmlutil.TemplateBuilder):
    """XML template for the 300 Multiple Choices response."""

    def construct(self):
        """Build the master template rooted at <choices>."""
        root = xmlutil.TemplateElement('choices')
        child = xmlutil.SubTemplateElement(root, 'version',
                                           selector='choices')
        make_version(child)
        return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class AtomSerializer(wsgi.XMLDictSerializer):
    """Base serializer producing Atom feeds for version documents."""

    # Default namespace map: unprefixed elements live in the Atom namespace.
    NSMAP = {None: xmlutil.XMLNS_ATOM}

    def __init__(self, metadata=None, xmlns=None):
        """Initialize with optional serializer metadata and namespace.

        :param metadata: serializer metadata dict (defaults to empty)
        :param xmlns: XML namespace; falls back to the Atom namespace
        """
        self.metadata = metadata or {}
        if not xmlns:
            self.xmlns = wsgi.XMLNS_ATOM
        else:
            self.xmlns = xmlns

    def _get_most_recent_update(self, versions):
        """Return the latest 'updated' timestamp across *versions*.

        Timestamps are parsed and re-emitted in '%Y-%m-%dT%H:%M:%SZ'
        format. NOTE(review): raises AttributeError for an empty
        *versions* list (recent stays None) — callers always pass at
        least one version.
        """
        recent = None
        for version in versions:
            updated = datetime.datetime.strptime(version['updated'],
                                                 '%Y-%m-%dT%H:%M:%SZ')
            if not recent:
                recent = updated
            elif updated > recent:
                recent = updated

        return recent.strftime('%Y-%m-%dT%H:%M:%SZ')

    def _get_base_url(self, link_href):
        """Return *link_href* with its last path segment dropped."""
        # Make sure no trailing /
        link_href = link_href.rstrip('/')
        return link_href.rsplit('/', 1)[0] + '/'

    def _create_feed(self, versions, feed_title, feed_id):
        """Build an Atom <feed> element describing *versions*.

        :param versions: list of version dicts ('id', 'status', 'updated',
                         'links')
        :param feed_title: human-readable feed title
        :param feed_id: feed id, also used as the self link href
        """
        feed = etree.Element('feed', nsmap=self.NSMAP)
        title = etree.SubElement(feed, 'title')
        title.set('type', 'text')
        title.text = feed_title

        # Set this updated to the most recently updated version
        recent = self._get_most_recent_update(versions)
        etree.SubElement(feed, 'updated').text = recent

        etree.SubElement(feed, 'id').text = feed_id

        link = etree.SubElement(feed, 'link')
        link.set('rel', 'self')
        link.set('href', feed_id)

        author = etree.SubElement(feed, 'author')
        etree.SubElement(author, 'name').text = 'Rackspace'
        etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'

        for version in versions:
            feed.append(self._create_version_entry(version))

        return feed

    def _create_version_entry(self, version):
        """Build one Atom <entry> element for a single version dict."""
        entry = etree.Element('entry')
        # The entry id is the first link's href (by convention, the self
        # link — see the view builder, which inserts it at index 0).
        etree.SubElement(entry, 'id').text = version['links'][0]['href']
        title = etree.SubElement(entry, 'title')
        title.set('type', 'text')
        title.text = 'Version %s' % version['id']
        etree.SubElement(entry, 'updated').text = version['updated']

        for link in version['links']:
            link_elem = etree.SubElement(entry, 'link')
            link_elem.set('rel', link['rel'])
            link_elem.set('href', link['href'])
            if 'type' in link:
                link_elem.set('type', link['type'])

        content = etree.SubElement(entry, 'content')
        content.set('type', 'text')
        content.text = 'Version %s %s (%s)' % (version['id'],
                                               version['status'],
                                               version['updated'])
        return entry
|
||||||
|
|
||||||
|
|
||||||
|
class VersionsAtomSerializer(AtomSerializer):
    """Atom serializer for the full version index."""

    def default(self, data):
        """Render the version list in *data* as an Atom feed."""
        versions = data['versions']
        first_href = versions[0]['links'][0]['href']
        feed = self._create_feed(versions, 'Available API Versions',
                                 self._get_base_url(first_href))
        return self._to_xml(feed)
|
||||||
|
|
||||||
|
|
||||||
|
class VersionAtomSerializer(AtomSerializer):
    """Atom serializer for a single version document."""

    def default(self, data):
        """Render the one version in *data* as a single-entry Atom feed."""
        version = data['version']
        self_href = version['links'][0]['href']
        feed = self._create_feed([version], 'About This Version', self_href)
        return self._to_xml(feed)
|
||||||
|
|
||||||
|
|
||||||
|
class Versions(wsgi.Resource):
    """Top-level resource listing the available API versions."""

    def __init__(self):
        # No controller: this resource dispatches via get_action_args().
        super(Versions, self).__init__(None)

    @wsgi.serializers(xml=VersionsTemplate,
                      atom=VersionsAtomSerializer)
    def index(self, req):
        """Return all versions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(VERSIONS)

    @wsgi.serializers(xml=ChoicesTemplate)
    @wsgi.response(300)
    def multi(self, req):
        """Return multiple choices."""
        builder = views_versions.get_view_builder(req)
        return builder.build_choices(VERSIONS, req)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # The bare root lists versions; any other path yields the 300
        # Multiple Choices response.
        if request_environment['PATH_INFO'] == '/':
            return {'action': 'index'}
        return {'action': 'multi'}
|
||||||
|
|
||||||
|
|
||||||
|
class VersionV2(object):
    """Controller exposing the details of API version v2.0."""

    @wsgi.serializers(xml=VersionTemplate,
                      atom=VersionAtomSerializer)
    def show(self, req):
        """Return the description of version v2.0."""
        builder = views_versions.get_view_builder(req)
        return builder.build_version(VERSIONS['v2.0'])
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource():
    """Return the wsgi resource wrapping the v2 version controller."""
    controller = VersionV2()
    return wsgi.Resource(controller)
|
0
cinder/api/openstack/compute/views/__init__.py
Normal file
0
cinder/api/openstack/compute/views/__init__.py
Normal file
94
cinder/api/openstack/compute/views/versions.py
Normal file
94
cinder/api/openstack/compute/views/versions.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010-2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import copy
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
def get_view_builder(req):
    """Return a ViewBuilder rooted at the request's application url."""
    return ViewBuilder(req.application_url)
|
||||||
|
|
||||||
|
|
||||||
|
class ViewBuilder(object):
    """Builds version-listing response bodies for the versions API."""

    def __init__(self, base_url):
        """
        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        """Return a 'choices' body describing every known version."""
        choices = []
        for data in VERSIONS.values():
            choices.append({
                "id": data['id'],
                "status": data['status'],
                "links": [
                    {
                        "rel": "self",
                        "href": self.generate_href(req.path),
                    },
                ],
                "media-types": data['media-types'],
            })

        return dict(choices=choices)

    def build_versions(self, versions):
        """Return a 'versions' body listing all versions, sorted by key."""
        listed = []
        for key in sorted(versions):
            data = versions[key]
            listed.append({
                "id": data['id'],
                "status": data['status'],
                "updated": data['updated'],
                "links": self._build_links(data),
            })

        return dict(versions=listed)

    def build_version(self, version):
        """Return a body for one version with a self link prepended.

        The input dict is deep-copied and never mutated.
        """
        result = copy.deepcopy(version)
        self_link = {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        }
        result['links'].insert(0, self_link)
        return dict(version=result)

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        return [
            {
                "rel": "self",
                "href": self.generate_href(),
            },
        ]

    def generate_href(self, path=None):
        """Create an url that refers to a specific version_number."""
        version_number = 'v2'
        if not path:
            # No path: point at the version root, with a trailing slash.
            return os.path.join(self.base_url, version_number) + '/'
        return os.path.join(self.base_url, version_number, path.strip('/'))
|
395
cinder/api/openstack/extensions.py
Normal file
395
cinder/api/openstack/extensions.py
Normal file
@ -0,0 +1,395 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
import cinder.api.openstack
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import exception
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import exception as common_exception
|
||||||
|
from cinder.openstack.common import importutils
|
||||||
|
import cinder.policy
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.

    """

    # Human-readable extension name, e.g. 'Fox In Socks'.
    name = None

    # Short alias used in URLs and the namespace map, e.g. 'FOXNSOX'.
    alias = None

    # Description comes from the docstring for the class

    # XML namespace URI for the extension, e.g.
    # 'http://www.fox.in.socks/api/ext/pie/v1.0'
    namespace = None

    # Timestamp of the last update, e.g. '2011-01-22T13:25:27-06:00'.
    updated = None

    def __init__(self, ext_mgr):
        """Register extension with the extension manager."""
        ext_mgr.register(self)

    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.

        """
        return []

    def get_controller_extensions(self):
        """List of extensions.ControllerExtension extension objects.

        Controller extensions are used to extend existing controllers.
        """
        return []

    @classmethod
    def nsmap(cls):
        """Synthesize a namespace map from extension."""
        # Start from the shared base map, then add this extension's entry.
        mapping = dict(ext_nsmap)
        mapping[cls.alias] = cls.namespace
        return mapping

    @classmethod
    def xmlname(cls, name):
        """Synthesize element and attribute names."""
        return '{%s}%s' % (cls.namespace, name)
|
||||||
|
|
||||||
|
|
||||||
|
def make_ext(elem):
    # Mirror the extension descriptor's attributes onto the template element.
    for attr in ('name', 'namespace', 'alias', 'updated'):
        elem.set(attr)

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    xmlutil.make_links(elem, 'links')
|
||||||
|
|
||||||
|
|
||||||
|
# Base namespace map shared by the extension XML templates below.
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionTemplate(xmlutil.TemplateBuilder):
    """Build the XML serialization template for a single extension."""

    def construct(self):
        ext_elem = xmlutil.TemplateElement('extension', selector='extension')
        make_ext(ext_elem)
        return xmlutil.MasterTemplate(ext_elem, 1, nsmap=ext_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionsTemplate(xmlutil.TemplateBuilder):
    """Build the XML serialization template for the extensions list."""

    def construct(self):
        root = xmlutil.TemplateElement('extensions')
        ext_elem = xmlutil.SubTemplateElement(root, 'extension',
                                              selector='extensions')
        make_ext(ext_elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionsResource(wsgi.Resource):
    """API resource that lists and shows the loaded extensions."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
        super(ExtensionsResource, self).__init__(None)

    def _translate(self, ext):
        # Flatten an extension descriptor into a serializable dict.
        return {'name': ext.name,
                'alias': ext.alias,
                'description': ext.__doc__,
                'namespace': ext.namespace,
                'updated': ext.updated,
                'links': []}  # TODO(dprince): implement extension links

    @wsgi.serializers(xml=ExtensionsTemplate)
    def index(self, req):
        extensions = [self._translate(ext)
                      for _alias, ext in
                      self.extension_manager.extensions.iteritems()]
        return dict(extensions=extensions)

    @wsgi.serializers(xml=ExtensionTemplate)
    def show(self, req, id):
        try:
            # NOTE(dprince): the extensions alias is used as the 'id' for show
            ext = self.extension_manager.extensions[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()

        return dict(extension=self._translate(ext))

    def delete(self, req, id):
        # Extensions cannot be removed through the API.
        raise webob.exc.HTTPNotFound()

    def create(self, req):
        # Extensions cannot be added through the API.
        raise webob.exc.HTTPNotFound()
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See cinder/tests/api/openstack/extensions/foxinsocks/extension.py for an
    example extension implementation.

    """
    # NOTE(review): this class reads self.extensions and self.cls_list but
    # defines no __init__ here; presumably subclasses set them up -- confirm.

    def register(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.audit(_('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = [ResourceExtension('extensions',
                                       ExtensionsResource(self))]

        for ext in self.extensions.values():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources

    def get_controller_extensions(self):
        """Returns a list of ControllerExtension objects."""
        controller_exts = []
        for ext in self.extensions.values():
            try:
                controller_exts.extend(ext.get_controller_extensions())
            except AttributeError:
                # NOTE(Vek): Extensions aren't required to have
                # controller extensions
                pass
        return controller_exts

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            LOG.debug(_('Ext name: %s'), extension.name)
            LOG.debug(_('Ext alias: %s'), extension.alias)
            LOG.debug(_('Ext description: %s'),
                      ' '.join(extension.__doc__.strip().split()))
            LOG.debug(_('Ext namespace: %s'), extension.namespace)
            LOG.debug(_('Ext updated: %s'), extension.updated)
        except AttributeError as ex:
            LOG.exception(_("Exception loading extension: %s"), unicode(ex))
            return False

        return True

    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension. The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager. The factory callable is
        expected to call the register() method at least once.
        """
        LOG.debug(_("Loading extension %s"), ext_factory)

        # Load the factory
        factory = importutils.import_class(ext_factory)

        # Call it
        LOG.debug(_("Calling extension factory %s"), ext_factory)
        factory(self)

    def _load_extensions(self):
        """Load extensions specified on the command line."""
        # NOTE: the names 'ext_factory' and 'exc' are referenced by the
        # '% locals()' format below and must not be renamed.
        for ext_factory in list(self.cls_list):
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                LOG.warn(_('Failed to load extension %(ext_factory)s: '
                           '%(exc)s') % locals())
|
||||||
|
|
||||||
|
|
||||||
|
class ControllerExtension(object):
    """Extend core controllers of cinder OpenStack API.

    Provide a way to extend existing cinder OpenStack API core
    controllers.
    """

    def __init__(self, extension, collection, controller):
        # Plain value object: remember which extension adds which
        # controller to which collection.
        self.extension = extension
        self.collection = collection
        self.controller = controller
|
||||||
|
|
||||||
|
|
||||||
|
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in cinder."""

    def __init__(self, collection, controller, parent=None,
                 collection_actions=None, member_actions=None,
                 custom_routes_fn=None):
        self.collection = collection
        self.controller = controller
        self.parent = parent
        # Default the action maps per-instance; the None sentinel avoids
        # the shared-mutable-default pitfall.
        self.collection_actions = collection_actions or {}
        self.member_actions = member_actions or {}
        self.custom_routes_fn = custom_routes_fn
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_errors(fn):
    """Ensure errors are not passed along.

    HTTP exceptions are re-raised untouched; any other exception is
    converted into a 500 so internal details never leak to the client.
    """
    # functools.wraps preserves fn's __name__/__doc__ on the wrapper, so
    # routing/logging that introspects the decorated callable keeps working.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except webob.exc.HTTPException:
            raise
        except Exception:
            raise webob.exc.HTTPInternalServerError()
    return wrapped
|
||||||
|
|
||||||
|
|
||||||
|
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions."""
    # NOTE: the names 'classpath', 'exc' and 'ext_name' are referenced by
    # the '% locals()' formats below and must not be renamed.

    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))

        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)

            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue

            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))

            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s" % classpath)
                continue

            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warn(_('Failed to load extension %(classpath)s: '
                              '%(exc)s') % locals())

        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue

            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except common_exception.NotFound:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warn(_('Failed to load extension %(ext_name)s: '
                                  '%(exc)s') % locals())

        # Update the list of directories we'll explore...
        dirnames[:] = subdirs
|
||||||
|
|
||||||
|
|
||||||
|
def extension_authorizer(api_name, extension_name):
    """Return a callable enforcing the policy rule for this extension."""
    # The policy action name is loop-invariant; compute it once.
    action = '%s_extension:%s' % (api_name, extension_name)

    def authorize(context, target=None):
        if target is None:
            target = {'project_id': context.project_id,
                      'user_id': context.user_id}
        cinder.policy.enforce(context, action, target)
    return authorize
|
||||||
|
|
||||||
|
|
||||||
|
def soft_extension_authorizer(api_name, extension_name):
    """Like extension_authorizer, but returns False instead of raising."""
    hard_authorize = extension_authorizer(api_name, extension_name)

    def authorize(context):
        try:
            hard_authorize(context)
        except exception.NotAuthorized:
            return False
        return True
    return authorize
|
297
cinder/api/openstack/urlmap.py
Normal file
297
cinder/api/openstack/urlmap.py
Normal file
@ -0,0 +1,297 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import paste.urlmap
|
||||||
|
import re
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
|
||||||
|
|
||||||
|
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
|
||||||
|
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
|
||||||
|
r'(?:=\s*([^;]+|%s))?\s*' %
|
||||||
|
(_quoted_string_re, _quoted_string_re))
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def unquote_header_value(value):
    """Unquotes a header value.

    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    """
    # Only strip when the value is wrapped in a matching pair of quotes.
    if not value or value[0] != '"' or value[-1] != '"':
        return value
    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well.  IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    return value[1:-1]
|
||||||
|
|
||||||
|
|
||||||
|
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for piece in urllib2.parse_http_list(value):
        # Strip surrounding quotes before unquoting the inner value.
        if piece[:1] == piece[-1:] == '"':
            piece = unquote_header_value(piece[1:-1])
        items.append(piece)
    return items
|
||||||
|
|
||||||
|
|
||||||
|
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
    ('Content-Type:', {'mimetype': 'text/html'})

    :param value: the header to parse.
    :return: (str, options)
    """
    if not value:
        return '', {}

    def _tokenize(string):
        # Yield (key, value) pairs for every ';'-delimited option piece.
        for match in _option_header_piece_re.finditer(string):
            key, val = match.groups()
            key = unquote_header_value(key)
            if val is not None:
                val = unquote_header_value(val)
            yield key, val

    parts = _tokenize(';' + value)
    name = parts.next()[0]
    extra = dict(parts)
    return name, extra
|
||||||
|
|
||||||
|
|
||||||
|
class Accept(object):
    """Parsed representation of an HTTP ``Accept`` header."""

    def __init__(self, value):
        self._content_types = [parse_options_header(v) for v in
                               parse_list_header(value)]

    def best_match(self, supported_content_types):
        # FIXME: Should we have a more sophisticated matching algorithm that
        # takes into account the version as well?
        best_quality = -1
        best_content_type = None
        best_params = {}
        best_match = '*/*'

        for content_type in supported_content_types:
            for content_mask, params in self._content_types:
                try:
                    quality = float(params.get('q', 1))
                except ValueError:
                    # Malformed q value: ignore this mask entirely.
                    continue

                if quality < best_quality:
                    continue
                elif best_quality == quality:
                    # On a quality tie, prefer the more specific mask
                    # (fewer wildcards).
                    if best_match.count('*') <= content_mask.count('*'):
                        continue

                if self._match_mask(content_mask, content_type):
                    best_quality = quality
                    best_content_type = content_type
                    best_params = params
                    best_match = content_mask

        return best_content_type, best_params

    def content_type_params(self, best_content_type):
        """Find parameters in Accept header for given content type."""
        for candidate, params in self._content_types:
            if best_content_type == candidate:
                return params
        return {}

    def _match_mask(self, mask, content_type):
        # Exact match unless the mask contains a wildcard.
        if '*' not in mask:
            return content_type == mask
        if mask == '*/*':
            return True
        # 'major/*' matches any subtype with the same major type.
        return content_type.split('/', 1)[0] == mask[:-2]
|
||||||
|
|
||||||
|
|
||||||
|
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste app factory building a URLMap from the deploy configuration."""
    # Pop so 'not_found_app' is not treated as a path mapping below;
    # fall back to the global configuration when absent locally.
    not_found_app = local_conf.pop('not_found_app',
                                   global_conf.get('not_found_app'))
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    urlmap = URLMap(not_found_app=not_found_app)
    for path, app_name in local_conf.items():
        path = paste.urlmap.parse_path_expression(path)
        urlmap[path] = loader.get_app(app_name, global_conf=global_conf)
    return urlmap
|
||||||
|
|
||||||
|
|
||||||
|
class URLMap(paste.urlmap.URLMap):
    """URLMap that also selects an app by MIME type and API version."""

    def _match(self, host, port, path_info):
        """Find longest match for a given URL path."""
        for (domain, app_url), app in self.applications:
            if domain and domain != host and domain != host + ':' + port:
                continue
            if (path_info == app_url
                    or path_info.startswith(app_url + '/')):
                return app, app_url

        return None, None

    def _set_script_name(self, app, app_url):
        # Wrap app so the matched prefix is appended to SCRIPT_NAME.
        def wrap(environ, start_response):
            environ['SCRIPT_NAME'] += app_url
            return app(environ, start_response)

        return wrap

    def _munge_path(self, app, path_info, app_url):
        # Wrap app, moving the prefix to SCRIPT_NAME and trimming PATH_INFO.
        def wrap(environ, start_response):
            environ['SCRIPT_NAME'] += app_url
            environ['PATH_INFO'] = path_info[len(app_url):]
            return app(environ, start_response)

        return wrap

    def _path_strategy(self, host, port, path_info):
        """Check path suffix for MIME type and path prefix for API version."""
        mime_type = app = app_url = None

        suffix_parts = path_info.rsplit('.', 1)
        if len(suffix_parts) > 1:
            possible_type = 'application/' + suffix_parts[1]
            if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
                mime_type = possible_type

        if len(path_info.split('/')) > 1:
            possible_app, possible_app_url = self._match(host, port,
                                                         path_info)
            # Don't use prefix if it ends up matching default
            if possible_app and possible_app_url:
                app_url = possible_app_url
                app = self._munge_path(possible_app, path_info, app_url)

        return mime_type, app, app_url

    def _content_type_strategy(self, host, port, environ):
        """Check Content-Type header for API version."""
        app = None
        params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
        if 'version' in params:
            app, app_url = self._match(host, port, '/v' + params['version'])
            if app:
                app = self._set_script_name(app, app_url)

        return app

    def _accept_strategy(self, host, port, environ, supported_content_types):
        """Check Accept header for best matching MIME type and API version."""
        accept = Accept(environ.get('HTTP_ACCEPT', ''))

        app = None

        # Find the best match in the Accept header
        mime_type, params = accept.best_match(supported_content_types)
        if 'version' in params:
            app, app_url = self._match(host, port, '/v' + params['version'])
            if app:
                app = self._set_script_name(app, app_url)

        return mime_type, app

    def __call__(self, environ, start_response):
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            port = '80' if environ['wsgi.url_scheme'] == 'http' else '443'

        path_info = environ['PATH_INFO']
        path_info = self.normalize_url(path_info, False)[1]

        # The MIME type for the response is determined in one of two ways:
        # 1) URL path suffix (eg /servers/detail.json)
        # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)

        # The API version is determined in one of three ways:
        # 1) URL path prefix (eg /v1.1/tenant/servers/detail)
        # 2) Content-Type header (eg application/json;version=1.1)
        # 3) Accept header (eg application/json;q=0.8;version=1.1)

        supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)

        mime_type, app, app_url = self._path_strategy(host, port, path_info)

        # Accept application/atom+xml for the index query of each API
        # version mount point as well as the root index
        if (app_url and app_url + '/' == path_info) or path_info == '/':
            supported_content_types.append('application/atom+xml')

        if not app:
            app = self._content_type_strategy(host, port, environ)

        if not mime_type or not app:
            possible_mime_type, possible_app = self._accept_strategy(
                host, port, environ, supported_content_types)
            if possible_mime_type and not mime_type:
                mime_type = possible_mime_type
            if possible_app and not app:
                app = possible_app

        if not mime_type:
            mime_type = 'application/json'

        if not app:
            # Didn't match a particular version, probably matches default
            app, app_url = self._match(host, port, path_info)
            if app:
                app = self._munge_path(app, path_info, app_url)

        if app:
            environ['cinder.best_content_type'] = mime_type
            return app(environ, start_response)

        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
|
62
cinder/api/openstack/volume/__init__.py
Normal file
62
cinder/api/openstack/volume/__init__.py
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
WSGI middleware for OpenStack Volume API.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import cinder.api.openstack
|
||||||
|
from cinder.api.openstack.volume import extensions
|
||||||
|
from cinder.api.openstack.volume import snapshots
|
||||||
|
from cinder.api.openstack.volume import types
|
||||||
|
from cinder.api.openstack.volume import volumes
|
||||||
|
from cinder.api.openstack.volume import versions
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class APIRouter(cinder.api.openstack.APIRouter):
    """
    Routes requests on the OpenStack API to the appropriate controller
    and method.
    """
    ExtensionManager = extensions.ExtensionManager

    def _setup_routes(self, mapper):
        # NOTE: route registration order matters for matching; do not
        # reorder these calls.
        self.resources['versions'] = versions.create_resource()
        mapper.connect("versions", "/",
                       controller=self.resources['versions'],
                       action='show')

        mapper.redirect("", "/")

        self.resources['volumes'] = volumes.create_resource()
        mapper.resource("volume", "volumes",
                        controller=self.resources['volumes'],
                        collection={'detail': 'GET'})

        self.resources['types'] = types.create_resource()
        mapper.resource("type", "types",
                        controller=self.resources['types'])

        self.resources['snapshots'] = snapshots.create_resource()
        mapper.resource("snapshot", "snapshots",
                        controller=self.resources['snapshots'],
                        collection={'detail': 'GET'})
|
39
cinder/api/openstack/volume/contrib/__init__.py
Normal file
39
cinder/api/openstack/volume/contrib/__init__.py
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Contrib contains extensions that are shipped with cinder.
|
||||||
|
|
||||||
|
It can't be called 'extensions' because that causes namespacing problems.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.api.openstack import extensions
|
||||||
|
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def standard_extensions(ext_mgr):
    """Register every extension shipped under this package."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
|
||||||
|
|
||||||
|
|
||||||
|
def select_extensions(ext_mgr):
    """Register only the extensions named in FLAGS.osapi_volume_ext_list."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
                                        FLAGS.osapi_volume_ext_list)
|
152
cinder/api/openstack/volume/contrib/types_extra_specs.py
Normal file
152
cinder/api/openstack/volume/contrib/types_extra_specs.py
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Zadara Storage Inc.
|
||||||
|
# Copyright (c) 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""The volume types extra specs extension"""
|
||||||
|
|
||||||
|
import webob
|
||||||
|
|
||||||
|
from cinder.api.openstack import extensions
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import db
|
||||||
|
from cinder import exception
|
||||||
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
|
|
||||||
|
authorize = extensions.extension_authorizer('volume', 'types_extra_specs')
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder):
    """XML template for the full extra_specs dict of a volume type."""

    def construct(self):
        root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder):
    """XML template for a single extra-spec key/value pair."""

    def construct(self):
        tagname = xmlutil.Selector('key')

        def extraspec_sel(obj, do_raise=False):
            # Have to extract the key and value for later use...
            key, value = obj.items()[0]
            return dict(key=key, value=value)

        root = xmlutil.TemplateElement(tagname, selector=extraspec_sel)
        root.text = 'value'
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypeExtraSpecsController(object):
    """The volume type extra specs API controller for the OpenStack API."""

    def _get_extra_specs(self, context, type_id):
        """Return the type's extra specs as {'extra_specs': {...}}."""
        extra_specs = db.volume_type_extra_specs_get(context, type_id)
        # Copy into a plain dict (replaces the manual key-by-key loop) so
        # callers cannot mutate the db layer's result.
        return dict(extra_specs=dict(extra_specs))

    def _check_body(self, body):
        """Raise 400 when the request carries no body."""
        if not body:
            expl = _('No Request Body')
            raise webob.exc.HTTPBadRequest(explanation=expl)

    def _check_type(self, context, type_id):
        """Raise 404 when the volume type does not exist."""
        try:
            volume_types.get_volume_type(context, type_id)
        except exception.NotFound as ex:
            raise webob.exc.HTTPNotFound(explanation=unicode(ex))

    @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
    def index(self, req, type_id):
        """Returns the list of extra specs for a given volume type."""
        context = req.environ['cinder.context']
        authorize(context)
        self._check_type(context, type_id)
        return self._get_extra_specs(context, type_id)

    @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
    def create(self, req, type_id, body=None):
        """Create/update extra specs from body['extra_specs'] (a dict)."""
        context = req.environ['cinder.context']
        authorize(context)
        self._check_type(context, type_id)
        self._check_body(body)
        specs = body.get('extra_specs')
        if not isinstance(specs, dict):
            expl = _('Malformed extra specs')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    specs)
        return body

    @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
    def update(self, req, type_id, id, body=None):
        """Update one spec; body must contain exactly the URI's key."""
        context = req.environ['cinder.context']
        authorize(context)
        self._check_type(context, type_id)
        self._check_body(body)
        # Idiomatic membership test (was `not id in body`).
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        if len(body) > 1:
            expl = _('Request body contains too many items')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    body)
        return body

    @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
    def show(self, req, type_id, id):
        """Return a single extra spec item."""
        context = req.environ['cinder.context']
        authorize(context)
        self._check_type(context, type_id)
        specs = self._get_extra_specs(context, type_id)
        if id in specs['extra_specs']:
            return {id: specs['extra_specs'][id]}
        else:
            raise webob.exc.HTTPNotFound()

    def delete(self, req, type_id, id):
        """Deletes an existing extra spec."""
        context = req.environ['cinder.context']
        # Authorize before any DB lookups, consistent with the other
        # actions in this controller (was done after _check_type).
        authorize(context)
        self._check_type(context, type_id)
        db.volume_type_extra_specs_delete(context, type_id, id)
        return webob.Response(status_int=202)
|
||||||
|
|
||||||
|
|
||||||
|
class Types_extra_specs(extensions.ExtensionDescriptor):
    """Types extra specs support"""

    name = "TypesExtraSpecs"
    alias = "os-types-extra-specs"
    namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1"
    updated = "2011-08-24T00:00:00+00:00"

    def get_resources(self):
        """Expose /types/{type_id}/extra_specs as a child resource."""
        parent = dict(member_name='type', collection_name='types')
        controller = VolumeTypeExtraSpecsController()
        resource = extensions.ResourceExtension('extra_specs',
                                                controller,
                                                parent=parent)
        return [resource]
|
91
cinder/api/openstack/volume/contrib/types_manage.py
Normal file
91
cinder/api/openstack/volume/contrib/types_manage.py
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Zadara Storage Inc.
|
||||||
|
# Copyright (c) 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""The volume types manage extension."""
|
||||||
|
|
||||||
|
import webob
|
||||||
|
|
||||||
|
from cinder.api.openstack import extensions
|
||||||
|
from cinder.api.openstack.volume import types
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder import exception
|
||||||
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
|
|
||||||
|
authorize = extensions.extension_authorizer('volume', 'types_manage')
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypesManageController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""

    @wsgi.action("create")
    @wsgi.serializers(xml=types.VolumeTypeTemplate)
    def _create(self, req, body):
        """Creates a new volume type from body['volume_type']."""
        context = req.environ['cinder.context']
        authorize(context)

        # `not body` already covers None and "" — the previous explicit
        # `== ""` comparisons were redundant.
        if not body:
            raise webob.exc.HTTPUnprocessableEntity()

        vol_type = body.get('volume_type')
        if not vol_type:
            raise webob.exc.HTTPUnprocessableEntity()

        name = vol_type.get('name')
        specs = vol_type.get('extra_specs', {})

        if not name:
            raise webob.exc.HTTPUnprocessableEntity()

        try:
            volume_types.create(context, name, specs)
            # Re-read so the response carries the persisted record (id).
            vol_type = volume_types.get_volume_type_by_name(context, name)
        except exception.VolumeTypeExists as err:
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()

        return {'volume_type': vol_type}

    @wsgi.action("delete")
    def _delete(self, req, id):
        """Deletes an existing volume type."""
        context = req.environ['cinder.context']
        authorize(context)

        try:
            vol_type = volume_types.get_volume_type(context, id)
            # destroy() keys on the type's name, not its id.
            volume_types.destroy(context, vol_type['name'])
        except exception.NotFound:
            raise webob.exc.HTTPNotFound()

        return webob.Response(status_int=202)
|
||||||
|
|
||||||
|
|
||||||
|
class Types_manage(extensions.ExtensionDescriptor):
    """Types manage support"""

    name = "TypesManage"
    alias = "os-types-manage"
    namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1"
    updated = "2011-08-24T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the create/delete actions to the core 'types' resource."""
        extension = extensions.ControllerExtension(
            self, 'types', VolumeTypesManageController())
        return [extension]
|
33
cinder/api/openstack/volume/extensions.py
Normal file
33
cinder/api/openstack/volume/extensions.py
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from cinder.api.openstack import extensions as base_extensions
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionManager(base_extensions.ExtensionManager):
    """Volume-API extension manager.

    Loads the extension classes named by the ``osapi_volume_extension``
    flag instead of whatever list the base class uses.
    """

    def __init__(self):
        # NOTE(review): does not call the base class __init__; appears to
        # replicate its setup against the volume flag instead — TODO
        # confirm against base_extensions.ExtensionManager.__init__.
        LOG.audit(_('Initializing extension manager.'))

        self.cls_list = FLAGS.osapi_volume_extension
        self.extensions = {}
        self._load_extensions()
|
170
cinder/api/openstack/volume/snapshots.py
Normal file
170
cinder/api/openstack/volume/snapshots.py
Normal file
@ -0,0 +1,170 @@
|
|||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""The volumes snapshots api."""
|
||||||
|
|
||||||
|
from webob import exc
|
||||||
|
import webob
|
||||||
|
|
||||||
|
from cinder.api.openstack import common
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import exception
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import volume
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_snapshot_detail_view(context, vol):
    """Maps keys for snapshots details view.

    Currently identical to the summary view; no additional data or
    lookups are performed (NOTE(gagupta) in the original).
    """
    return _translate_snapshot_summary_view(context, vol)
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_snapshot_summary_view(context, vol):
|
||||||
|
"""Maps keys for snapshots summary view."""
|
||||||
|
d = {}
|
||||||
|
|
||||||
|
# TODO(bcwaldon): remove str cast once we use uuids
|
||||||
|
d['id'] = str(vol['id'])
|
||||||
|
d['volume_id'] = str(vol['volume_id'])
|
||||||
|
d['status'] = vol['status']
|
||||||
|
# NOTE(gagupta): We map volume_size as the snapshot size
|
||||||
|
d['size'] = vol['volume_size']
|
||||||
|
d['created_at'] = vol['created_at']
|
||||||
|
d['display_name'] = vol['display_name']
|
||||||
|
d['display_description'] = vol['display_description']
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def make_snapshot(elem):
    """Declare the snapshot attributes on an XML template element."""
    for attr in ('id', 'status', 'size', 'created_at',
                 'display_name', 'display_description', 'volume_id'):
        elem.set(attr)
|
||||||
|
|
||||||
|
|
||||||
|
class SnapshotTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single snapshot."""

    def construct(self):
        # Root <snapshot> element populated from the 'snapshot' dict key.
        root = xmlutil.TemplateElement('snapshot', selector='snapshot')
        make_snapshot(root)
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class SnapshotsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of snapshots."""

    def construct(self):
        # <snapshots> wrapper with one <snapshot> per entry of the
        # 'snapshots' list.
        root = xmlutil.TemplateElement('snapshots')
        elem = xmlutil.SubTemplateElement(root, 'snapshot',
                                          selector='snapshots')
        make_snapshot(elem)
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class SnapshotsController(object):
    """The Volumes API controller for the OpenStack API."""

    def __init__(self):
        # One shared volume API client for all handlers.
        self.volume_api = volume.API()
        super(SnapshotsController, self).__init__()

    @wsgi.serializers(xml=SnapshotTemplate)
    def show(self, req, id):
        """Return data about the given snapshot."""
        context = req.environ['cinder.context']

        try:
            vol = self.volume_api.get_snapshot(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        return {'snapshot': _translate_snapshot_detail_view(context, vol)}

    def delete(self, req, id):
        """Delete a snapshot."""
        context = req.environ['cinder.context']

        LOG.audit(_("Delete snapshot with id: %s"), id, context=context)

        try:
            snapshot = self.volume_api.get_snapshot(context, id)
            self.volume_api.delete_snapshot(context, snapshot)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def index(self, req):
        """Returns a summary list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_summary_view)

    @wsgi.serializers(xml=SnapshotsTemplate)
    def detail(self, req):
        """Returns a detailed list of snapshots."""
        return self._items(req, entity_maker=_translate_snapshot_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of snapshots, transformed through entity_maker."""
        context = req.environ['cinder.context']

        snapshots = self.volume_api.get_all_snapshots(context)
        limited_list = common.limited(snapshots, req)
        res = [entity_maker(context, snapshot) for snapshot in limited_list]
        return {'snapshots': res}

    @wsgi.serializers(xml=SnapshotTemplate)
    def create(self, req, body):
        """Creates a new snapshot."""
        context = req.environ['cinder.context']

        if not body:
            # BUG FIX: the exception was previously *returned*, handing the
            # serializer an exception object instead of sending a 422 to
            # the client.  Raise it, as volumes.py's create() does.
            raise exc.HTTPUnprocessableEntity()

        snapshot = body['snapshot']
        volume_id = snapshot['volume_id']
        volume = self.volume_api.get(context, volume_id)
        force = snapshot.get('force', False)
        msg = _("Create snapshot from volume %s")
        LOG.audit(msg, volume_id, context=context)

        # Both branches took identical arguments; select the API call once
        # instead of duplicating the call site.
        if force:
            create_snap = self.volume_api.create_snapshot_force
        else:
            create_snap = self.volume_api.create_snapshot
        new_snapshot = create_snap(context,
                                   volume,
                                   snapshot.get('display_name'),
                                   snapshot.get('display_description'))

        retval = _translate_snapshot_detail_view(context, new_snapshot)

        return {'snapshot': retval}
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource():
    """Build the WSGI resource wrapping the snapshots controller."""
    controller = SnapshotsController()
    return wsgi.Resource(controller)
|
76
cinder/api/openstack/volume/types.py
Normal file
76
cinder/api/openstack/volume/types.py
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Zadara Storage Inc.
|
||||||
|
# Copyright (c) 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
""" The volume type & volume types extra specs extension"""
|
||||||
|
|
||||||
|
from webob import exc
|
||||||
|
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import exception
|
||||||
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
|
|
||||||
|
def make_voltype(elem):
    """Declare volume-type attributes and the extra_specs sub-dict."""
    for attr in ('id', 'name'):
        elem.set(attr)
    elem.append(xmlutil.make_flat_dict('extra_specs',
                                       selector='extra_specs'))
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume type."""

    def construct(self):
        # Root <volume_type> element populated from the 'volume_type' key.
        root = xmlutil.TemplateElement('volume_type', selector='volume_type')
        make_voltype(root)
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of volume types."""

    def construct(self):
        # <volume_types> wrapper with one <volume_type> per list entry.
        root = xmlutil.TemplateElement('volume_types')
        elem = xmlutil.SubTemplateElement(root, 'volume_type',
                                          selector='volume_types')
        make_voltype(elem)
        return xmlutil.MasterTemplate(root, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTypesController(object):
    """The volume types API controller for the OpenStack API."""

    @wsgi.serializers(xml=VolumeTypesTemplate)
    def index(self, req):
        """Returns the list of volume types."""
        ctxt = req.environ['cinder.context']
        all_types = volume_types.get_all_types(ctxt)
        return {'volume_types': all_types.values()}

    @wsgi.serializers(xml=VolumeTypeTemplate)
    def show(self, req, id):
        """Return a single volume type item."""
        ctxt = req.environ['cinder.context']

        try:
            vol_type = volume_types.get_volume_type(ctxt, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        # TODO(bcwaldon): remove str cast once we use uuids
        vol_type['id'] = str(vol_type['id'])
        return {'volume_type': vol_type}
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource():
    """Build the WSGI resource wrapping the volume types controller."""
    controller = VolumeTypesController()
    return wsgi.Resource(controller)
|
83
cinder/api/openstack/volume/versions.py
Normal file
83
cinder/api/openstack/volume/versions.py
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
|
||||||
|
from cinder.api.openstack.compute import versions
|
||||||
|
from cinder.api.openstack.volume.views import versions as views_versions
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
|
||||||
|
|
||||||
|
# Static catalog of API versions served by this endpoint; rendered by the
# Versions / VolumeVersionV1 controllers below via the version view builder.
VERSIONS = {
    "v1.0": {
        "id": "v1.0",
        "status": "CURRENT",
        "updated": "2012-01-04T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "application/pdf",
                # Adjacent string literals concatenate into one URL.
                "href": "http://jorgew.github.com/block-storage-api/"
                        "content/os-block-storage-1.0.pdf",
            },
            {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                #(anthony) FIXME
                "href": "http://docs.rackspacecloud.com/"
                        "servers/api/v1.1/application.wadl",
            },
        ],
        "media-types": [
            {
                "base": "application/xml",
                "type": "application/vnd.openstack.volume+xml;version=1",
            },
            {
                "base": "application/json",
                "type": "application/vnd.openstack.volume+json;version=1",
            }
        ],
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
class Versions(versions.Versions):
    """Volume-API version index, reusing the compute versions controller
    but building views from this module's VERSIONS catalog."""

    @wsgi.serializers(xml=versions.VersionsTemplate,
                      atom=versions.VersionsAtomSerializer)
    def index(self, req):
        """Return all versions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(VERSIONS)

    @wsgi.serializers(xml=versions.ChoicesTemplate)
    @wsgi.response(300)
    def multi(self, req):
        """Return multiple choices."""
        # 300 Multiple Choices is forced by the @wsgi.response decorator.
        builder = views_versions.get_view_builder(req)
        return builder.build_choices(VERSIONS, req)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeVersionV1(object):
    """Detail view for the single supported volume API version (v1.0)."""

    @wsgi.serializers(xml=versions.VersionTemplate,
                      atom=versions.VersionAtomSerializer)
    def show(self, req):
        # Always describes v1.0; there is no other version to select.
        builder = views_versions.get_view_builder(req)
        return builder.build_version(VERSIONS['v1.0'])
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource():
    """Build the WSGI resource wrapping the version-detail controller."""
    controller = VolumeVersionV1()
    return wsgi.Resource(controller)
|
16
cinder/api/openstack/volume/views/__init__.py
Normal file
16
cinder/api/openstack/volume/views/__init__.py
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
36
cinder/api/openstack/volume/views/versions.py
Normal file
36
cinder/api/openstack/volume/views/versions.py
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010-2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from cinder.api.openstack.compute.views import versions as compute_views
|
||||||
|
|
||||||
|
|
||||||
|
def get_view_builder(req):
    """Return a version ViewBuilder rooted at the request's base URL."""
    return ViewBuilder(req.application_url)
|
||||||
|
|
||||||
|
|
||||||
|
class ViewBuilder(compute_views.ViewBuilder):
    """View builder that pins generated hrefs to the v1 volume API."""

    def generate_href(self, path=None):
        """Create an url that refers to a specific version_number."""
        version_number = 'v1'
        if not path:
            # No sub-path: version root URL with a trailing slash.
            return os.path.join(self.base_url, version_number) + '/'
        return os.path.join(self.base_url, version_number, path.strip('/'))
|
263
cinder/api/openstack/volume/volumes.py
Normal file
263
cinder/api/openstack/volume/volumes.py
Normal file
@ -0,0 +1,263 @@
|
|||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""The volumes api."""
|
||||||
|
|
||||||
|
from webob import exc
|
||||||
|
import webob
|
||||||
|
|
||||||
|
from cinder.api.openstack import common
|
||||||
|
from cinder.api.openstack import wsgi
|
||||||
|
from cinder.api.openstack import xmlutil
|
||||||
|
from cinder import exception
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder import volume
|
||||||
|
from cinder.volume import volume_types
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_attachment_detail_view(_context, vol):
    """Maps keys for attachment details view.

    Currently identical to the summary view; no additional data or
    lookups are performed.
    """
    return _translate_attachment_summary_view(_context, vol)
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_attachment_summary_view(_context, vol):
|
||||||
|
"""Maps keys for attachment summary view."""
|
||||||
|
d = {}
|
||||||
|
|
||||||
|
# TODO(bcwaldon): remove str cast once we use uuids
|
||||||
|
volume_id = str(vol['id'])
|
||||||
|
|
||||||
|
# NOTE(justinsb): We use the volume id as the id of the attachment object
|
||||||
|
d['id'] = volume_id
|
||||||
|
|
||||||
|
d['volume_id'] = volume_id
|
||||||
|
if vol.get('instance'):
|
||||||
|
d['server_id'] = vol['instance']['uuid']
|
||||||
|
if vol.get('mountpoint'):
|
||||||
|
d['device'] = vol['mountpoint']
|
||||||
|
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view.

    Currently identical to the summary view; no additional data or
    lookups are performed.
    """
    return _translate_volume_summary_view(context, vol)
|
||||||
|
|
||||||
|
|
||||||
|
def _translate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view."""
    # TODO(bcwaldon): remove str cast once we use uuids
    d = {
        'id': str(vol['id']),
        'status': vol['status'],
        'size': vol['size'],
        'availability_zone': vol['availability_zone'],
        'created_at': vol['created_at'],
        'display_name': vol['display_name'],
        'display_description': vol['display_description'],
        'snapshot_id': vol['snapshot_id'],
    }

    # Only attached volumes expose an attachment record.
    d['attachments'] = []
    if vol['attach_status'] == 'attached':
        d['attachments'].append(
            _translate_attachment_detail_view(context, vol))

    if vol['volume_type_id'] and vol.get('volume_type'):
        d['volume_type'] = vol['volume_type']['name']
    else:
        # TODO(bcwaldon): remove str cast once we use uuids
        d['volume_type'] = str(vol['volume_type_id'])

    # TODO(bcwaldon): remove str cast once we use uuids
    if d['snapshot_id'] is not None:
        d['snapshot_id'] = str(d['snapshot_id'])

    LOG.audit(_("vol=%s"), vol, context=context)

    metadata = vol.get('volume_metadata')
    if metadata:
        d['metadata'] = dict((i['key'], i['value']) for i in metadata)
    else:
        d['metadata'] = {}

    return d
|
||||||
|
|
||||||
|
|
||||||
|
def make_attachment(elem):
    """Declare the attachment attributes on an XML template element."""
    for attr in ('id', 'server_id', 'volume_id', 'device'):
        elem.set(attr)
|
||||||
|
|
||||||
|
|
||||||
|
def make_volume(elem):
    """Declare volume attributes, attachment list and metadata on elem."""
    for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
                 'display_name', 'display_description', 'volume_type',
                 'snapshot_id'):
        elem.set(attr)

    # Nested <attachments><attachment .../></attachments> list.
    attachments = xmlutil.SubTemplateElement(elem, 'attachments')
    attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
                                            selector='attachments')
    make_attachment(attachment)

    # Flat <metadata> dict appended after the attachments.
    elem.append(xmlutil.make_flat_dict('metadata'))
|
||||||
|
|
||||||
|
|
||||||
|
volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM}
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single volume."""

    def construct(self):
        # Root <volume> element populated from the 'volume' dict key.
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of volumes."""

    def construct(self):
        # <volumes> wrapper with one <volume> per entry of the list.
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap)
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeController(object):
|
||||||
|
"""The Volumes API controller for the OpenStack API."""
|
||||||
|
|
||||||
|
    def __init__(self):
        # One shared volume API client used by every handler method.
        self.volume_api = volume.API()
        super(VolumeController, self).__init__()
|
||||||
|
|
||||||
|
@wsgi.serializers(xml=VolumeTemplate)
|
||||||
|
def show(self, req, id):
|
||||||
|
"""Return data about the given volume."""
|
||||||
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
|
try:
|
||||||
|
vol = self.volume_api.get(context, id)
|
||||||
|
except exception.NotFound:
|
||||||
|
raise exc.HTTPNotFound()
|
||||||
|
|
||||||
|
return {'volume': _translate_volume_detail_view(context, vol)}
|
||||||
|
|
||||||
|
def delete(self, req, id):
|
||||||
|
"""Delete a volume."""
|
||||||
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
|
LOG.audit(_("Delete volume with id: %s"), id, context=context)
|
||||||
|
|
||||||
|
try:
|
||||||
|
volume = self.volume_api.get(context, id)
|
||||||
|
self.volume_api.delete(context, volume)
|
||||||
|
except exception.NotFound:
|
||||||
|
raise exc.HTTPNotFound()
|
||||||
|
return webob.Response(status_int=202)
|
||||||
|
|
||||||
|
@wsgi.serializers(xml=VolumesTemplate)
|
||||||
|
def index(self, req):
|
||||||
|
"""Returns a summary list of volumes."""
|
||||||
|
return self._items(req, entity_maker=_translate_volume_summary_view)
|
||||||
|
|
||||||
|
@wsgi.serializers(xml=VolumesTemplate)
|
||||||
|
def detail(self, req):
|
||||||
|
"""Returns a detailed list of volumes."""
|
||||||
|
return self._items(req, entity_maker=_translate_volume_detail_view)
|
||||||
|
|
||||||
|
def _items(self, req, entity_maker):
|
||||||
|
"""Returns a list of volumes, transformed through entity_maker."""
|
||||||
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
|
volumes = self.volume_api.get_all(context)
|
||||||
|
limited_list = common.limited(volumes, req)
|
||||||
|
res = [entity_maker(context, vol) for vol in limited_list]
|
||||||
|
return {'volumes': res}
|
||||||
|
|
||||||
|
@wsgi.serializers(xml=VolumeTemplate)
|
||||||
|
def create(self, req, body):
|
||||||
|
"""Creates a new volume."""
|
||||||
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
|
if not body:
|
||||||
|
raise exc.HTTPUnprocessableEntity()
|
||||||
|
|
||||||
|
volume = body['volume']
|
||||||
|
size = volume['size']
|
||||||
|
LOG.audit(_("Create volume of %s GB"), size, context=context)
|
||||||
|
|
||||||
|
kwargs = {}
|
||||||
|
|
||||||
|
req_volume_type = volume.get('volume_type', None)
|
||||||
|
if req_volume_type:
|
||||||
|
try:
|
||||||
|
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
|
||||||
|
context, req_volume_type)
|
||||||
|
except exception.NotFound:
|
||||||
|
raise exc.HTTPNotFound()
|
||||||
|
|
||||||
|
kwargs['metadata'] = volume.get('metadata', None)
|
||||||
|
|
||||||
|
snapshot_id = volume.get('snapshot_id')
|
||||||
|
if snapshot_id is not None:
|
||||||
|
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
|
||||||
|
snapshot_id)
|
||||||
|
else:
|
||||||
|
kwargs['snapshot'] = None
|
||||||
|
|
||||||
|
kwargs['availability_zone'] = volume.get('availability_zone', None)
|
||||||
|
|
||||||
|
new_volume = self.volume_api.create(context,
|
||||||
|
size,
|
||||||
|
volume.get('display_name'),
|
||||||
|
volume.get('display_description'),
|
||||||
|
**kwargs)
|
||||||
|
|
||||||
|
# TODO(vish): Instance should be None at db layer instead of
|
||||||
|
# trying to lazy load, but for now we turn it into
|
||||||
|
# a dict to avoid an error.
|
||||||
|
retval = _translate_volume_detail_view(context, dict(new_volume))
|
||||||
|
|
||||||
|
return {'volume': retval}
|
||||||
|
|
||||||
|
|
||||||
|
def create_resource():
    """Build the WSGI resource wrapping the volume controller."""
    controller = VolumeController()
    return wsgi.Resource(controller)
1123
cinder/api/openstack/wsgi.py
Normal file
1123
cinder/api/openstack/wsgi.py
Normal file
File diff suppressed because it is too large
Load Diff
908
cinder/api/openstack/xmlutil.py
Normal file
908
cinder/api/openstack/xmlutil.py
Normal file
@ -0,0 +1,908 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
from lxml import etree
|
||||||
|
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
# XML namespace URIs used by the API serialization templates in this module.
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1'
|
||||||
|
def validate_schema(xml, schema_name):
    """Validate an XML document against a named RelaxNG schema.

    :param xml: The document to validate; either a string (parsed here)
                or an already-parsed lxml element/tree.
    :param schema_name: Base name of the ``.rng`` schema file to load.
    :raises: lxml validation errors via ``relaxng.assertValid()`` when
             the document does not conform to the schema.
    """
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    # Atom schemas live one directory above the versioned API schemas.
    base_path = 'cinder/api/openstack/compute/schemas/v1.1/'
    if schema_name in ('atom', 'atom-link'):
        base_path = 'cinder/api/openstack/compute/schemas/'
    schema_path = os.path.join(utils.cinderdir(),
                               '%s%s.rng' % (base_path, schema_name))
    schema_doc = etree.parse(schema_path)
    relaxng = etree.RelaxNG(schema_doc)
    relaxng.assertValid(xml)
||||||
|
class Selector(object):
    """Selects datum to operate on from an object."""

    def __init__(self, *chain):
        """Initialize the selector.

        Each argument is a subsequent index into the object.
        """
        self.chain = chain

    def __repr__(self):
        """Return a representation of the selector."""
        return "Selector" + repr(self.chain)

    def __call__(self, obj, do_raise=False):
        """Select a datum to operate on.

        Selects the relevant datum within the object.

        :param obj: The object from which to select the object.
        :param do_raise: If False (the default), return None if the
                         indexed datum does not exist.  Otherwise,
                         raise a KeyError.
        """
        current = obj
        for step in self.chain:
            # Callable steps transform the current value directly.
            if callable(step):
                current = step(current)
                continue
            # Non-callable steps are used as subscripts.
            try:
                current = current[step]
            except (KeyError, IndexError):
                if do_raise:
                    # Normalize IndexError to KeyError for consistency.
                    raise KeyError(step)
                return None
        return current
||||||
|
def get_items(obj):
    """Get items in obj."""

    return [pair for pair in obj.items()]
||||||
|
class EmptyStringSelector(Selector):
    """Returns the empty string if Selector would return None."""

    def __call__(self, obj, do_raise=False):
        """Returns empty string if the selected value does not exist."""
        # Force do_raise so a missing datum surfaces as KeyError, which
        # we translate into the empty string regardless of the caller's
        # do_raise setting.
        try:
            result = super(EmptyStringSelector, self).__call__(obj, True)
        except KeyError:
            return ""
        return result
||||||
|
class ConstantSelector(object):
    """Selector-compatible object that always yields a fixed value."""

    def __init__(self, value):
        """Initialize the selector.

        :param value: The value to return.
        """
        self.value = value

    def __repr__(self):
        """Return a representation of the selector."""
        return repr(self.value)

    def __call__(self, _obj, _do_raise=False):
        """Select a datum to operate on.

        Ignores both arguments and returns the stored constant; the
        signature matches Selector.__call__() so instances can be used
        anywhere a Selector is expected.
        """
        return self.value
||||||
|
class TemplateElement(object):
    """Represent an element in the template.

    A TemplateElement mirrors one XML element in the eventual output;
    its text and attributes are selectors evaluated against the object
    being serialized, and its children form the template tree.
    """

    def __init__(self, tag, attrib=None, selector=None, subselector=None,
                 **extra):
        """Initialize an element.

        Initializes an element in the template.  Keyword arguments
        specify attributes to be set on the element; values must be
        callables.  See TemplateElement.set() for more information.

        :param tag: The name of the tag to create.
        :param attrib: An optional dictionary of element attributes.
        :param selector: An optional callable taking an object and
                         optional boolean do_raise indicator and
                         returning the object bound to the element.
        :param subselector: An optional callable taking an object and
                            optional boolean do_raise indicator and
                            returning the object bound to the element.
                            This is used to further refine the datum
                            object returned by selector in the event
                            that it is a list of objects.
        """

        # Convert selector into a Selector
        if selector is None:
            selector = Selector()
        elif not callable(selector):
            selector = Selector(selector)

        # Convert subselector into a Selector
        if subselector is not None and not callable(subselector):
            subselector = Selector(subselector)

        self.tag = tag
        self.selector = selector
        self.subselector = subselector
        self.attrib = {}
        self._text = None
        self._children = []
        # Tag-to-element index kept in sync with _children for O(1)
        # name lookups and duplicate detection.
        self._childmap = {}

        # Run the incoming attributes through set() so that they
        # become selectorized
        if not attrib:
            attrib = {}
        attrib.update(extra)
        for k, v in attrib.items():
            self.set(k, v)

    def __repr__(self):
        """Return a representation of the template element."""

        return ('<%s.%s %r at %#x>' %
                (self.__class__.__module__, self.__class__.__name__,
                 self.tag, id(self)))

    def __len__(self):
        """Return the number of child elements."""

        return len(self._children)

    def __contains__(self, key):
        """Determine whether a child node named by key exists."""

        return key in self._childmap

    def __getitem__(self, idx):
        """Retrieve a child node by index or name."""

        if isinstance(idx, basestring):
            # Allow access by node name
            return self._childmap[idx]
        else:
            return self._children[idx]

    def append(self, elem):
        """Append a child to the element.

        :raises KeyError: if a child with the same tag already exists.
        """

        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.append(elem)
        self._childmap[elem.tag] = elem

    def extend(self, elems):
        """Append children to the element.

        All-or-nothing: duplicates are detected before anything is
        added, so a KeyError leaves the element unchanged.
        """

        # Pre-evaluate the elements
        elemmap = {}
        elemlist = []
        for elem in elems:
            # Unwrap templates...
            elem = elem.unwrap()

            # Avoid duplications
            if elem.tag in self._childmap or elem.tag in elemmap:
                raise KeyError(elem.tag)

            elemmap[elem.tag] = elem
            elemlist.append(elem)

        # Update the children
        self._children.extend(elemlist)
        self._childmap.update(elemmap)

    def insert(self, idx, elem):
        """Insert a child element at the given index.

        :raises KeyError: if a child with the same tag already exists.
        """

        # Unwrap templates...
        elem = elem.unwrap()

        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)

        self._children.insert(idx, elem)
        self._childmap[elem.tag] = elem

    def remove(self, elem):
        """Remove a child element.

        :raises ValueError: if elem is not currently a child.
        """

        # Unwrap templates...
        elem = elem.unwrap()

        # Check if element exists
        if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
            raise ValueError(_('element is not a child'))

        self._children.remove(elem)
        del self._childmap[elem.tag]

    def get(self, key):
        """Get an attribute.

        Returns a callable which performs datum selection.

        :param key: The name of the attribute to get.
        """

        return self.attrib[key]

    def set(self, key, value=None):
        """Set an attribute.

        :param key: The name of the attribute to set.

        :param value: A callable taking an object and optional boolean
                      do_raise indicator and returning the datum bound
                      to the attribute.  If None, a Selector() will be
                      constructed from the key.  If a string, a
                      Selector() will be constructed from the string.
        """

        # Convert value to a selector
        if value is None:
            value = Selector(key)
        elif not callable(value):
            value = Selector(value)

        self.attrib[key] = value

    def keys(self):
        """Return the attribute names."""

        return self.attrib.keys()

    def items(self):
        """Return the attribute names and values."""

        return self.attrib.items()

    def unwrap(self):
        """Unwraps a template to return a template element."""

        # We are a template element
        return self

    def wrap(self):
        """Wraps a template element to return a template."""

        # Wrap in a basic Template
        return Template(self)

    def apply(self, elem, obj):
        """Apply text and attributes to an etree.Element.

        Applies the text and attribute instructions in the template
        element to an etree.Element instance.

        :param elem: An etree.Element instance.
        :param obj: The base object associated with this template
                    element.
        """

        # Start with the text...
        if self.text is not None:
            elem.text = unicode(self.text(obj))

        # Now set up all the attributes...
        for key, value in self.attrib.items():
            try:
                elem.set(key, unicode(value(obj, True)))
            except KeyError:
                # Attribute has no value, so don't include it
                pass

    def _render(self, parent, datum, patches, nsmap):
        """Internal rendering.

        Renders the template node into an etree.Element object.
        Returns the etree.Element object.

        :param parent: The parent etree.Element instance.
        :param datum: The datum associated with this template element.
        :param patches: A list of other template elements that must
                        also be applied.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance.
        """

        # Allocate a node
        if callable(self.tag):
            # Dynamic tag name, computed from the datum.
            tagname = self.tag(datum)
        else:
            tagname = self.tag
        elem = etree.Element(tagname, nsmap=nsmap)

        # If we have a parent, append the node to the parent
        if parent is not None:
            parent.append(elem)

        # If the datum is None, do nothing else
        if datum is None:
            return elem

        # Apply this template element to the element
        self.apply(elem, datum)

        # Additionally, apply the patches
        for patch in patches:
            patch.apply(elem, datum)

        # We have fully rendered the element; return it
        return elem

    # NOTE(review): the mutable default `patches=[]` is benign here --
    # the list is only iterated, never mutated -- but `patches=None`
    # would be the safer idiom.
    def render(self, parent, obj, patches=[], nsmap=None):
        """Render an object.

        Renders an object against this template node.  Returns a list
        of two-item tuples, where the first item is an etree.Element
        instance and the second item is the datum associated with that
        instance.

        :param parent: The parent for the etree.Element instances.
        :param obj: The object to render this template element
                    against.
        :param patches: A list of other template elements to apply
                        when rendering this template element.
        :param nsmap: An optional namespace dictionary to attach to
                      the etree.Element instances.
        """

        # First, get the datum we're rendering
        data = None if obj is None else self.selector(obj)

        # Check if we should render at all
        if not self.will_render(data):
            return []
        elif data is None:
            return [(self._render(parent, None, patches, nsmap), None)]

        # Make the data into a list if it isn't already
        if not isinstance(data, list):
            data = [data]
        elif parent is None:
            raise ValueError(_('root element selecting a list'))

        # Render all the elements
        elems = []
        for datum in data:
            if self.subselector is not None:
                datum = self.subselector(datum)
            elems.append((self._render(parent, datum, patches, nsmap), datum))

        # Return all the elements rendered, as well as the
        # corresponding datum for the next step down the tree
        return elems

    def will_render(self, datum):
        """Hook method.

        An overridable hook method to determine whether this template
        element will be rendered at all.  By default, returns False
        (inhibiting rendering) if the datum is None.

        :param datum: The datum associated with this template element.
        """

        # Don't render if datum is None
        return datum is not None

    def _text_get(self):
        """Template element text.

        Either None or a callable taking an object and optional
        boolean do_raise indicator and returning the datum bound to
        the text of the template element.
        """

        return self._text

    def _text_set(self, value):
        # Convert value to a selector
        if value is not None and not callable(value):
            value = Selector(value)

        self._text = value

    def _text_del(self):
        self._text = None

    text = property(_text_get, _text_set, _text_del)

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template rooted at this
        element as a string, suitable for inclusion in debug logs.
        """

        # Build the inner contents of the tag...
        contents = [self.tag, '!selector=%r' % self.selector]

        # Add the text...
        if self.text is not None:
            contents.append('!text=%r' % self.text)

        # Add all the other attributes
        for key, value in self.attrib.items():
            contents.append('%s=%r' % (key, value))

        # If there are no children, return it as a closed tag
        if len(self) == 0:
            return '<%s/>' % ' '.join([str(i) for i in contents])

        # OK, recurse to our children
        children = [c.tree() for c in self]

        # Return the result
        return ('<%s>%s</%s>' %
                (' '.join(contents), ''.join(children), self.tag))
||||||
|
def SubTemplateElement(parent, tag, attrib=None, selector=None,
                       subselector=None, **extra):
    """Create a template element as a child of another.

    Corresponds to the etree.SubElement interface.  Parameters are as
    for TemplateElement, with the addition of the parent.
    """

    # Convert attributes.  BUGFIX: copy before merging so a caller's
    # attrib dictionary is never mutated by the **extra update.
    attrib = dict(attrib) if attrib else {}
    attrib.update(extra)

    # Get a TemplateElement
    elem = TemplateElement(tag, attrib=attrib, selector=selector,
                           subselector=subselector)

    # Append the parent safely
    if parent is not None:
        parent.append(elem)

    return elem
||||||
|
class Template(object):
    """Represent a template.

    A Template wraps a tree of TemplateElement instances and drives
    the recursive serialization of an object into etree Elements.
    """

    def __init__(self, root, nsmap=None):
        """Initialize a template.

        :param root: The root element of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """

        self.root = root.unwrap() if root is not None else None
        self.nsmap = nsmap or {}
        # Defaults passed through to etree.tostring() by serialize().
        self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)

    def _serialize(self, parent, obj, siblings, nsmap=None):
        """Internal serialization.

        Recursive routine to build a tree of etree.Element instances
        from an object based on the template.  Returns the first
        etree.Element instance rendered, or None.

        :param parent: The parent etree.Element instance.  Can be
                       None.
        :param obj: The object to render.
        :param siblings: The TemplateElement instances against which
                         to render the object.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance
                      rendered.
        """

        # First step, render the element
        elems = siblings[0].render(parent, obj, siblings[1:], nsmap)

        # Now, recurse to all child elements
        seen = set()
        for idx, sibling in enumerate(siblings):
            for child in sibling:
                # Have we handled this child already?
                if child.tag in seen:
                    continue
                seen.add(child.tag)

                # Determine the child's siblings
                nieces = [child]
                for sib in siblings[idx + 1:]:
                    if child.tag in sib:
                        nieces.append(sib[child.tag])

                # Now we recurse for every data element
                for elem, datum in elems:
                    self._serialize(elem, datum, nieces)

        # Return the first element; at the top level, this will be the
        # root element
        if elems:
            return elems[0][0]

    def serialize(self, obj, *args, **kwargs):
        """Serialize an object.

        Serializes an object against the template.  Returns a string
        with the serialized XML.  Positional and keyword arguments are
        passed to etree.tostring().

        :param obj: The object to serialize.
        """

        elem = self.make_tree(obj)
        if elem is None:
            return ''

        # Fill in serialization defaults without overriding the caller.
        for k, v in self.serialize_options.items():
            kwargs.setdefault(k, v)

        # Serialize it into XML
        return etree.tostring(elem, *args, **kwargs)

    def make_tree(self, obj):
        """Create a tree.

        Serializes an object against the template.  Returns an Element
        node with appropriate children.

        :param obj: The object to serialize.
        """

        # If the template is empty, return the empty string
        if self.root is None:
            return None

        # Get the siblings and nsmap of the root element
        siblings = self._siblings()
        nsmap = self._nsmap()

        # Form the element tree
        return self._serialize(None, obj, siblings, nsmap)

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element.  By default, this is the root element itself.
        """

        return [self.root]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        """

        return self.nsmap.copy()

    def unwrap(self):
        """Unwraps a template to return a template element."""

        # Return the root element
        return self.root

    def wrap(self):
        """Wraps a template element to return a template."""

        # We are a template
        return self

    def apply(self, master):
        """Hook method for determining slave applicability.

        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.

        :param master: The master template to test.
        """

        return True

    def tree(self):
        """Return string representation of the template tree.

        Returns a representation of the template as a string, suitable
        for inclusion in debug logs.
        """

        return "%r: %s" % (self, self.root.tree())
||||||
|
class MasterTemplate(Template):
    """Represent a master template.

    Master templates are versioned derivatives of templates that
    additionally allow slave templates to be attached.  Slave
    templates allow modification of the serialized result without
    directly changing the master.
    """

    def __init__(self, root, version, nsmap=None):
        """Initialize a master template.

        :param root: The root element of the template.
        :param version: The version number of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """

        super(MasterTemplate, self).__init__(root, nsmap)
        self.version = version
        self.slaves = []

    def __repr__(self):
        """Return string representation of the template."""

        return ("<%s.%s object version %s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.version, id(self)))

    def _siblings(self):
        """Hook method for computing root siblings.

        An overridable hook method to return the siblings of the root
        element.  This is the root element plus the root elements of
        all the slave templates.
        """

        return [self.root] + [slave.root for slave in self.slaves]

    def _nsmap(self):
        """Hook method for computing the namespace dictionary.

        An overridable hook method to return the namespace dictionary.
        The namespace dictionary is computed by taking the master
        template's namespace dictionary and updating it from all the
        slave templates.
        """

        nsmap = self.nsmap.copy()
        for slave in self.slaves:
            nsmap.update(slave._nsmap())
        return nsmap

    def attach(self, *slaves):
        """Attach one or more slave templates.

        Attaches one or more slave templates to the master template.
        Slave templates must have a root element with the same tag as
        the master template.  The slave template's apply() method will
        be called to determine if the slave should be applied to this
        master; if it returns False, that slave will be skipped.
        (This allows filtering of slaves based on the version of the
        master template.)
        """

        slave_list = []
        for slave in slaves:
            slave = slave.wrap()

            # Make sure we have a tree match
            if slave.root.tag != self.root.tag:
                slavetag = slave.root.tag
                mastertag = self.root.tag
                msg = _("Template tree mismatch; adding slave %(slavetag)s "
                        "to master %(mastertag)s") % locals()
                raise ValueError(msg)

            # Make sure slave applies to this template
            if not slave.apply(self):
                continue

            slave_list.append(slave)

        # Add the slaves only after every candidate has been vetted, so
        # a mismatch leaves the master unchanged.
        self.slaves.extend(slave_list)

    def copy(self):
        """Return a copy of this master template.

        The copy shares root/slave objects with the original (shallow
        copy of the slave list).
        """

        # Return a copy of the MasterTemplate
        tmp = self.__class__(self.root, self.version, self.nsmap)
        tmp.slaves = self.slaves[:]
        return tmp
||||||
|
class SlaveTemplate(Template):
    """Represent a slave template.

    Slave templates are versioned derivatives of templates.  Each
    slave has a minimum version and optional maximum version of the
    master template to which they can be attached.
    """

    def __init__(self, root, min_vers, max_vers=None, nsmap=None):
        """Initialize a slave template.

        :param root: The root element of the template.
        :param min_vers: The minimum permissible version of the master
                         template for this slave template to apply.
        :param max_vers: An optional upper bound for the master
                         template version.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """
        super(SlaveTemplate, self).__init__(root, nsmap)
        self.min_vers = min_vers
        self.max_vers = max_vers

    def __repr__(self):
        """Return string representation of the template."""
        return ("<%s.%s object versions %s-%s at %#x>" %
                (type(self).__module__, type(self).__name__,
                 self.min_vers, self.max_vers, id(self)))

    def apply(self, master):
        """Hook method for determining slave applicability.

        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.  This
        version requires the master template to have a version number
        between min_vers and max_vers.

        :param master: The master template to test.
        """
        # Too old a master is never acceptable.
        if master.version < self.min_vers:
            return False

        # Accept when there is no upper bound, or when the master does
        # not exceed it.
        return self.max_vers is None or master.version <= self.max_vers
||||||
|
class TemplateBuilder(object):
|
||||||
|
"""Template builder.
|
||||||
|
|
||||||
|
This class exists to allow templates to be lazily built without
|
||||||
|
having to build them each time they are needed. It must be
|
||||||
|
subclassed, and the subclass must implement the construct()
|
||||||
|
method, which must return a Template (or subclass) instance. The
|
||||||
|
constructor will always return the template returned by
|
||||||
|
construct(), or, if it has a copy() method, a copy of that
|
||||||
|
template.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_tmpl = None
|
||||||
|
|
||||||
|
def __new__(cls, copy=True):
|
||||||
|
"""Construct and return a template.
|
||||||
|
|
||||||
|
:param copy: If True (the default), a copy of the template
|
||||||
|
will be constructed and returned, if possible.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Do we need to construct the template?
|
||||||
|
if cls._tmpl is None:
|
||||||
|
tmp = super(TemplateBuilder, cls).__new__(cls)
|
||||||
|
|
||||||
|
# Construct the template
|
||||||
|
cls._tmpl = tmp.construct()
|
||||||
|
|
||||||
|
# If the template has a copy attribute, return the result of
|
||||||
|
# calling it
|
||||||
|
if copy and hasattr(cls._tmpl, 'copy'):
|
||||||
|
return cls._tmpl.copy()
|
||||||
|
|
||||||
|
# Return the template
|
||||||
|
return cls._tmpl
|
||||||
|
|
||||||
|
def construct(self):
|
||||||
|
"""Construct a template.
|
||||||
|
|
||||||
|
Called to construct a template instance, which it must return.
|
||||||
|
Only called once.
|
||||||
|
"""
|
||||||
|
|
||||||
|
raise NotImplementedError(_("subclasses must implement construct()!"))
|
||||||
|
|
||||||
|
|
||||||
|
def make_links(parent, selector=None):
|
||||||
|
"""
|
||||||
|
Attach an Atom <links> element to the parent.
|
||||||
|
"""
|
||||||
|
|
||||||
|
elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
|
||||||
|
selector=selector)
|
||||||
|
elem.set('rel')
|
||||||
|
elem.set('type')
|
||||||
|
elem.set('href')
|
||||||
|
|
||||||
|
# Just for completeness...
|
||||||
|
return elem
|
||||||
|
|
||||||
|
|
||||||
|
def make_flat_dict(name, selector=None, subselector=None, ns=None):
|
||||||
|
"""
|
||||||
|
Utility for simple XML templates that traditionally used
|
||||||
|
XMLDictSerializer with no metadata. Returns a template element
|
||||||
|
where the top-level element has the given tag name, and where
|
||||||
|
sub-elements have tag names derived from the object's keys and
|
||||||
|
text derived from the object's values. This only works for flat
|
||||||
|
dictionary objects, not dictionaries containing nested lists or
|
||||||
|
dictionaries.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Set up the names we need...
|
||||||
|
if ns is None:
|
||||||
|
elemname = name
|
||||||
|
tagname = Selector(0)
|
||||||
|
else:
|
||||||
|
elemname = '{%s}%s' % (ns, name)
|
||||||
|
tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
|
||||||
|
|
||||||
|
if selector is None:
|
||||||
|
selector = name
|
||||||
|
|
||||||
|
# Build the root element
|
||||||
|
root = TemplateElement(elemname, selector=selector,
|
||||||
|
subselector=subselector)
|
||||||
|
|
||||||
|
# Build an element to represent all the keys and values
|
||||||
|
elem = SubTemplateElement(root, tagname, selector=get_items)
|
||||||
|
elem.text = 1
|
||||||
|
|
||||||
|
# Return the template
|
||||||
|
return root
|
54
cinder/api/sizelimit.py
Normal file
54
cinder/api/sizelimit.py
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2012 OpenStack, LLC
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""
|
||||||
|
Request Body limiting middleware.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import webob.dec
|
||||||
|
import webob.exc
|
||||||
|
|
||||||
|
from cinder import context
|
||||||
|
from cinder import flags
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import cfg
|
||||||
|
from cinder import wsgi
|
||||||
|
|
||||||
|
|
||||||
|
# Default maximum request body size is 112k (112 * 1024 bytes).
# NOTE: this limit is a byte count compared numerically in
# RequestBodySizeLimiter, so it must be an IntOpt; the original
# BoolOpt declaration was a bug.
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
                                       default=114688,
                                       help='Max size for body of a request')

FLAGS = flags.FLAGS
FLAGS.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RequestBodySizeLimiter(wsgi.Middleware):
    """Reject requests whose body exceeds osapi_max_request_body_size.

    (The previous docstring -- "Add a 'cinder.context' to WSGI
    environ" -- was copied from the context middleware and did not
    describe this class.)
    """

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Check both the declared Content-Length and the actual body
        # length; exceeding the limit on either rejects the request.
        if (req.content_length > FLAGS.osapi_max_request_body_size
                or len(req.body) > FLAGS.osapi_max_request_body_size):
            msg = _("Request is too large.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        else:
            return self.application
|
15
cinder/common/__init__.py
Normal file
15
cinder/common/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
64
cinder/common/memorycache.py
Normal file
64
cinder/common/memorycache.py
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Super simple fake memcache client."""
|
||||||
|
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
class Client(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args."""
        # Maps key -> (absolute expiry timestamp, or 0 for "never", value).
        self.cache = {}

    def get(self, key):
        """Retrieve the value for a key, or None if absent.

        Every call expunges *all* expired keys, not just the one
        requested.
        """
        # Iterate over a snapshot of the keys: entries are deleted
        # while scanning, which would break direct dict iteration.
        for k in list(self.cache):
            (timeout, _value) = self.cache[k]
            if timeout and utils.utcnow_ts() >= timeout:
                del self.cache[k]

        return self.cache.get(key, (0, None))[1]

    def set(self, key, value, time=0, min_compress_len=0):
        """Set the value for a key; expire after *time* seconds (0 = never)."""
        timeout = 0
        if time != 0:
            timeout = utils.utcnow_ts() + time
        self.cache[key] = (timeout, value)
        return True

    def add(self, key, value, time=0, min_compress_len=0):
        """Set the value for a key only if it doesn't already exist."""
        # Idiomatic "is not None" (was "not ... is None").
        if self.get(key) is not None:
            return False
        return self.set(key, value, time, min_compress_len)

    def incr(self, key, delta=1):
        """Increment the value for a key; returns None for missing keys."""
        value = self.get(key)
        if value is None:
            return None
        new_value = int(value) + delta
        # Stored back as a string, mirroring memcached semantics.
        self.cache[key] = (self.cache[key][0], str(new_value))
        return new_value
|
222
cinder/common/policy.py
Normal file
222
cinder/common/policy.py
Normal file
@ -0,0 +1,222 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2011 OpenStack, LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Common Policy Engine Implementation"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import urllib
|
||||||
|
import urllib2
|
||||||
|
|
||||||
|
|
||||||
|
class NotAuthorized(Exception):
    """Raised when a policy check rejects the given credentials."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
_BRAIN = None
|
||||||
|
|
||||||
|
|
||||||
|
def set_brain(brain):
    """Install the brain used by enforce().

    When no brain has been installed, enforce() falls back to a plain
    Brain() instance.
    """
    global _BRAIN
    _BRAIN = brain
|
||||||
|
|
||||||
|
|
||||||
|
def reset():
    """Discard the brain used by enforce()."""
    global _BRAIN
    _BRAIN = None
|
||||||
|
|
||||||
|
|
||||||
|
def enforce(match_list, target_dict, credentials_dict):
    """Enforce authorization of some rules against credentials.

    :param match_list: nested tuples of data to match against.  The
        basic brain supports three kinds of match:

        1) rules -- ``('rule:compute:get_instance',)`` retrieves the
           named rule from the rules dict and recursively checks
           against its contents.
        2) roles -- ``('role:compute:admin',)`` matches when the role
           appears in ``credentials_dict['roles']``.
        3) generic -- ``('tenant_id:%(tenant_id)s',)`` substitutes
           values from the target dict into the match using the ``%%``
           operator and compares them against the creds dict.

        Combining rules: the outer tuple matches if ANY entry matches,
        an inner tuple only if ALL its entries match, enabling simple
        boolean logic.  For example, this returns True if the creds
        contain the role 'admin' OR if the tenant_id matches the
        target dict AND the creds contain the role
        'compute_sysadmin'::

            {
                "rule:combined": (
                    'role:admin',
                    ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
                )
            }

        ``rule`` and ``role`` are reserved words in the credentials
        match; custom brains may reserve more (HttpBrain reserves
        ``http``).

    :param target_dict: dict of object properties -- as much
        information as we have about the object being operated on.
    :param credentials_dict: dict of actor properties -- as much
        information as we have about the user performing the action.
    :raises NotAuthorized: if the check fails.
    """
    global _BRAIN
    # Lazily fall back to a default Brain when none was installed.
    if not _BRAIN:
        _BRAIN = Brain()
    if not _BRAIN.check(match_list, target_dict, credentials_dict):
        raise NotAuthorized()
|
||||||
|
|
||||||
|
|
||||||
|
class Brain(object):
    """Implements policy checking."""

    @classmethod
    def load_json(cls, data, default_rule=None):
        """Build a brain from a JSON rules document instead of a dict."""
        return cls(rules=json.loads(data), default_rule=default_rule)

    def __init__(self, rules=None, default_rule=None):
        self.rules = rules or {}
        self.default_rule = default_rule

    def add_rule(self, key, match):
        self.rules[key] = match

    def _check(self, match, target_dict, cred_dict):
        # Dispatch on the text before the first ':'; kinds without a
        # dedicated _check_<kind> method use the generic matcher.
        match_kind, match_value = match.split(':', 1)
        try:
            checker = getattr(self, '_check_%s' % match_kind)
        except AttributeError:
            if not self._check_generic(match, target_dict, cred_dict):
                return False
        else:
            if not checker(match_value, target_dict, cred_dict):
                return False
        return True

    def check(self, match_list, target_dict, cred_dict):
        """Check authorization of some rules against credentials.

        Detailed description of the check with examples in
        policy.enforce().

        :param match_list: nested tuples of data to match against
        :param target_dict: dict of object properties
        :param cred_dict: dict of actor properties
        :returns: True if the check passes
        """
        # An empty match list authorizes everything.
        if not match_list:
            return True
        for and_list in match_list:
            # A bare string behaves like a one-element AND group.
            if isinstance(and_list, basestring):
                and_list = (and_list,)
            if all([self._check(item, target_dict, cred_dict)
                    for item in and_list]):
                return True
        return False

    def _check_rule(self, match, target_dict, cred_dict):
        """Recursively check credentials based on the brain's rules."""
        try:
            new_match_list = self.rules[match]
        except KeyError:
            # Unknown rule: defer to the default rule, unless we are
            # already checking the default rule itself.
            if self.default_rule and match != self.default_rule:
                new_match_list = ('rule:%s' % self.default_rule,)
            else:
                return False

        return self.check(new_match_list, target_dict, cred_dict)

    def _check_role(self, match, target_dict, cred_dict):
        """Check for a case-insensitive role match in the cred dict."""
        return match.lower() in [x.lower() for x in cred_dict['roles']]

    def _check_generic(self, match, target_dict, cred_dict):
        """Check an individual key:value match.

        Matches look like::

            tenant:%(tenant_id)s
            role:compute:admin
        """
        # TODO(termie): do dict inspection via dot syntax
        match = match % target_dict
        key, value = match.split(':', 1)
        if key in cred_dict:
            return value == cred_dict[key]
        return False
|
||||||
|
|
||||||
|
|
||||||
|
class HttpBrain(Brain):
    """A brain that consults an external URL for policy decisions.

    The target and credentials are POSTed as JSON blobs.
    """

    def _check_http(self, match, target_dict, cred_dict):
        """Check http: rules by calling out to a remote server.

        This example implementation accepts the rule only when the
        response body is exactly 'True'; a custom brain using response
        codes could easily be implemented.
        """
        url = match % target_dict
        payload = urllib.urlencode({'target': json.dumps(target_dict),
                                    'credentials': json.dumps(cred_dict)})
        response = urllib2.urlopen(url, payload)
        return response.read() == "True"
|
15
cinder/compat/__init__.py
Normal file
15
cinder/compat/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2012 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
188
cinder/compat/flagfile.py
Normal file
188
cinder/compat/flagfile.py
Normal file
@ -0,0 +1,188 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright (c) 2012 Red Hat, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import contextlib
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import tempfile
|
||||||
|
|
||||||
|
'''
|
||||||
|
Compatibility code for handling the deprecated --flagfile option.
|
||||||
|
|
||||||
|
gflags style configuration files are deprecated and will be removed in future.
|
||||||
|
|
||||||
|
The code in this module transles --flagfile options into --config-file and can
|
||||||
|
be removed when support for --flagfile is removed.
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
def _get_flagfile(argp):
    '''Parse the filename out of a --flagfile argument.

    *argp* is a two-item list holding the current and next arguments.
    When the filename lives in the next argument ("--flagfile foo"),
    the two are joined into the first item and the second is set to
    None.  Returns the filename, or None when this is not a flagfile
    argument.
    '''
    pos = argp[0].find('-flagfile')
    if pos < 0:
        # Not a flagfile argument at all.
        return None

    # Accept "-flagfile" or "--flagfile", only at the very start.
    if pos != 0 and (pos != 1 or argp[0][pos] != '-'):
        return None

    pos += len('-flagfile')
    if pos == len(argp[0]):  # Accept [-]-flagfile foo
        # Fold the filename from the next argument into the first item.
        argp[0] += '=' + argp[1]
        argp[1] = None

    if argp[0][pos] != '=':  # Accept [-]-flagfile=foo
        return None

    return argp[0][pos + 1:]
|
||||||
|
|
||||||
|
|
||||||
|
def _open_file_for_reading(path):
    '''Open *path* read-only; split out so test code may stub it.'''
    return open(path, 'r')
|
||||||
|
|
||||||
|
|
||||||
|
def _open_fd_for_writing(fd, _path):
    '''Wrap an OS-level fd for writing; split out so test code may stub it.'''
    return os.fdopen(fd, 'w')
|
||||||
|
|
||||||
|
|
||||||
|
def _read_lines(flagfile):
    '''Read a flag file, returning all lines with comments stripped.'''
    with _open_file_for_reading(flagfile) as f:
        raw_lines = f.readlines()
    stripped = []
    for line in raw_lines:
        # '#' and '//' both introduce comment lines; blanks are skipped.
        if line.isspace() or line.startswith('#') or line.startswith('//'):
            continue
        stripped.append(line.strip())
    return stripped
|
||||||
|
|
||||||
|
|
||||||
|
def _read_flagfile(arg, next_arg, tempdir=None):
    '''Convert a --flagfile argument to --config-file.

    If the supplied argument is a --flagfile argument, read the contents
    of the file and convert it to a .ini format config file.  Return a
    --config-file argument with the converted file.

    If the flag file contains more --flagfile arguments, multiple
    --config-file arguments will be returned.

    The returned argument list may also contain None values which should
    be filtered out later.

    :param arg: the current command-line argument.
    :param next_arg: the following argument (may hold the file name).
    :param tempdir: directory in which temporary files are created.
    '''
    argp = [arg, next_arg]
    flagfile = _get_flagfile(argp)
    if not flagfile:
        # Not a --flagfile argument; return the pair unchanged.
        return argp

    args = _read_lines(flagfile)

    if args and not args[0].startswith('--'):
        # This is a config file, not a flagfile, so return it.
        return ['--config-file=' + flagfile] + argp[1:]

    #
    # We're recursing here to convert any --flagfile arguments
    # read from this flagfile into --config-file arguments
    #
    # We don't actually include those --config-file arguments
    # in the generated config file; instead we include all those
    # --config-file args in the final command line
    #
    args = _iterate_args(args, _read_flagfile, tempdir=tempdir)

    config_file_args = []

    # Temporary .conf file that will hold the translated flags.
    (fd, tmpconf) = tempfile.mkstemp(suffix='.conf', dir=tempdir)

    with _open_fd_for_writing(fd, tmpconf) as f:
        f.write('[DEFAULT]\n')
        for arg in args:
            if arg.startswith('--config-file='):
                # Collected for the command line, not the config file.
                config_file_args.append(arg)
                continue
            if '=' in arg:
                # --key=value  ->  key=value
                f.write(arg[2:] + '\n')
            elif arg[2:].startswith('no'):
                # gflags boolean negation: --nokey  ->  key=false
                f.write(arg[4:] + '=false\n')
            else:
                # bare boolean flag: --key  ->  key=true
                f.write(arg[2:] + '=true\n')

    return ['--config-file=' + tmpconf] + argp[1:] + config_file_args
|
||||||
|
|
||||||
|
|
||||||
|
def _iterate_args(args, iterator, **kwargs):
    '''Run an iterator function over the supplied args list.

    The iterator receives the current and next argument and returns a
    list of args.  The first two returned items replace the
    current/next pair in place; any further items are appended to the
    result.  For the final argument, the iterator is passed None as
    the next argument.  None values are filtered from the result.
    '''
    # Sentinel so the last real argument sees next_arg=None.
    args.append(None)

    result = []
    for idx in range(len(args)):
        if args[idx] is None:  # last item, or consumed file name
            continue

        replacement = iterator(args[idx], args[idx + 1], **kwargs)
        args[idx], args[idx + 1] = replacement[:2]

        # Keep everything except the (possibly modified) next arg.
        result.extend(replacement[:1] + replacement[2:])

    return filter(None, result)
|
||||||
|
|
||||||
|
|
||||||
|
def handle_flagfiles(args, tempdir=None):
    '''Replace --flagfile arguments with --config-file arguments.

    Each --flagfile argument in the supplied list is replaced by a
    --config-file argument pointing at a temporary config file holding
    the flag file's contents translated to .ini format.  Temporary
    files are created in *tempdir*.
    '''
    # Operate on a copy; _iterate_args mutates the list it is given.
    return _iterate_args(list(args), _read_flagfile, tempdir=tempdir)
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
def handle_flagfiles_managed(args):
    '''A context manager for handle_flagfiles() which removes temp files.

    For use with the 'with' statement, i.e.::

        with handle_flagfiles_managed(args) as args:
            # Do stuff
        # Any temporary files have been removed
    '''
    # NOTE(johannes): Would be nice to use utils.tempdir(), but it
    # causes an import loop
    scratch = tempfile.mkdtemp(prefix='cinder-conf-')
    try:
        yield handle_flagfiles(args, tempdir=scratch)
    finally:
        shutil.rmtree(scratch)
|
0
cinder/compute/__init__.py
Normal file
0
cinder/compute/__init__.py
Normal file
44
cinder/compute/aggregate_states.py
Normal file
44
cinder/compute/aggregate_states.py
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Possible states for host aggregates.
|
||||||
|
|
||||||
|
An aggregate may be 'created', in which case the admin has triggered its
|
||||||
|
creation, but the underlying hypervisor pool has not actually being set up
|
||||||
|
yet. An aggregate may be 'changing', meaning that the underlying hypervisor
|
||||||
|
pool is being setup. An aggregate may be 'active', in which case the underlying
|
||||||
|
hypervisor pool is up and running. An aggregate may be 'dismissed' when it has
|
||||||
|
no hosts and it has been deleted. An aggregate may be in 'error' in all other
|
||||||
|
cases.
|
||||||
|
A 'created' aggregate becomes 'changing' during the first request of
|
||||||
|
adding a host. During a 'changing' status no other requests will be accepted;
|
||||||
|
this is to allow the hypervisor layer to instantiate the underlying pool
|
||||||
|
without any potential race condition that may incur in master/slave-based
|
||||||
|
configurations. The aggregate goes into the 'active' state when the underlying
|
||||||
|
pool has been correctly instantiated.
|
||||||
|
All other operations (e.g. add/remove hosts) that succeed will keep the
|
||||||
|
aggregate in the 'active' state. If a number of continuous requests fail,
|
||||||
|
an 'active' aggregate goes into an 'error' state. To recover from such a state,
|
||||||
|
admin intervention is required. Currently an error state is irreversible,
|
||||||
|
that is, in order to recover from it an aggregate must be deleted.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Admin triggered creation; the hypervisor pool is not set up yet.
CREATED = 'created'
# The underlying hypervisor pool is being set up; no other requests
# are accepted while in this state.
CHANGING = 'changing'
# The underlying hypervisor pool is up and running.
ACTIVE = 'active'
# Unrecoverable failure; the aggregate must be deleted to recover.
ERROR = 'error'
# The aggregate had no hosts and has been deleted.
DISMISSED = 'dismissed'
|
138
cinder/context.py
Normal file
138
cinder/context.py
Normal file
@ -0,0 +1,138 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""RequestContext: context for requests that persist through all of cinder."""
|
||||||
|
|
||||||
|
import copy
|
||||||
|
|
||||||
|
from cinder import log as logging
|
||||||
|
from cinder.openstack.common import local
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_request_id():
    """Generate a unique 'req-<uuid>' identifier for a request."""
    return 'req-%s' % utils.gen_uuid()
|
||||||
|
|
||||||
|
|
||||||
|
class RequestContext(object):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, **kwargs):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            # Unknown kwargs are tolerated (older RPC senders) but logged.
            LOG.warn(_('Arguments dropped when creating context: %s') %
                     str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        # Derive admin status from the roles when not given explicitly;
        # conversely, an explicit admin flag forces the 'admin' role in.
        if self.is_admin is None:
            self.is_admin = 'admin' in [x.lower() for x in self.roles]
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = utils.utcnow()
        # Timestamps may arrive serialized as strings (e.g. via RPC).
        if isinstance(timestamp, basestring):
            timestamp = utils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.quota_class = quota_class
        # Publish this context to the greenthread-local store unless the
        # caller asked to preserve an existing one.
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        # Only the three documented values are accepted.
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    # Property wrapping so assignments to read_deleted are validated.
    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def update_store(self):
        """Make this context the current one for the greenthread."""
        local.store.context = self

    def to_dict(self):
        """Serialize to a plain dict (inverse of from_dict())."""
        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': utils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class}

    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from the dict produced by to_dict()."""
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # Shallow copy: note the roles list is shared with the original.
        context = copy.copy(self)
        context.is_admin = True

        if 'admin' not in context.roles:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context
|
||||||
|
|
||||||
|
|
||||||
|
def get_admin_context(read_deleted="no"):
|
||||||
|
return RequestContext(user_id=None,
|
||||||
|
project_id=None,
|
||||||
|
is_admin=True,
|
||||||
|
read_deleted=read_deleted,
|
||||||
|
overwrite=False)
|
23
cinder/db/__init__.py
Normal file
23
cinder/db/__init__.py
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""
|
||||||
|
DB abstraction for Cinder
|
||||||
|
"""
|
||||||
|
|
||||||
|
from cinder.db.api import *
|
1335
cinder/db/api.py
Normal file
1335
cinder/db/api.py
Normal file
File diff suppressed because it is too large
Load Diff
40
cinder/db/base.py
Normal file
40
cinder/db/base.py
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Base class for classes that need modular database access."""
|
||||||
|
|
||||||
|
from cinder import flags
|
||||||
|
from cinder.openstack.common import cfg
|
||||||
|
from cinder.openstack.common import importutils
|
||||||
|
|
||||||
|
|
||||||
|
db_driver_opt = cfg.StrOpt('db_driver',
|
||||||
|
default='cinder.db',
|
||||||
|
help='driver to use for database access')
|
||||||
|
|
||||||
|
FLAGS = flags.FLAGS
|
||||||
|
FLAGS.register_opt(db_driver_opt)
|
||||||
|
|
||||||
|
|
||||||
|
class Base(object):
|
||||||
|
"""DB driver is injected in the init method."""
|
||||||
|
|
||||||
|
def __init__(self, db_driver=None):
|
||||||
|
if not db_driver:
|
||||||
|
db_driver = FLAGS.db_driver
|
||||||
|
self.db = importutils.import_module(db_driver) # pylint: disable=C0103
|
35
cinder/db/migration.py
Normal file
35
cinder/db/migration.py
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Database setup and migration commands."""
|
||||||
|
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
IMPL = utils.LazyPluggable('db_backend',
|
||||||
|
sqlalchemy='cinder.db.sqlalchemy.migration')
|
||||||
|
|
||||||
|
|
||||||
|
def db_sync(version=None):
|
||||||
|
"""Migrate the database to `version` or the most recent version."""
|
||||||
|
return IMPL.db_sync(version=version)
|
||||||
|
|
||||||
|
|
||||||
|
def db_version():
|
||||||
|
"""Display the current database version."""
|
||||||
|
return IMPL.db_version()
|
17
cinder/db/sqlalchemy/__init__.py
Normal file
17
cinder/db/sqlalchemy/__init__.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
1499
cinder/db/sqlalchemy/api.py
Normal file
1499
cinder/db/sqlalchemy/api.py
Normal file
File diff suppressed because it is too large
Load Diff
4
cinder/db/sqlalchemy/migrate_repo/README
Normal file
4
cinder/db/sqlalchemy/migrate_repo/README
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
This is a database migration repository.
|
||||||
|
|
||||||
|
More information at
|
||||||
|
http://code.google.com/p/sqlalchemy-migrate/
|
0
cinder/db/sqlalchemy/migrate_repo/__init__.py
Normal file
0
cinder/db/sqlalchemy/migrate_repo/__init__.py
Normal file
4
cinder/db/sqlalchemy/migrate_repo/manage.py
Normal file
4
cinder/db/sqlalchemy/migrate_repo/manage.py
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
from migrate.versioning.shell import main
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main(debug='False', repository='.')
|
20
cinder/db/sqlalchemy/migrate_repo/migrate.cfg
Normal file
20
cinder/db/sqlalchemy/migrate_repo/migrate.cfg
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
[db_settings]
|
||||||
|
# Used to identify which repository this database is versioned under.
|
||||||
|
# You can use the name of your project.
|
||||||
|
repository_id=cinder
|
||||||
|
|
||||||
|
# The name of the database table used to track the schema version.
|
||||||
|
# This name shouldn't already be used by your project.
|
||||||
|
# If this is changed once a database is under version control, you'll need to
|
||||||
|
# change the table name in each database too.
|
||||||
|
version_table=migrate_version
|
||||||
|
|
||||||
|
# When committing a change script, Migrate will attempt to generate the
|
||||||
|
# sql for all supported databases; normally, if one of them fails - probably
|
||||||
|
# because you don't have that database installed - it is ignored and the
|
||||||
|
# commit continues, perhaps ending successfully.
|
||||||
|
# Databases in this list MUST compile successfully during a commit, or the
|
||||||
|
# entire commit will fail. List the databases your application will actually
|
||||||
|
# be using to ensure your updates to that database work properly.
|
||||||
|
# This must be a list; example: ['postgres','sqlite']
|
||||||
|
required_dbs=[]
|
627
cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py
Normal file
627
cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py
Normal file
@ -0,0 +1,627 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
## Table code mostly autogenerated by genmodel.py
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
|
||||||
|
from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String
|
||||||
|
from sqlalchemy import Table, Text
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
|
||||||
|
# Upgrade operations go here. Don't create your own engine;
|
||||||
|
# bind migrate_engine to your metadata
|
||||||
|
meta = MetaData()
|
||||||
|
meta.bind = migrate_engine
|
||||||
|
|
||||||
|
#
|
||||||
|
# New Tables
|
||||||
|
#
|
||||||
|
auth_tokens = Table('auth_tokens', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('token_hash',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('user_id', Integer()),
|
||||||
|
Column('server_manageent_url',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('storage_url',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('cdn_management_url',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
export_devices = Table('export_devices', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('shelf_id', Integer()),
|
||||||
|
Column('blade_id', Integer()),
|
||||||
|
Column('volume_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('volumes.id'),
|
||||||
|
nullable=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
fixed_ips = Table('fixed_ips', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('address',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('network_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('networks.id'),
|
||||||
|
nullable=True),
|
||||||
|
Column('instance_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('instances.id'),
|
||||||
|
nullable=True),
|
||||||
|
Column('allocated', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('leased', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('reserved', Boolean(create_constraint=True, name=None)),
|
||||||
|
)
|
||||||
|
|
||||||
|
floating_ips = Table('floating_ips', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('address',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('fixed_ip_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('fixed_ips.id'),
|
||||||
|
nullable=True),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('host',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
instances = Table('instances', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('internal_id', Integer()),
|
||||||
|
Column('admin_pass',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('image_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('kernel_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('ramdisk_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('server_name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('launch_index', Integer()),
|
||||||
|
Column('key_name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('key_data',
|
||||||
|
Text(length=None, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('state', Integer()),
|
||||||
|
Column('state_description',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('memory_mb', Integer()),
|
||||||
|
Column('vcpus', Integer()),
|
||||||
|
Column('local_gb', Integer()),
|
||||||
|
Column('hostname',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('host',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('instance_type',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('user_data',
|
||||||
|
Text(length=None, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('reservation_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('mac_address',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('scheduled_at', DateTime(timezone=False)),
|
||||||
|
Column('launched_at', DateTime(timezone=False)),
|
||||||
|
Column('terminated_at', DateTime(timezone=False)),
|
||||||
|
Column('display_name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('display_description',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
key_pairs = Table('key_pairs', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('fingerprint',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('public_key',
|
||||||
|
Text(length=None, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
networks = Table('networks', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('injected', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('cidr',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('netmask',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('bridge',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('gateway',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('broadcast',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('dns',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('vlan', Integer()),
|
||||||
|
Column('vpn_public_address',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('vpn_public_port', Integer()),
|
||||||
|
Column('vpn_private_address',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('dhcp_start',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('host',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
projects = Table('projects', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('description',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('project_manager',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
ForeignKey('users.id')),
|
||||||
|
)
|
||||||
|
|
||||||
|
quotas = Table('quotas', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('instances', Integer()),
|
||||||
|
Column('cores', Integer()),
|
||||||
|
Column('volumes', Integer()),
|
||||||
|
Column('gigabytes', Integer()),
|
||||||
|
Column('floating_ips', Integer()),
|
||||||
|
)
|
||||||
|
|
||||||
|
security_groups = Table('security_groups', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('description',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
|
||||||
|
security_group_inst_assoc = Table('security_group_instance_association',
|
||||||
|
meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('security_group_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('security_groups.id')),
|
||||||
|
Column('instance_id', Integer(), ForeignKey('instances.id')),
|
||||||
|
)
|
||||||
|
|
||||||
|
security_group_rules = Table('security_group_rules', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('parent_group_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('security_groups.id')),
|
||||||
|
Column('protocol',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('from_port', Integer()),
|
||||||
|
Column('to_port', Integer()),
|
||||||
|
Column('cidr',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('group_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('security_groups.id')),
|
||||||
|
)
|
||||||
|
|
||||||
|
services = Table('services', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('host',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('binary',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('topic',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('report_count', Integer(), nullable=False),
|
||||||
|
Column('disabled', Boolean(create_constraint=True, name=None)),
|
||||||
|
)
|
||||||
|
|
||||||
|
users = Table('users', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('access_key',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('secret_key',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('is_admin', Boolean(create_constraint=True, name=None)),
|
||||||
|
)
|
||||||
|
|
||||||
|
user_project_association = Table('user_project_association', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
ForeignKey('users.id'),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
ForeignKey('projects.id'),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
)
|
||||||
|
|
||||||
|
user_project_role_association = Table('user_project_role_association',
|
||||||
|
meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('role',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
ForeignKeyConstraint(['user_id',
|
||||||
|
'project_id'],
|
||||||
|
['user_project_association.user_id',
|
||||||
|
'user_project_association.project_id']),
|
||||||
|
)
|
||||||
|
|
||||||
|
user_role_association = Table('user_role_association', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
ForeignKey('users.id'),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
Column('role',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False),
|
||||||
|
primary_key=True,
|
||||||
|
nullable=False),
|
||||||
|
)
|
||||||
|
|
||||||
|
volumes = Table('volumes', meta,
|
||||||
|
Column('created_at', DateTime(timezone=False)),
|
||||||
|
Column('updated_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted_at', DateTime(timezone=False)),
|
||||||
|
Column('deleted', Boolean(create_constraint=True, name=None)),
|
||||||
|
Column('id', Integer(), primary_key=True, nullable=False),
|
||||||
|
Column('ec2_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('user_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('project_id',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('host',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('size', Integer()),
|
||||||
|
Column('availability_zone',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('instance_id',
|
||||||
|
Integer(),
|
||||||
|
ForeignKey('instances.id'),
|
||||||
|
nullable=True),
|
||||||
|
Column('mountpoint',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('attach_time',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('status',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('attach_status',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('scheduled_at', DateTime(timezone=False)),
|
||||||
|
Column('launched_at', DateTime(timezone=False)),
|
||||||
|
Column('terminated_at', DateTime(timezone=False)),
|
||||||
|
Column('display_name',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
Column('display_description',
|
||||||
|
String(length=255, convert_unicode=False,
|
||||||
|
assert_unicode=None,
|
||||||
|
unicode_error=None, _warn_on_bytestring=False)),
|
||||||
|
)
|
||||||
|
tables = [auth_tokens,
|
||||||
|
instances, key_pairs, networks, fixed_ips, floating_ips,
|
||||||
|
quotas, security_groups, security_group_inst_assoc,
|
||||||
|
security_group_rules, services, users, projects,
|
||||||
|
user_project_association, user_project_role_association,
|
||||||
|
user_role_association, volumes, export_devices]
|
||||||
|
|
||||||
|
for table in tables:
|
||||||
|
try:
|
||||||
|
table.create()
|
||||||
|
except Exception:
|
||||||
|
LOG.info(repr(table))
|
||||||
|
LOG.exception('Exception while creating table')
|
||||||
|
meta.drop_all(tables=tables)
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop every table created by the Austin baseline migration."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect all tables up front so foreign-key references resolve
    # before any of them is dropped.
    reflect_order = ('auth_tokens', 'export_devices', 'fixed_ips',
                     'floating_ips', 'instances', 'key_pairs', 'networks',
                     'projects', 'quotas', 'security_groups',
                     'security_group_instance_association',
                     'security_group_rules', 'services', 'users',
                     'user_project_association',
                     'user_project_role_association',
                     'user_role_association', 'volumes')
    reflected = dict((name, Table(name, meta, autoload=True))
                     for name in reflect_order)

    # Table order matters, don't change: dependent/association tables
    # must be dropped before the tables their foreign keys point at.
    drop_order = ('auth_tokens', 'export_devices', 'floating_ips',
                  'fixed_ips', 'key_pairs', 'networks', 'quotas',
                  'security_group_instance_association',
                  'security_group_rules', 'security_groups', 'services',
                  'user_project_role_association',
                  'user_project_association', 'user_role_association',
                  'projects', 'users', 'volumes', 'instances')
    for name in drop_order:
        reflected[name].drop()
|
236
cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py
Normal file
236
cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py
Normal file
@ -0,0 +1,236 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
|
||||||
|
from sqlalchemy import Integer, MetaData, String, Table, Text
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _string_255(name):
    """Return a nullable VARCHAR(255) Column with the legacy unicode
    flags this migration repeats for every string attribute."""
    return Column(name,
                  String(length=255, convert_unicode=False,
                         assert_unicode=None, unicode_error=None,
                         _warn_on_bytestring=False))


def _base_columns():
    """Return the soft-delete bookkeeping columns plus the integer
    primary key shared by every table this migration creates."""
    return [Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False)]


def upgrade(migrate_engine):
    """Bexar upgrade: create the certificates, consoles, console_pools,
    instance_actions and iscsi_targets tables, widen
    auth_tokens.user_id to VARCHAR(255), and add several new columns to
    instances, networks and services.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata.
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect existing tables so the ForeignKey targets below and the
    # ALTER/create_column calls resolve against the live schema.
    # (volumes is reflected purely for its FK side effect.)
    Table('volumes', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    services = Table('services', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    auth_tokens = Table('auth_tokens', meta, autoload=True)

    #
    # New Tables
    #
    certificates = Table('certificates', meta,
                         *(_base_columns() + [
                             _string_255('user_id'),
                             _string_255('project_id'),
                             _string_255('file_name')]))

    consoles = Table('consoles', meta,
                     *(_base_columns() + [
                         _string_255('instance_name'),
                         Column('instance_id', Integer()),
                         _string_255('password'),
                         Column('port', Integer(), nullable=True),
                         Column('pool_id', Integer(),
                                ForeignKey('console_pools.id'))]))

    console_pools = Table('console_pools', meta,
                          *(_base_columns() + [
                              _string_255('address'),
                              _string_255('username'),
                              _string_255('password'),
                              _string_255('console_type'),
                              _string_255('public_hostname'),
                              _string_255('host'),
                              _string_255('compute_host')]))

    instance_actions = Table('instance_actions', meta,
                             *(_base_columns() + [
                                 Column('instance_id', Integer(),
                                        ForeignKey('instances.id')),
                                 _string_255('action'),
                                 Column('error',
                                        Text(length=None,
                                             convert_unicode=False,
                                             assert_unicode=None,
                                             unicode_error=None,
                                             _warn_on_bytestring=False))]))

    iscsi_targets = Table('iscsi_targets', meta,
                          *(_base_columns() + [
                              Column('target_num', Integer()),
                              _string_255('host'),
                              Column('volume_id', Integer(),
                                     ForeignKey('volumes.id'),
                                     nullable=True)]))

    tables = [certificates, console_pools, consoles, instance_actions,
              iscsi_targets]
    for table in tables:
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception('Exception while creating table')
            # Undo any tables that did get created before re-raising.
            meta.drop_all(tables=tables)
            raise

    # auth_tokens.user_id used to be an INTEGER; widen it to VARCHAR(255).
    auth_tokens.c.user_id.alter(type=String(length=255,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False))

    #
    # New Columns
    #
    instances.create_column(_string_255('availability_zone'))
    instances.create_column(Column('locked',
                                   Boolean(create_constraint=True,
                                           name=None)))
    networks.create_column(_string_255('cidr_v6'))
    networks.create_column(_string_255('ra_server'))
    services.create_column(_string_255('availability_zone'))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Undo the Bexar upgrade: drop its tables, revert the
    auth_tokens.user_id type, and remove the added columns."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect the pre-existing tables first so foreign keys resolve
    # (volumes is loaded only for that side effect).
    volumes = Table('volumes', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    services = Table('services', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    auth_tokens = Table('auth_tokens', meta, autoload=True)

    new_tables = [Table(name, meta, autoload=True)
                  for name in ('certificates', 'consoles', 'console_pools',
                               'instance_actions', 'iscsi_targets')]

    # table order matters, don't change
    for table in new_tables:
        table.drop()

    # Revert user_id back to its pre-Bexar INTEGER type.
    auth_tokens.c.user_id.alter(type=Integer())

    for table, column_name in ((instances, 'availability_zone'),
                               (instances, 'locked'),
                               (networks, 'cidr_v6'),
                               (networks, 'ra_server'),
                               (services, 'availability_zone')):
        table.drop_column(column_name)
|
@ -0,0 +1,20 @@
|
|||||||
|
BEGIN;

-- Remove the tables introduced by the Bexar (002) migration.
DROP TABLE certificates;
DROP TABLE consoles;
DROP TABLE console_pools;
DROP TABLE instance_actions;
DROP TABLE iscsi_targets;

-- Revert auth_tokens.user_id from VARCHAR back to INTEGER by copying
-- through a temporary column instead of changing the type in place.
ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER;
UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER);
ALTER TABLE auth_tokens DROP COLUMN user_id;
ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id;

-- Drop the columns the Bexar upgrade added.
ALTER TABLE instances DROP COLUMN availability_zone;
ALTER TABLE instances DROP COLUMN locked;
ALTER TABLE networks DROP COLUMN cidr_v6;
ALTER TABLE networks DROP COLUMN ra_server;
ALTER TABLE services DROP COLUMN availability_zone;

COMMIT;
|
@ -0,0 +1,388 @@
|
|||||||
|
BEGIN TRANSACTION;

-- Remove the tables introduced by the Bexar (002) migration.
DROP TABLE certificates;

DROP TABLE console_pools;

DROP TABLE consoles;

DROP TABLE instance_actions;

DROP TABLE iscsi_targets;

-- SQLite cannot change a column's type or drop a column, so each
-- affected table is rebuilt: copy rows into a TEMPORARY backup table,
-- drop and recreate the table with its pre-Bexar definition, then copy
-- the surviving columns back.

-- auth_tokens: revert user_id from VARCHAR(255) to INTEGER.
CREATE TEMPORARY TABLE auth_tokens_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    token_hash VARCHAR(255) NOT NULL,
    user_id VARCHAR(255),
    server_manageent_url VARCHAR(255), -- (sic) typo preserved; it matches the live column name
    storage_url VARCHAR(255),
    cdn_management_url VARCHAR(255),
    PRIMARY KEY (token_hash),
    CHECK (deleted IN (0, 1))
);

INSERT INTO auth_tokens_backup
SELECT created_at, updated_at, deleted_at, deleted, token_hash,
       user_id, server_manageent_url, storage_url, cdn_management_url
FROM auth_tokens;

DROP TABLE auth_tokens;

CREATE TABLE auth_tokens (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    token_hash VARCHAR(255) NOT NULL,
    user_id INTEGER, -- reverted type
    server_manageent_url VARCHAR(255),
    storage_url VARCHAR(255),
    cdn_management_url VARCHAR(255),
    PRIMARY KEY (token_hash),
    CHECK (deleted IN (0, 1))
);

INSERT INTO auth_tokens
SELECT created_at, updated_at, deleted_at, deleted, token_hash,
       user_id, server_manageent_url, storage_url, cdn_management_url
FROM auth_tokens_backup;

DROP TABLE auth_tokens_backup;

-- instances: drop the availability_zone and locked columns.
CREATE TEMPORARY TABLE instances_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    internal_id INTEGER,
    admin_pass VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    image_id VARCHAR(255),
    kernel_id VARCHAR(255),
    ramdisk_id VARCHAR(255),
    server_name VARCHAR(255),
    launch_index INTEGER,
    key_name VARCHAR(255),
    key_data TEXT,
    state INTEGER,
    state_description VARCHAR(255),
    memory_mb INTEGER,
    vcpus INTEGER,
    local_gb INTEGER,
    hostname VARCHAR(255),
    host VARCHAR(255),
    instance_type VARCHAR(255),
    user_data TEXT,
    reservation_id VARCHAR(255),
    mac_address VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    availability_zone VARCHAR(255),
    locked BOOLEAN,
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (locked IN (0, 1))
);

INSERT INTO instances_backup
SELECT created_at, updated_at, deleted_at, deleted, id, internal_id,
       admin_pass, user_id, project_id, image_id, kernel_id, ramdisk_id,
       server_name, launch_index, key_name, key_data, state,
       state_description, memory_mb, vcpus, local_gb, hostname, host,
       instance_type, user_data, reservation_id, mac_address,
       scheduled_at, launched_at, terminated_at, display_name,
       display_description, availability_zone, locked
FROM instances;

DROP TABLE instances;

CREATE TABLE instances (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    internal_id INTEGER,
    admin_pass VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    image_id VARCHAR(255),
    kernel_id VARCHAR(255),
    ramdisk_id VARCHAR(255),
    server_name VARCHAR(255),
    launch_index INTEGER,
    key_name VARCHAR(255),
    key_data TEXT,
    state INTEGER,
    state_description VARCHAR(255),
    memory_mb INTEGER,
    vcpus INTEGER,
    local_gb INTEGER,
    hostname VARCHAR(255),
    host VARCHAR(255),
    instance_type VARCHAR(255),
    user_data TEXT,
    reservation_id VARCHAR(255),
    mac_address VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1))
);

INSERT INTO instances
SELECT created_at, updated_at, deleted_at, deleted, id, internal_id,
       admin_pass, user_id, project_id, image_id, kernel_id, ramdisk_id,
       server_name, launch_index, key_name, key_data, state,
       state_description, memory_mb, vcpus, local_gb, hostname, host,
       instance_type, user_data, reservation_id, mac_address,
       scheduled_at, launched_at, terminated_at, display_name,
       display_description
FROM instances_backup;

DROP TABLE instances_backup;

-- networks: drop the cidr_v6 and ra_server columns.
CREATE TEMPORARY TABLE networks_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    cidr_v6 VARCHAR(255),
    ra_server VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (injected IN (0, 1))
);

INSERT INTO networks_backup
SELECT created_at, updated_at, deleted_at, deleted, id, injected, cidr,
       netmask, bridge, gateway, broadcast, dns, vlan,
       vpn_public_address, vpn_public_port, vpn_private_address,
       dhcp_start, project_id, host, cidr_v6, ra_server
FROM networks;

DROP TABLE networks;

CREATE TABLE networks (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (injected IN (0, 1))
);

INSERT INTO networks
SELECT created_at, updated_at, deleted_at, deleted, id, injected, cidr,
       netmask, bridge, gateway, broadcast, dns, vlan,
       vpn_public_address, vpn_public_port, vpn_private_address,
       dhcp_start, project_id, host
FROM networks_backup;

DROP TABLE networks_backup;

-- services: drop the availability_zone column.
CREATE TEMPORARY TABLE services_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    host VARCHAR(255),
    binary VARCHAR(255),
    topic VARCHAR(255),
    report_count INTEGER NOT NULL,
    disabled BOOLEAN,
    availability_zone VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (disabled IN (0, 1))
);

INSERT INTO services_backup
SELECT created_at, updated_at, deleted_at, deleted, id, host, binary,
       topic, report_count, disabled, availability_zone
FROM services;

DROP TABLE services;

CREATE TABLE services (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    host VARCHAR(255),
    binary VARCHAR(255),
    topic VARCHAR(255),
    report_count INTEGER NOT NULL,
    disabled BOOLEAN,
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (disabled IN (0, 1))
);

INSERT INTO services
SELECT created_at, updated_at, deleted_at, deleted, id, host, binary,
       topic, report_count, disabled
FROM services_backup;

DROP TABLE services_backup;

COMMIT;
|
@ -0,0 +1,42 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add the human-readable ``label`` column to the networks table."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata.
    meta = MetaData()
    meta.bind = migrate_engine

    networks = Table('networks', meta, autoload=True)

    label_column = Column('label',
                          String(length=255, convert_unicode=False,
                                 assert_unicode=None, unicode_error=None,
                                 _warn_on_bytestring=False))
    networks.create_column(label_column)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Remove the ``label`` column added by upgrade()."""
    meta = MetaData()
    meta.bind = migrate_engine

    Table('networks', meta, autoload=True).drop_column('label')
|
@ -0,0 +1,111 @@
|
|||||||
|
BEGIN TRANSACTION;

-- SQLite cannot drop a column, so rebuild networks without ``label``:
-- copy rows into a TEMPORARY backup table, recreate the table with the
-- pre-003 definition, then copy every column except ``label`` back.
CREATE TEMPORARY TABLE networks_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    cidr_v6 VARCHAR(255),
    ra_server VARCHAR(255),
    label VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (injected IN (0, 1))
);

INSERT INTO networks_backup
SELECT created_at, updated_at, deleted_at, deleted, id, injected, cidr,
       netmask, bridge, gateway, broadcast, dns, vlan,
       vpn_public_address, vpn_public_port, vpn_private_address,
       dhcp_start, project_id, host, cidr_v6, ra_server, label
FROM networks;

DROP TABLE networks;

-- Recreated without the ``label`` column (cidr_v6/ra_server stay: they
-- belong to migration 002, not this one).
CREATE TABLE networks (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    cidr_v6 VARCHAR(255),
    ra_server VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (injected IN (0, 1))
);

INSERT INTO networks
SELECT created_at, updated_at, deleted_at, deleted, id, injected, cidr,
       netmask, bridge, gateway, broadcast, dns, vlan,
       vpn_public_address, vpn_public_port, vpn_private_address,
       dhcp_start, project_id, host, cidr_v6, ra_server
FROM networks_backup;

DROP TABLE networks_backup;

COMMIT;
|
@ -0,0 +1,66 @@
|
|||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the ``zones`` table.

    :param migrate_engine: engine supplied by sqlalchemy-migrate; bound to
        the metadata rather than creating a new engine.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    #
    # New Tables
    #
    zones = Table('zones', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('api_url',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)),
            Column('username',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)),
            Column('password',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)),
            )

    for table in (zones, ):
        try:
            table.create()
        except Exception:
            # Log and re-raise: the original swallowed the error, letting a
            # failed migration appear successful with no table created.
            # This matches the error handling of the sibling migrations.
            LOG.info(repr(table))
            LOG.exception('Exception while creating table')
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the ``zones`` table created by :func:`upgrade`."""
    meta = MetaData()
    meta.bind = migrate_engine

    zones = Table('zones', meta, autoload=True)
    zones.drop()
|
@ -0,0 +1,81 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 Justin Santa Barbara
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add the ``instance_metadata`` table and a ``metadata_items`` quota.

    Creates the key/value metadata table (foreign-keyed to ``instances``)
    and adds an integer ``metadata_items`` column to the existing
    ``quotas`` table.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    quotas = Table('quotas', meta, autoload=True)

    instance_metadata_table = Table('instance_metadata', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('instance_id',
                   Integer(),
                   ForeignKey('instances.id'),
                   nullable=False),
            Column('key',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)),
            Column('value',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)))

    # Creation failure is logged and re-raised so the migration aborts.
    for table in (instance_metadata_table, ):
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception('Exception while creating table')
            raise

    quota_metadata_items = Column('metadata_items', Integer())
    quotas.create_column(quota_metadata_items)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Remove the ``instance_metadata`` table and the quota column."""
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    quotas = Table('quotas', meta, autoload=True)

    instance_metadata_table = Table('instance_metadata', meta, autoload=True)
    instance_metadata_table.drop()

    quotas.drop_column('metadata_items')
|
@ -0,0 +1,54 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 Justin Santa Barbara.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add ``provider_location`` and ``provider_auth`` columns to volumes."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)

    # Add columns to existing tables; both share the same VARCHAR(256)
    # definition, so build them in a loop.
    for column_name in ('provider_location', 'provider_auth'):
        volumes.create_column(Column(column_name,
                                     String(length=256,
                                            convert_unicode=False,
                                            assert_unicode=None,
                                            unicode_error=None,
                                            _warn_on_bytestring=False)))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the provider columns added by :func:`upgrade`."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)

    for column_name in ('provider_location', 'provider_auth'):
        volumes.drop_column(column_name)
|
@ -0,0 +1,113 @@
|
|||||||
|
-- SQLite downgrade: remove the provider_location / provider_auth columns
-- from ``volumes``.  SQLite has no DROP COLUMN, so the rows are copied to
-- a temporary backup table, ``volumes`` is recreated without the two
-- columns, and the rows are copied back.
BEGIN TRANSACTION;

-- Backup still carries the provider columns so SELECT of every current
-- column succeeds.
CREATE TEMPORARY TABLE volumes_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_id INTEGER,
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(256),
    provider_auth VARCHAR(256),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id)
);

INSERT INTO volumes_backup
SELECT created_at,
       updated_at,
       deleted_at,
       deleted,
       id,
       ec2_id,
       user_id,
       project_id,
       host,
       size,
       availability_zone,
       instance_id,
       mountpoint,
       attach_time,
       status,
       attach_status,
       scheduled_at,
       launched_at,
       terminated_at,
       display_name,
       display_description,
       provider_location,
       provider_auth
FROM volumes;

DROP TABLE volumes;

-- Recreated without provider_location / provider_auth.
CREATE TABLE volumes (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_id INTEGER,
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id)
);

-- Copy everything back except the two dropped columns.
INSERT INTO volumes
SELECT created_at,
       updated_at,
       deleted_at,
       deleted,
       id,
       ec2_id,
       user_id,
       project_id,
       host,
       size,
       availability_zone,
       instance_id,
       mountpoint,
       attach_time,
       status,
       attach_status,
       scheduled_at,
       launched_at,
       terminated_at,
       display_name,
       display_description
FROM volumes_backup;

DROP TABLE volumes_backup;

COMMIT;
|
@ -0,0 +1,70 @@
|
|||||||
|
# Copyright 2011 OpenStack LLC
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add IPv6 address/netmask/gateway columns to ``fixed_ips``."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    fixed_ips = Table('fixed_ips', meta, autoload=True)

    #
    # New Columns
    #
    # netmaskV6 is only VARCHAR(3) (it holds a short prefix value);
    # the other two hold full addresses.
    v6_specs = (('addressV6', 255),
                ('netmaskV6', 3),
                ('gatewayV6', 255))

    # Add columns to existing tables
    for column_name, column_length in v6_specs:
        fixed_ips.create_column(Column(column_name,
                                       String(length=column_length,
                                              convert_unicode=False,
                                              assert_unicode=None,
                                              unicode_error=None,
                                              _warn_on_bytestring=False)))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the IPv6 columns added by :func:`upgrade`."""
    meta = MetaData()
    meta.bind = migrate_engine

    fixed_ips = Table('fixed_ips', meta, autoload=True)

    for column_name in ('addressV6', 'netmaskV6', 'gatewayV6'):
        fixed_ips.drop_column(column_name)
|
@ -0,0 +1,79 @@
|
|||||||
|
-- SQLite downgrade: remove the addressV6 / netmaskV6 / gatewayV6 columns
-- from ``fixed_ips``.  SQLite has no DROP COLUMN, so the rows are copied
-- to a temporary backup table, the table is recreated without the three
-- columns, and the rows are copied back.
BEGIN TRANSACTION;

CREATE TEMPORARY TABLE fixed_ips_backup (
    id INTEGER NOT NULL,
    address VARCHAR(255),
    network_id INTEGER,
    instance_id INTEGER,
    allocated BOOLEAN DEFAULT FALSE,
    leased BOOLEAN DEFAULT FALSE,
    reserved BOOLEAN DEFAULT FALSE,
    created_at DATETIME NOT NULL,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN NOT NULL,
    addressV6 VARCHAR(255),
    netmaskV6 VARCHAR(3),
    gatewayV6 VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (leased IN (0, 1)),
    CHECK (allocated IN (0, 1)),
    CHECK (deleted IN (0, 1)),
    CHECK (reserved IN (0, 1))
);

INSERT INTO fixed_ips_backup
SELECT id,
       address,
       network_id,
       instance_id,
       allocated,
       leased,
       reserved,
       created_at,
       updated_at,
       deleted_at,
       deleted,
       addressV6,
       netmaskV6,
       gatewayV6
FROM fixed_ips;

DROP TABLE fixed_ips;

-- Recreated without the three V6 columns.
CREATE TABLE fixed_ips (
    id INTEGER NOT NULL,
    address VARCHAR(255),
    network_id INTEGER,
    instance_id INTEGER,
    allocated BOOLEAN DEFAULT FALSE,
    leased BOOLEAN DEFAULT FALSE,
    reserved BOOLEAN DEFAULT FALSE,
    created_at DATETIME NOT NULL,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN NOT NULL,
    PRIMARY KEY (id),
    CHECK (leased IN (0, 1)),
    CHECK (allocated IN (0, 1)),
    CHECK (deleted IN (0, 1)),
    CHECK (reserved IN (0, 1))
);

-- Copy everything back except the dropped V6 columns.
INSERT INTO fixed_ips
SELECT id,
       address,
       network_id,
       instance_id,
       allocated,
       leased,
       reserved,
       created_at,
       updated_at,
       deleted_at,
       deleted
FROM fixed_ips_backup;

DROP TABLE fixed_ips_backup;

COMMIT;
|
@ -0,0 +1,85 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 Ken Pepple
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create and seed the ``instance_types`` table.

    The table replaces previously static instance types; the five m1.*
    flavors are inserted as seed data.
    """
    # Upgrade operations go here
    # Don't create your own engine; bind migrate_engine
    # to your metadata
    meta = MetaData()
    meta.bind = migrate_engine
    #
    # New Tables
    #
    instance_types = Table('instance_types', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('name',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False),
                   unique=True),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('memory_mb', Integer(), nullable=False),
            Column('vcpus', Integer(), nullable=False),
            Column('local_gb', Integer(), nullable=False),
            Column('flavorid', Integer(), nullable=False, unique=True),
            Column('swap', Integer(), nullable=False, default=0),
            Column('rxtx_quota', Integer(), nullable=False, default=0),
            Column('rxtx_cap', Integer(), nullable=False, default=0))
    try:
        instance_types.create()
    except Exception:
        LOG.info(repr(instance_types))
        LOG.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
    try:
        i = instance_types.insert()
        # items() instead of the Python 2-only iteritems(): identical
        # behaviour here, and keeps the migration runnable on Python 3.
        for name, values in INSTANCE_TYPES.items():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            # now = datetime.datatime.utcnow()
            i.execute({'name': name, 'memory_mb': values["memory_mb"],
                       'vcpus': values["vcpus"], 'deleted': False,
                       'local_gb': values["local_gb"],
                       'flavorid': values["flavorid"]})
    except Exception:
        LOG.info(repr(instance_types))
        LOG.exception('Exception while seeding instance_types table')
        raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the ``instance_types`` table (reverses :func:`upgrade`)."""
    # Operations to reverse the above upgrade go here.
    meta = MetaData()
    meta.bind = migrate_engine
    instance_types = Table('instance_types', meta, autoload=True)
    instance_types.drop()
|
@ -0,0 +1,70 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the ``migrations`` table.

    Column names (source_compute / dest_compute / dest_host / status)
    suggest it records instance moves between compute hosts -- confirm
    against the callers.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    #
    # New Tables
    #
    migrations = Table('migrations', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('source_compute', String(255)),
            Column('dest_compute', String(255)),
            Column('dest_host', String(255)),
            # NOTE(review): the FK is deliberately nullable -- a migration
            # row can exist without an instance association; verify.
            Column('instance_id', Integer, ForeignKey('instances.id'),
                   nullable=True),
            Column('status', String(255)),
            )

    # Creation failure is logged and re-raised so the migration aborts.
    for table in (migrations, ):
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception('Exception while creating table')
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the ``migrations`` table."""
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    migrations = Table('migrations', meta, autoload=True)
    migrations.drop()
|
@ -0,0 +1,45 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add an ``os_type`` column to instances; NULL rows become 'linux'."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    os_type_column = Column('os_type',
                            String(length=255, convert_unicode=False,
                                   assert_unicode=None, unicode_error=None,
                                   _warn_on_bytestring=False),
                            nullable=True)
    instances.create_column(os_type_column)

    # Backfill existing rows; `== None` is SQLAlchemy's spelling of IS NULL.
    migrate_engine.execute(
        instances.update()
        .where(instances.c.os_type == None)
        .values(os_type='linux'))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Remove the ``os_type`` column from instances."""
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.drop_column('os_type')
|
@ -0,0 +1,85 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
|
||||||
|
from sqlalchemy import Table, Text
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add the ``compute_nodes`` table and an ``instances.launched_on`` column.

    ``compute_nodes`` holds per-service capacity/usage counters
    (vcpus/memory/disk and their *_used counterparts) plus hypervisor
    information; ``launched_on`` is a nullable free-text column on
    ``instances``.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    compute_nodes = Table('compute_nodes', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('service_id', Integer(), nullable=False),

            Column('vcpus', Integer(), nullable=False),
            Column('memory_mb', Integer(), nullable=False),
            Column('local_gb', Integer(), nullable=False),
            Column('vcpus_used', Integer(), nullable=False),
            Column('memory_mb_used', Integer(), nullable=False),
            Column('local_gb_used', Integer(), nullable=False),
            Column('hypervisor_type',
                   Text(convert_unicode=False, assert_unicode=None,
                        unicode_error=None, _warn_on_bytestring=False),
                   nullable=False),
            Column('hypervisor_version', Integer(), nullable=False),
            Column('cpu_info',
                   Text(convert_unicode=False, assert_unicode=None,
                        unicode_error=None, _warn_on_bytestring=False),
                   nullable=False),
            )

    try:
        compute_nodes.create()
    except Exception:
        LOG.info(repr(compute_nodes))
        LOG.exception('Exception while creating table')
        # Best-effort cleanup of a partially-created table before
        # propagating the failure.
        meta.drop_all(tables=[compute_nodes])
        raise

    instances_launched_on = Column(
        'launched_on',
        Text(convert_unicode=False, assert_unicode=None,
             unicode_error=None, _warn_on_bytestring=False),
        nullable=True)
    instances.create_column(instances_launched_on)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop ``compute_nodes`` and the ``launched_on`` instances column."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    compute_nodes = Table('compute_nodes', meta, autoload=True)

    compute_nodes.drop()
    instances.drop_column('launched_on')
|
@ -0,0 +1,90 @@
|
|||||||
|
# Copyright (c) 2011 NTT.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Move per-network IPv6 settings onto ``networks``.

    Renames ``networks.ra_server`` to ``gateway_v6``, adds a
    ``netmask_v6`` column, and drops the per-address V6 columns from
    ``fixed_ips``.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)

    # Alter column name
    networks.c.ra_server.alter(name='gateway_v6')
    # Add new column to existing table
    networks.create_column(
        Column('netmask_v6',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)))

    # drop existing columns from table
    for stale_column in (fixed_ips.c.addressV6,
                         fixed_ips.c.netmaskV6,
                         fixed_ips.c.gatewayV6):
        stale_column.drop()
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Reverse :func:`upgrade`: restore the per-address V6 columns."""
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)

    networks.c.gateway_v6.alter(name='ra_server')
    networks.drop_column('netmask_v6')

    # Re-add the three V6 columns; netmaskV6 is only VARCHAR(3).
    for column_name, column_length in (('addressV6', 255),
                                       ('netmaskV6', 3),
                                       ('gatewayV6', 255)):
        fixed_ips.create_column(
            Column(column_name,
                   String(length=column_length,
                          convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None,
                          _warn_on_bytestring=False)))
|
@ -0,0 +1,195 @@
|
|||||||
|
-- SQLite upgrade script: rename networks.ra_server to gateway_v6, add
-- networks.netmask_v6, and drop the per-address IPv6 columns from
-- fixed_ips.  SQLite cannot alter these in place, so each table is
-- copied through a temporary backup table and rebuilt.
BEGIN TRANSACTION;

CREATE TEMPORARY TABLE networks_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    cidr_v6 VARCHAR(255),
    ra_server VARCHAR(255),
    label VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (injected IN (0, 1)),
    CHECK (deleted IN (0, 1))
);

INSERT INTO networks_backup
    SELECT created_at, updated_at, deleted_at, deleted, id, injected,
           cidr, netmask, bridge, gateway, broadcast, dns, vlan,
           vpn_public_address, vpn_public_port, vpn_private_address,
           dhcp_start, project_id, host, cidr_v6, ra_server, label
    FROM networks;

DROP TABLE networks;

-- Rebuild networks with gateway_v6 (replacing ra_server) and the new
-- netmask_v6 column.
CREATE TABLE networks (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    injected BOOLEAN,
    cidr VARCHAR(255),
    netmask VARCHAR(255),
    bridge VARCHAR(255),
    gateway VARCHAR(255),
    broadcast VARCHAR(255),
    dns VARCHAR(255),
    vlan INTEGER,
    vpn_public_address VARCHAR(255),
    vpn_public_port INTEGER,
    vpn_private_address VARCHAR(255),
    dhcp_start VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    cidr_v6 VARCHAR(255),
    gateway_v6 VARCHAR(255),
    label VARCHAR(255),
    netmask_v6 VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (injected IN (0, 1)),
    CHECK (deleted IN (0, 1))
);

INSERT INTO networks
    SELECT created_at, updated_at, deleted_at, deleted, id, injected,
           cidr, netmask, bridge, gateway, broadcast, dns, vlan,
           vpn_public_address, vpn_public_port, vpn_private_address,
           dhcp_start, project_id, host, cidr_v6,
           ra_server AS gateway_v6,
           label,
           NULL AS netmask_v6
    FROM networks_backup;

DROP TABLE networks_backup;

CREATE TEMPORARY TABLE fixed_ips_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    address VARCHAR(255),
    network_id INTEGER,
    instance_id INTEGER,
    allocated BOOLEAN,
    leased BOOLEAN,
    reserved BOOLEAN,
    addressV6 VARCHAR(255),
    netmaskV6 VARCHAR(3),
    gatewayV6 VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (reserved IN (0, 1)),
    CHECK (allocated IN (0, 1)),
    CHECK (leased IN (0, 1)),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id),
    FOREIGN KEY(network_id) REFERENCES networks (id)
);

INSERT INTO fixed_ips_backup
    SELECT created_at, updated_at, deleted_at, deleted, id, address,
           network_id, instance_id, allocated, leased, reserved,
           addressV6, netmaskV6, gatewayV6
    FROM fixed_ips;

DROP TABLE fixed_ips;

-- Rebuild fixed_ips without the per-address IPv6 columns.
CREATE TABLE fixed_ips (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    address VARCHAR(255),
    network_id INTEGER,
    instance_id INTEGER,
    allocated BOOLEAN,
    leased BOOLEAN,
    reserved BOOLEAN,
    PRIMARY KEY (id),
    CHECK (reserved IN (0, 1)),
    CHECK (allocated IN (0, 1)),
    CHECK (leased IN (0, 1)),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id),
    FOREIGN KEY(network_id) REFERENCES networks (id)
);

INSERT INTO fixed_ips
    SELECT created_at, updated_at, deleted_at, deleted, id, address,
           network_id, instance_id, allocated, leased, reserved
    FROM fixed_ips_backup;

DROP TABLE fixed_ips_backup;

COMMIT;
|
@ -0,0 +1,43 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add old_flavor_id and new_flavor_id columns to migrations."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    migrations = Table('migrations', meta, autoload=True)

    # Both columns are plain nullable integers.
    for column_name in ('old_flavor_id', 'new_flavor_id'):
        migrations.create_column(Column(column_name, Integer()))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Remove the flavor-tracking columns from migrations."""
    meta = MetaData()
    meta.bind = migrate_engine

    migrations = Table('migrations', meta, autoload=True)

    for column_name in ('old_flavor_id', 'new_flavor_id'):
        migrations.drop_column(column_name)
|
@ -0,0 +1,69 @@
|
|||||||
|
-- SQLite downgrade script: drop the old_flavor_id / new_flavor_id
-- columns from migrations by rebuilding the table through a temporary
-- backup (SQLite cannot drop columns in place).
BEGIN TRANSACTION;

CREATE TEMPORARY TABLE migrations_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    source_compute VARCHAR(255),
    dest_compute VARCHAR(255),
    dest_host VARCHAR(255),
    instance_id INTEGER,
    status VARCHAR(255),
    old_flavor_id INTEGER,
    new_flavor_id INTEGER,
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id)
);

INSERT INTO migrations_backup
    SELECT created_at, updated_at, deleted_at, deleted, id,
           source_compute, dest_compute, dest_host, instance_id, status,
           old_flavor_id, new_flavor_id
    FROM migrations;

DROP TABLE migrations;

CREATE TABLE migrations (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    source_compute VARCHAR(255),
    dest_compute VARCHAR(255),
    dest_host VARCHAR(255),
    instance_id INTEGER,
    status VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(instance_id) REFERENCES instances (id)
);

INSERT INTO migrations
    SELECT created_at, updated_at, deleted_at, deleted, id,
           source_compute, dest_compute, dest_host, instance_id, status
    FROM migrations_backup;

DROP TABLE migrations_backup;

COMMIT;
|
@ -0,0 +1,74 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Replace instances.instance_type (a flavor name string) with an
    instance_type_id column populated from the instance_types table.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    instance_types = Table('instance_types', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    c_instance_type_id = Column('instance_type_id',
                                String(length=255, convert_unicode=False,
                                       assert_unicode=None,
                                       unicode_error=None,
                                       _warn_on_bytestring=False),
                                nullable=True)
    instances.create_column(c_instance_type_id)

    # Map each instance type id to its name, then rewrite every
    # instance row that references that name.
    type_names = dict(
        (row[0], row[1])
        for row in migrate_engine.execute(instance_types.select()))

    for type_id, type_name in type_names.iteritems():
        migrate_engine.execute(
            instances.update()
            .where(instances.c.instance_type == type_name)
            .values(instance_type_id=type_id))

    instances.c.instance_type.drop()
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Restore instances.instance_type (flavor name) from the
    instance_type_id column, then drop instance_type_id.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instance_types = Table('instance_types', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    c_instance_type = Column('instance_type',
                             String(length=255, convert_unicode=False,
                                    assert_unicode=None,
                                    unicode_error=None,
                                    _warn_on_bytestring=False),
                             nullable=True)
    instances.create_column(c_instance_type)

    # Map each instance type id to its name, then rewrite every
    # instance row that references that id.
    type_names = dict(
        (row[0], row[1])
        for row in migrate_engine.execute(instance_types.select()))

    for type_id, type_name in type_names.iteritems():
        migrate_engine.execute(
            instances.update()
            .where(instances.c.instance_type_id == type_id)
            .values(instance_type=type_name))

    instances.c.instance_type_id.drop()
|
@ -0,0 +1,35 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# Copyright 2011 Grid Dynamics
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, MetaData, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add the auto_assigned flag to floating_ips."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    floating_ips = Table('floating_ips', meta, autoload=True)
    auto_assigned = Column('auto_assigned', Boolean, default=False)
    floating_ips.create_column(auto_assigned)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the auto_assigned flag from floating_ips."""
    meta = MetaData()
    meta.bind = migrate_engine

    Table('floating_ips', meta, autoload=True).drop_column('auto_assigned')
|
@ -0,0 +1,62 @@
|
|||||||
|
-- SQLite downgrade script: drop the auto_assigned column from
-- floating_ips by rebuilding the table through a temporary backup
-- (SQLite cannot drop columns in place).
BEGIN TRANSACTION;

CREATE TEMPORARY TABLE floating_ips_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    address VARCHAR(255),
    fixed_ip_id INTEGER,
    project_id VARCHAR(255),
    host VARCHAR(255),
    auto_assigned BOOLEAN,
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    CHECK (auto_assigned IN (0, 1)),
    FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);

INSERT INTO floating_ips_backup
    SELECT created_at, updated_at, deleted_at, deleted, id, address,
           fixed_ip_id, project_id, host, auto_assigned
    FROM floating_ips;

DROP TABLE floating_ips;

CREATE TABLE floating_ips (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    address VARCHAR(255),
    fixed_ip_id INTEGER,
    project_id VARCHAR(255),
    host VARCHAR(255),
    PRIMARY KEY (id),
    CHECK (deleted IN (0, 1)),
    FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
);

INSERT INTO floating_ips
    SELECT created_at, updated_at, deleted_at, deleted, id, address,
           fixed_ip_id, project_id, host
    FROM floating_ips_backup;

DROP TABLE floating_ips_backup;

COMMIT;
|
@ -0,0 +1,213 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
resources = [
|
||||||
|
'instances',
|
||||||
|
'cores',
|
||||||
|
'volumes',
|
||||||
|
'gigabytes',
|
||||||
|
'floating_ips',
|
||||||
|
'metadata_items',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def old_style_quotas_table(meta, name):
    """Build (without creating) a quotas table in the legacy layout:
    one row per project with one integer limit column per resource.
    """
    columns = [
        Column('id', Integer(), primary_key=True),
        Column('created_at', DateTime(), default=utils.utcnow),
        Column('updated_at', DateTime(), onupdate=utils.utcnow),
        Column('deleted_at', DateTime()),
        Column('deleted', Boolean(), default=False),
        Column('project_id',
               String(length=255, convert_unicode=False,
                      assert_unicode=None, unicode_error=None,
                      _warn_on_bytestring=False)),
    ]
    # One nullable integer limit column per known resource.
    columns.extend(Column(resource, Integer()) for resource in
                   ('instances', 'cores', 'volumes', 'gigabytes',
                    'floating_ips', 'metadata_items'))
    return Table(name, meta, *columns)
|
||||||
|
|
||||||
|
|
||||||
|
def new_style_quotas_table(meta, name):
    """Build (without creating) a quotas table in the new layout:
    one row per (project, resource) pair with a single hard_limit.
    """
    def _string255():
        # Common definition shared by the free-form string columns.
        return String(length=255, convert_unicode=False,
                      assert_unicode=None, unicode_error=None,
                      _warn_on_bytestring=False)

    return Table(name, meta,
                 Column('id', Integer(), primary_key=True),
                 Column('created_at', DateTime(),
                        default=utils.utcnow),
                 Column('updated_at', DateTime(),
                        onupdate=utils.utcnow),
                 Column('deleted_at', DateTime()),
                 Column('deleted', Boolean(), default=False),
                 Column('project_id', _string255()),
                 Column('resource', _string255(), nullable=False),
                 Column('hard_limit', Integer(), nullable=True),
                 )
|
||||||
|
|
||||||
|
|
||||||
|
def quotas_table(meta, name='quotas'):
    """Reflect an existing quotas table from the bound database."""
    return Table(name, meta, autoload=True)
|
||||||
|
|
||||||
|
|
||||||
|
def _assert_no_duplicate_project_ids(quotas):
|
||||||
|
project_ids = set()
|
||||||
|
message = ('There are multiple active quotas for project "%s" '
|
||||||
|
'(among others, possibly). '
|
||||||
|
'Please resolve all ambiguous quotas before '
|
||||||
|
'reattempting the migration.')
|
||||||
|
for quota in quotas:
|
||||||
|
assert quota.project_id not in project_ids, message % quota.project_id
|
||||||
|
project_ids.add(quota.project_id)
|
||||||
|
|
||||||
|
|
||||||
|
def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
    """Ensure that there are no duplicate non-deleted quota entries."""
    rows = migrate_engine.execute(
        quotas.select().where(quotas.c.deleted == False))
    _assert_no_duplicate_project_ids(list(rows))
|
||||||
|
|
||||||
|
|
||||||
|
def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
    """Ensure that there are no duplicate non-deleted quota entries."""
    # A duplicate in the new layout means two live rows for the same
    # (project, resource) pair, so check one resource at a time.
    for resource in resources:
        rows = migrate_engine.execute(
            quotas.select()
            .where(quotas.c.deleted == False)
            .where(quotas.c.resource == resource))
        _assert_no_duplicate_project_ids(list(rows))
|
||||||
|
|
||||||
|
|
||||||
|
def convert_forward(migrate_engine, old_quotas, new_quotas):
    """Copy legacy one-row-per-project quotas into the new
    one-row-per-resource table, skipping unset (NULL) limits.
    """
    for quota in list(migrate_engine.execute(old_quotas.select())):
        for resource in resources:
            hard_limit = getattr(quota, resource)
            if hard_limit is None:
                # An unset limit has no row in the new layout.
                continue
            migrate_engine.execute(new_quotas.insert().values(
                created_at=quota.created_at,
                updated_at=quota.updated_at,
                deleted_at=quota.deleted_at,
                deleted=quota.deleted,
                project_id=quota.project_id,
                resource=resource,
                hard_limit=hard_limit))
|
||||||
|
|
||||||
|
|
||||||
|
def earliest(date1, date2):
    """Return the earlier of two datetimes; a None argument loses to a
    non-None one, and two Nones yield None.
    """
    if date1 is None:
        return date2
    if date2 is None:
        return date1
    return min(date1, date2)
|
||||||
|
|
||||||
|
|
||||||
|
def latest(date1, date2):
    """Return the later of two datetimes; a None argument loses to a
    non-None one, and two Nones yield None.
    """
    if date1 is None:
        return date2
    if date2 is None:
        return date1
    return max(date1, date2)
|
||||||
|
|
||||||
|
|
||||||
|
def convert_backward(migrate_engine, old_quotas, new_quotas):
    """Collapse per-resource quota rows back into one legacy row per
    project, keeping the earliest created_at and latest updated_at.
    """
    collapsed = {}
    for quota in migrate_engine.execute(new_quotas.select()):
        # Skip deleted rows, unknown resources and unset limits.
        if (quota.resource not in resources
            or quota.hard_limit is None or quota.deleted):
            continue
        entry = collapsed.get(quota.project_id)
        if entry is None:
            collapsed[quota.project_id] = {
                'project_id': quota.project_id,
                'created_at': quota.created_at,
                'updated_at': quota.updated_at,
                quota.resource: quota.hard_limit,
            }
        else:
            entry['created_at'] = earliest(quota.created_at,
                                           entry['created_at'])
            entry['updated_at'] = latest(quota.updated_at,
                                         entry['updated_at'])
            entry[quota.resource] = quota.hard_limit

    for values in collapsed.itervalues():
        migrate_engine.execute(old_quotas.insert().values(**values))
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Convert quotas from the legacy one-row-per-project layout to
    the new one-row-per-resource layout.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    old_quotas = quotas_table(meta)
    assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)

    # Build the replacement under a scratch name, fill it, then swap.
    new_quotas = new_style_quotas_table(meta, 'quotas_new')
    new_quotas.create()
    convert_forward(migrate_engine, old_quotas, new_quotas)
    old_quotas.drop()

    # clear metadata to work around this:
    # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
    meta.clear()
    quotas_table(meta, 'quotas_new').rename('quotas')
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Convert quotas back to the legacy one-row-per-project layout."""
    # Operations to reverse the above upgrade go here.
    meta = MetaData()
    meta.bind = migrate_engine

    new_quotas = quotas_table(meta)
    assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)

    # Build the legacy table under a scratch name, fill it, then swap.
    old_quotas = old_style_quotas_table(meta, 'quotas_old')
    old_quotas.create()
    convert_backward(migrate_engine, old_quotas, new_quotas)
    new_quotas.drop()

    # clear metadata to work around this:
    # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
    meta.clear()
    quotas_table(meta, 'quotas_old').rename('quotas')
|
@ -0,0 +1,87 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2012 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Convert instances.instance_type_id from a string column to an
    integer column, nulling values that do not parse as integers.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    # Pre-compute the integer value (or None) for every instance row.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
            continue
        try:
            types[instance.id] = int(instance.instance_type_id)
        except ValueError:
            LOG.warn("Instance %s did not have instance_type_id "
                     "converted to an integer because its value is %s" %
                     (instance.id, instance.instance_type_id))
            types[instance.id] = None

    integer_column = Column('instance_type_id_int', Integer(),
                            nullable=True)
    string_column = instances.c.instance_type_id

    integer_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        migrate_engine.execute(
            instances.update()
            .where(instances.c.id == instance_id)
            .values(instance_type_id_int=instance_type_id))

    # Move the integer column into place, then drop the string one.
    string_column.alter(name='instance_type_id_str')
    integer_column.alter(name='instance_type_id')
    string_column.drop()
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Convert instances.instance_type_id back to a string column."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    integer_column = instances.c.instance_type_id
    string_column = Column('instance_type_id_str',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)

    # Pre-compute the string value (or None) for every instance row.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        type_id = instance.instance_type_id
        types[instance.id] = None if type_id is None else str(type_id)

    string_column.create(instances)
    for instance_id, instance_type_id in types.iteritems():
        migrate_engine.execute(
            instances.update()
            .where(instances.c.id == instance_id)
            .values(instance_type_id_str=instance_type_id))

    # Move the string column into place, then drop the integer one.
    integer_column.alter(name='instance_type_id_int')
    string_column.alter(name='instance_type_id')
    integer_column.drop()
|
@ -0,0 +1,35 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import MetaData, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Rename the misspelled auth_tokens.server_manageent_url column
    to server_management_url.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    tokens = Table('auth_tokens', meta, autoload=True)
    # NOTE: 'server_manageent_url' is the historical (misspelled)
    # column name this migration exists to fix.
    misspelled_column = tokens.c.server_manageent_url
    misspelled_column.alter(name='server_management_url')
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Restore the original (misspelled) auth_tokens column name."""
    meta = MetaData()
    meta.bind = migrate_engine

    tokens = Table('auth_tokens', meta, autoload=True)
    tokens.c.server_management_url.alter(name='server_manageent_url')
|
@ -0,0 +1,82 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 MORITA Kazutaka.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Table, MetaData
|
||||||
|
from sqlalchemy import Integer, DateTime, Boolean, String
|
||||||
|
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the snapshots table."""
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    def _string255():
        # Common definition shared by all free-form string columns.
        return String(length=255, convert_unicode=False,
                      assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)

    #
    # New Tables
    #
    snapshots = Table(
        'snapshots', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('volume_id', Integer(), nullable=False),
        Column('user_id', _string255()),
        Column('project_id', _string255()),
        Column('status', _string255()),
        Column('progress', _string255()),
        Column('volume_size', Integer()),
        Column('scheduled_at', DateTime(timezone=False)),
        Column('display_name', _string255()),
        Column('display_description', _string255()))

    try:
        snapshots.create()
    except Exception:
        # Log what we tried to build, clean up, and re-raise so the
        # migration framework sees the failure.
        LOG.info(repr(snapshots))
        LOG.exception('Exception while creating table')
        meta.drop_all(tables=[snapshots])
        raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Reverse the upgrade by dropping the snapshots table."""
    metadata = MetaData()
    metadata.bind = migrate_engine

    # Reflect the existing table definition so it can be dropped.
    Table('snapshots', metadata, autoload=True).drop()
|
@ -0,0 +1,40 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 MORITA Kazutaka.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Table, MetaData, Integer
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add a snapshot_id column to the existing volumes table.

    Per the sqlalchemy-migrate convention, the supplied migrate_engine
    is bound to the metadata rather than creating a new engine.
    """
    metadata = MetaData()
    metadata.bind = migrate_engine

    # Reflect the current volumes table, then attach the new column.
    volumes = Table('volumes', metadata, autoload=True)
    volumes.create_column(Column('snapshot_id', Integer()))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Remove the snapshot_id column from the volumes table."""
    metadata = MetaData()
    metadata.bind = migrate_engine

    Table('volumes', metadata, autoload=True).drop_column('snapshot_id')
|
@ -0,0 +1,119 @@
|
|||||||
|
-- SQLite downgrade: remove the snapshot_id column from volumes.
-- SQLite has no DROP COLUMN, so the table is rebuilt by copying its
-- rows through a temporary backup table inside one transaction.
BEGIN TRANSACTION;

-- Stage the current rows (still carrying snapshot_id) in a backup copy.
CREATE TEMPORARY TABLE volumes_backup (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_id INTEGER,
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(256),
    provider_auth VARCHAR(256),
    snapshot_id INTEGER,
    PRIMARY KEY (id),
    FOREIGN KEY(instance_id) REFERENCES instances (id),
    CHECK (deleted IN (0, 1))
);

INSERT INTO volumes_backup
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           host,
           size,
           availability_zone,
           instance_id,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth,
           snapshot_id
    FROM volumes;

-- Rebuild volumes without the snapshot_id column.
DROP TABLE volumes;

CREATE TABLE volumes (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id INTEGER NOT NULL,
    ec2_id VARCHAR(255),
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    instance_id INTEGER,
    mountpoint VARCHAR(255),
    attach_time VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(256),
    provider_auth VARCHAR(256),
    PRIMARY KEY (id),
    FOREIGN KEY(instance_id) REFERENCES instances (id),
    CHECK (deleted IN (0, 1))
);

-- Copy everything back, dropping snapshot_id on the way.
INSERT INTO volumes
    SELECT created_at,
           updated_at,
           deleted_at,
           deleted,
           id,
           ec2_id,
           user_id,
           project_id,
           host,
           size,
           availability_zone,
           instance_id,
           mountpoint,
           attach_time,
           status,
           attach_status,
           scheduled_at,
           launched_at,
           terminated_at,
           display_name,
           display_description,
           provider_location,
           provider_auth
    FROM volumes_backup;

DROP TABLE volumes_backup;

COMMIT;
|
@ -0,0 +1,38 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import MetaData, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Rename the instances.image_id column to image_ref.

    Binds the supplied migrate_engine to the metadata instead of
    creating a new engine, per the migration convention.
    """
    metadata = MetaData()
    metadata.bind = migrate_engine

    instances = Table('instances', metadata, autoload=True)
    instances.c.image_id.alter(name='image_ref')
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Rename instances.image_ref back to image_id."""
    metadata = MetaData()
    metadata.bind = migrate_engine

    instances = Table('instances', metadata, autoload=True)
    instances.c.image_ref.alter(name='image_id')
|
@ -0,0 +1,64 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import MetaData
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Convert every table to the InnoDB storage engine on MySQL.

    On non-MySQL backends this migration is a no-op.  The previous
    implementation issued thirty near-identical ``execute`` calls; the
    table names are now data-driven, preserving the original statement
    order exactly.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine
    if migrate_engine.name != "mysql":
        return

    # Ordered exactly as the ALTER statements were previously issued.
    tables = [
        "auth_tokens",
        "certificates",
        "compute_nodes",
        "console_pools",
        "consoles",
        "export_devices",
        "fixed_ips",
        "floating_ips",
        "instance_actions",
        "instance_metadata",
        "instance_types",
        "instances",
        "iscsi_targets",
        "key_pairs",
        "migrate_version",
        "migrations",
        "networks",
        "projects",
        "quotas",
        "security_group_instance_association",
        "security_group_rules",
        "security_groups",
        "services",
        "user_project_association",
        "user_project_role_association",
        "user_role_association",
        "users",
        "volumes",
        "zones",
        "snapshots",
    ]
    for table_name in tables:
        migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table_name)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """No-op: the storage-engine conversion is not reversed."""
    pass
|
@ -0,0 +1,42 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, MetaData, String, Table
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add a nullable vm_mode string column to the instances table.

    Binds migrate_engine to the metadata rather than creating a new
    engine, per the migration convention.
    """
    metadata = MetaData()
    metadata.bind = migrate_engine

    instances = Table('instances', metadata, autoload=True)

    vm_mode_type = String(length=255, convert_unicode=False,
                          assert_unicode=None, unicode_error=None,
                          _warn_on_bytestring=False)
    instances.create_column(Column('vm_mode', vm_mode_type, nullable=True))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the vm_mode column from the instances table."""
    metadata = MetaData()
    metadata.bind = migrate_engine

    Table('instances', metadata, autoload=True).drop_column('vm_mode')
|
@ -0,0 +1,92 @@
|
|||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# Copyright 2011 Isaku Yamahata
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import MetaData, Table, Column
|
||||||
|
from sqlalchemy import DateTime, Boolean, Integer, String
|
||||||
|
from sqlalchemy import ForeignKey
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the block_device_mapping table.

    The table references instances, volumes and snapshots, so those
    tables are reflected first to let the foreign keys resolve.  On
    creation failure the table is logged, cleaned up and the exception
    re-raised so the migration fails loudly.
    """
    # Don't create your own engine; bind migrate_engine to the metadata.
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect referenced tables so the ForeignKey targets below resolve.
    Table('instances', meta, autoload=True)
    Table('volumes', meta, autoload=True)
    Table('snapshots', meta, autoload=True)

    def _string_255():
        # Column type shared by every string column in this table.
        return String(length=255, convert_unicode=False,
                      assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)

    block_device_mapping = Table(
        'block_device_mapping', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, autoincrement=True),
        Column('instance_id', Integer(), ForeignKey('instances.id'),
               nullable=False),
        Column('device_name', _string_255(), nullable=False),
        Column('delete_on_termination',
               Boolean(create_constraint=True, name=None),
               default=False),
        Column('virtual_name', _string_255(), nullable=True),
        Column('snapshot_id', Integer(), ForeignKey('snapshots.id'),
               nullable=True),
        Column('volume_id', Integer(), ForeignKey('volumes.id'),
               nullable=True),
        Column('volume_size', Integer(), nullable=True),
        Column('no_device', Boolean(create_constraint=True, name=None),
               nullable=True),
    )
    try:
        block_device_mapping.create()
    except Exception:
        LOG.info(repr(block_device_mapping))
        LOG.exception('Exception while creating table')
        meta.drop_all(tables=[block_device_mapping])
        raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Reverse the upgrade by dropping block_device_mapping."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Reflect related tables so foreign keys resolve during reflection.
    Table('instances', meta, autoload=True)
    Table('volumes', meta, autoload=True)
    Table('snapshots', meta, autoload=True)

    Table('block_device_mapping', meta, autoload=True).drop()
|
@ -0,0 +1,45 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||||
|
|
||||||
|
from cinder import utils
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Add a uuid column to instances and backfill existing rows.

    Every pre-existing instance row receives a freshly generated UUID.
    The per-row UPDATE now locates rows via the reflected ``id`` column
    object instead of positional index 0, which silently broke if ``id``
    was not the first column in the reflected table; backslash line
    continuations are replaced with parenthesized chaining.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    instances.create_column(Column("uuid", String(36)))

    rows = migrate_engine.execute(instances.select())
    for row in rows:
        instance_uuid = str(utils.gen_uuid())
        # Index the row by the column object, not by position: the id
        # column is not guaranteed to be first in the reflected table.
        migrate_engine.execute(
            instances.update()
            .where(instances.c.id == row[instances.c.id])
            .values(uuid=instance_uuid))
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the uuid column from the instances table."""
    metadata = MetaData()
    metadata.bind = migrate_engine

    Table('instances', metadata, autoload=True).drop_column('uuid')
|
@ -0,0 +1,89 @@
|
|||||||
|
# Copyright 2011 OpenStack LLC.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime, Integer
|
||||||
|
from sqlalchemy import MetaData, String, Table
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the agent_builds table and add instances.architecture.

    Previously a failure in ``table.create()`` was only logged via
    ``LOG.info(repr(table))`` and then swallowed, so the migration
    continued half-applied.  It now logs the exception and re-raises,
    matching the error handling of the sibling migrations.
    """
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    def _string_255():
        # Column type shared by every string column in agent_builds.
        return String(length=255, convert_unicode=False,
                      assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)

    #
    # New Tables
    #
    builds = Table('agent_builds', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('hypervisor', _string_255()),
            Column('os', _string_255()),
            Column('architecture', _string_255()),
            Column('version', _string_255()),
            Column('url', _string_255()),
            Column('md5hash', _string_255()),
            )
    for table in (builds, ):
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            # Surface the failure instead of silently continuing with a
            # half-applied migration (consistent with other migrations).
            LOG.exception('Exception while creating table')
            raise

    instances = Table('instances', meta, autoload=True)

    #
    # New Columns
    #
    architecture = Column('architecture', String(length=255))

    # Add columns to existing tables
    instances.create_column(architecture)
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop agent_builds and remove instances.architecture."""
    meta = MetaData()
    meta.bind = migrate_engine

    Table('agent_builds', meta, autoload=True).drop()

    Table('instances', meta, autoload=True).drop_column('architecture')
|
@ -0,0 +1,65 @@
|
|||||||
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||||
|
|
||||||
|
# Copyright 2010 United States Government as represented by the
|
||||||
|
# Administrator of the National Aeronautics and Space Administration.
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
from sqlalchemy import Boolean, Column, DateTime
|
||||||
|
from sqlalchemy import Integer, MetaData, String
|
||||||
|
from sqlalchemy import Table
|
||||||
|
|
||||||
|
from cinder import log as logging
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(migrate_engine):
    """Create the provider_fw_rules table.

    On creation failure the table is logged and the exception re-raised
    so the migration fails loudly.
    """
    # Don't create your own engine; bind migrate_engine to the metadata.
    meta = MetaData()
    meta.bind = migrate_engine

    def _string(length):
        # Common string column type, parameterized by length.
        return String(length=length, convert_unicode=False,
                      assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)

    #
    # New Tables
    #
    provider_fw_rules = Table(
        'provider_fw_rules', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('protocol', _string(5)),
        Column('from_port', Integer()),
        Column('to_port', Integer()),
        Column('cidr', _string(255)))
    try:
        provider_fw_rules.create()
    except Exception:
        LOG.info(repr(provider_fw_rules))
        LOG.exception('Exception while creating table')
        raise
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade(migrate_engine):
    """Drop the provider_fw_rules table."""
    meta = MetaData()
    meta.bind = migrate_engine

    Table('provider_fw_rules', meta, autoload=True).drop()
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user