Migrate to oslo_db

Currently, ironic-inspector uses SQLite as its database.
We should migrate to oslo.db.
This patch converts the database connection operations to oslo_db.

Change-Id: Ibd0f787b570bc101eab9fbd0b59d8a775a95e2e0
Implements: blueprint migrate-to-oslodb
This commit is contained in:
Yuiko Takada 2015-04-23 09:48:28 +09:00 committed by Yuiko Takada
parent 762731698c
commit e195b4780a
10 changed files with 429 additions and 245 deletions

View File

@ -135,7 +135,7 @@ function configure_inspector {
inspector_iniset firewall manage_firewall $IRONIC_INSPECTOR_MANAGE_FIREWALL inspector_iniset firewall manage_firewall $IRONIC_INSPECTOR_MANAGE_FIREWALL
inspector_iniset firewall dnsmasq_interface $IRONIC_INSPECTOR_INTERFACE inspector_iniset firewall dnsmasq_interface $IRONIC_INSPECTOR_INTERFACE
inspector_iniset DEFAULT database $IRONIC_INSPECTOR_DATA_DIR/inspector.sqlite inspector_iniset database connection sqlite:///$IRONIC_INSPECTOR_DATA_DIR/inspector.sqlite
iniset "$IRONIC_CONF_FILE" inspector enabled True iniset "$IRONIC_CONF_FILE" inspector enabled True
iniset "$IRONIC_CONF_FILE" inspector service_url $IRONIC_INSPECTOR_URI iniset "$IRONIC_CONF_FILE" inspector service_url $IRONIC_INSPECTOR_URI

View File

@ -24,11 +24,6 @@
# Its value may be silently ignored in the future. # Its value may be silently ignored in the future.
#authenticate = <None> #authenticate = <None>
# SQLite3 database to store nodes under introspection, required. Do
# not use :memory: here, it won't work. (string value)
# Deprecated group/name - [discoverd]/database
#database =
# Debug mode enabled/disabled. (boolean value) # Debug mode enabled/disabled. (boolean value)
# Deprecated group/name - [discoverd]/debug # Deprecated group/name - [discoverd]/debug
#debug = false #debug = false
@ -62,6 +57,126 @@
#max_concurrency = 1000 #max_concurrency = 1000
[database]
#
# From oslo.db
#
# The file name to use with SQLite. (string value)
# Deprecated group/name - [DEFAULT]/sqlite_db
#sqlite_db = oslo.sqlite
# If True, SQLite uses synchronous mode. (boolean value)
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database.
# (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
# The SQLAlchemy connection string to use to connect to the slave
# database. (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including
# the default, overrides any server-set SQL mode. To use whatever SQL
# mode is set by the server configuration, set this to no value.
# Example: mysql_sql_mode= (string value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle SQL connections are reaped. (integer value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
# Maximum number of database connection retries during startup. Set to
# -1 to specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = <None>
# Verbosity of SQL debugging information: 0=None, 100=Everything.
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection
# lost. (boolean value)
#use_db_reconnect = false
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database
# operation up to db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries
# of a database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before
# error is raised. Set to -1 to specify an infinite retry count.
# (integer value)
#db_max_retries = 20
[discoverd]
#
# From ironic_inspector
#
# SQLite3 database to store nodes under introspection, required. Do
# not use :memory: here, it won't work. DEPRECATED: use
# [database]/connection. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#database =
[firewall] [firewall]
# #

View File

@ -143,6 +143,15 @@ PROCESSING_OPTS = [
] ]
DISCOVERD_OPTS = [
cfg.StrOpt('database',
default='',
help='SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work. '
'DEPRECATED: use [database]/connection.',
deprecated_for_removal=True),
]
SERVICE_OPTS = [ SERVICE_OPTS = [
cfg.StrOpt('listen_address', cfg.StrOpt('listen_address',
default='0.0.0.0', default='0.0.0.0',
@ -163,11 +172,6 @@ SERVICE_OPTS = [
help='DEPRECATED: use auth_strategy.', help='DEPRECATED: use auth_strategy.',
deprecated_group='discoverd', deprecated_group='discoverd',
deprecated_for_removal=True), deprecated_for_removal=True),
cfg.StrOpt('database',
default='',
help='SQLite3 database to store nodes under introspection, '
'required. Do not use :memory: here, it won\'t work.',
deprecated_group='discoverd'),
cfg.BoolOpt('debug', cfg.BoolOpt('debug',
default=False, default=False,
help='Debug mode enabled/disabled.', help='Debug mode enabled/disabled.',
@ -207,6 +211,7 @@ cfg.CONF.register_opts(SERVICE_OPTS)
cfg.CONF.register_opts(FIREWALL_OPTS, group='firewall') cfg.CONF.register_opts(FIREWALL_OPTS, group='firewall')
cfg.CONF.register_opts(PROCESSING_OPTS, group='processing') cfg.CONF.register_opts(PROCESSING_OPTS, group='processing')
cfg.CONF.register_opts(IRONIC_OPTS, group='ironic') cfg.CONF.register_opts(IRONIC_OPTS, group='ironic')
cfg.CONF.register_opts(DISCOVERD_OPTS, group='discoverd')
def list_opts(): def list_opts():
@ -215,4 +220,5 @@ def list_opts():
('firewall', FIREWALL_OPTS), ('firewall', FIREWALL_OPTS),
('ironic', IRONIC_OPTS), ('ironic', IRONIC_OPTS),
('processing', PROCESSING_OPTS), ('processing', PROCESSING_OPTS),
('discoverd', DISCOVERD_OPTS),
] ]

View File

@ -0,0 +1,47 @@
# Copyright 2015 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for inspection data.
"""
from oslo_db.sqlalchemy import models
from sqlalchemy import Column, Float, ForeignKey, String, Text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=models.ModelBase)
class Node(Base):
__tablename__ = 'nodes'
uuid = Column(String(36), primary_key=True)
started_at = Column(Float, nullable=True)
finished_at = Column(Float, nullable=True)
error = Column(Text, nullable=True)
class Attribute(Base):
__tablename__ = 'attributes'
name = Column(Text, primary_key=True)
value = Column(Text, primary_key=True)
uuid = Column(String(36), ForeignKey('nodes.uuid'))
class Option(Base):
__tablename__ = 'options'
uuid = Column(String(36), ForeignKey('nodes.uuid'), primary_key=True)
name = Column(Text, primary_key=True)
value = Column(Text)

View File

@ -16,36 +16,24 @@
import contextlib import contextlib
import json import json
import logging import logging
import os
import sqlite3
import sys
import time import time
from ironicclient import exceptions from ironicclient import exceptions
from oslo_config import cfg from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options as db_opts
from oslo_db.sqlalchemy import session as db_session
from sqlalchemy import text
from ironic_inspector.common.i18n import _, _LC, _LE, _LW from ironic_inspector.common.i18n import _, _LE, _LW
from ironic_inspector import models
from ironic_inspector import utils from ironic_inspector import utils
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger("ironic_inspector.node_cache") LOG = logging.getLogger("ironic_inspector.node_cache")
_DB_NAME = None _FACADE = None
_SCHEMA = """
create table if not exists nodes
(uuid text primary key, started_at real, finished_at real, error text);
create table if not exists attributes
(name text, value text, uuid text,
primary key (name, value),
foreign key (uuid) references nodes);
create table if not exists options
(uuid text, name text, value text,
primary key (uuid, name),
foreign key (uuid) references nodes);
"""
MACS_ATTRIBUTE = 'mac' MACS_ATTRIBUTE = 'mac'
@ -70,9 +58,9 @@ class NodeInfo(object):
def options(self): def options(self):
"""Node introspection options as a dict.""" """Node introspection options as a dict."""
if self._options is None: if self._options is None:
rows = _db().execute('select name, value from options ' rows = model_query(models.Option).filter_by(
'where uuid=?', (self.uuid,)) uuid=self.uuid)
self._options = {row['name']: json.loads(row['value']) self._options = {row.name: json.loads(row.value)
for row in rows} for row in rows}
return self._options return self._options
@ -80,11 +68,11 @@ class NodeInfo(object):
"""Set an option for a node.""" """Set an option for a node."""
encoded = json.dumps(value) encoded = json.dumps(value)
self.options[name] = value self.options[name] = value
with _db() as db: with _ensure_transaction() as session:
db.execute('delete from options where uuid=? and name=?', model_query(models.Option, session=session).filter_by(
(self.uuid, name)) uuid=self.uuid, name=name).delete()
db.execute('insert into options(uuid, name, value) values(?,?,?)', models.Option(uuid=self.uuid, name=name, value=encoded).save(
(self.uuid, name, encoded)) session)
def finished(self, error=None): def finished(self, error=None):
"""Record status for this node. """Record status for this node.
@ -96,29 +84,32 @@ class NodeInfo(object):
self.finished_at = time.time() self.finished_at = time.time()
self.error = error self.error = error
with _db() as db: with _ensure_transaction() as session:
db.execute('update nodes set finished_at=?, error=? where uuid=?', model_query(models.Node, session=session).filter_by(
(self.finished_at, error, self.uuid)) uuid=self.uuid).update(
db.execute("delete from attributes where uuid=?", (self.uuid,)) {'finished_at': self.finished_at, 'error': error})
db.execute("delete from options where uuid=?", (self.uuid,)) model_query(models.Attribute, session=session).filter_by(
uuid=self.uuid).delete()
model_query(models.Option, session=session).filter_by(
uuid=self.uuid).delete()
def add_attribute(self, name, value, database=None): def add_attribute(self, name, value, session=None):
"""Store look up attribute for a node in the database. """Store look up attribute for a node in the database.
:param name: attribute name :param name: attribute name
:param value: attribute value or list of possible values :param value: attribute value or list of possible values
:param database: optional existing database connection :param session: optional existing database session
:raises: Error if attributes values are already in database :raises: Error if attributes values are already in database
""" """
if not isinstance(value, list): if not isinstance(value, list):
value = [value] value = [value]
with _maybe_db(database) as db: with _ensure_transaction(session) as session:
try: try:
db.executemany("insert into attributes(name, value, uuid) " for v in value:
"values(?, ?, ?)", models.Attribute(name=name, value=v, uuid=self.uuid).save(
[(name, v, self.uuid) for v in value]) session)
except sqlite3.IntegrityError as exc: except db_exc.DBDuplicateEntry as exc:
LOG.error(_LE('Database integrity error %s during ' LOG.error(_LE('Database integrity error %s during '
'adding attributes'), exc) 'adding attributes'), exc)
raise utils.Error(_( raise utils.Error(_(
@ -186,35 +177,49 @@ class NodeInfo(object):
def init(): def init():
"""Initialize the database.""" """Initialize the database."""
global _DB_NAME if CONF.discoverd.database:
db_opts.set_defaults(CONF,
_DB_NAME = CONF.database.strip() connection='sqlite:///%s' %
if not _DB_NAME: str(CONF.discoverd.database).strip())
LOG.critical(_LC('Configuration option inspector.database' # TODO(yuikotakada) alembic migration
' should be set')) engine = get_engine()
sys.exit(1) models.Base.metadata.create_all(engine)
return get_session()
db_dir = os.path.dirname(_DB_NAME)
if db_dir and not os.path.exists(db_dir):
os.makedirs(db_dir)
sqlite3.connect(_DB_NAME).executescript(_SCHEMA)
def _db(): def get_session(**kwargs):
if _DB_NAME is None: facade = _create_facade_lazily()
init() return facade.get_session(**kwargs)
conn = sqlite3.connect(_DB_NAME)
conn.row_factory = sqlite3.Row
return conn def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(cfg.CONF)
return _FACADE
@contextlib.contextmanager @contextlib.contextmanager
def _maybe_db(db=None): def _ensure_transaction(session=None):
if db is None: session = session or get_session()
with _db() as db: with session.begin(subtransactions=True):
yield db yield session
else:
yield db
def add_node(uuid, **attributes): def add_node(uuid, **attributes):
@ -228,27 +233,29 @@ def add_node(uuid, **attributes):
:returns: NodeInfo :returns: NodeInfo
""" """
started_at = time.time() started_at = time.time()
with _db() as db: with _ensure_transaction() as session:
db.execute("delete from nodes where uuid=?", (uuid,)) (model_query(models.Node, session=session).filter_by(uuid=uuid).
db.execute("delete from attributes where uuid=?", (uuid,)) delete())
db.execute("delete from options where uuid=?", (uuid,)) (model_query(models.Attribute, session=session).filter_by(uuid=uuid).
delete(synchronize_session=False))
(model_query(models.Option, session=session).filter_by(uuid=uuid).
delete())
db.execute("insert into nodes(uuid, started_at) " models.Node(uuid=uuid, started_at=started_at).save(session)
"values(?, ?)", (uuid, started_at))
node_info = NodeInfo(uuid=uuid, started_at=started_at) node_info = NodeInfo(uuid=uuid, started_at=started_at)
for (name, value) in attributes.items(): for (name, value) in attributes.items():
if not value: if not value:
continue continue
node_info.add_attribute(name, value, database=db) node_info.add_attribute(name, value, session=session)
return node_info return node_info
def active_macs(): def active_macs():
"""List all MAC's that are on introspection right now.""" """List all MAC's that are on introspection right now."""
return {x[0] for x in _db().execute("select value from attributes " return ({x.value for x in model_query(models.Attribute.value).
"where name=?", (MACS_ATTRIBUTE,))} filter_by(name=MACS_ATTRIBUTE)})
def get_node(uuid): def get_node(uuid):
@ -257,7 +264,7 @@ def get_node(uuid):
:param uuid: node UUID. :param uuid: node UUID.
:returns: structure NodeInfo. :returns: structure NodeInfo.
""" """
row = _db().execute('select * from nodes where uuid=?', (uuid,)).fetchone() row = model_query(models.Node).filter_by(uuid=uuid).first()
if row is None: if row is None:
raise utils.Error(_('Could not find node %s in cache') % uuid, raise utils.Error(_('Could not find node %s in cache') % uuid,
code=404) code=404)
@ -273,7 +280,7 @@ def find_node(**attributes):
""" """
# NOTE(dtantsur): sorting is not required, but gives us predictability # NOTE(dtantsur): sorting is not required, but gives us predictability
found = set() found = set()
db = _db()
for (name, value) in sorted(attributes.items()): for (name, value) in sorted(attributes.items()):
if not value: if not value:
LOG.debug('Empty value for attribute %s', name) LOG.debug('Empty value for attribute %s', name)
@ -283,34 +290,40 @@ def find_node(**attributes):
LOG.debug('Trying to use %s of value %s for node look up' LOG.debug('Trying to use %s of value %s for node look up'
% (name, value)) % (name, value))
rows = db.execute('select distinct uuid from attributes where ' + value_list = []
' OR '.join('name=? AND value=?' for _ in value), for v in value:
sum(([name, v] for v in value), [])).fetchall() value_list.append('name="%s" AND value="%s"' % (name, v))
stmt = ('select distinct uuid from attributes where ' +
' OR '.join(value_list))
rows = (model_query(models.Attribute.uuid).from_statement(
text(stmt)).all())
if rows: if rows:
found.update(item[0] for item in rows) found.update(item.uuid for item in rows)
if not found: if not found:
raise utils.NotFoundInCacheError(_( raise utils.NotFoundInCacheError(_(
'Could not find a node for attributes %s') % attributes) 'Could not find a node for attributes %s') % attributes)
elif len(found) > 1: elif len(found) > 1:
raise utils.Error(_( raise utils.Error(_(
'Multiple matching nodes found for attributes %(attr)s: %(found)s') 'Multiple matching nodes found for attributes '
'%(attr)s: %(found)s')
% {'attr': attributes, 'found': list(found)}, code=404) % {'attr': attributes, 'found': list(found)}, code=404)
uuid = found.pop() uuid = found.pop()
row = db.execute('select started_at, finished_at from nodes where uuid=?', row = (model_query(models.Node.started_at, models.Node.finished_at).
(uuid,)).fetchone() filter_by(uuid=uuid).first())
if not row: if not row:
raise utils.Error(_( raise utils.Error(_(
'Could not find node %s in introspection cache, ' 'Could not find node %s in introspection cache, '
'probably it\'s not on introspection now') % uuid, code=404) 'probably it\'s not on introspection now') % uuid, code=404)
if row['finished_at']: if row.finished_at:
raise utils.Error(_( raise utils.Error(_(
'Introspection for node %(node)s already finished on %(finish)s') % 'Introspection for node %(node)s already finished on '
{'node': uuid, 'finish': row['finished_at']}) '%(finish)s') % {'node': uuid, 'finish': row.finished_at})
return NodeInfo(uuid=uuid, started_at=row['started_at']) return NodeInfo(uuid=uuid, started_at=row.started_at)
def clean_up(): def clean_up():
@ -324,30 +337,32 @@ def clean_up():
status_keep_threshold = (time.time() - status_keep_threshold = (time.time() -
CONF.node_status_keep_time) CONF.node_status_keep_time)
with _db() as db: with _ensure_transaction() as session:
db.execute('delete from nodes where finished_at < ?', model_query(models.Node, session=session).filter(
(status_keep_threshold,)) models.Node.finished_at.isnot(None),
models.Node.finished_at < status_keep_threshold).delete()
timeout = CONF.timeout timeout = CONF.timeout
if timeout <= 0: if timeout <= 0:
return [] return []
threshold = time.time() - timeout threshold = time.time() - timeout
with _db() as db: uuids = [row.uuid for row in
uuids = [row[0] for row in model_query(models.Node.uuid, session=session).filter(
db.execute('select uuid from nodes where ' models.Node.started_at < threshold,
'started_at < ? and finished_at is null', models.Node.finished_at.is_(None)).all()]
(threshold,))]
if not uuids: if not uuids:
return [] return []
LOG.error(_LE('Introspection for nodes %s has timed out'), uuids) LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
db.execute('update nodes set finished_at=?, error=? ' query = model_query(models.Node, session=session).filter(
'where started_at < ? and finished_at is null', models.Node.started_at < threshold,
(time.time(), 'Introspection timeout', threshold)) models.Node.finished_at.is_(None))
db.executemany('delete from attributes where uuid=?', query.update({'finished_at': time.time(),
[(u,) for u in uuids]) 'error': 'Introspection timeout'})
db.executemany('delete from options where uuid=?', for u in uuids:
[(u,) for u in uuids]) model_query(models.Attribute, session=session).filter_by(
uuid=u).delete()
model_query(models.Option, session=session).filter_by(
uuid=u).delete()
return uuids return uuids

View File

@ -11,15 +11,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest import unittest
import mock import mock
from oslo_config import cfg from oslo_config import cfg
from oslo_db import options as db_opts
from ironic_inspector.common import i18n from ironic_inspector.common import i18n
# Import configuration options # Import configuration options
from ironic_inspector import conf # noqa from ironic_inspector import conf # noqa
from ironic_inspector import models
from ironic_inspector import node_cache from ironic_inspector import node_cache
from ironic_inspector.plugins import base as plugins_base from ironic_inspector.plugins import base as plugins_base
@ -35,23 +36,24 @@ def init_test_conf():
CONF.reset() CONF.reset()
for group in ('firewall', 'processing', 'ironic'): for group in ('firewall', 'processing', 'ironic'):
CONF.register_group(cfg.OptGroup(group)) CONF.register_group(cfg.OptGroup(group))
if not CONF.database: db_opts.set_defaults(CONF)
CONF.set_default('slave_connection', False, group='database')
CONF.set_default('max_retries', 10, group='database')
if not CONF.database.connection:
# Might be set in functional tests # Might be set in functional tests
db_file = tempfile.NamedTemporaryFile() db_opts.set_defaults(CONF,
CONF.set_override('database', db_file.name) connection='sqlite:///')
else:
db_file = None
node_cache._DB_NAME = None
return db_file
class BaseTest(unittest.TestCase): class BaseTest(unittest.TestCase):
def setUp(self): def setUp(self):
super(BaseTest, self).setUp() super(BaseTest, self).setUp()
self.db_file = init_test_conf() init_test_conf()
self.db = node_cache._db() self.session = node_cache.get_session()
if self.db_file: engine = node_cache.get_engine()
self.addCleanup(lambda: self.db_file.close()) models.Base.metadata.create_all(engine)
engine.connect()
self.addCleanup(node_cache.get_engine().dispose)
plugins_base._HOOKS_MGR = None plugins_base._HOOKS_MGR = None
for name in ('_', '_LI', '_LW', '_LE', '_LC'): for name in ('_', '_LI', '_LW', '_LE', '_LC'):
patch = mock.patch.object(i18n, name, lambda s: s) patch = mock.patch.object(i18n, name, lambda s: s)

View File

@ -41,8 +41,9 @@ manage_firewall = False
[processing] [processing]
enable_setting_ipmi_credentials = True enable_setting_ipmi_credentials = True
[DEFAULT] [DEFAULT]
database = %(db_file)s
debug = True debug = True
[database]
connection = sqlite:///%(db_file)s
""" """

View File

@ -11,14 +11,13 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import os
import tempfile
import time import time
import unittest import unittest
import mock import mock
from oslo_config import cfg from oslo_config import cfg
from ironic_inspector import models
from ironic_inspector import node_cache from ironic_inspector import node_cache
from ironic_inspector.test import base as test_base from ironic_inspector.test import base as test_base
from ironic_inspector import utils from ironic_inspector import utils
@ -29,63 +28,68 @@ CONF = cfg.CONF
class TestNodeCache(test_base.NodeTest): class TestNodeCache(test_base.NodeTest):
def test_add_node(self): def test_add_node(self):
# Ensure previous node information is cleared # Ensure previous node information is cleared
with self.db: session = node_cache.get_session()
self.db.execute("insert into nodes(uuid) values(?)", with session.begin():
(self.node.uuid,)) models.Node(uuid=self.node.uuid).save(session)
self.db.execute("insert into nodes(uuid) values('uuid2')") models.Node(uuid='uuid2').save(session)
self.db.execute("insert into attributes(name, value, uuid) " models.Attribute(name='mac',
"values(?, ?, ?)", value='11:22:11:22:11:22',
('mac', '11:22:11:22:11:22', self.uuid)) uuid=self.uuid).save(session)
res = node_cache.add_node(self.node.uuid, mac=self.macs, res = node_cache.add_node(self.node.uuid, mac=self.macs,
bmc_address='1.2.3.4', foo=None) bmc_address='1.2.3.4', foo=None)
self.assertEqual(self.uuid, res.uuid) self.assertEqual(self.uuid, res.uuid)
self.assertTrue(time.time() - 60 < res.started_at < time.time() + 60) self.assertTrue(time.time() - 60 < res.started_at < time.time() + 60)
res = self.db.execute("select uuid, started_at " res = (node_cache.model_query(models.Node.uuid,
"from nodes order by uuid").fetchall() models.Node.started_at).order_by(models.Node.uuid).all())
self.assertEqual(['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e', self.assertEqual(['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e',
'uuid2'], [t[0] for t in res]) 'uuid2'], [t.uuid for t in res])
self.assertTrue(time.time() - 60 < res[0][1] < time.time() + 60) self.assertTrue(time.time() - 60 < res[0].started_at <
time.time() + 60)
res = self.db.execute("select name, value, uuid from attributes " res = (node_cache.model_query(models.Attribute.name,
"order by name, value").fetchall() models.Attribute.value, models.Attribute.uuid).
order_by(models.Attribute.name, models.Attribute.value).all())
self.assertEqual([('bmc_address', '1.2.3.4', self.uuid), self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
('mac', self.macs[0], self.uuid), ('mac', self.macs[0], self.uuid),
('mac', self.macs[1], self.uuid)], ('mac', self.macs[1], self.uuid)],
[tuple(row) for row in res]) [(row.name, row.value, row.uuid) for row in res])
def test_add_node_duplicate_mac(self): def test_add_node_duplicate_mac(self):
with self.db: session = node_cache.get_session()
self.db.execute("insert into nodes(uuid) values(?)", with session.begin():
('another-uuid',)) models.Node(uuid='another-uuid').save(session)
self.db.execute("insert into attributes(name, value, uuid) " models.Attribute(name='mac', value='11:22:11:22:11:22',
"values(?, ?, ?)", uuid='another-uuid').save(session)
('mac', '11:22:11:22:11:22', 'another-uuid'))
self.assertRaises(utils.Error, self.assertRaises(utils.Error,
node_cache.add_node, node_cache.add_node,
self.node.uuid, mac=['11:22:11:22:11:22']) self.node.uuid, mac=['11:22:11:22:11:22'])
def test_active_macs(self): def test_active_macs(self):
with self.db: session = node_cache.get_session()
self.db.execute("insert into nodes(uuid) values(?)", with session.begin():
(self.node.uuid,)) models.Node(uuid=self.node.uuid).save(session)
self.db.executemany("insert into attributes(name, value, uuid) " values = [('mac', '11:22:11:22:11:22', self.uuid),
"values(?, ?, ?)", ('mac', '22:11:22:11:22:11', self.uuid)]
[('mac', '11:22:11:22:11:22', self.uuid), for value in values:
('mac', '22:11:22:11:22:11', self.uuid)]) models.Attribute(name=value[0], value=value[1],
uuid=value[2]).save(session)
self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'}, self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'},
node_cache.active_macs()) node_cache.active_macs())
def test_add_attribute(self): def test_add_attribute(self):
with self.db: session = node_cache.get_session()
self.db.execute("insert into nodes(uuid) values(?)", with session.begin():
(self.node.uuid,)) models.Node(uuid=self.node.uuid).save(session)
node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42)
node_info.add_attribute('key', 'value') node_info.add_attribute('key', 'value')
res = self.db.execute("select name, value, uuid from attributes " res = node_cache.model_query(models.Attribute.name,
"order by name, value").fetchall() models.Attribute.value,
models.Attribute.uuid,
session=session).order_by(
models.Attribute.name,
models.Attribute.value).all()
self.assertEqual([('key', 'value', self.uuid)], self.assertEqual([('key', 'value', self.uuid)],
[tuple(row) for row in res]) [tuple(row) for row in res])
self.assertRaises(utils.Error, node_info.add_attribute, self.assertRaises(utils.Error, node_info.add_attribute,
@ -131,15 +135,18 @@ class TestNodeCacheFind(test_base.NodeTest):
self.assertTrue(time.time() - 60 < res.started_at < time.time() + 1) self.assertTrue(time.time() - 60 < res.started_at < time.time() + 1)
def test_inconsistency(self): def test_inconsistency(self):
with self.db: session = node_cache.get_session()
self.db.execute('delete from nodes where uuid=?', (self.uuid,)) with session.begin():
(node_cache.model_query(models.Node).filter_by(uuid=self.uuid).
delete())
self.assertRaises(utils.Error, node_cache.find_node, self.assertRaises(utils.Error, node_cache.find_node,
bmc_address='1.2.3.4') bmc_address='1.2.3.4')
def test_already_finished(self): def test_already_finished(self):
with self.db: session = node_cache.get_session()
self.db.execute('update nodes set finished_at=42.0 where uuid=?', with session.begin():
(self.uuid,)) (node_cache.model_query(models.Node).filter_by(uuid=self.uuid).
update({'finished_at': 42.0}))
self.assertRaises(utils.Error, node_cache.find_node, self.assertRaises(utils.Error, node_cache.find_node,
bmc_address='1.2.3.4') bmc_address='1.2.3.4')
@ -148,27 +155,28 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
def setUp(self): def setUp(self):
super(TestNodeCacheCleanUp, self).setUp() super(TestNodeCacheCleanUp, self).setUp()
self.started_at = 100.0 self.started_at = 100.0
with self.db: session = node_cache.get_session()
self.db.execute('insert into nodes(uuid, started_at) ' with session.begin():
'values(?, ?)', (self.uuid, self.started_at)) models.Node(uuid=self.uuid, started_at=self.started_at).save(
self.db.executemany('insert into attributes(name, value, uuid) ' session)
'values(?, ?, ?)', for v in self.macs:
[('mac', v, self.uuid) for v in self.macs]) models.Attribute(name='mac', value=v, uuid=self.uuid).save(
self.db.execute('insert into options(uuid, name, value) ' session)
'values(?, ?, ?)', (self.uuid, 'foo', 'bar')) models.Option(uuid=self.uuid, name='foo', value='bar').save(
session)
def test_no_timeout(self): def test_no_timeout(self):
CONF.set_override('timeout', 0) CONF.set_override('timeout', 0)
self.assertFalse(node_cache.clean_up()) self.assertFalse(node_cache.clean_up())
res = [tuple(row) for row in self.db.execute( res = [tuple(row) for row in
'select finished_at, error from nodes').fetchall()] node_cache.model_query(models.Node.finished_at,
models.Node.error).all()]
self.assertEqual([(None, None)], res) self.assertEqual([(None, None)], res)
self.assertEqual(len(self.macs), len(self.db.execute( self.assertEqual(len(self.macs),
'select * from attributes').fetchall())) node_cache.model_query(models.Attribute).count())
self.assertEqual(1, len(self.db.execute( self.assertEqual(1, node_cache.model_query(models.Option).count())
'select * from options').fetchall()))
@mock.patch.object(time, 'time') @mock.patch.object(time, 'time')
def test_ok(self, time_mock): def test_ok(self, time_mock):
@ -176,55 +184,52 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
self.assertFalse(node_cache.clean_up()) self.assertFalse(node_cache.clean_up())
res = [tuple(row) for row in self.db.execute( res = [tuple(row) for row in node_cache.model_query(
'select finished_at, error from nodes').fetchall()] models.Node.finished_at, models.Node.error).all()]
self.assertEqual([(None, None)], res) self.assertEqual([(None, None)], res)
self.assertEqual(len(self.macs), len(self.db.execute( self.assertEqual(len(self.macs),
'select * from attributes').fetchall())) node_cache.model_query(models.Attribute).count())
self.assertEqual(1, len(self.db.execute( self.assertEqual(1, node_cache.model_query(models.Option).count())
'select * from options').fetchall()))
@mock.patch.object(time, 'time') @mock.patch.object(time, 'time')
def test_timeout(self, time_mock): def test_timeout(self, time_mock):
# Add a finished node to confirm we don't try to timeout it # Add a finished node to confirm we don't try to timeout it
with self.db: time_mock.return_value = self.started_at
self.db.execute('insert into nodes(uuid, started_at, finished_at) ' session = node_cache.get_session()
'values(?, ?, ?)', (self.uuid + '1', with session.begin():
self.started_at, models.Node(uuid=self.uuid + '1', started_at=self.started_at,
self.started_at + 60)) finished_at=self.started_at + 60).save(session)
CONF.set_override('timeout', 99) CONF.set_override('timeout', 99)
time_mock.return_value = self.started_at + 100 time_mock.return_value = (self.started_at + 100)
self.assertEqual([self.uuid], node_cache.clean_up()) self.assertEqual([self.uuid], node_cache.clean_up())
res = [tuple(row) for row in self.db.execute( res = [(row.finished_at, row.error) for row in
'select finished_at, error from nodes order by uuid').fetchall()] node_cache.model_query(models.Node).all()]
self.assertEqual([(self.started_at + 100, 'Introspection timeout'), self.assertEqual([(self.started_at + 100, 'Introspection timeout'),
(self.started_at + 60, None)], (self.started_at + 60, None)],
res) res)
self.assertEqual([], self.db.execute( self.assertEqual([], node_cache.model_query(models.Attribute).all())
'select * from attributes').fetchall()) self.assertEqual([], node_cache.model_query(models.Option).all())
self.assertEqual([], self.db.execute(
'select * from options').fetchall())
def test_old_status(self): def test_old_status(self):
CONF.set_override('node_status_keep_time', 42) CONF.set_override('node_status_keep_time', 42)
with self.db: session = node_cache.get_session()
self.db.execute('update nodes set finished_at=?', with session.begin():
(time.time() - 100,)) node_cache.model_query(models.Node).update(
{'finished_at': time.time() - 100})
self.assertEqual([], node_cache.clean_up()) self.assertEqual([], node_cache.clean_up())
self.assertEqual([], self.db.execute( self.assertEqual([], node_cache.model_query(models.Node).all())
'select * from nodes').fetchall())
class TestNodeCacheGetNode(test_base.NodeTest): class TestNodeCacheGetNode(test_base.NodeTest):
def test_ok(self): def test_ok(self):
started_at = time.time() - 42 started_at = time.time() - 42
with self.db: session = node_cache.get_session()
self.db.execute('insert into nodes(uuid, started_at) ' with session.begin():
'values(?, ?)', (self.uuid, started_at)) models.Node(uuid=self.uuid, started_at=started_at).save(session)
info = node_cache.get_node(self.uuid) info = node_cache.get_node(self.uuid)
self.assertEqual(self.uuid, info.uuid) self.assertEqual(self.uuid, info.uuid)
@ -244,53 +249,43 @@ class TestNodeInfoFinished(test_base.NodeTest):
bmc_address='1.2.3.4', bmc_address='1.2.3.4',
mac=self.macs) mac=self.macs)
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14)
with self.db: session = node_cache.get_session()
self.db.execute('insert into options(uuid, name, value) ' with session.begin():
'values(?, ?, ?)', (self.uuid, 'foo', 'bar')) models.Option(uuid=self.uuid, name='foo', value='bar').save(
session)
def test_success(self): def test_success(self):
self.node_info.finished() self.node_info.finished()
self.assertEqual((42.0, None), tuple(self.db.execute( session = node_cache.get_session()
'select finished_at, error from nodes').fetchone())) with session.begin():
self.assertEqual([], self.db.execute( self.assertEqual((42.0, None),
"select * from attributes").fetchall()) tuple(node_cache.model_query(
self.assertEqual([], self.db.execute( models.Node.finished_at,
"select * from options").fetchall()) models.Node.error).first()))
self.assertEqual([], node_cache.model_query(models.Attribute,
session=session).all())
self.assertEqual([], node_cache.model_query(models.Option,
session=session).all())
def test_error(self): def test_error(self):
self.node_info.finished(error='boom') self.node_info.finished(error='boom')
self.assertEqual((42.0, 'boom'), tuple(self.db.execute( self.assertEqual((42.0, 'boom'),
'select finished_at, error from nodes').fetchone())) tuple(node_cache.model_query(models.Node.finished_at,
self.assertEqual([], self.db.execute( models.Node.error).first()))
"select * from attributes").fetchall()) self.assertEqual([], node_cache.model_query(models.Attribute).all())
self.assertEqual([], self.db.execute( self.assertEqual([], node_cache.model_query(models.Option).all())
"select * from options").fetchall())
class TestInit(unittest.TestCase): class TestInit(unittest.TestCase):
def setUp(self): def setUp(self):
super(TestInit, self).setUp() super(TestInit, self).setUp()
node_cache._DB_NAME = None
def test_ok(self): def test_ok(self):
with tempfile.NamedTemporaryFile() as db_file:
CONF.set_override('database', db_file.name)
node_cache.init() node_cache.init()
session = node_cache.get_session()
self.assertIsNotNone(node_cache._DB_NAME) node_cache.model_query(models.Node, session=session)
# Verify that table exists
node_cache._db().execute("select * from nodes")
def test_create_dir(self):
temp = tempfile.mkdtemp()
CONF.set_override('database', os.path.join(temp, 'dir', 'file'))
node_cache.init()
def test_no_database(self):
CONF.set_override('database', '')
self.assertRaises(SystemExit, node_cache.init)
class TestNodeInfoOptions(test_base.NodeTest): class TestNodeInfoOptions(test_base.NodeTest):
@ -300,14 +295,15 @@ class TestNodeInfoOptions(test_base.NodeTest):
bmc_address='1.2.3.4', bmc_address='1.2.3.4',
mac=self.macs) mac=self.macs)
self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14)
with self.db: session = node_cache.get_session()
self.db.execute('insert into options(uuid, name, value) ' with session.begin():
'values(?, ?, ?)', (self.uuid, 'foo', '"bar"')) models.Option(uuid=self.uuid, name='foo', value='"bar"').save(
session)
def test_get(self): def test_get(self):
self.assertEqual({'foo': 'bar'}, self.node_info.options) self.assertEqual({'foo': 'bar'}, self.node_info.options)
# should be cached # should be cached
self.assertIs(self.node_info.options, self.node_info.options) self.assertEqual(self.node_info.options, self.node_info.options)
# invalidate cache # invalidate cache
old_options = self.node_info.options old_options = self.node_info.options
self.node_info.invalidate_cache() self.node_info.invalidate_cache()

View File

@ -10,6 +10,7 @@ python-ironicclient>=0.6.0
python-keystoneclient>=1.6.0 python-keystoneclient>=1.6.0
python-openstackclient>=1.0.3 python-openstackclient>=1.0.3
oslo.config>=1.11.0 # Apache-2.0 oslo.config>=1.11.0 # Apache-2.0
oslo.db>=1.12.0 # Apache-2.0
oslo.i18n>=1.5.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0
oslo.utils>=1.6.0 # Apache-2.0 oslo.utils>=1.6.0 # Apache-2.0
six>=1.9.0 six>=1.9.0

View File

@ -47,4 +47,5 @@ commands =
--output-file example.conf \ --output-file example.conf \
--namespace ironic_inspector \ --namespace ironic_inspector \
--namespace keystonemiddleware.auth_token \ --namespace keystonemiddleware.auth_token \
--namespace ironic_inspector.common.swift --namespace ironic_inspector.common.swift \
--namespace oslo.db