Remove sqlalchemy-migrate
sqlalchemy-migrate does not support SQLAlchemy 2.x and we're not going to invest the time in changing this. Remove integration of sqlalchemy-migrate in oslo.db, allowing us to support SQLAlchemy 2.x fully. Note that we do not remove the 'migration_cli' module entirely yet. While this is deprecated, it is possible to use this with alembic. New users shouldn't be switching to it, but any existing users can continue to use this module for some time yet. Change-Id: Ic3d6bd318038d723b0d50d39e45f8e26289e9a57 Sem-Ver: api-break
This commit is contained in:
parent
877bcfc6a6
commit
94d6e24ca1
@ -1,183 +0,0 @@
|
||||
# coding=utf-8
|
||||
|
||||
# Copyright (c) 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Base on code in migrate/changeset/databases/sqlite.py which is under
|
||||
# the following license:
|
||||
#
|
||||
# The MIT License
|
||||
#
|
||||
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
|
||||
import os
|
||||
|
||||
from debtcollector import removals
|
||||
from migrate import exceptions as versioning_exceptions
|
||||
from migrate.versioning import api as versioning_api
|
||||
from migrate.versioning.repository import Repository
|
||||
import sqlalchemy
|
||||
|
||||
from oslo_db._i18n import _
|
||||
from oslo_db import exception
|
||||
|
||||
|
||||
_removed_msg = (
|
||||
'sqlalchemy-migrate support in oslo_db is deprecated; consider '
|
||||
'migrating to alembic'
|
||||
)
|
||||
|
||||
|
||||
@removals.remove(message=_removed_msg, version='8.3.0')
def db_sync(engine, abs_path, version=None, init_version=0, sanity_check=True):
    """Upgrade or downgrade a database.

    Function runs the upgrade() or downgrade() functions in change scripts.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository.
    :param version: Database will upgrade/downgrade until this version.
                    If None - database will update to the latest
                    available version.
    :param init_version: Initial database version
    :param sanity_check: Require schema sanity checking for all tables
    :raises exception.DBMigrationError: if ``version`` is not an integer,
        the repository path does not exist, or the underlying
        sqlalchemy-migrate operation fails.
    """

    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.DBMigrationError(_("version should be an integer"))

    current_version = db_version(engine, abs_path, init_version)
    repository = _find_migrate_repo(abs_path)
    if sanity_check:
        _db_schema_sanity_check(engine)

    # NOTE: previously only the upgrade path converted arbitrary errors to
    # DBMigrationError while downgrade let sqlalchemy-migrate exceptions
    # escape raw; wrap both directions so callers see a consistent
    # exception type.
    try:
        if version is None or version > current_version:
            migration = versioning_api.upgrade(engine, repository, version)
        else:
            migration = versioning_api.downgrade(engine, repository, version)
    except exception.DBMigrationError:
        raise
    except Exception as ex:
        raise exception.DBMigrationError(ex)

    if sanity_check:
        _db_schema_sanity_check(engine)

    return migration
||||
|
||||
|
||||
def _db_schema_sanity_check(engine):
    """Ensure all database tables were created with required parameters.

    Only MySQL is actually checked: every table, apart from the migration
    bookkeeping tables, must use a utf8 collation. All other backends pass
    trivially.

    :param engine: SQLAlchemy engine instance for a given database
    :raises ValueError: if any MySQL table uses a non-utf8 collation
    """

    if engine.name == 'mysql':
        onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
                        'from information_schema.TABLES '
                        'where TABLE_SCHEMA=%s and '
                        'TABLE_COLLATION NOT LIKE \'%%utf8%%\'')

        # NOTE(morganfainberg): exclude the sqlalchemy-migrate and alembic
        # versioning tables from the tables we need to verify utf8 status on.
        # Non-standard table names are not supported.
        EXCLUDED_TABLES = ['migrate_version', 'alembic_version']

        table_names = [res[0] for res in
                       engine.execute(onlyutf8_sql, engine.url.database) if
                       res[0].lower() not in EXCLUDED_TABLES]

        # Idiomatic truthiness test instead of len(...) > 0.
        if table_names:
            raise ValueError(_('Tables "%s" have non utf8 collation, '
                               'please make sure all tables are CHARSET=utf8'
                               ) % ','.join(table_names))
|
||||
|
||||
|
||||
@removals.remove(message=_removed_msg, version='8.3.0')
def db_version(engine, abs_path, init_version):
    """Show the current version of the repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
    """
    repo = _find_migrate_repo(abs_path)
    try:
        return versioning_api.db_version(engine, repo)
    except versioning_exceptions.DatabaseNotControlledError:
        # The version table is missing; inspect the schema to decide whether
        # it is safe to stamp the database with the initial version.
        metadata = sqlalchemy.MetaData()
        metadata.reflect(bind=engine)
        existing = metadata.tables

        stampable = (not existing or
                     'alembic_version' in existing or
                     'migrate_version' in existing)
        if not stampable:
            raise exception.DBMigrationError(
                _("The database is not under version control, but has "
                  "tables. Please stamp the current version of the schema "
                  "manually."))

        db_version_control(engine, abs_path, version=init_version)
        return versioning_api.db_version(engine, repo)
|
||||
|
||||
|
||||
@removals.remove(message=_removed_msg, version='8.3.0')
def db_version_control(engine, abs_path, version=None):
    """Mark a database as under this repository's version control.

    Once a database is under version control, schema changes should
    only be done via change scripts in this repository.

    :param engine: SQLAlchemy engine instance for a given database
    :param abs_path: Absolute path to migrate repository
    :param version: Initial database version
    """
    repo = _find_migrate_repo(abs_path)
    try:
        versioning_api.version_control(engine, repo, version)
    except versioning_exceptions.InvalidVersionError as ex:
        msg = "Invalid version : %s" % ex
        raise exception.DBMigrationError(msg)
    except versioning_exceptions.DatabaseAlreadyControlledError:
        raise exception.DBMigrationError("Database is already controlled.")
    return version
|
||||
|
||||
|
||||
def _find_migrate_repo(abs_path):
    """Get the project's change script repository.

    :param abs_path: Absolute path to migrate repository
    :returns: a ``migrate.versioning.repository.Repository`` for the path
    :raises exception.DBMigrationError: if the path does not exist
    """
    if os.path.exists(abs_path):
        return Repository(abs_path)
    raise exception.DBMigrationError("Path %s not found" % abs_path)
|
@ -1,79 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from migrate.versioning import version as migrate_version
|
||||
|
||||
from oslo_db.sqlalchemy import migration
|
||||
from oslo_db.sqlalchemy.migration_cli import ext_base
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MigrateExtension(ext_base.MigrationExtensionBase):
    """Extension to provide sqlalchemy-migrate features.

    :param migration_config: Stores specific configuration for migrations
    :type migration_config: dict
    """

    # Relative ordering among migration_cli extensions.
    order = 1

    def __init__(self, engine, migration_config):
        self.engine = engine
        # Path to the sqlalchemy-migrate repository; '' leaves the
        # extension disabled (see `enabled`).
        self.repository = migration_config.get('migration_repo_path', '')
        self.init_version = migration_config.get('init_version', 0)

    @property
    def enabled(self):
        # Usable only when the configured repository path actually exists.
        return os.path.exists(self.repository)

    def upgrade(self, version):
        # 'head' is the alembic spelling of "latest"; sqlalchemy-migrate
        # expects None for the same meaning.
        version = None if version == 'head' else version
        return migration.db_sync(
            self.engine, self.repository, version,
            init_version=self.init_version)

    def downgrade(self, version):
        try:
            # version for migrate should be valid int - else skip
            if version in ('base', None):
                # 'base'/None mean "all the way back to the initial version".
                version = self.init_version
            version = int(version)
            return migration.db_sync(
                self.engine, self.repository, version,
                init_version=self.init_version)
        except ValueError:
            # Non-integer revision: log and let the ValueError propagate to
            # the caller.
            LOG.error(
                'Migration number for migrate plugin must be valid '
                'integer or empty, if you want to downgrade '
                'to initial state'
            )
            raise

    def version(self):
        # Current database version as reported by sqlalchemy-migrate.
        return migration.db_version(
            self.engine, self.repository, init_version=self.init_version)

    def has_revision(self, rev_id):
        collection = migrate_version.Collection(self.repository)
        try:
            collection.version(rev_id)
            return True
        except (KeyError, ValueError):
            # NOTE(breton): migrate raises KeyError if an int is passed but not
            # found in the list of revisions and ValueError if non-int is
            # passed. Both mean there is no requested revision.
            return False
|
@ -27,229 +27,12 @@ import sqlalchemy.exc
|
||||
import sqlalchemy.sql.expression as expr
|
||||
import sqlalchemy.types as types
|
||||
|
||||
from oslo_db import exception as exc
|
||||
from oslo_db.sqlalchemy import provision
|
||||
from oslo_db.sqlalchemy import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WalkVersionsMixin(object, metaclass=abc.ABCMeta):
    """Test mixin to check upgrade and downgrade ability of migration.

    This is only suitable for testing of migrate_ migration scripts. An
    abstract class mixin. `INIT_VERSION`, `REPOSITORY` and `migration_api`
    attributes must be implemented in subclasses.

    .. _auxiliary-dynamic-methods:

    Auxiliary Methods:

    `migrate_up` and `migrate_down` instance methods of the class can be
    used with auxiliary methods named `_pre_upgrade_<revision_id>`,
    `_check_<revision_id>`, `_post_downgrade_<revision_id>`. The methods
    intended to check applied changes for correctness of data operations.
    This methods should be implemented for every particular revision
    which you want to check with data. Implementation recommendations for
    `_pre_upgrade_<revision_id>`, `_check_<revision_id>`,
    `_post_downgrade_<revision_id>` implementation:

        * `_pre_upgrade_<revision_id>`: provide a data appropriate to
          a next revision. Should be used an id of revision which
          going to be applied.

        * `_check_<revision_id>`: Insert, select, delete operations
          with newly applied changes. The data provided by
          `_pre_upgrade_<revision_id>` will be used.

        * `_post_downgrade_<revision_id>`: check for absence
          (inability to use) changes provided by reverted revision.

    Execution order of auxiliary methods when revision is upgrading:

        `_pre_upgrade_###` => `upgrade` => `_check_###`

    Execution order of auxiliary methods when revision is downgrading:

        `downgrade` => `_post_downgrade_###`

    .. _migrate: https://sqlalchemy-migrate.readthedocs.org/en/latest/

    """

    @property
    @abc.abstractmethod
    def INIT_VERSION(self):
        """Initial version of a migration repository.

        Can be different from 0, if a migrations were squashed.

        :rtype: int
        """
        pass

    @property
    @abc.abstractmethod
    def REPOSITORY(self):
        """Allows basic manipulation with migration repository.

        :returns: `migrate.versioning.repository.Repository` subclass.
        """
        pass

    @property
    @abc.abstractmethod
    def migration_api(self):
        """Provides API for upgrading, downgrading and version manipulations.

        :returns: `migrate.api` or overloaded analog.
        """
        pass

    @property
    @abc.abstractmethod
    def migrate_engine(self):
        """Provides engine instance.

        Should be the same instance as used when migrations are applied. In
        most cases, the `engine` attribute provided by the test class in a
        `setUp` method will work.

        Example of implementation:

            def migrate_engine(self):
                return self.engine

        :returns: sqlalchemy engine instance
        """
        pass

    def walk_versions(self, snake_walk=False, downgrade=True):
        """Check if migration upgrades and downgrades successfully.

        Determine the latest version script from the repo, then
        upgrade from 1 through to the latest, with no data
        in the databases. This just checks that the schema itself
        upgrades successfully.

        `walk_versions` calls `migrate_up` and `migrate_down` with
        `with_data` argument to check changes with data, but these methods
        can be called without any extra check outside of `walk_versions`
        method.

        :param snake_walk: enables checking that each individual migration can
            be upgraded/downgraded by itself.

            If we have ordered migrations 123abc, 456def, 789ghi and we run
            upgrading with the `snake_walk` argument set to `True`, the
            migrations will be applied in the following order::

                `123abc => 456def => 123abc =>
                 456def => 789ghi => 456def => 789ghi`

        :type snake_walk: bool
        :param downgrade: Check downgrade behavior if True.
        :type downgrade: bool
        """

        # Place the database under version control
        self.migration_api.version_control(self.migrate_engine,
                                           self.REPOSITORY,
                                           self.INIT_VERSION)
        self.assertEqual(self.INIT_VERSION,
                         self.migration_api.db_version(self.migrate_engine,
                                                       self.REPOSITORY))

        LOG.debug('latest version is %s', self.REPOSITORY.latest)
        versions = range(int(self.INIT_VERSION) + 1,
                         int(self.REPOSITORY.latest) + 1)

        for version in versions:
            # upgrade -> downgrade -> upgrade
            self.migrate_up(version, with_data=True)
            if snake_walk:
                downgraded = self.migrate_down(version - 1, with_data=True)
                if downgraded:
                    self.migrate_up(version)

        if downgrade:
            # Now walk it back down to 0 from the latest, testing
            # the downgrade paths.
            for version in reversed(versions):
                # downgrade -> upgrade -> downgrade
                downgraded = self.migrate_down(version - 1)

                if snake_walk and downgraded:
                    self.migrate_up(version)
                    self.migrate_down(version - 1)

    def migrate_down(self, version, with_data=False):
        """Migrate down to a previous version of the db.

        :param version: id of revision to downgrade.
        :type version: str
        :keyword with_data: Whether to verify the absence of changes from
            migration(s) being downgraded, see
            :ref:`Auxiliary Methods <auxiliary-dynamic-methods>`.
        :type with_data: Bool
        """

        try:
            self.migration_api.downgrade(self.migrate_engine,
                                         self.REPOSITORY, version)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False

        self.assertEqual(version, self.migration_api.db_version(
            self.migrate_engine, self.REPOSITORY))

        # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
        # version). So if we have any downgrade checks, they need to be run for
        # the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%03d" % (version + 1), None)
            if post_downgrade:
                post_downgrade(self.migrate_engine)

        return True

    def migrate_up(self, version, with_data=False):
        """Migrate up to a new version of the db.

        :param version: id of revision to upgrade.
        :type version: str
        :keyword with_data: Whether to verify the applied changes with data,
            see :ref:`Auxiliary Methods <auxiliary-dynamic-methods>`.
        :type with_data: Bool
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%03d" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(self.migrate_engine)

            self.migration_api.upgrade(self.migrate_engine,
                                       self.REPOSITORY, version)
            self.assertEqual(version,
                             self.migration_api.db_version(self.migrate_engine,
                                                           self.REPOSITORY))
            if with_data:
                check = getattr(self, "_check_%03d" % version, None)
                if check:
                    check(self.migrate_engine, data)
        except exc.DBMigrationError:
            msg = "Failed to migrate to version %(ver)s on engine %(eng)s"
            LOG.error(msg, {"ver": version, "eng": self.migrate_engine})
            raise
|
||||
|
||||
|
||||
class ModelsMigrationsSync(object, metaclass=abc.ABCMeta):
|
||||
"""A helper class for comparison of DB migration scripts and models.
|
||||
|
||||
|
@ -39,13 +39,11 @@ from sqlalchemy import Index
|
||||
from sqlalchemy import inspect
|
||||
from sqlalchemy import Integer
|
||||
from sqlalchemy import MetaData
|
||||
from sqlalchemy import PrimaryKeyConstraint
|
||||
from sqlalchemy.sql.expression import cast
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
from sqlalchemy.sql import text
|
||||
from sqlalchemy import String
|
||||
from sqlalchemy import Table
|
||||
from sqlalchemy.types import NullType
|
||||
|
||||
from oslo_db._i18n import _
|
||||
from oslo_db import exception
|
||||
@ -551,155 +549,6 @@ def _restore_indexes_on_deleted_columns(engine, table_name, indexes):
|
||||
new_index.create(engine)
|
||||
|
||||
|
||||
@debtcollector.removals.remove(
    message='This API is intended for use with sqlalchemy-migrate, support '
    'for which is deprecated for removal; it will be removed in a future '
    'release',
    version='10.1.0',
)
def change_deleted_column_type_to_boolean(engine, table_name,
                                          **col_name_col_instance):
    """Convert a table's 'deleted' column to Boolean.

    Rows where ``deleted == id`` become ``deleted=True``; everything else
    defaults to ``False``. SQLite cannot drop a column in place, so it is
    handled by a table-rebuild helper instead.

    :param engine: SQLAlchemy engine to operate on
    :param table_name: name of the table to alter
    :param col_name_col_instance: replacement Column objects for columns
        whose type cannot be reflected (NullType)
    """
    if engine.name == "sqlite":
        return _change_deleted_column_type_to_boolean_sqlite(
            engine, table_name, **col_name_col_instance)
    indexes = get_indexes(engine, table_name)
    table = get_table(engine, table_name)

    old_deleted = Column('old_deleted', Boolean, default=False)
    # NOTE(review): sqlalchemy-migrate's monkeypatched Column.create()
    # appears to require bound metadata; bind temporarily and always
    # unbind again — TODO confirm against sqlalchemy-migrate docs.
    table.metadata.bind = engine
    try:
        old_deleted.create(table, populate_default=False)
    finally:
        table.metadata.bind = None

    with engine.connect() as conn, conn.begin():
        # Mark rows currently flagged by the "deleted == id" convention.
        conn.execute(
            table.update().where(
                table.c.deleted == table.c.id
            ).values(old_deleted=True)
        )

    table.metadata.bind = engine
    try:
        # Drop the old id-typed column and rename the Boolean replacement.
        table.c.deleted.drop()
        table.c.old_deleted.alter(name="deleted")
    finally:
        table.metadata.bind = None

    _restore_indexes_on_deleted_columns(engine, table_name, indexes)
|
||||
|
||||
|
||||
def _change_deleted_column_type_to_boolean_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """SQLite variant: rebuild the table with a Boolean 'deleted' column.

    SQLite cannot alter column types in place, so a ``<name>__tmp__`` table
    is created with the new schema, the old table is dropped, the new one
    renamed, and 'deleted' values are fixed up afterwards.
    """
    table = get_table(engine, table_name)
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Unreflectable type: caller must supply a replacement.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                # FIXME(stephenfin): We shouldn't be using this private API;
                # figure out how else to copy an arbitrary column schema
                column_copy = column._copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)

    # FIXME(stephenfin): We shouldn't be using this private API;
    # figure out how else to copy an arbitrary column schema
    # NOTE(stephenfin): We drop PrimaryKeyConstraint-type constraints since
    # these duplicate the 'primary_key=True' attribute on the speicified
    # column(s). This technically breaks things when the primary key covers
    # multiple columns but that's okay: these are deprecated APIs
    constraints = [
        constraint._copy() for constraint in table.constraints
        if not isinstance(constraint, PrimaryKeyConstraint)
    ]

    with engine.connect() as conn:
        meta = table.metadata
        new_table = Table(
            table_name + "__tmp__", meta,
            *(columns + constraints))

        with conn.begin():
            new_table.create(conn)

        indexes = []
        for index in get_indexes(engine, table_name):
            column_names = [new_table.c[c] for c in index['column_names']]
            indexes.append(
                Index(index["name"], *column_names, unique=index["unique"])
            )

        # Build the SELECT list used to translate 'deleted' during the copy;
        # the deleted slot becomes the boolean expression `deleted == id`.
        # NOTE(review): no data-copy statement is visible in this view —
        # confirm whether an insert-from-select step exists elsewhere.
        c_select = []
        for c in table.c:
            if c.name != "deleted":
                c_select.append(c)
            else:
                c_select.append(table.c.deleted == table.c.id)

        with conn.begin():
            table.drop(conn)
            for index in indexes:
                index.create(conn)

        table.metadata.bind = engine
        try:
            new_table.rename(table_name)
        finally:
            table.metadata.bind = None

        with conn.begin():
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == new_table.c.id
                ).values(deleted=True)
            )
|
||||
|
||||
|
||||
@debtcollector.removals.remove(
    message='This API is intended for use with sqlalchemy-migrate, support '
    'for which is deprecated for removal; it will be removed in a future '
    'release',
    version='10.1.0',
)
def change_deleted_column_type_to_id_type(engine, table_name,
                                          **col_name_col_instance):
    """Convert a table's 'deleted' column to the same type as its 'id'.

    Soft-deleted rows are re-encoded as ``deleted = id``; live rows get the
    type's default "not deleted" value. SQLite is handled by a
    table-rebuild helper.

    :param engine: SQLAlchemy engine to operate on
    :param table_name: name of the table to alter
    :param col_name_col_instance: replacement Column objects for columns
        whose type cannot be reflected (NullType)
    """
    if engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(
            engine, table_name, **col_name_col_instance)
    indexes = get_indexes(engine, table_name)
    table = get_table(engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    # NOTE(review): temporary metadata binding appears required by the
    # sqlalchemy-migrate monkeypatched DDL helpers — TODO confirm.
    table.metadata.bind = engine
    try:
        new_deleted.create(table, populate_default=True)
    finally:
        table.metadata.bind = None

    table.metadata.bind = engine
    try:
        with engine.connect() as conn, conn.begin():
            deleted = True  # workaround for pyflakes
            conn.execute(
                table.update().where(
                    table.c.deleted == deleted
                ).values(new_deleted=table.c.id)
            )

        # Replace the Boolean column with the id-typed one.
        table.c.deleted.drop()
        table.c.new_deleted.alter(name="deleted")

        _restore_indexes_on_deleted_columns(engine, table_name, indexes)
    finally:
        table.metadata.bind = None
|
||||
|
||||
|
||||
def _is_deleted_column_constraint(constraint):
|
||||
# NOTE(boris-42): There is no other way to check is CheckConstraint
|
||||
# associated with deleted column.
|
||||
@ -713,92 +562,6 @@ def _is_deleted_column_constraint(constraint):
|
||||
return bool(re.match(r".*deleted in \(.*\)", sqltext, re.I))
|
||||
|
||||
|
||||
def _change_deleted_column_type_to_id_type_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """SQLite variant: rebuild the table with an id-typed 'deleted' column."""
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
    # constraints in sqlite DB and our `deleted` column has two check
    # constraints. There is only one way to remove these constraints:
    #
    # 1) Create new table with the same columns, constraints and indexes.
    #    (except deleted column).
    # 2) Copy all data from old to new table.
    # 3) Drop old table.
    # 4) Rename new table to old table name.
    meta = MetaData()
    table = Table(table_name, meta, autoload_with=engine)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # Unreflectable type: caller must supply a replacement.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                # FIXME(stephenfin): We shouldn't be using this private API;
                # figure out how else to copy an arbitrary column schema
                column_copy = column._copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    constraints = []
    for constraint in table.constraints:
        # Skip the CHECK constraints tied to the old Boolean 'deleted'
        # column, and primary-key constraints (already implied by columns).
        if not (
            _is_deleted_column_constraint(constraint) or
            isinstance(constraint, PrimaryKeyConstraint)
        ):
            # FIXME(stephenfin): We shouldn't be using this private API;
            # figure out how else to copy an arbitrary constraint schema
            constraints.append(constraint._copy())

    with engine.connect() as conn:
        # we need separate transactions, since we must create the table before
        # we can copy entries into it (later)
        with conn.begin():
            new_table = Table(
                table_name + "__tmp__", meta,
                *(columns + constraints),
            )
            new_table.create(conn)

        indexes = []
        for index in get_indexes(engine, table_name):
            column_names = [new_table.c[c] for c in index['column_names']]
            indexes.append(
                Index(index["name"], *column_names, unique=index["unique"])
            )

        with conn.begin():
            table.drop(conn)
            for index in indexes:
                index.create(conn)

        with conn.begin():
            new_table.metadata.bind = engine
            try:
                new_table.rename(table_name)
            finally:
                new_table.metadata.bind = None

            # Re-encode soft-deleted rows as deleted == id.
            deleted = True  # workaround for pyflakes
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == deleted
                ).values(deleted=new_table.c.id)
            )

            # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
            deleted = False  # workaround for pyflakes
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == deleted
                ).values(deleted=default_deleted_value)
            )
|
||||
|
||||
|
||||
def get_db_connection_info(conn_pieces):
|
||||
database = conn_pieces.path.strip('/')
|
||||
loc_pieces = conn_pieces.netloc.split('@')
|
||||
|
@ -24,49 +24,32 @@ class WarningsFixture(fixtures.Fixture):
|
||||
|
||||
self._original_warning_filters = warnings.filters[:]
|
||||
|
||||
# Enable deprecation warnings
|
||||
|
||||
warnings.simplefilter('once', DeprecationWarning)
|
||||
|
||||
# Except things we've deprecated but are still testing until removal
|
||||
# Except for things we've deprecated but are still testing until
|
||||
# removal
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
category=DeprecationWarning,
|
||||
module='oslo_db')
|
||||
module='oslo_db',
|
||||
)
|
||||
|
||||
# Enable generic warnings to ensure we're not doing anything odd
|
||||
|
||||
warnings.filterwarnings(
|
||||
'error',
|
||||
category=sqla_exc.SAWarning)
|
||||
category=sqla_exc.SAWarning,
|
||||
)
|
||||
|
||||
# Enable deprecation warnings to capture upcoming SQLAlchemy changes
|
||||
|
||||
warnings.filterwarnings(
|
||||
'error',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
# ...but filter things that aren't our fault
|
||||
|
||||
# FIXME(stephenfin): These are caused by sqlalchemy-migrate, not us,
|
||||
# and should be removed when we drop support for that library
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
message=r'Passing a string to Connection.execute\(\) .*',
|
||||
module='migrate',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
warnings.filterwarnings(
|
||||
'once',
|
||||
message=r'The current statement is being autocommitted .*',
|
||||
module='migrate',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
|
||||
warnings.filterwarnings(
|
||||
'ignore',
|
||||
message=r'The Engine.execute\(\) method is considered legacy .*',
|
||||
module='migrate',
|
||||
category=sqla_exc.SADeprecationWarning)
|
||||
category=sqla_exc.SADeprecationWarning,
|
||||
)
|
||||
|
||||
self.addCleanup(self._reset_warning_filters)
|
||||
|
||||
|
@ -17,7 +17,6 @@ import sqlalchemy
|
||||
|
||||
from oslo_db import exception
|
||||
from oslo_db.sqlalchemy.migration_cli import ext_alembic
|
||||
from oslo_db.sqlalchemy.migration_cli import ext_migrate
|
||||
from oslo_db.sqlalchemy.migration_cli import manager
|
||||
from oslo_db.tests import base as test_base
|
||||
|
||||
@ -127,88 +126,6 @@ class TestAlembicExtension(test_base.BaseTestCase):
|
||||
'test')
|
||||
|
||||
|
||||
@mock.patch(('oslo_db.sqlalchemy.migration_cli.'
             'ext_migrate.migration'))
class TestMigrateExtension(test_base.BaseTestCase):
    """Tests for the sqlalchemy-migrate migration_cli plugin.

    The class-level patch replaces the ``migration`` module used by
    ``ext_migrate``; each test method therefore receives that mock as its
    second positional argument.
    """

    def setUp(self):
        self.migration_config = {'migration_repo_path': '.',
                                 'db_url': 'sqlite://'}
        self.engine = sqlalchemy.create_engine(self.migration_config['db_url'])
        self.migrate = ext_migrate.MigrateExtension(
            self.engine, self.migration_config)
        super(TestMigrateExtension, self).setUp()

    def test_check_enabled_true(self, migration):
        # '.' always exists, so the extension reports itself enabled.
        self.assertTrue(self.migrate.enabled)

    def test_check_enabled_false(self, migration):
        # An empty repo path never exists, disabling the extension.
        self.migration_config['migration_repo_path'] = ''
        migrate = ext_migrate.MigrateExtension(
            self.engine, self.migration_config)
        self.assertFalse(migrate.enabled)

    def test_upgrade_head(self, migration):
        # 'head' must be translated to None for sqlalchemy-migrate.
        self.migrate.upgrade('head')
        migration.db_sync.assert_called_once_with(
            self.migrate.engine, self.migrate.repository, None, init_version=0)

    def test_upgrade_normal(self, migration):
        self.migrate.upgrade(111)
        migration.db_sync.assert_called_once_with(
            mock.ANY, self.migrate.repository, 111, init_version=0)

    def test_downgrade_init_version_from_base(self, migration):
        # 'base' downgrades to the configured initial version.
        self.migrate.downgrade('base')
        migration.db_sync.assert_called_once_with(
            self.migrate.engine, self.migrate.repository, mock.ANY,
            init_version=mock.ANY)

    def test_downgrade_init_version_from_none(self, migration):
        # None behaves like 'base'.
        self.migrate.downgrade(None)
        migration.db_sync.assert_called_once_with(
            self.migrate.engine, self.migrate.repository, mock.ANY,
            init_version=mock.ANY)

    def test_downgrade_normal(self, migration):
        self.migrate.downgrade(101)
        migration.db_sync.assert_called_once_with(
            self.migrate.engine, self.migrate.repository, 101, init_version=0)

    def test_version(self, migration):
        self.migrate.version()
        migration.db_version.assert_called_once_with(
            self.migrate.engine, self.migrate.repository, init_version=0)

    def test_change_init_version(self, migration):
        # A non-default init_version is both the downgrade target and the
        # init_version passed through to db_sync.
        self.migration_config['init_version'] = 101
        migrate = ext_migrate.MigrateExtension(
            self.engine, self.migration_config)
        migrate.downgrade(None)
        migration.db_sync.assert_called_once_with(
            migrate.engine,
            self.migrate.repository,
            self.migration_config['init_version'],
            init_version=self.migration_config['init_version'])

    def test_has_revision(self, command):
        # NOTE(review): the injected mock is named 'command' here (vs
        # 'migration' elsewhere); it is unused because migrate_version is
        # patched locally instead.
        with mock.patch(('oslo_db.sqlalchemy.migration_cli.'
                         'ext_migrate.migrate_version')) as mocked:
            self.migrate.has_revision('test')
            mocked.Collection().version.assert_called_once_with('test')
|
||||
# tip of the branch should always be True
|
||||
self.assertIs(True, self.migrate.has_revision(None))
|
||||
|
||||
def test_has_revision_negative(self, command):
|
||||
with mock.patch(('oslo_db.sqlalchemy.migration_cli.'
|
||||
'ext_migrate.migrate_version')) as mocked:
|
||||
mocked.Collection().version.side_effect = ValueError
|
||||
self.assertIs(False, self.migrate.has_revision('test'))
|
||||
mocked.Collection().version.assert_called_once_with('test')
|
||||
# relative revision, should be False for migrate
|
||||
self.assertIs(False, self.migrate.has_revision('+1'))
|
||||
|
||||
|
||||
class TestMigrationManager(test_base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
@ -1,287 +0,0 @@
|
||||
# Copyright 2013 Mirantis Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from unittest import mock
|
||||
|
||||
from migrate import exceptions as migrate_exception
|
||||
from migrate.versioning import api as versioning_api
|
||||
import sqlalchemy
|
||||
|
||||
from oslo_db import exception as db_exception
|
||||
from oslo_db.sqlalchemy import migration
|
||||
from oslo_db.tests.sqlalchemy import base as test_base
|
||||
from oslo_db.tests import utils as test_utils
|
||||
|
||||
|
||||
class TestMigrationCommon(test_base._DbTestCase):
|
||||
def setUp(self):
|
||||
super(TestMigrationCommon, self).setUp()
|
||||
|
||||
migration._REPOSITORY = None
|
||||
self.path = tempfile.mkdtemp('test_migration')
|
||||
self.path1 = tempfile.mkdtemp('test_migration')
|
||||
self.return_value = '/home/openstack/migrations'
|
||||
self.return_value1 = '/home/extension/migrations'
|
||||
self.init_version = 1
|
||||
self.test_version = 123
|
||||
|
||||
self.patcher_repo = mock.patch.object(migration, 'Repository')
|
||||
self.repository = self.patcher_repo.start()
|
||||
self.repository.side_effect = [self.return_value, self.return_value1]
|
||||
|
||||
self.mock_api_db = mock.patch.object(versioning_api, 'db_version')
|
||||
self.mock_api_db_version = self.mock_api_db.start()
|
||||
self.mock_api_db_version.return_value = self.test_version
|
||||
|
||||
def tearDown(self):
|
||||
os.rmdir(self.path)
|
||||
self.mock_api_db.stop()
|
||||
self.patcher_repo.stop()
|
||||
super(TestMigrationCommon, self).tearDown()
|
||||
|
||||
def test_find_migrate_repo_path_not_found(self):
|
||||
self.assertRaises(
|
||||
db_exception.DBMigrationError,
|
||||
migration._find_migrate_repo,
|
||||
"/foo/bar/",
|
||||
)
|
||||
self.assertIsNone(migration._REPOSITORY)
|
||||
|
||||
def test_find_migrate_repo_called_once(self):
|
||||
my_repository = migration._find_migrate_repo(self.path)
|
||||
self.repository.assert_called_once_with(self.path)
|
||||
self.assertEqual(self.return_value, my_repository)
|
||||
|
||||
def test_find_migrate_repo_called_few_times(self):
|
||||
repo1 = migration._find_migrate_repo(self.path)
|
||||
repo2 = migration._find_migrate_repo(self.path1)
|
||||
self.assertNotEqual(repo1, repo2)
|
||||
|
||||
def test_db_version_control(self):
|
||||
with test_utils.nested(
|
||||
mock.patch.object(migration, '_find_migrate_repo'),
|
||||
mock.patch.object(versioning_api, 'version_control'),
|
||||
) as (mock_find_repo, mock_version_control):
|
||||
mock_find_repo.return_value = self.return_value
|
||||
|
||||
version = migration.db_version_control(
|
||||
self.engine, self.path, self.test_version)
|
||||
|
||||
self.assertEqual(self.test_version, version)
|
||||
mock_version_control.assert_called_once_with(
|
||||
self.engine, self.return_value, self.test_version)
|
||||
|
||||
@mock.patch.object(migration, '_find_migrate_repo')
|
||||
@mock.patch.object(versioning_api, 'version_control')
|
||||
def test_db_version_control_version_less_than_actual_version(
|
||||
self, mock_version_control, mock_find_repo):
|
||||
mock_find_repo.return_value = self.return_value
|
||||
mock_version_control.side_effect = (migrate_exception.
|
||||
DatabaseAlreadyControlledError)
|
||||
self.assertRaises(db_exception.DBMigrationError,
|
||||
migration.db_version_control, self.engine,
|
||||
self.path, self.test_version - 1)
|
||||
|
||||
@mock.patch.object(migration, '_find_migrate_repo')
|
||||
@mock.patch.object(versioning_api, 'version_control')
|
||||
def test_db_version_control_version_greater_than_actual_version(
|
||||
self, mock_version_control, mock_find_repo):
|
||||
mock_find_repo.return_value = self.return_value
|
||||
mock_version_control.side_effect = (migrate_exception.
|
||||
InvalidVersionError)
|
||||
self.assertRaises(db_exception.DBMigrationError,
|
||||
migration.db_version_control, self.engine,
|
||||
self.path, self.test_version + 1)
|
||||
|
||||
def test_db_version_return(self):
|
||||
ret_val = migration.db_version(self.engine, self.path,
|
||||
self.init_version)
|
||||
self.assertEqual(self.test_version, ret_val)
|
||||
|
||||
def test_db_version_raise_not_controlled_error_first(self):
|
||||
with mock.patch.object(migration, 'db_version_control') as mock_ver:
|
||||
|
||||
self.mock_api_db_version.side_effect = [
|
||||
migrate_exception.DatabaseNotControlledError('oups'),
|
||||
self.test_version]
|
||||
|
||||
ret_val = migration.db_version(self.engine, self.path,
|
||||
self.init_version)
|
||||
self.assertEqual(self.test_version, ret_val)
|
||||
mock_ver.assert_called_once_with(self.engine, self.path,
|
||||
version=self.init_version)
|
||||
|
||||
def test_db_version_raise_not_controlled_error_tables(self):
|
||||
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
|
||||
self.mock_api_db_version.side_effect = \
|
||||
migrate_exception.DatabaseNotControlledError('oups')
|
||||
my_meta = mock.MagicMock()
|
||||
my_meta.tables = {'a': 1, 'b': 2}
|
||||
mock_meta.return_value = my_meta
|
||||
|
||||
self.assertRaises(
|
||||
db_exception.DBMigrationError, migration.db_version,
|
||||
self.engine, self.path, self.init_version)
|
||||
|
||||
@mock.patch.object(versioning_api, 'version_control')
|
||||
def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc):
|
||||
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
|
||||
self.mock_api_db_version.side_effect = (
|
||||
migrate_exception.DatabaseNotControlledError('oups'),
|
||||
self.init_version)
|
||||
my_meta = mock.MagicMock()
|
||||
my_meta.tables = {}
|
||||
mock_meta.return_value = my_meta
|
||||
migration.db_version(self.engine, self.path, self.init_version)
|
||||
|
||||
mock_vc.assert_called_once_with(self.engine, self.return_value1,
|
||||
self.init_version)
|
||||
|
||||
@mock.patch.object(versioning_api, 'version_control')
|
||||
def test_db_version_raise_not_controlled_alembic_tables(self, mock_vc):
|
||||
# When there are tables but the alembic control table
|
||||
# (alembic_version) is present, attempt to version the db.
|
||||
# This simulates the case where there is are multiple repos (different
|
||||
# abs_paths) and a different path has been versioned already.
|
||||
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
|
||||
self.mock_api_db_version.side_effect = [
|
||||
migrate_exception.DatabaseNotControlledError('oups'), None]
|
||||
my_meta = mock.MagicMock()
|
||||
my_meta.tables = {'alembic_version': 1, 'b': 2}
|
||||
mock_meta.return_value = my_meta
|
||||
|
||||
migration.db_version(self.engine, self.path, self.init_version)
|
||||
|
||||
mock_vc.assert_called_once_with(self.engine, self.return_value1,
|
||||
self.init_version)
|
||||
|
||||
@mock.patch.object(versioning_api, 'version_control')
|
||||
def test_db_version_raise_not_controlled_migrate_tables(self, mock_vc):
|
||||
# When there are tables but the sqlalchemy-migrate control table
|
||||
# (migrate_version) is present, attempt to version the db.
|
||||
# This simulates the case where there is are multiple repos (different
|
||||
# abs_paths) and a different path has been versioned already.
|
||||
with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
|
||||
self.mock_api_db_version.side_effect = [
|
||||
migrate_exception.DatabaseNotControlledError('oups'), None]
|
||||
my_meta = mock.MagicMock()
|
||||
my_meta.tables = {'migrate_version': 1, 'b': 2}
|
||||
mock_meta.return_value = my_meta
|
||||
|
||||
migration.db_version(self.engine, self.path, self.init_version)
|
||||
|
||||
mock_vc.assert_called_once_with(self.engine, self.return_value1,
|
||||
self.init_version)
|
||||
|
||||
def test_db_sync_wrong_version(self):
|
||||
self.assertRaises(db_exception.DBMigrationError,
|
||||
migration.db_sync, self.engine, self.path, 'foo')
|
||||
|
||||
@mock.patch.object(versioning_api, 'upgrade')
|
||||
def test_db_sync_script_not_present(self, upgrade):
|
||||
# For non existent migration script file sqlalchemy-migrate will raise
|
||||
# VersionNotFoundError which will be wrapped in DBMigrationError.
|
||||
upgrade.side_effect = migrate_exception.VersionNotFoundError
|
||||
self.assertRaises(db_exception.DBMigrationError,
|
||||
migration.db_sync, self.engine, self.path,
|
||||
self.test_version + 1)
|
||||
|
||||
@mock.patch.object(versioning_api, 'upgrade')
|
||||
def test_db_sync_known_error_raised(self, upgrade):
|
||||
upgrade.side_effect = migrate_exception.KnownError
|
||||
self.assertRaises(db_exception.DBMigrationError,
|
||||
migration.db_sync, self.engine, self.path,
|
||||
self.test_version + 1)
|
||||
|
||||
def test_db_sync_upgrade(self):
|
||||
init_ver = 55
|
||||
with test_utils.nested(
|
||||
mock.patch.object(migration, '_find_migrate_repo'),
|
||||
mock.patch.object(versioning_api, 'upgrade')
|
||||
) as (mock_find_repo, mock_upgrade):
|
||||
|
||||
mock_find_repo.return_value = self.return_value
|
||||
self.mock_api_db_version.return_value = self.test_version - 1
|
||||
|
||||
migration.db_sync(self.engine, self.path, self.test_version,
|
||||
init_ver)
|
||||
|
||||
mock_upgrade.assert_called_once_with(
|
||||
self.engine, self.return_value, self.test_version)
|
||||
|
||||
def test_db_sync_downgrade(self):
|
||||
with test_utils.nested(
|
||||
mock.patch.object(migration, '_find_migrate_repo'),
|
||||
mock.patch.object(versioning_api, 'downgrade')
|
||||
) as (mock_find_repo, mock_downgrade):
|
||||
|
||||
mock_find_repo.return_value = self.return_value
|
||||
self.mock_api_db_version.return_value = self.test_version + 1
|
||||
|
||||
migration.db_sync(self.engine, self.path, self.test_version)
|
||||
|
||||
mock_downgrade.assert_called_once_with(
|
||||
self.engine, self.return_value, self.test_version)
|
||||
|
||||
def test_db_sync_sanity_called(self):
|
||||
with test_utils.nested(
|
||||
mock.patch.object(migration, '_find_migrate_repo'),
|
||||
mock.patch.object(migration, '_db_schema_sanity_check'),
|
||||
mock.patch.object(versioning_api, 'downgrade')
|
||||
) as (mock_find_repo, mock_sanity, mock_downgrade):
|
||||
|
||||
mock_find_repo.return_value = self.return_value
|
||||
migration.db_sync(self.engine, self.path, self.test_version)
|
||||
|
||||
self.assertEqual([mock.call(self.engine), mock.call(self.engine)],
|
||||
mock_sanity.call_args_list)
|
||||
|
||||
def test_db_sync_sanity_skipped(self):
|
||||
with test_utils.nested(
|
||||
mock.patch.object(migration, '_find_migrate_repo'),
|
||||
mock.patch.object(migration, '_db_schema_sanity_check'),
|
||||
mock.patch.object(versioning_api, 'downgrade')
|
||||
) as (mock_find_repo, mock_sanity, mock_downgrade):
|
||||
|
||||
mock_find_repo.return_value = self.return_value
|
||||
migration.db_sync(self.engine, self.path, self.test_version,
|
||||
sanity_check=False)
|
||||
|
||||
self.assertFalse(mock_sanity.called)
|
||||
|
||||
def test_db_sanity_table_not_utf8(self):
|
||||
with mock.patch.object(self, 'engine') as mock_eng:
|
||||
type(mock_eng).name = mock.PropertyMock(return_value='mysql')
|
||||
mock_eng.execute.return_value = [['table_A', 'latin1'],
|
||||
['table_B', 'latin1']]
|
||||
|
||||
self.assertRaises(ValueError, migration._db_schema_sanity_check,
|
||||
mock_eng)
|
||||
|
||||
def test_db_sanity_table_not_utf8_exclude_migrate_tables(self):
|
||||
with mock.patch.object(self, 'engine') as mock_eng:
|
||||
type(mock_eng).name = mock.PropertyMock(return_value='mysql')
|
||||
# NOTE(morganfainberg): Check both lower and upper case versions
|
||||
# of the migration table names (validate case insensitivity in
|
||||
# the sanity check.
|
||||
mock_eng.execute.return_value = [['migrate_version', 'latin1'],
|
||||
['alembic_version', 'latin1'],
|
||||
['MIGRATE_VERSION', 'latin1'],
|
||||
['ALEMBIC_VERSION', 'latin1']]
|
||||
|
||||
migration._db_schema_sanity_check(mock_eng)
|
@ -16,175 +16,13 @@
|
||||
|
||||
from unittest import mock
|
||||
|
||||
import fixtures
|
||||
from migrate.versioning import api as versioning_api
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import orm
|
||||
|
||||
from oslo_db import exception as exc
|
||||
from oslo_db.sqlalchemy import test_migrations as migrate
|
||||
from oslo_db.tests import base as test_base
|
||||
from oslo_db.tests.sqlalchemy import base as db_test_base
|
||||
|
||||
|
||||
class TestWalkVersions(test_base.BaseTestCase, migrate.WalkVersionsMixin):
|
||||
migration_api = mock.MagicMock()
|
||||
REPOSITORY = mock.MagicMock()
|
||||
engine = mock.MagicMock()
|
||||
INIT_VERSION = versioning_api.VerNum(4)
|
||||
|
||||
@property
|
||||
def migrate_engine(self):
|
||||
return self.engine
|
||||
|
||||
def test_migrate_up(self):
|
||||
self.migration_api.db_version.return_value = 141
|
||||
|
||||
self.migrate_up(141)
|
||||
|
||||
self.migration_api.upgrade.assert_called_with(
|
||||
self.engine, self.REPOSITORY, 141)
|
||||
self.migration_api.db_version.assert_called_with(
|
||||
self.engine, self.REPOSITORY)
|
||||
|
||||
@staticmethod
|
||||
def _fake_upgrade_boom(*args, **kwargs):
|
||||
raise exc.DBMigrationError("boom")
|
||||
|
||||
def test_migrate_up_fail(self):
|
||||
version = 141
|
||||
self.migration_api.db_version.return_value = version
|
||||
expected_output = (
|
||||
"Failed to migrate to version %(version)s on "
|
||||
"engine %(engine)s\n" %
|
||||
{'version': version, 'engine': self.engine})
|
||||
|
||||
with mock.patch.object(
|
||||
self.migration_api,
|
||||
'upgrade',
|
||||
side_effect=self._fake_upgrade_boom,
|
||||
):
|
||||
log = self.useFixture(fixtures.FakeLogger())
|
||||
self.assertRaises(exc.DBMigrationError, self.migrate_up, version)
|
||||
self.assertEqual(expected_output, log.output)
|
||||
|
||||
def test_migrate_up_with_data(self):
|
||||
test_value = {"a": 1, "b": 2}
|
||||
self.migration_api.db_version.return_value = 141
|
||||
self._pre_upgrade_141 = mock.MagicMock()
|
||||
self._pre_upgrade_141.return_value = test_value
|
||||
self._check_141 = mock.MagicMock()
|
||||
|
||||
self.migrate_up(141, True)
|
||||
|
||||
self._pre_upgrade_141.assert_called_with(self.engine)
|
||||
self._check_141.assert_called_with(self.engine, test_value)
|
||||
|
||||
def test_migrate_down(self):
|
||||
self.migration_api.db_version.return_value = 42
|
||||
|
||||
self.assertTrue(self.migrate_down(42))
|
||||
self.migration_api.db_version.assert_called_with(
|
||||
self.engine, self.REPOSITORY)
|
||||
|
||||
def test_migrate_down_not_implemented(self):
|
||||
with mock.patch.object(
|
||||
self.migration_api,
|
||||
'downgrade',
|
||||
side_effect=NotImplementedError,
|
||||
):
|
||||
self.assertFalse(self.migrate_down(self.engine, 42))
|
||||
|
||||
def test_migrate_down_with_data(self):
|
||||
self._post_downgrade_043 = mock.MagicMock()
|
||||
self.migration_api.db_version.return_value = 42
|
||||
|
||||
self.migrate_down(42, True)
|
||||
|
||||
self._post_downgrade_043.assert_called_with(self.engine)
|
||||
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
|
||||
def test_walk_versions_all_default(self, migrate_up, migrate_down):
|
||||
self.REPOSITORY.latest = versioning_api.VerNum(20)
|
||||
self.migration_api.db_version.return_value = self.INIT_VERSION
|
||||
|
||||
self.walk_versions()
|
||||
|
||||
self.migration_api.version_control.assert_called_with(
|
||||
self.engine, self.REPOSITORY, self.INIT_VERSION)
|
||||
self.migration_api.db_version.assert_called_with(
|
||||
self.engine, self.REPOSITORY)
|
||||
|
||||
versions = range(int(self.INIT_VERSION) + 1,
|
||||
int(self.REPOSITORY.latest) + 1)
|
||||
upgraded = [mock.call(v, with_data=True)
|
||||
for v in versions]
|
||||
self.assertEqual(upgraded, self.migrate_up.call_args_list)
|
||||
|
||||
downgraded = [mock.call(v - 1) for v in reversed(versions)]
|
||||
self.assertEqual(downgraded, self.migrate_down.call_args_list)
|
||||
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
|
||||
def test_walk_versions_all_true(self, migrate_up, migrate_down):
|
||||
self.REPOSITORY.latest = versioning_api.VerNum(20)
|
||||
self.migration_api.db_version.return_value = self.INIT_VERSION
|
||||
|
||||
self.walk_versions(snake_walk=True, downgrade=True)
|
||||
|
||||
versions = range(int(self.INIT_VERSION) + 1,
|
||||
int(self.REPOSITORY.latest) + 1)
|
||||
upgraded = []
|
||||
for v in versions:
|
||||
upgraded.append(mock.call(v, with_data=True))
|
||||
upgraded.append(mock.call(v))
|
||||
upgraded.extend([mock.call(v) for v in reversed(versions)])
|
||||
self.assertEqual(upgraded, self.migrate_up.call_args_list)
|
||||
|
||||
downgraded_1 = [mock.call(v - 1, with_data=True) for v in versions]
|
||||
downgraded_2 = []
|
||||
for v in reversed(versions):
|
||||
downgraded_2.append(mock.call(v - 1))
|
||||
downgraded_2.append(mock.call(v - 1))
|
||||
downgraded = downgraded_1 + downgraded_2
|
||||
self.assertEqual(downgraded, self.migrate_down.call_args_list)
|
||||
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
|
||||
def test_walk_versions_true_false(self, migrate_up, migrate_down):
|
||||
self.REPOSITORY.latest = versioning_api.VerNum(20)
|
||||
self.migration_api.db_version.return_value = self.INIT_VERSION
|
||||
|
||||
self.walk_versions(snake_walk=True, downgrade=False)
|
||||
|
||||
versions = range(int(self.INIT_VERSION) + 1,
|
||||
int(self.REPOSITORY.latest) + 1)
|
||||
|
||||
upgraded = []
|
||||
for v in versions:
|
||||
upgraded.append(mock.call(v, with_data=True))
|
||||
upgraded.append(mock.call(v))
|
||||
self.assertEqual(upgraded, self.migrate_up.call_args_list)
|
||||
|
||||
downgraded = [mock.call(v - 1, with_data=True) for v in versions]
|
||||
self.assertEqual(downgraded, self.migrate_down.call_args_list)
|
||||
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_up')
|
||||
@mock.patch.object(migrate.WalkVersionsMixin, 'migrate_down')
|
||||
def test_walk_versions_all_false(self, migrate_up, migrate_down):
|
||||
self.REPOSITORY.latest = versioning_api.VerNum(20)
|
||||
self.migration_api.db_version.return_value = self.INIT_VERSION
|
||||
|
||||
self.walk_versions(snake_walk=False, downgrade=False)
|
||||
|
||||
versions = range(int(self.INIT_VERSION) + 1,
|
||||
int(self.REPOSITORY.latest) + 1)
|
||||
|
||||
upgraded = [mock.call(v, with_data=True) for v in versions]
|
||||
self.assertEqual(upgraded, self.migrate_up.call_args_list)
|
||||
|
||||
|
||||
class ModelsMigrationSyncMixin(db_test_base._DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
@ -18,7 +18,6 @@ from urllib import parse
|
||||
|
||||
import fixtures
|
||||
import sqlalchemy
|
||||
from sqlalchemy.dialects import mysql
|
||||
from sqlalchemy import Boolean, Index, Integer, DateTime, String
|
||||
from sqlalchemy import CheckConstraint
|
||||
from sqlalchemy import MetaData, Table, Column
|
||||
@ -782,129 +781,6 @@ class TestMigrationUtils(db_test_base._DbTestCase):
|
||||
for value in soft_deleted_values:
|
||||
self.assertIn(value['id'], deleted_rows_ids)
|
||||
|
||||
def test_change_deleted_column_type_does_not_drop_index(self):
|
||||
table_name = 'abc'
|
||||
|
||||
indexes = {
|
||||
'idx_a_deleted': ['a', 'deleted'],
|
||||
'idx_b_deleted': ['b', 'deleted'],
|
||||
'idx_a': ['a']
|
||||
}
|
||||
|
||||
index_instances = [Index(name, *columns)
|
||||
for name, columns in indexes.items()]
|
||||
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('a', String(255)),
|
||||
Column('b', String(255)),
|
||||
Column('deleted', Boolean),
|
||||
*index_instances)
|
||||
table.create(self.engine)
|
||||
utils.change_deleted_column_type_to_id_type(self.engine, table_name)
|
||||
utils.change_deleted_column_type_to_boolean(self.engine, table_name)
|
||||
|
||||
insp = sqlalchemy.inspect(self.engine)
|
||||
real_indexes = insp.get_indexes(table_name)
|
||||
self.assertEqual(3, len(real_indexes))
|
||||
for index in real_indexes:
|
||||
name = index['name']
|
||||
self.assertIn(name, indexes)
|
||||
self.assertEqual(set(indexes[name]),
|
||||
set(index['column_names']))
|
||||
|
||||
def test_change_deleted_column_type_to_id_type_integer(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('deleted', Boolean))
|
||||
table.create(self.engine)
|
||||
utils.change_deleted_column_type_to_id_type(self.engine, table_name)
|
||||
|
||||
table = utils.get_table(self.engine, table_name)
|
||||
self.assertIsInstance(table.c.deleted.type, Integer)
|
||||
|
||||
def test_change_deleted_column_type_to_id_type_string(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', String(255), primary_key=True),
|
||||
Column('deleted', Boolean))
|
||||
table.create(self.engine)
|
||||
utils.change_deleted_column_type_to_id_type(self.engine, table_name)
|
||||
|
||||
table = utils.get_table(self.engine, table_name)
|
||||
self.assertIsInstance(table.c.deleted.type, String)
|
||||
|
||||
@db_test_base.backend_specific('sqlite')
|
||||
def test_change_deleted_column_type_to_id_type_custom(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('foo', CustomType),
|
||||
Column('deleted', Boolean))
|
||||
table.create(self.engine)
|
||||
|
||||
fooColumn = Column('foo', CustomType())
|
||||
utils.change_deleted_column_type_to_id_type(self.engine, table_name,
|
||||
foo=fooColumn)
|
||||
|
||||
table = utils.get_table(self.engine, table_name)
|
||||
|
||||
self.assertIsInstance(table.c.deleted.type, Integer)
|
||||
|
||||
def test_change_deleted_column_type_to_boolean(self):
|
||||
expected_types = {'mysql': mysql.TINYINT}
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('deleted', Integer))
|
||||
table.create(self.engine)
|
||||
|
||||
utils.change_deleted_column_type_to_boolean(self.engine, table_name)
|
||||
|
||||
table = utils.get_table(self.engine, table_name)
|
||||
self.assertIsInstance(table.c.deleted.type,
|
||||
expected_types.get(self.engine.name, Boolean))
|
||||
|
||||
def test_change_deleted_column_type_to_boolean_with_fc(self):
|
||||
expected_types = {'mysql': mysql.TINYINT}
|
||||
table_name_1 = 'abc'
|
||||
table_name_2 = 'bcd'
|
||||
|
||||
table_1 = Table(table_name_1, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('deleted', Integer))
|
||||
table_1.create(self.engine)
|
||||
|
||||
table_2 = Table(table_name_2, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('foreign_id', Integer,
|
||||
ForeignKey('%s.id' % table_name_1)),
|
||||
Column('deleted', Integer))
|
||||
table_2.create(self.engine)
|
||||
|
||||
utils.change_deleted_column_type_to_boolean(self.engine, table_name_2)
|
||||
|
||||
table = utils.get_table(self.engine, table_name_2)
|
||||
self.assertIsInstance(table.c.deleted.type,
|
||||
expected_types.get(self.engine.name, Boolean))
|
||||
|
||||
@db_test_base.backend_specific('sqlite')
|
||||
def test_change_deleted_column_type_to_boolean_type_custom(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('foo', CustomType),
|
||||
Column('deleted', Integer))
|
||||
table.create(self.engine)
|
||||
|
||||
fooColumn = Column('foo', CustomType())
|
||||
utils.change_deleted_column_type_to_boolean(self.engine, table_name,
|
||||
foo=fooColumn)
|
||||
|
||||
table = utils.get_table(self.engine, table_name)
|
||||
self.assertIsInstance(table.c.deleted.type, Boolean)
|
||||
|
||||
def test_detect_boolean_deleted_constraint_detection(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
@ -922,33 +798,6 @@ class TestMigrationUtils(db_test_base._DbTestCase):
|
||||
)
|
||||
)
|
||||
|
||||
@db_test_base.backend_specific('sqlite')
|
||||
def test_change_deleted_column_type_sqlite_drops_check_constraint(self):
|
||||
table_name = 'abc'
|
||||
table = Table(table_name, self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
Column('deleted', Boolean))
|
||||
table.create(self.engine)
|
||||
|
||||
utils._change_deleted_column_type_to_id_type_sqlite(self.engine,
|
||||
table_name)
|
||||
table = Table(table_name, self.meta, autoload_with=self.engine)
|
||||
# NOTE(I159): if the CHECK constraint has been dropped (expected
|
||||
# behavior), any integer value can be inserted, otherwise only 1 or 0.
|
||||
# NOTE(zzzeek): SQLAlchemy 1.2 Boolean type will disallow non 1/0
|
||||
# value here, 1.1 also coerces to "1/0" so use raw SQL to test the
|
||||
# constraint
|
||||
with self.engine.connect() as conn, conn.begin():
|
||||
conn.exec_driver_sql(
|
||||
"INSERT INTO abc (deleted) VALUES (?)",
|
||||
(10, ),
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
10,
|
||||
conn.scalar(sql.text("SELECT deleted FROM abc")),
|
||||
)
|
||||
|
||||
def test_get_foreign_key_constraint_name(self):
|
||||
table_1 = Table('table_name_1', self.meta,
|
||||
Column('id', Integer, primary_key=True),
|
||||
|
@ -0,0 +1,8 @@
|
||||
---
|
||||
upgrade:
|
||||
- |
|
||||
The ``oslo_db.sqlalchemy.migration`` module and ``migrate`` backend for the
|
||||
``oslo_db.sqalchemy.migration_cli`` module, both of which were first
|
||||
deprecated in the 8.5.0 release, have now been removed.
|
||||
``sqlalchemy-migrate`` is no longer under active development, does not
|
||||
support SQLAlchemy 2.0, and has been effectively replaced by ``alembic``.
|
@ -9,7 +9,6 @@ oslo.i18n>=3.15.3 # Apache-2.0
|
||||
oslo.config>=5.2.0 # Apache-2.0
|
||||
oslo.utils>=3.33.0 # Apache-2.0
|
||||
SQLAlchemy>=1.4.0 # MIT
|
||||
sqlalchemy-migrate>=0.11.0 # Apache-2.0
|
||||
stevedore>=1.20.0 # Apache-2.0
|
||||
# these are used by downstream libraries that require
|
||||
# oslo.db as one of their test requirements - do not remove!
|
||||
|
Loading…
Reference in New Issue
Block a user