Updated "deleted" column of volume_type_access
Below changes are done in this commit:

a. Replaced update() with soft_delete()
   The update() call in the volume_type_access_remove() function of
   db.sqlalchemy.api is replaced with oslo.db's soft_delete(), so that the
   row's "id" value is kept in the "deleted" column when volume type access
   is removed from a project. This resolves the bug referenced below.

b. Updated the DB schema
   The schema of volume_type_projects is updated. Since a tinyint can hold
   at most 127, the "deleted" column type is changed from tinyint to
   integer so that it can store id values beyond 127.

c. Added release notes
   Release notes are added stating that volume_type_access must not be
   added to or removed from a project while the DB migration is running.

UpgradeImpact

Closes-Bug: #1518363
Change-Id: I638a202dfb2b4febf1d623683de22df3b6dc2615
parent 94d4b15943
commit b2cd356cac
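Some context on why the switch to soft_delete() fixes the bug: volume_type_projects carries a unique constraint on (volume_type_id, project_id, deleted), visible in the SQLite script further down, so a boolean-style soft delete allows at most one deleted row per pair and a second grant/revoke cycle for the same project fails. Writing the row's id into "deleted", which is what oslo.db's soft_delete() does (and what the commit message refers to), keeps every soft-deleted row distinct. A minimal standalone sketch of the difference, using plain sqlite3 and a toy table (not the real Cinder code):

import sqlite3

# Toy table with the same shape of unique constraint as volume_type_projects.
conn = sqlite3.connect(":memory:")
conn.execute("""
    CREATE TABLE access (
        id INTEGER PRIMARY KEY,
        volume_type_id TEXT,
        project_id TEXT,
        deleted INTEGER,
        UNIQUE (volume_type_id, project_id, deleted)
    )
""")


def grant(row_id):
    # Stand-in for adding volume type access: an active row has deleted = 0.
    conn.execute("INSERT INTO access VALUES (?, 'type-a', 'proj-1', 0)",
                 (row_id,))


# Boolean-style soft delete (the old update({'deleted': True, ...}) path):
grant(1)
conn.execute("UPDATE access SET deleted = 1 WHERE id = 1")
grant(2)
try:
    conn.execute("UPDATE access SET deleted = 1 WHERE id = 2")
except sqlite3.IntegrityError as exc:
    # ('type-a', 'proj-1', 1) already exists -> the second removal fails.
    print("boolean-style soft delete collides:", exc)

# id-style soft delete (what soft_delete() writes): each soft-deleted row
# keeps a distinct "deleted" value, so grant/revoke can repeat indefinitely.
conn.execute("UPDATE access SET deleted = id WHERE id = 2")
grant(3)
conn.execute("UPDATE access SET deleted = id WHERE id = 3")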
@@ -259,6 +259,8 @@ def model_query(context, *args, **kwargs):
         pass  # omit the filter to include deleted and active
     elif read_deleted == 'only':
         query = query.filter_by(deleted=True)
+    elif read_deleted == 'int_no':
+        query = query.filter_by(deleted=0)
     else:
         raise Exception(
             _("Unrecognized read_deleted value '%s'") % read_deleted)
@@ -2521,7 +2523,7 @@ def volume_type_get_all(context, inactive=False, filters=None):
         if filters['is_public'] and context.project_id is not None:
             projects_attr = getattr(models.VolumeTypes, 'projects')
             the_filter.extend([
-                projects_attr.any(project_id=context.project_id, deleted=False)
+                projects_attr.any(project_id=context.project_id, deleted=0)
             ])
         if len(the_filter) > 1:
             query = query.filter(or_(*the_filter))
@@ -2777,7 +2779,7 @@ def volume_get_active_by_window(context,

 def _volume_type_access_query(context, session=None):
     return model_query(context, models.VolumeTypeProjects, session=session,
-                       read_deleted="no")
+                       read_deleted="int_no")


 @require_admin_context
@@ -2814,9 +2816,7 @@ def volume_type_access_remove(context, type_id, project_id):
     count = (_volume_type_access_query(context).
              filter_by(volume_type_id=volume_type_id).
              filter_by(project_id=project_id).
-             update({'deleted': True,
-                     'deleted_at': timeutils.utcnow(),
-                     'updated_at': literal_column('updated_at')}))
+             soft_delete(synchronize_session=False))
     if count == 0:
         raise exception.VolumeTypeAccessNotFound(
             volume_type_id=type_id, project_id=project_id)
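For readers unfamiliar with oslo.db, the soft_delete(synchronize_session=False) call above is expected to boil down to a single UPDATE that copies the row id into "deleted", stamps deleted_at, and reports how many rows it matched; that count is what drives the VolumeTypeAccessNotFound check. A rough standalone sketch with plain sqlite3 (the table contents and the LookupError stand-in are illustrative only):

import datetime
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE volume_type_projects ("
             "id INTEGER PRIMARY KEY, volume_type_id TEXT, "
             "project_id TEXT, deleted INTEGER, deleted_at TEXT)")
conn.execute("INSERT INTO volume_type_projects "
             "VALUES (7, 'type-a', 'proj-1', 0, NULL)")


def access_remove(volume_type_id, project_id):
    # One UPDATE: "deleted" takes the row's id, deleted_at gets a timestamp.
    # The "deleted = 0" predicate mirrors what read_deleted="int_no" adds
    # through model_query() in the hunks above.
    cur = conn.execute(
        "UPDATE volume_type_projects "
        "SET deleted = id, deleted_at = ? "
        "WHERE volume_type_id = ? AND project_id = ? AND deleted = 0",
        (datetime.datetime.utcnow().isoformat(), volume_type_id, project_id))
    if cur.rowcount == 0:
        # Stand-in for exception.VolumeTypeAccessNotFound.
        raise LookupError("access not found: %s/%s"
                          % (volume_type_id, project_id))


access_remove('type-a', 'proj-1')       # one row matched
# A second call would now raise, since nothing is left with deleted = 0.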
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Integer
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+    """Deleted col of volume_type_projects converted(tinyint->Int)."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volume_type_projects = Table('volume_type_projects', meta, autoload=True)
+
+    if migrate_engine.name == 'postgresql':
+        # NOTE: PostgreSQL can't cast Boolean to int automatically
+        sql = 'ALTER TABLE volume_type_projects ALTER COLUMN deleted ' + \
+              'TYPE INTEGER USING deleted::integer'
+        migrate_engine.execute(sql)
+    else:
+        volume_type_projects.c.deleted.alter(Integer)
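A quick way to spot-check the migration above, following the same reflection style the migration itself uses (so it assumes the same, older SQLAlchemy API; the engine URL is a placeholder, not part of the change): reflect volume_type_projects and confirm that "deleted" now carries an integer type, much as the _check_062 test further down does for the id column.

from sqlalchemy import Integer, MetaData, Table, create_engine

# Placeholder URL; point this at the real Cinder database to check.
engine = create_engine('sqlite:///cinder.db')
meta = MetaData()
meta.bind = engine

volume_type_projects = Table('volume_type_projects', meta, autoload=True)
assert isinstance(volume_type_projects.c.deleted.type, Integer), \
    volume_type_projects.c.deleted.type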
@@ -0,0 +1,29 @@
+-- As sqlite does not support the DROP CHECK, we need to create
+-- the table, and move all the data to it.
+
+CREATE TABLE volume_type_projects_new (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted INTEGER,
+    id INTEGER NOT NULL,
+    volume_type_id VARCHAR(36),
+    project_id VARCHAR(255),
+    PRIMARY KEY (id),
+    FOREIGN KEY (volume_type_id) REFERENCES volume_types(id),
+    CONSTRAINT uniq_volume_type_projects0volume_type_id0project_id0deleted UNIQUE (volume_type_id, project_id, deleted)
+);
+
+INSERT INTO volume_type_projects_new
+    SELECT created_at,
+           updated_at,
+           deleted_at,
+           deleted,
+           id,
+           volume_type_id,
+           project_id
+    FROM volume_type_projects;
+
+DROP TABLE volume_type_projects;
+
+ALTER TABLE volume_type_projects_new RENAME TO volume_type_projects;
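The rebuild above is needed because SQLite cannot drop in place the CHECK constraint that comes with a Boolean column, so the data is copied into a freshly defined table and the old one is dropped. A rough way to exercise those statements against an in-memory database (the pre-migration table here is hand-written and only approximate, since its old definition is not part of this diff):

import sqlite3

conn = sqlite3.connect(":memory:")

# Hand-written stand-in for the pre-migration table. The CHECK constraint
# mimics what SQLAlchemy adds for Boolean columns on SQLite and is what
# forces the copy-and-rename approach.
conn.executescript("""
    CREATE TABLE volume_types (id VARCHAR(36) PRIMARY KEY);
    CREATE TABLE volume_type_projects (
        created_at DATETIME, updated_at DATETIME, deleted_at DATETIME,
        deleted BOOLEAN CHECK (deleted IN (0, 1)),
        id INTEGER NOT NULL, volume_type_id VARCHAR(36),
        project_id VARCHAR(255), PRIMARY KEY (id)
    );
    INSERT INTO volume_type_projects VALUES
        (NULL, NULL, NULL, 0, 1, 'type-a', 'proj-1');
""")

# The rebuild statements from the script above, condensed.
conn.executescript("""
    CREATE TABLE volume_type_projects_new (
        created_at DATETIME, updated_at DATETIME, deleted_at DATETIME,
        deleted INTEGER, id INTEGER NOT NULL,
        volume_type_id VARCHAR(36), project_id VARCHAR(255),
        PRIMARY KEY (id),
        FOREIGN KEY (volume_type_id) REFERENCES volume_types(id),
        CONSTRAINT uniq_volume_type_projects0volume_type_id0project_id0deleted
            UNIQUE (volume_type_id, project_id, deleted)
    );
    INSERT INTO volume_type_projects_new
        SELECT created_at, updated_at, deleted_at, deleted, id,
               volume_type_id, project_id
        FROM volume_type_projects;
    DROP TABLE volume_type_projects;
    ALTER TABLE volume_type_projects_new RENAME TO volume_type_projects;
""")

# The existing row survives with an integer-typed "deleted" column.
print(conn.execute("SELECT id, volume_type_id, project_id, deleted "
                   "FROM volume_type_projects").fetchall())
# -> [(1, 'type-a', 'proj-1', 0)]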
@@ -259,6 +259,7 @@ class VolumeTypeProjects(BASE, CinderBase):
     volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
                             nullable=False)
     project_id = Column(String(255))
+    deleted = Column(Integer, default=0)

     volume_type = relationship(
         VolumeTypes,
@@ -266,7 +267,7 @@ class VolumeTypeProjects(BASE, CinderBase):
         foreign_keys=volume_type_id,
         primaryjoin='and_('
         'VolumeTypeProjects.volume_type_id == VolumeTypes.id,'
-        'VolumeTypeProjects.deleted == False)')
+        'VolumeTypeProjects.deleted == 0)')


 class VolumeTypeExtraSpecs(BASE, CinderBase):
@@ -717,6 +717,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(backups.c.data_timestamp.type,
                               self.TIME_TYPE)

+    def _check_062(self, engine, data):
+        volume_type_projects = db_utils.get_table(engine,
+                                                  'volume_type_projects')
+        self.assertIsInstance(volume_type_projects.c.id.type,
+                              sqlalchemy.types.INTEGER)
+
     def test_walk_versions(self):
         self.walk_versions(False, False)

@@ -0,0 +1,4 @@
+---
+upgrade:
+  - volume_type_access must not be added to or removed from any project while DB migration 62 is running.
+  - When running PostgreSQL, all cinder-api services must be upgraded and restarted along with DB migration 62.