Data Replication: Ensure Snapshots across replicas
When taking a snapshot of a replicated share, users would expect the snapshot to be available across the replicas. Currently, Manila assumes that drivers will take snapshots along with the share data. Different backends implement replication differently. Many of them assume that replicas are 'in_sync' with the primary if they have been synchronized within a particular window of time (sometimes referred to as 'recovery point objective'). Since snapshots are point-in-time actions on a share, drivers must ensure that the replication of a snapshot begins as soon as it is available on the primary/'active' instance. To ensure that snapshots are indeed available across replicas: 1) Track snapshots across replicas as different snapshot instances. 2) Add an 'aggregate_status' property to convey the availability of a snapshot across all the active/in_sync replicas of a share. 3) Provide all the information necessary for drivers to: - Create a snapshot on the primary and send it across to all replicas - Delete a snapshot on the primary and ensure that it is deleted on the replicas as well. 4) Set a snapshot instance to 'error' if it was in any transitional state when its replica was 'promoted' to become an 'active' instance. 5) Pass down snapshot instances during create, delete and update replica driver calls. 6) Update replica interfaces to pass in snapshot instance data. Always include 'available' snapshots in the update_replica call to ensure that drivers confirm their presence on the replica in order to report that the replica is 'in_sync'. Also, grab the share_type afresh from the parent share/snapshot to allow 'replication_key' to be copied to the share model during share creation. Rename SnapshotNotFound to SnapshotResourceNotFound to signify a missing snapshot object on backends. APIImpact The 'status' attribute of the snapshot is now the 'aggregate_status' of the instances of the snapshot. No key changes have been made in the JSON response. 
However, API actions will be gated against 'aggregate_status' of the snapshot instances when more than one instance exists. Closes-Bug: #1546303 Closes-Bug: #1551064 Change-Id: I63203df00904d209e9e92eda7c79206e5ef8d611
This commit is contained in:
parent
f5a537f039
commit
94e04c8de2
@ -320,7 +320,7 @@ class ShareMixin(object):
|
||||
else:
|
||||
snapshot = None
|
||||
|
||||
kwargs['snapshot'] = snapshot
|
||||
kwargs['snapshot_id'] = snapshot_id
|
||||
|
||||
share_network_id = share.get('share_network_id')
|
||||
|
||||
|
@ -49,7 +49,7 @@ class ViewBuilder(common.ViewBuilder):
|
||||
'share_id': snapshot.get('share_id'),
|
||||
'share_size': snapshot.get('share_size'),
|
||||
'created_at': snapshot.get('created_at'),
|
||||
'status': snapshot.get('status'),
|
||||
'status': snapshot.get('aggregate_status'),
|
||||
'name': snapshot.get('display_name'),
|
||||
'description': snapshot.get('display_description'),
|
||||
'size': snapshot.get('size'),
|
||||
|
@ -478,19 +478,37 @@ def share_instance_update_access_status(context, share_instance_id, status):
|
||||
|
||||
|
||||
def share_snapshot_instance_update(context, instance_id, values):
|
||||
"""Set the given properties on an snapshot instance and update it.
|
||||
"""Set the given properties on a share snapshot instance and update it.
|
||||
|
||||
Raises NotFound if snapshot instance does not exist.
|
||||
"""
|
||||
return IMPL.share_snapshot_instance_update(context, instance_id, values)
|
||||
|
||||
|
||||
def share_snapshot_instance_create(context, snapshot_id, values):
|
||||
"""Create a share snapshot instance for an existing snapshot."""
|
||||
return IMPL.share_snapshot_instance_create(
|
||||
context, snapshot_id, values)
|
||||
|
||||
|
||||
def share_snapshot_instance_get(context, instance_id, with_share_data=False):
|
||||
"""Get a snapshot or raise if it does not exist."""
|
||||
"""Get a snapshot instance or raise a NotFound exception."""
|
||||
return IMPL.share_snapshot_instance_get(
|
||||
context, instance_id, with_share_data=with_share_data)
|
||||
|
||||
|
||||
def share_snapshot_instance_get_all_with_filters(context, filters,
|
||||
with_share_data=False):
|
||||
"""Get all snapshot instances satisfying provided filters."""
|
||||
return IMPL.share_snapshot_instance_get_all_with_filters(
|
||||
context, filters, with_share_data=with_share_data)
|
||||
|
||||
|
||||
def share_snapshot_instance_delete(context, snapshot_instance_id):
|
||||
"""Delete a share snapshot instance."""
|
||||
return IMPL.share_snapshot_instance_delete(context, snapshot_instance_id)
|
||||
|
||||
|
||||
####################
|
||||
|
||||
|
||||
|
@ -1883,7 +1883,10 @@ def share_instance_update_access_status(context, share_instance_id, status):
|
||||
|
||||
|
||||
@require_context
|
||||
def share_snapshot_instance_create(context, snapshot_id, values, session):
|
||||
def share_snapshot_instance_create(context, snapshot_id, values, session=None):
|
||||
session = session or get_session()
|
||||
values = copy.deepcopy(values)
|
||||
|
||||
if not values.get('id'):
|
||||
values['id'] = uuidutils.generate_uuid()
|
||||
values.update({'snapshot_id': snapshot_id})
|
||||
@ -1913,30 +1916,109 @@ def share_snapshot_instance_update(context, instance_id, values):
|
||||
|
||||
|
||||
@require_context
|
||||
def share_snapshot_instance_get(context, instance_id, session=None,
|
||||
def share_snapshot_instance_delete(context, snapshot_instance_id,
|
||||
session=None):
|
||||
session = session or get_session()
|
||||
|
||||
with session.begin():
|
||||
snapshot_instance_ref = share_snapshot_instance_get(
|
||||
context, snapshot_instance_id, session=session)
|
||||
snapshot_instance_ref.soft_delete(
|
||||
session=session, update_status=True)
|
||||
snapshot = share_snapshot_get(
|
||||
context, snapshot_instance_ref['snapshot_id'], session=session)
|
||||
if len(snapshot.instances) == 0:
|
||||
snapshot.soft_delete(session=session)
|
||||
|
||||
|
||||
@require_context
|
||||
def share_snapshot_instance_get(context, snapshot_instance_id, session=None,
|
||||
with_share_data=False):
|
||||
if session is None:
|
||||
session = get_session()
|
||||
result = (
|
||||
model_query(
|
||||
context,
|
||||
models.ShareSnapshotInstance,
|
||||
session=session
|
||||
).filter_by(id=instance_id).first()
|
||||
)
|
||||
|
||||
session = session or get_session()
|
||||
|
||||
result = _share_snapshot_instance_get_with_filters(
|
||||
context, instance_ids=[snapshot_instance_id], session=session).first()
|
||||
|
||||
if result is None:
|
||||
raise exception.NotFound()
|
||||
raise exception.ShareSnapshotInstanceNotFound(
|
||||
instance_id=snapshot_instance_id)
|
||||
|
||||
if with_share_data:
|
||||
share_instance = share_instance_get(
|
||||
context, result['share_instance_id'],
|
||||
session=session, with_share_data=True
|
||||
)
|
||||
result['share'] = share_instance
|
||||
result = _set_share_snapshot_instance_data(context, result, session)[0]
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def share_snapshot_instance_get_all_with_filters(context, search_filters,
|
||||
with_share_data=False,
|
||||
session=None):
|
||||
"""Get snapshot instances filtered by known attrs, ignore unknown attrs.
|
||||
|
||||
All filters accept list/tuples to filter on, along with simple values.
|
||||
"""
|
||||
def listify(values):
|
||||
if values:
|
||||
if not isinstance(values, (list, tuple, set)):
|
||||
return values,
|
||||
else:
|
||||
return values
|
||||
|
||||
session = session or get_session()
|
||||
_known_filters = ('instance_ids', 'snapshot_ids', 'share_instance_ids',
|
||||
'statuses')
|
||||
|
||||
filters = {k: listify(search_filters.get(k)) for k in _known_filters}
|
||||
|
||||
result = _share_snapshot_instance_get_with_filters(
|
||||
context, session=session, **filters).all()
|
||||
|
||||
if with_share_data:
|
||||
result = _set_share_snapshot_instance_data(context, result, session)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _share_snapshot_instance_get_with_filters(context, instance_ids=None,
|
||||
snapshot_ids=None, statuses=None,
|
||||
share_instance_ids=None,
|
||||
session=None):
|
||||
|
||||
query = model_query(context, models.ShareSnapshotInstance, session=session,
|
||||
read_deleted="no")
|
||||
|
||||
if instance_ids is not None:
|
||||
query = query.filter(
|
||||
models.ShareSnapshotInstance.id.in_(instance_ids))
|
||||
|
||||
if snapshot_ids is not None:
|
||||
query = query.filter(
|
||||
models.ShareSnapshotInstance.snapshot_id.in_(snapshot_ids))
|
||||
|
||||
if share_instance_ids is not None:
|
||||
query = query.filter(models.ShareSnapshotInstance.share_instance_id
|
||||
.in_(share_instance_ids))
|
||||
|
||||
if statuses is not None:
|
||||
query = query.filter(models.ShareSnapshotInstance.status.in_(statuses))
|
||||
|
||||
return query
|
||||
|
||||
|
||||
def _set_share_snapshot_instance_data(context, snapshot_instances, session):
|
||||
if snapshot_instances and not isinstance(snapshot_instances, list):
|
||||
snapshot_instances = [snapshot_instances]
|
||||
|
||||
for snapshot_instance in snapshot_instances:
|
||||
share_instance = share_instance_get(
|
||||
context, snapshot_instance['share_instance_id'], session=session,
|
||||
with_share_data=True)
|
||||
snapshot_instance['share'] = share_instance
|
||||
|
||||
return snapshot_instances
|
||||
|
||||
|
||||
###################
|
||||
|
||||
|
||||
@ -1989,7 +2071,12 @@ def share_snapshot_destroy(context, snapshot_id):
|
||||
with session.begin():
|
||||
snapshot_ref = share_snapshot_get(context, snapshot_id,
|
||||
session=session)
|
||||
snapshot_ref.instance.soft_delete(session=session, update_status=True)
|
||||
|
||||
if len(snapshot_ref.instances) > 0:
|
||||
msg = _("Snapshot %(id)s has %(count)s snapshot instances.") % {
|
||||
'id': snapshot_id, 'count': len(snapshot_ref.instances)}
|
||||
raise exception.InvalidShareSnapshot(msg)
|
||||
|
||||
snapshot_ref.soft_delete(session=session)
|
||||
|
||||
|
||||
|
@ -585,7 +585,15 @@ class ShareSnapshot(BASE, ManilaBase):
|
||||
"""Represents a snapshot of a share."""
|
||||
__tablename__ = 'share_snapshots'
|
||||
_extra_keys = ['name', 'share_name', 'status', 'progress',
|
||||
'provider_location']
|
||||
'provider_location', 'aggregate_status']
|
||||
|
||||
def __getattr__(self, item):
|
||||
proxified_properties = ('status', 'progress', 'provider_location')
|
||||
|
||||
if item in proxified_properties:
|
||||
return getattr(self.instance, item, None)
|
||||
|
||||
raise AttributeError(item)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
@ -595,25 +603,52 @@ class ShareSnapshot(BASE, ManilaBase):
|
||||
def share_name(self):
|
||||
return CONF.share_name_template % self.share_id
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
if self.instance:
|
||||
return self.instance.status
|
||||
|
||||
@property
|
||||
def progress(self):
|
||||
if self.instance:
|
||||
return self.instance.progress
|
||||
|
||||
@property
|
||||
def provider_location(self):
|
||||
if self.instance:
|
||||
return self.instance.provider_location
|
||||
|
||||
@property
|
||||
def instance(self):
|
||||
result = None
|
||||
if len(self.instances) > 0:
|
||||
return self.instances[0]
|
||||
def qualified_replica(x):
|
||||
preferred_statuses = (constants.REPLICA_STATE_ACTIVE,)
|
||||
return x['replica_state'] in preferred_statuses
|
||||
|
||||
replica_snapshots = list(filter(
|
||||
lambda x: qualified_replica(x.share_instance), self.instances))
|
||||
|
||||
snapshot_instances = replica_snapshots or self.instances
|
||||
result = snapshot_instances[0]
|
||||
|
||||
return result
|
||||
|
||||
@property
|
||||
def aggregate_status(self):
|
||||
"""Get the aggregated 'status' of all instances.
|
||||
|
||||
A snapshot is supposed to be truly 'available' when it is available
|
||||
across all of the share instances of the parent share object. In
|
||||
case of replication, we only consider replicas (share instances)
|
||||
that are in 'in_sync' replica_state.
|
||||
"""
|
||||
|
||||
def qualified_replica(x):
|
||||
preferred_statuses = (constants.REPLICA_STATE_ACTIVE,
|
||||
constants.REPLICA_STATE_IN_SYNC)
|
||||
return x['replica_state'] in preferred_statuses
|
||||
|
||||
replica_snapshots = list(filter(
|
||||
lambda x: qualified_replica(x['share_instance']), self.instances))
|
||||
|
||||
if not replica_snapshots:
|
||||
return self.status
|
||||
|
||||
order = (constants.STATUS_DELETING, constants.STATUS_CREATING,
|
||||
constants.STATUS_ERROR, constants.STATUS_AVAILABLE)
|
||||
other_statuses = [x['status'] for x in self.instances if
|
||||
x['status'] not in order]
|
||||
order = (order + tuple(other_statuses))
|
||||
|
||||
sorted_instances = sorted(
|
||||
replica_snapshots, key=lambda x: order.index(x['status']))
|
||||
return sorted_instances[0].status
|
||||
|
||||
id = Column(String(36), primary_key=True)
|
||||
deleted = Column(String(36), default='False')
|
||||
@ -674,6 +709,7 @@ class ShareSnapshotInstance(BASE, ManilaBase):
|
||||
provider_location = Column(String(255))
|
||||
share_instance = orm.relationship(
|
||||
ShareInstance, backref="snapshot_instances",
|
||||
lazy='immediate',
|
||||
primaryjoin=(
|
||||
'and_('
|
||||
'ShareSnapshotInstance.share_instance_id == ShareInstance.id,'
|
||||
|
@ -457,6 +457,10 @@ class ShareSnapshotNotFound(NotFound):
|
||||
message = _("Snapshot %(snapshot_id)s could not be found.")
|
||||
|
||||
|
||||
class ShareSnapshotInstanceNotFound(NotFound):
|
||||
message = _("Snapshot instance %(instance_id)s could not be found.")
|
||||
|
||||
|
||||
class ShareSnapshotNotSupported(ManilaException):
|
||||
message = _("Share %(share_name)s does not support snapshots.")
|
||||
|
||||
@ -609,7 +613,7 @@ class StorageResourceNotFound(StorageResourceException):
|
||||
code = 404
|
||||
|
||||
|
||||
class SnapshotNotFound(StorageResourceNotFound):
|
||||
class SnapshotResourceNotFound(StorageResourceNotFound):
|
||||
message = _("Snapshot %(name)s not found.")
|
||||
|
||||
|
||||
|
@ -219,6 +219,14 @@ class SchedulerManager(manager.Manager):
|
||||
share_replica_id = request_spec.get(
|
||||
'share_instance_properties').get('id')
|
||||
|
||||
# Set any snapshot instances to 'error'.
|
||||
replica_snapshots = db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'share_instance_ids': share_replica_id})
|
||||
for snapshot_instance in replica_snapshots:
|
||||
db.share_snapshot_instance_update(
|
||||
context, snapshot_instance['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
|
||||
db.share_replica_update(context, share_replica_id, status_updates)
|
||||
|
||||
def create_share_replica(self, context, request_spec=None,
|
||||
|
@ -69,7 +69,7 @@ class API(base.Base):
|
||||
super(API, self).__init__(db_driver)
|
||||
|
||||
def create(self, context, share_proto, size, name, description,
|
||||
snapshot=None, availability_zone=None, metadata=None,
|
||||
snapshot_id=None, availability_zone=None, metadata=None,
|
||||
share_network_id=None, share_type=None, is_public=False,
|
||||
consistency_group_id=None, cgsnapshot_member=None):
|
||||
"""Create new share."""
|
||||
@ -77,16 +77,15 @@ class API(base.Base):
|
||||
|
||||
self._check_metadata_properties(context, metadata)
|
||||
|
||||
if snapshot is not None:
|
||||
if snapshot['status'] != constants.STATUS_AVAILABLE:
|
||||
if snapshot_id is not None:
|
||||
snapshot = self.get_snapshot(context, snapshot_id)
|
||||
if snapshot['aggregate_status'] != constants.STATUS_AVAILABLE:
|
||||
msg = _("status must be '%s'") % constants.STATUS_AVAILABLE
|
||||
raise exception.InvalidShareSnapshot(reason=msg)
|
||||
if not size:
|
||||
size = snapshot['size']
|
||||
|
||||
snapshot_id = snapshot['id']
|
||||
else:
|
||||
snapshot_id = None
|
||||
snapshot = None
|
||||
|
||||
def as_int(s):
|
||||
try:
|
||||
@ -113,7 +112,10 @@ class API(base.Base):
|
||||
source_share = self.db.share_get(context, snapshot['share_id'])
|
||||
availability_zone = source_share['instance']['availability_zone']
|
||||
if share_type is None:
|
||||
# Grab the source share's share_type if no new share type
|
||||
# has been provided.
|
||||
share_type_id = source_share['share_type_id']
|
||||
share_type = share_types.get_share_type(context, share_type_id)
|
||||
else:
|
||||
share_type_id = share_type['id']
|
||||
if share_type_id != source_share['share_type_id']:
|
||||
@ -400,6 +402,19 @@ class API(base.Base):
|
||||
context, share_replica['id'],
|
||||
{'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC})
|
||||
|
||||
existing_snapshots = (
|
||||
self.db.share_snapshot_get_all_for_share(
|
||||
context, share_replica['share_id'])
|
||||
)
|
||||
snapshot_instance = {
|
||||
'status': constants.STATUS_CREATING,
|
||||
'progress': '0%',
|
||||
'share_instance_id': share_replica['id'],
|
||||
}
|
||||
for snapshot in existing_snapshots:
|
||||
self.db.share_snapshot_instance_create(
|
||||
context, snapshot['id'], snapshot_instance)
|
||||
|
||||
self.scheduler_rpcapi.create_share_replica(
|
||||
context, request_spec=request_spec, filter_properties={})
|
||||
|
||||
@ -420,16 +435,26 @@ class API(base.Base):
|
||||
|
||||
LOG.info(_LI("Deleting replica %s."), id)
|
||||
|
||||
self.db.share_replica_update(
|
||||
context, share_replica['id'],
|
||||
{
|
||||
'status': constants.STATUS_DELETING,
|
||||
'terminated_at': timeutils.utcnow(),
|
||||
}
|
||||
)
|
||||
|
||||
if not share_replica['host']:
|
||||
self.db.share_replica_update(context, share_replica['id'],
|
||||
{'terminated_at': timeutils.utcnow()})
|
||||
# Delete any snapshot instances created on the database
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'share_instance_ids': share_replica['id']})
|
||||
)
|
||||
for snapshot in replica_snapshots:
|
||||
self.db.share_snapshot_instance_delete(context, snapshot['id'])
|
||||
|
||||
# Delete the replica from the database
|
||||
self.db.share_replica_delete(context, share_replica['id'])
|
||||
else:
|
||||
self.db.share_replica_update(
|
||||
context, share_replica['id'],
|
||||
{'status': constants.STATUS_DELETING,
|
||||
'terminated_at': timeutils.utcnow()}
|
||||
)
|
||||
|
||||
self.share_rpcapi.delete_share_replica(context,
|
||||
share_replica,
|
||||
@ -752,7 +777,27 @@ class API(base.Base):
|
||||
finally:
|
||||
QUOTAS.rollback(context, reservations)
|
||||
|
||||
self.share_rpcapi.create_snapshot(context, share, snapshot)
|
||||
# If replicated share, create snapshot instances for each replica
|
||||
if share.get('has_replicas'):
|
||||
snapshot = self.db.share_snapshot_get(context, snapshot['id'])
|
||||
share_instance_id = snapshot['instance']['share_instance_id']
|
||||
replicas = self.db.share_replicas_get_all_by_share(
|
||||
context, share['id'])
|
||||
replicas = [r for r in replicas if r['id'] != share_instance_id]
|
||||
snapshot_instance = {
|
||||
'status': constants.STATUS_CREATING,
|
||||
'progress': '0%',
|
||||
}
|
||||
for replica in replicas:
|
||||
snapshot_instance.update({'share_instance_id': replica['id']})
|
||||
self.db.share_snapshot_instance_create(
|
||||
context, snapshot['id'], snapshot_instance)
|
||||
self.share_rpcapi.create_replicated_snapshot(
|
||||
context, share, snapshot)
|
||||
|
||||
else:
|
||||
self.share_rpcapi.create_snapshot(context, share, snapshot)
|
||||
|
||||
return snapshot
|
||||
|
||||
def migration_start(self, context, share, host, force_host_copy,
|
||||
@ -930,16 +975,30 @@ class API(base.Base):
|
||||
@policy.wrap_check_policy('share')
|
||||
def delete_snapshot(self, context, snapshot, force=False):
|
||||
statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR)
|
||||
if not (force or snapshot['status'] in statuses):
|
||||
if not (force or snapshot['aggregate_status'] in statuses):
|
||||
msg = _("Share Snapshot status must be one of %(statuses)s.") % {
|
||||
"statuses": statuses}
|
||||
raise exception.InvalidShareSnapshot(reason=msg)
|
||||
|
||||
self.db.share_snapshot_update(context, snapshot['id'],
|
||||
{'status': constants.STATUS_DELETING})
|
||||
share = self.db.share_get(context, snapshot['share_id'])
|
||||
self.share_rpcapi.delete_snapshot(context, snapshot,
|
||||
share['instance']['host'])
|
||||
|
||||
snapshot_instances = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'snapshot_ids': snapshot['id']})
|
||||
)
|
||||
|
||||
for snapshot_instance in snapshot_instances:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, snapshot_instance['id'],
|
||||
{'status': constants.STATUS_DELETING})
|
||||
|
||||
if share['has_replicas']:
|
||||
self.share_rpcapi.delete_replicated_snapshot(
|
||||
context, snapshot, share['instance']['host'],
|
||||
share_id=share['id'], force=force)
|
||||
else:
|
||||
self.share_rpcapi.delete_snapshot(context, snapshot,
|
||||
share['instance']['host'])
|
||||
|
||||
@policy.wrap_check_policy('share')
|
||||
def update(self, context, share, fields):
|
||||
@ -1028,8 +1087,7 @@ class API(base.Base):
|
||||
|
||||
def get_snapshot(self, context, snapshot_id):
|
||||
policy.check_policy(context, 'share_snapshot', 'get_snapshot')
|
||||
rv = self.db.share_snapshot_get(context, snapshot_id)
|
||||
return dict(rv.items())
|
||||
return self.db.share_snapshot_get(context, snapshot_id)
|
||||
|
||||
def get_all_snapshots(self, context, search_opts=None,
|
||||
sort_key='share_id', sort_dir='desc'):
|
||||
|
@ -1046,9 +1046,12 @@ class ShareDriver(object):
|
||||
return share_instances
|
||||
|
||||
def create_replica(self, context, replica_list, new_replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots, share_server=None):
|
||||
"""Replicate the active replica to a new replica on this backend.
|
||||
|
||||
NOTE: This call is made on the host that the new replica is
|
||||
being created upon.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
This list also contains the replica to be created. The 'active'
|
||||
@ -1092,7 +1095,7 @@ class ShareDriver(object):
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'deleted': False,
|
||||
'host': 'openstack2@cmodeSSVMNFS2',
|
||||
'status': 'available',
|
||||
'status': 'creating',
|
||||
'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
|
||||
'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
|
||||
'terminated_at': None,
|
||||
@ -1109,7 +1112,7 @@ class ShareDriver(object):
|
||||
:param access_rules: A list of access rules that other instances of
|
||||
the share already obey. Drivers are expected to apply access rules
|
||||
to the new replica or disregard access rules that don't apply.
|
||||
EXAMPLE:
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[ {
|
||||
'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676',
|
||||
@ -1119,6 +1122,38 @@ class ShareDriver(object):
|
||||
'access_to' = '172.16.20.1',
|
||||
'access_level' = 'rw',
|
||||
}]
|
||||
:param replica_snapshots: List of dictionaries of snapshot instances
|
||||
for each snapshot of the share whose 'aggregate_status' property was
|
||||
reported to be 'available' when the share manager initiated this
|
||||
request. Each list member will have two sub dictionaries:
|
||||
'active_replica_snapshot' and 'share_replica_snapshot'. The 'active'
|
||||
replica snapshot corresponds to the instance of the snapshot on any
|
||||
of the 'active' replicas of the share while share_replica_snapshot
|
||||
corresponds to the snapshot instance for the specific replica that
|
||||
will need to exist on the new share replica that is being created.
|
||||
The driver needs to ensure that this snapshot instance is truly
|
||||
available before transitioning the replica from 'out_of_sync' to
|
||||
'in_sync'. Snapshots instances for snapshots that have an
|
||||
'aggregate_status' of 'creating' or 'deleting' will be polled for in
|
||||
the update_replicated_snapshot method.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[ {
|
||||
'active_replica_snapshot': {
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'share_instance_id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
|
||||
'status': 'available',
|
||||
'provider_location': '/newton/share-snapshot-10e49c3e-aca9',
|
||||
...
|
||||
},
|
||||
'share_replica_snapshot': {
|
||||
'id': '',
|
||||
'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
|
||||
'status': 'available',
|
||||
'provider_location': None,
|
||||
...
|
||||
},
|
||||
}]
|
||||
:param share_server: <models.ShareServer> or None,
|
||||
Share server of the replica being created.
|
||||
:return: None or a dictionary containing export_locations,
|
||||
@ -1127,7 +1162,7 @@ class ShareDriver(object):
|
||||
error. A backend supporting 'writable' type replication should return
|
||||
'active' as the replica_state. Export locations should be in the
|
||||
same format as returned during the create_share call.
|
||||
EXAMPLE:
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
{
|
||||
'export_locations': [
|
||||
@ -1143,9 +1178,12 @@ class ShareDriver(object):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_replica(self, context, replica_list, replica,
|
||||
share_server=None):
|
||||
"""Delete a replica. This is called on the destination backend.
|
||||
def delete_replica(self, context, replica_list, replica_snapshots,
|
||||
replica, share_server=None):
|
||||
"""Delete a replica.
|
||||
|
||||
NOTE: This call is made on the host that hosts the replica being
|
||||
deleted.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
@ -1204,9 +1242,39 @@ class ShareDriver(object):
|
||||
'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
}
|
||||
:param replica_snapshots: A list of dictionaries containing snapshot
|
||||
instances that are associated with the share replica being deleted.
|
||||
No model updates are possible in this method. The driver should
|
||||
return when the cleanup is completed on the backend for both,
|
||||
the snapshots and the replica itself. Drivers must handle situations
|
||||
where the snapshot may not yet have finished 'creating' on this
|
||||
replica.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
[
|
||||
{
|
||||
'id': '89dafd00-0999-4d23-8614-13eaa6b02a3b',
|
||||
'snapshot_id': '3ce1caf7-0945-45fd-a320-714973e949d3',
|
||||
'status: 'available',
|
||||
'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f'
|
||||
...
|
||||
},
|
||||
{
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'status: 'creating',
|
||||
'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f'
|
||||
...
|
||||
},
|
||||
...
|
||||
]
|
||||
:param share_server: <models.ShareServer> or None,
|
||||
Share server of the replica to be deleted.
|
||||
:return: None.
|
||||
:raises Exception. Any exception raised will set the share replica's
|
||||
'status' and 'replica_state' to 'error_deleting'. It will not affect
|
||||
snapshots belonging to this replica.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
@ -1214,6 +1282,9 @@ class ShareDriver(object):
|
||||
share_server=None):
|
||||
"""Promote a replica to 'active' replica state.
|
||||
|
||||
NOTE: This call is made on the host that hosts the replica being
|
||||
promoted.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
This list also contains the replica to be promoted. The 'active'
|
||||
@ -1302,9 +1373,13 @@ class ShareDriver(object):
|
||||
raise NotImplementedError()
|
||||
|
||||
def update_replica_state(self, context, replica_list, replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots,
|
||||
share_server=None):
|
||||
"""Update the replica_state of a replica.
|
||||
|
||||
NOTE: This call is made on the host which hosts the replica being
|
||||
updated.
|
||||
|
||||
Drivers should fix replication relationships that were broken if
|
||||
possible inside this method.
|
||||
|
||||
@ -1382,6 +1457,37 @@ class ShareDriver(object):
|
||||
'access_to' = '172.16.20.1',
|
||||
'access_level' = 'rw',
|
||||
}]
|
||||
:param replica_snapshots: List of dictionaries of snapshot instances
|
||||
for each snapshot of the share whose 'aggregate_status' property was
|
||||
reported to be 'available' when the share manager initiated this
|
||||
request. Each list member will have two sub dictionaries:
|
||||
'active_replica_snapshot' and 'share_replica_snapshot'. The 'active'
|
||||
replica snapshot corresponds to the instance of the snapshot on any
|
||||
of the 'active' replicas of the share while share_replica_snapshot
|
||||
corresponds to the snapshot instance for the specific replica being
|
||||
updated. The driver needs to ensure that this snapshot instance is
|
||||
truly available before transitioning from 'out_of_sync' to
|
||||
'in_sync'. Snapshots instances for snapshots that have an
|
||||
'aggregate_status' of 'creating' or 'deleting' will be polled for in
|
||||
the update_replicated_snapshot method.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[ {
|
||||
'active_replica_snapshot': {
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'share_instance_id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
|
||||
'status': 'available',
|
||||
'provider_location': '/newton/share-snapshot-10e49c3e-aca9',
|
||||
...
|
||||
},
|
||||
'share_replica_snapshot': {
|
||||
'id': ,
|
||||
'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'status': 'creating',
|
||||
'provider_location': None,
|
||||
...
|
||||
},
|
||||
}]
|
||||
:param share_server: <models.ShareServer> or None
|
||||
:return: replica_state
|
||||
replica_state - a str value denoting the replica_state that the
|
||||
@ -1389,3 +1495,267 @@ class ShareDriver(object):
|
||||
or None (to leave the current replica_state unchanged).
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_replicated_snapshot(self, context, replica_list,
|
||||
replica_snapshots,
|
||||
share_server=None):
|
||||
"""Create a snapshot on active instance and update across the replicas.
|
||||
|
||||
NOTE: This call is made on the 'active' replica's host. Drivers
|
||||
are expected to transfer the snapshot created to the respective
|
||||
replicas.
|
||||
|
||||
The driver is expected to return model updates to the share manager.
|
||||
If it was able to confirm the creation of any number of the snapshot
|
||||
instances passed in this interface, it can set their status to
|
||||
'available' as a cue for the share manager to set the progress attr
|
||||
to '100%'.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
The 'active' replica will have its 'replica_state' attr set to
|
||||
'active'.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
[
|
||||
{
|
||||
'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'in_sync',
|
||||
...
|
||||
'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
{
|
||||
'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'active',
|
||||
...
|
||||
'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
...
|
||||
]
|
||||
:param replica_snapshots: List of all snapshot instances that track
|
||||
the snapshot across the replicas. All the instances will have their
|
||||
status attribute set to 'creating'.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[
|
||||
{
|
||||
'id': 'd3931a93-3984-421e-a9e7-d9f71895450a',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'status: 'creating',
|
||||
'progress': '0%',
|
||||
...
|
||||
},
|
||||
{
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'status: 'creating',
|
||||
'progress': '0%',
|
||||
...
|
||||
},
|
||||
...
|
||||
]
|
||||
:param share_server: <models.ShareServer> or None
|
||||
:return: List of replica_snapshots, a list of dictionaries containing
|
||||
values that need to be updated on the database for the snapshot
|
||||
instances being created.
|
||||
:raises: Exception. Any exception in this method will set all
|
||||
instances to 'error'.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_replicated_snapshot(self, context, replica_list,
|
||||
replica_snapshots, share_server=None):
|
||||
"""Delete a snapshot by deleting its instances across the replicas.
|
||||
|
||||
NOTE: This call is made on the 'active' replica's host, since
|
||||
drivers may not be able to delete the snapshot from an individual
|
||||
replica.
|
||||
|
||||
The driver is expected to return model updates to the share manager.
|
||||
If it was able to confirm the removal of any number of the snapshot
|
||||
instances passed in this interface, it can set their status to
|
||||
'deleted' as a cue for the share manager to clean up that instance
|
||||
from the database.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
The 'active' replica will have its 'replica_state' attr set to
|
||||
'active'.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
[
|
||||
{
|
||||
'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'in_sync',
|
||||
...
|
||||
'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
{
|
||||
'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'active',
|
||||
...
|
||||
'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
...
|
||||
]
|
||||
:param replica_snapshots: List of all snapshot instances that track
|
||||
the snapshot across the replicas. All the instances will have their
|
||||
status attribute set to 'deleting'.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[
|
||||
{
|
||||
'id': 'd3931a93-3984-421e-a9e7-d9f71895450a',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'status': 'deleting',
|
||||
'progress': '100%',
|
||||
...
|
||||
},
|
||||
{
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'status: 'deleting',
|
||||
'progress': '100%',
|
||||
...
|
||||
},
|
||||
...
|
||||
]
|
||||
:param share_server: <models.ShareServer> or None
|
||||
:return: List of replica_snapshots, a list of dictionaries containing
|
||||
values that need to be updated on the database for the snapshot
|
||||
instances being deleted. To confirm the deletion of the snapshot
|
||||
instance, set the 'status' attribute of the instance to
|
||||
'deleted'(constants.STATUS_DELETED).
|
||||
:raises: Exception. Any exception in this method will set all
|
||||
instances to 'error_deleting'.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def update_replicated_snapshot(self, context, replica_list,
|
||||
share_replica, replica_snapshots,
|
||||
replica_snapshot, share_server=None):
|
||||
"""Update the status of a snapshot instance that lives on a replica.
|
||||
|
||||
NOTE: For DR and Readable styles of replication, this call is made on
|
||||
the replica's host and not the 'active' replica's host.
|
||||
|
||||
This method is called periodically by the share manager. It will
|
||||
query for snapshot instances that track the parent snapshot across
|
||||
non-'active' replicas. Drivers can expect the status of the instance to
|
||||
be 'creating' or 'deleting'. If the driver sees that a snapshot
|
||||
instance has been removed from the replica's backend and the
|
||||
instance status was set to 'deleting', it is expected to raise a
|
||||
SnapshotResourceNotFound exception. All other exceptions will set the
|
||||
snapshot instance status to 'error'. If the instance was not in
|
||||
'deleting' state, raising a SnapshotResourceNotFound will set the
|
||||
instance status to 'error'.
|
||||
|
||||
:param context: Current context
|
||||
:param replica_list: List of all replicas for a particular share.
|
||||
The 'active' replica will have its 'replica_state' attr set to
|
||||
'active'.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
[
|
||||
{
|
||||
'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'in_sync',
|
||||
...
|
||||
'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
{
|
||||
'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'replica_state': 'active',
|
||||
...
|
||||
'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
|
||||
'share_server': <models.ShareServer> or None,
|
||||
},
|
||||
...
|
||||
]
|
||||
:param share_replica: Dictionary of the replica the snapshot instance
|
||||
is meant to be associated with. Replicas in 'active' replica_state
|
||||
will not be passed via this parameter.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
{
|
||||
'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
|
||||
'deleted': False,
|
||||
'host': 'openstack2@cmodeSSVMNFS1',
|
||||
'status': 'available',
|
||||
'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
|
||||
'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
|
||||
'terminated_at': None,
|
||||
'replica_state': 'in_sync',
|
||||
'availability_zone_id': 'e2c2db5c-cb2f-4697-9966-c06fb200cb80',
|
||||
'export_locations': [
|
||||
models.ShareInstanceExportLocations,
|
||||
],
|
||||
'access_rules_status': 'in_sync',
|
||||
'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f',
|
||||
'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
|
||||
}
|
||||
:param replica_snapshots: List of all snapshot instances that track
|
||||
the snapshot across the replicas. This will include the instance
|
||||
being updated as well.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
[
|
||||
{
|
||||
'id': 'd3931a93-3984-421e-a9e7-d9f71895450a',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
...
|
||||
},
|
||||
{
|
||||
'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
...
|
||||
},
|
||||
...
|
||||
]
|
||||
:param replica_snapshot: Dictionary of the snapshot instance to be
|
||||
updated. replica_snapshot will be in 'creating' or 'deleting'
|
||||
states when sent via this parameter.
|
||||
EXAMPLE:
|
||||
.. code::
|
||||
|
||||
{
|
||||
'name': 'share-snapshot-18825630-574f-4912-93bb-af4611ef35a2',
|
||||
'share_id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'share_name': 'share-d487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'status': 'creating',
|
||||
'id': '18825630-574f-4912-93bb-af4611ef35a2',
|
||||
'deleted': False,
|
||||
'created_at': datetime.datetime(2016, 8, 3, 0, 5, 58),
|
||||
'share': <models.ShareInstance>,
|
||||
'updated_at': datetime.datetime(2016, 8, 3, 0, 5, 58),
|
||||
'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
|
||||
'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
|
||||
'progress': '0%',
|
||||
'deleted_at': None,
|
||||
'provider_location': None,
|
||||
}
|
||||
:param share_server: <models.ShareServer> or None
|
||||
:return: replica_snapshot_model_update, a dictionary containing
|
||||
values that need to be updated on the database for the snapshot
|
||||
instance that represents the snapshot on the replica.
|
||||
:raises: exception.SnapshotResourceNotFound for
|
||||
snapshots that are not found on the backend and their status was
|
||||
'deleting'.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
@ -1817,7 +1817,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
||||
snapshot_info_list = attributes_list.get_children()
|
||||
|
||||
if not self._has_records(result):
|
||||
raise exception.SnapshotNotFound(name=snapshot_name)
|
||||
raise exception.SnapshotResourceNotFound(name=snapshot_name)
|
||||
elif len(snapshot_info_list) > 1:
|
||||
msg = _('Could not find unique snapshot %(snap)s on '
|
||||
'volume %(vol)s.')
|
||||
|
@ -122,10 +122,11 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
|
||||
self.library.teardown_server(server_details, **kwargs)
|
||||
|
||||
def create_replica(self, context, replica_list, replica, access_rules,
|
||||
**kwargs):
|
||||
replica_snapshots, **kwargs):
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_replica(self, context, replica_list, replica, **kwargs):
|
||||
def delete_replica(self, context, replica_list, replica_snapshots,
|
||||
replica, **kwargs):
|
||||
raise NotImplementedError()
|
||||
|
||||
def promote_replica(self, context, replica_list, replica, access_rules,
|
||||
@ -133,5 +134,6 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
|
||||
raise NotImplementedError()
|
||||
|
||||
def update_replica_state(self, context, replica_list, replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots,
|
||||
share_server=None):
|
||||
raise NotImplementedError()
|
||||
|
@ -121,11 +121,12 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
|
||||
self.library.teardown_server(server_details, **kwargs)
|
||||
|
||||
def create_replica(self, context, replica_list, replica, access_rules,
|
||||
**kwargs):
|
||||
replica_snapshots, **kwargs):
|
||||
return self.library.create_replica(context, replica_list, replica,
|
||||
access_rules, **kwargs)
|
||||
|
||||
def delete_replica(self, context, replica_list, replica, **kwargs):
|
||||
def delete_replica(self, context, replica_list, replica_snapshots, replica,
|
||||
**kwargs):
|
||||
self.library.delete_replica(context, replica_list, replica, **kwargs)
|
||||
|
||||
def promote_replica(self, context, replica_list, replica, access_rules,
|
||||
@ -135,7 +136,8 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
|
||||
share_server=share_server)
|
||||
|
||||
def update_replica_state(self, context, replica_list, replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots,
|
||||
share_server=None):
|
||||
return self.library.update_replica_state(context,
|
||||
replica_list,
|
||||
replica,
|
||||
|
@ -734,7 +734,7 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
|
||||
try:
|
||||
self._delete_snapshot(vserver_client, share_name, snapshot_name)
|
||||
except exception.SnapshotNotFound:
|
||||
except exception.SnapshotResourceNotFound:
|
||||
msg = _LI("Snapshot %(snap)s does not exist on share %(share)s.")
|
||||
msg_args = {'snap': snapshot_name, 'share': share_name}
|
||||
LOG.info(msg, msg_args)
|
||||
@ -992,7 +992,7 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
try:
|
||||
self._delete_snapshot(
|
||||
vserver_client, share_name, snapshot_name)
|
||||
except exception.SnapshotNotFound:
|
||||
except exception.SnapshotResourceNotFound:
|
||||
msg = _LI("Snapshot %(snap)s does not exist on share "
|
||||
"%(share)s.")
|
||||
msg_args = {'snap': snapshot_name, 'share': share_name}
|
||||
|
@ -632,7 +632,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
||||
|
||||
@ensure_share_server_not_provided
|
||||
def create_replica(self, context, replica_list, new_replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots, share_server=None):
|
||||
"""Replicates the active replica to a new replica on this backend."""
|
||||
active_replica = self._get_active_replica(replica_list)
|
||||
src_dataset_name = self.private_storage.get(
|
||||
@ -704,7 +704,7 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
||||
}
|
||||
|
||||
@ensure_share_server_not_provided
|
||||
def delete_replica(self, context, replica_list, replica,
|
||||
def delete_replica(self, context, replica_list, replica_snapshots, replica,
|
||||
share_server=None):
|
||||
"""Deletes a replica. This is called on the destination backend."""
|
||||
pool_name = self.private_storage.get(replica['id'], 'pool_name')
|
||||
@ -737,7 +737,8 @@ class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
|
||||
|
||||
@ensure_share_server_not_provided
|
||||
def update_replica_state(self, context, replica_list, replica,
|
||||
access_rules, share_server=None):
|
||||
access_rules, replica_snapshots,
|
||||
share_server=None):
|
||||
"""Syncs replica and updates its 'replica_state'."""
|
||||
active_replica = self._get_active_replica(replica_list)
|
||||
src_dataset_name = self.private_storage.get(
|
||||
|
@ -129,12 +129,13 @@ def locked_share_replica_operation(operation):
|
||||
promote ReplicaA while deleting ReplicaB, both belonging to the same share.
|
||||
"""
|
||||
|
||||
def wrapped(instance, context, share_replica_id, share_id=None, **kwargs):
|
||||
def wrapped(*args, **kwargs):
|
||||
share_id = kwargs.get('share_id')
|
||||
|
||||
@utils.synchronized("%s" % share_id, external=True)
|
||||
def locked_operation(*_args, **_kwargs):
|
||||
return operation(*_args, **_kwargs)
|
||||
return locked_operation(instance, context, share_replica_id,
|
||||
share_id=share_id, **kwargs)
|
||||
return locked_operation(*args, **kwargs)
|
||||
return wrapped
|
||||
|
||||
|
||||
@ -168,7 +169,7 @@ def add_hooks(f):
|
||||
class ShareManager(manager.SchedulerDependentManager):
|
||||
"""Manages NAS storages."""
|
||||
|
||||
RPC_API_VERSION = '1.10'
|
||||
RPC_API_VERSION = '1.11'
|
||||
|
||||
def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
|
||||
"""Load the driver from args, or from flags."""
|
||||
@ -1032,6 +1033,39 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_instance_update_access_status(
|
||||
context, share_replica_id, state)
|
||||
|
||||
def _get_replica_snapshots_for_snapshot(self, context, snapshot_id,
|
||||
active_replica_id,
|
||||
share_replica_id,
|
||||
with_share_data=True):
|
||||
"""Return dict of snapshot instances of active and replica instances.
|
||||
|
||||
This method returns a dict of snapshot instances for snapshot
|
||||
referred to by snapshot_id. The dict contains the snapshot instance
|
||||
pertaining to the 'active' replica and the snapshot instance
|
||||
pertaining to the replica referred to by share_replica_id.
|
||||
"""
|
||||
filters = {
|
||||
'snapshot_ids': snapshot_id,
|
||||
'share_instance_ids': (share_replica_id, active_replica_id),
|
||||
}
|
||||
instance_list = self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, filters, with_share_data=with_share_data)
|
||||
|
||||
snapshots = {
|
||||
'active_replica_snapshot': self._get_snapshot_instance_dict(
|
||||
context,
|
||||
list(filter(lambda x:
|
||||
x['share_instance_id'] == active_replica_id,
|
||||
instance_list))[0]),
|
||||
'share_replica_snapshot': self._get_snapshot_instance_dict(
|
||||
context,
|
||||
list(filter(lambda x:
|
||||
x['share_instance_id'] == share_replica_id,
|
||||
instance_list))[0]),
|
||||
}
|
||||
|
||||
return snapshots
|
||||
|
||||
@add_hooks
|
||||
@utils.require_driver_initialized
|
||||
@locked_share_replica_operation
|
||||
@ -1102,6 +1136,17 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
share_access_rules = self.db.share_instance_access_copy(
|
||||
context, share_replica['share_id'], share_replica['id'])
|
||||
|
||||
# Get snapshots for the share.
|
||||
share_snapshots = self.db.share_snapshot_get_all_for_share(
|
||||
context, share_id)
|
||||
# Get the required data for snapshots that have 'aggregate_status'
|
||||
# set to 'available'.
|
||||
available_share_snapshots = [
|
||||
self._get_replica_snapshots_for_snapshot(
|
||||
context, x['id'], _active_replica['id'], share_replica_id)
|
||||
for x in share_snapshots
|
||||
if x['aggregate_status'] == constants.STATUS_AVAILABLE]
|
||||
|
||||
replica_list = (
|
||||
self.db.share_replicas_get_all_by_share(
|
||||
context, share_replica['share_id'],
|
||||
@ -1115,7 +1160,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
try:
|
||||
replica_ref = self.driver.create_replica(
|
||||
context, replica_list, share_replica,
|
||||
share_access_rules, share_server=share_server)
|
||||
share_access_rules, available_share_snapshots,
|
||||
share_server=share_server) or {}
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
@ -1166,6 +1212,13 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
context, share_replica_id, with_share_data=True,
|
||||
with_share_server=True)
|
||||
|
||||
# Grab all the snapshot instances that belong to this replica.
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'share_instance_ids': share_replica_id},
|
||||
with_share_data=True)
|
||||
)
|
||||
|
||||
replica_list = (
|
||||
self.db.share_replicas_get_all_by_share(
|
||||
context, share_replica['share_id'],
|
||||
@ -1174,6 +1227,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
|
||||
replica_list = [self._get_share_replica_dict(context, r)
|
||||
for r in replica_list]
|
||||
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
|
||||
for s in replica_snapshots]
|
||||
share_server = self._get_share_server(context, share_replica)
|
||||
share_replica = self._get_share_replica_dict(context, share_replica)
|
||||
|
||||
@ -1193,24 +1248,24 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
{'status': constants.STATUS_ERROR})
|
||||
if force:
|
||||
msg = _("The driver was unable to delete access rules "
|
||||
"for the replica: %s. Will attempt to delete the "
|
||||
"replica anyway.")
|
||||
LOG.error(msg % share_replica['id'])
|
||||
"for the replica: %s. Will attempt to delete "
|
||||
"the replica anyway.")
|
||||
LOG.exception(msg % share_replica['id'])
|
||||
exc_context.reraise = False
|
||||
|
||||
try:
|
||||
self.driver.delete_replica(
|
||||
context, replica_list, share_replica,
|
||||
context, replica_list, replica_snapshots, share_replica,
|
||||
share_server=share_server)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception() as exc_context:
|
||||
if force:
|
||||
msg = _("The driver was unable to delete the share "
|
||||
"replica: %s on the backend. Since this "
|
||||
"operation is forced, the replica will be "
|
||||
"replica: %s on the backend. Since "
|
||||
"this operation is forced, the replica will be "
|
||||
"deleted from Manila's database. A cleanup on "
|
||||
"the backend may be necessary.")
|
||||
LOG.error(msg % share_replica['id'])
|
||||
LOG.exception(msg, share_replica['id'])
|
||||
exc_context.reraise = False
|
||||
else:
|
||||
self.db.share_replica_update(
|
||||
@ -1218,6 +1273,10 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
{'status': constants.STATUS_ERROR_DELETING,
|
||||
'replica_state': constants.STATUS_ERROR})
|
||||
|
||||
for replica_snapshot in replica_snapshots:
|
||||
self.db.share_snapshot_instance_delete(
|
||||
context, replica_snapshot['id'])
|
||||
|
||||
self.db.share_replica_delete(context, share_replica['id'])
|
||||
LOG.info(_LI("Share replica %s deleted successfully."),
|
||||
share_replica['id'])
|
||||
@ -1286,6 +1345,30 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
self.db.share_replica_update(
|
||||
context, replica_ref['id'], updates)
|
||||
|
||||
# Set any 'creating' snapshots on the currently active replica to
|
||||
# 'error' since we cannot guarantee they will finish 'creating'.
|
||||
active_replica_snapshot_instances = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'share_instance_ids': share_replica['id']})
|
||||
)
|
||||
for instance in active_replica_snapshot_instances:
|
||||
if instance['status'] in (constants.STATUS_CREATING,
|
||||
constants.STATUS_DELETING):
|
||||
msg = _LI("The replica snapshot instance %(instance)s was "
|
||||
"in %(state)s. Since it was not in %(available)s "
|
||||
"state when the replica was promoted, it will be "
|
||||
"set to %(error)s.")
|
||||
payload = {
|
||||
'instance': instance['id'],
|
||||
'state': instance['status'],
|
||||
'available': constants.STATUS_AVAILABLE,
|
||||
'error': constants.STATUS_ERROR,
|
||||
}
|
||||
LOG.info(msg, payload)
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, instance['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
|
||||
if not updated_replica_list:
|
||||
self.db.share_replica_update(
|
||||
context, share_replica['id'],
|
||||
@ -1356,7 +1439,6 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
@locked_share_replica_operation
|
||||
def _share_replica_update(self, context, share_replica, share_id=None):
|
||||
share_server = self._get_share_server(context, share_replica)
|
||||
replica_state = None
|
||||
|
||||
# Re-grab the replica:
|
||||
try:
|
||||
@ -1386,6 +1468,22 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
with_share_data=True, with_share_server=True)
|
||||
)
|
||||
|
||||
_active_replica = [x for x in replica_list
|
||||
if x['replica_state'] ==
|
||||
constants.REPLICA_STATE_ACTIVE][0]
|
||||
|
||||
# Get snapshots for the share.
|
||||
share_snapshots = self.db.share_snapshot_get_all_for_share(
|
||||
context, share_id)
|
||||
|
||||
# Get the required data for snapshots that have 'aggregate_status'
|
||||
# set to 'available'.
|
||||
available_share_snapshots = [
|
||||
self._get_replica_snapshots_for_snapshot(
|
||||
context, x['id'], _active_replica['id'], share_replica['id'])
|
||||
for x in share_snapshots
|
||||
if x['aggregate_status'] == constants.STATUS_AVAILABLE]
|
||||
|
||||
replica_list = [self._get_share_replica_dict(context, r)
|
||||
for r in replica_list]
|
||||
|
||||
@ -1394,7 +1492,7 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
try:
|
||||
replica_state = self.driver.update_replica_state(
|
||||
context, replica_list, share_replica, access_rules,
|
||||
share_server=share_server)
|
||||
available_share_snapshots, share_server=share_server)
|
||||
except Exception:
|
||||
msg = _LE("Driver error when updating replica "
|
||||
"state for replica %s.")
|
||||
@ -1680,7 +1778,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
# quota usages later if it's required.
|
||||
LOG.warning(_LW("Failed to update quota usages: %s."), e)
|
||||
|
||||
self.db.share_snapshot_destroy(context, snapshot_id)
|
||||
self.db.share_snapshot_instance_delete(
|
||||
context, snapshot_instance['id'])
|
||||
|
||||
@add_hooks
|
||||
@utils.require_driver_initialized
|
||||
@ -1831,7 +1930,8 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
snapshot_instance_id,
|
||||
{'status': constants.STATUS_ERROR_DELETING})
|
||||
else:
|
||||
self.db.share_snapshot_destroy(context, snapshot_id)
|
||||
self.db.share_snapshot_instance_delete(
|
||||
context, snapshot_instance_id)
|
||||
try:
|
||||
reservations = QUOTAS.reserve(
|
||||
context, project_id=project_id, snapshots=-1,
|
||||
@ -1845,6 +1945,246 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
QUOTAS.commit(context, reservations, project_id=project_id,
|
||||
user_id=snapshot_ref['user_id'])
|
||||
|
||||
@add_hooks
|
||||
@utils.require_driver_initialized
|
||||
@locked_share_replica_operation
|
||||
def create_replicated_snapshot(self, context, snapshot_id, share_id=None):
|
||||
"""Create a snapshot for a replicated share."""
|
||||
# Grab the snapshot and replica information from the DB.
|
||||
snapshot = self.db.share_snapshot_get(context, snapshot_id)
|
||||
share_server = self._get_share_server(context, snapshot['share'])
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'snapshot_ids': snapshot['id']},
|
||||
with_share_data=True)
|
||||
)
|
||||
replica_list = (
|
||||
self.db.share_replicas_get_all_by_share(
|
||||
context, share_id, with_share_data=True,
|
||||
with_share_server=True)
|
||||
)
|
||||
|
||||
# Make primitives to pass the information to the driver.
|
||||
|
||||
replica_list = [self._get_share_replica_dict(context, r)
|
||||
for r in replica_list]
|
||||
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
|
||||
for s in replica_snapshots]
|
||||
updated_instances = []
|
||||
|
||||
try:
|
||||
updated_instances = self.driver.create_replicated_snapshot(
|
||||
context, replica_list, replica_snapshots,
|
||||
share_server=share_server) or []
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
for instance in replica_snapshots:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, instance['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
|
||||
for instance in updated_instances:
|
||||
if instance['status'] == constants.STATUS_AVAILABLE:
|
||||
instance.update({'progress': '100%'})
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, instance['id'], instance)
|
||||
|
||||
@add_hooks
|
||||
@utils.require_driver_initialized
|
||||
@locked_share_replica_operation
|
||||
def delete_replicated_snapshot(self, context, snapshot_id,
|
||||
share_id=None, force=False):
|
||||
"""Delete a snapshot from a replicated share."""
|
||||
# Grab the replica and snapshot information from the DB.
|
||||
snapshot = self.db.share_snapshot_get(context, snapshot_id)
|
||||
share_server = self._get_share_server(context, snapshot['share'])
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, {'snapshot_ids': snapshot['id']},
|
||||
with_share_data=True)
|
||||
)
|
||||
replica_list = (
|
||||
self.db.share_replicas_get_all_by_share(
|
||||
context, share_id, with_share_data=True,
|
||||
with_share_server=True)
|
||||
)
|
||||
|
||||
replica_list = [self._get_share_replica_dict(context, r)
|
||||
for r in replica_list]
|
||||
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
|
||||
for s in replica_snapshots]
|
||||
deleted_instances = []
|
||||
updated_instances = []
|
||||
db_force_delete_msg = _('The driver was unable to delete some or all '
|
||||
'of the share replica snapshots on the '
|
||||
'backend/s. Since this operation is forced, '
|
||||
'the replica snapshots will be deleted from '
|
||||
'Manila.')
|
||||
|
||||
try:
|
||||
|
||||
updated_instances = self.driver.delete_replicated_snapshot(
|
||||
context, replica_list, replica_snapshots,
|
||||
share_server=share_server) or []
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception() as e:
|
||||
if force:
|
||||
# Can delete all instances if forced.
|
||||
deleted_instances = replica_snapshots
|
||||
LOG.exception(db_force_delete_msg)
|
||||
e.reraise = False
|
||||
else:
|
||||
for instance in replica_snapshots:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, instance['id'],
|
||||
{'status': constants.STATUS_ERROR_DELETING})
|
||||
|
||||
if not deleted_instances:
|
||||
if force:
|
||||
# Ignore model updates on 'force' delete.
|
||||
LOG.warning(db_force_delete_msg)
|
||||
deleted_instances = replica_snapshots
|
||||
else:
|
||||
deleted_instances = list(filter(
|
||||
lambda x: x['status'] == constants.STATUS_DELETED,
|
||||
updated_instances))
|
||||
updated_instances = list(filter(
|
||||
lambda x: x['status'] != constants.STATUS_DELETED,
|
||||
updated_instances))
|
||||
|
||||
for instance in deleted_instances:
|
||||
self.db.share_snapshot_instance_delete(context, instance['id'])
|
||||
|
||||
for instance in updated_instances:
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, instance['id'], instance)
|
||||
|
||||
@periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
|
||||
@utils.require_driver_initialized
|
||||
def periodic_share_replica_snapshot_update(self, context):
|
||||
LOG.debug("Updating status of share replica snapshots.")
|
||||
transitional_statuses = (constants.STATUS_CREATING,
|
||||
constants.STATUS_DELETING)
|
||||
replicas = self.db.share_replicas_get_all(context,
|
||||
with_share_data=True)
|
||||
|
||||
def qualified_replica(r):
|
||||
# Filter non-active replicas belonging to this backend
|
||||
return (share_utils.extract_host(r['host']) ==
|
||||
share_utils.extract_host(self.host) and
|
||||
r['replica_state'] != constants.REPLICA_STATE_ACTIVE)
|
||||
|
||||
host_replicas = list(filter(
|
||||
lambda x: qualified_replica(x), replicas))
|
||||
transitional_replica_snapshots = []
|
||||
|
||||
# Get snapshot instances for each replica that are in 'creating' or
|
||||
# 'deleting' states.
|
||||
for replica in host_replicas:
|
||||
filters = {
|
||||
'share_instance_ids': replica['id'],
|
||||
'statuses': transitional_statuses,
|
||||
}
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context, filters, with_share_data=True)
|
||||
)
|
||||
transitional_replica_snapshots.extend(replica_snapshots)
|
||||
|
||||
for replica_snapshot in transitional_replica_snapshots:
|
||||
replica_snapshots = (
|
||||
self.db.share_snapshot_instance_get_all_with_filters(
|
||||
context,
|
||||
{'snapshot_ids': replica_snapshot['snapshot_id']})
|
||||
)
|
||||
share_id = replica_snapshot['share']['share_id']
|
||||
self._update_replica_snapshot(
|
||||
context, replica_snapshot,
|
||||
replica_snapshots=replica_snapshots, share_id=share_id)
|
||||
|
||||
@locked_share_replica_operation
|
||||
def _update_replica_snapshot(self, context, replica_snapshot,
|
||||
replica_snapshots=None, share_id=None):
|
||||
# Re-grab the replica:
|
||||
try:
|
||||
share_replica = self.db.share_replica_get(
|
||||
context, replica_snapshot['share_instance_id'],
|
||||
with_share_data=True, with_share_server=True)
|
||||
replica_snapshot = self.db.share_snapshot_instance_get(
|
||||
context, replica_snapshot['id'], with_share_data=True)
|
||||
except exception.NotFound:
|
||||
# Replica may have been deleted, try to cleanup the snapshot
|
||||
# instance
|
||||
try:
|
||||
self.db.share_snapshot_instance_delete(
|
||||
context, replica_snapshot['id'])
|
||||
except exception.ShareSnapshotInstanceNotFound:
|
||||
# snapshot instance has been deleted, nothing to do here
|
||||
pass
|
||||
return
|
||||
|
||||
msg_payload = {
|
||||
'snapshot_instance': replica_snapshot['id'],
|
||||
'replica': share_replica['id'],
|
||||
}
|
||||
|
||||
LOG.debug("Updating status of replica snapshot %(snapshot_instance)s: "
|
||||
"on replica: %(replica)s", msg_payload)
|
||||
|
||||
# Grab all the replica and snapshot information.
|
||||
replica_list = (
|
||||
self.db.share_replicas_get_all_by_share(
|
||||
context, share_replica['share_id'],
|
||||
with_share_data=True, with_share_server=True)
|
||||
)
|
||||
|
||||
replica_list = [self._get_share_replica_dict(context, r)
|
||||
for r in replica_list]
|
||||
replica_snapshots = replica_snapshots or []
|
||||
|
||||
# Convert data to primitives to send to the driver.
|
||||
|
||||
replica_snapshots = [self._get_snapshot_instance_dict(context, s)
|
||||
for s in replica_snapshots]
|
||||
replica_snapshot = self._get_snapshot_instance_dict(
|
||||
context, replica_snapshot)
|
||||
share_replica = self._get_share_replica_dict(context, share_replica)
|
||||
share_server = share_replica['share_server']
|
||||
snapshot_update = None
|
||||
|
||||
try:
|
||||
|
||||
snapshot_update = self.driver.update_replicated_snapshot(
|
||||
context, replica_list, share_replica, replica_snapshots,
|
||||
replica_snapshot, share_server=share_server) or {}
|
||||
|
||||
except exception.SnapshotResourceNotFound:
|
||||
if replica_snapshot['status'] == constants.STATUS_DELETING:
|
||||
LOG.info(_LI('Snapshot %(snapshot_instance)s on replica '
|
||||
'%(replica)s has been deleted.'), msg_payload)
|
||||
self.db.share_snapshot_instance_delete(
|
||||
context, replica_snapshot['id'])
|
||||
else:
|
||||
LOG.exception(_LE("Replica snapshot %s was not found on "
|
||||
"the backend."), replica_snapshot['id'])
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, replica_snapshot['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
except Exception:
|
||||
LOG.exception(_LE("Driver error while updating replica snapshot: "
|
||||
"%s"), replica_snapshot['id'])
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, replica_snapshot['id'],
|
||||
{'status': constants.STATUS_ERROR})
|
||||
|
||||
if snapshot_update:
|
||||
snapshot_status = snapshot_update.get('status')
|
||||
if snapshot_status == constants.STATUS_AVAILABLE:
|
||||
snapshot_update['progress'] = '100%'
|
||||
self.db.share_snapshot_instance_update(
|
||||
context, replica_snapshot['id'], snapshot_update)
|
||||
|
||||
@add_hooks
|
||||
@utils.require_driver_initialized
|
||||
def allow_access(self, context, share_instance_id, access_rules):
|
||||
@ -2524,3 +2864,24 @@ class ShareManager(manager.SchedulerDependentManager):
|
||||
}
|
||||
|
||||
return share_replica_ref
|
||||
|
||||
def _get_snapshot_instance_dict(self, context, snapshot_instance):
|
||||
# TODO(gouthamr): remove method when the db layer returns primitives
|
||||
snapshot_instance_ref = {
|
||||
'name': snapshot_instance.get('name'),
|
||||
'share_id': snapshot_instance.get('share_id'),
|
||||
'share_name': snapshot_instance.get('share_name'),
|
||||
'status': snapshot_instance.get('status'),
|
||||
'id': snapshot_instance.get('id'),
|
||||
'deleted': snapshot_instance.get('deleted') or False,
|
||||
'created_at': snapshot_instance.get('created_at'),
|
||||
'share': snapshot_instance.get('share'),
|
||||
'updated_at': snapshot_instance.get('updated_at'),
|
||||
'share_instance_id': snapshot_instance.get('share_instance_id'),
|
||||
'snapshot_id': snapshot_instance.get('snapshot_id'),
|
||||
'progress': snapshot_instance.get('progress'),
|
||||
'deleted_at': snapshot_instance.get('deleted_at'),
|
||||
'provider_location': snapshot_instance.get('provider_location'),
|
||||
}
|
||||
|
||||
return snapshot_instance_ref
|
||||
|
@ -57,6 +57,8 @@ class ShareAPI(object):
|
||||
migration_start(), rename get_migration_info() to
|
||||
migration_get_info(), rename get_driver_migration_info() to
|
||||
migration_get_driver_info()
|
||||
1.11 - Add create_replicated_snapshot() and
|
||||
delete_replicated_snapshot() methods
|
||||
"""
|
||||
|
||||
BASE_RPC_API_VERSION = '1.0'
|
||||
@ -65,7 +67,7 @@ class ShareAPI(object):
|
||||
super(ShareAPI, self).__init__()
|
||||
target = messaging.Target(topic=CONF.share_topic,
|
||||
version=self.BASE_RPC_API_VERSION)
|
||||
self.client = rpc.get_client(target, version_cap='1.10')
|
||||
self.client = rpc.get_client(target, version_cap='1.11')
|
||||
|
||||
def create_share_instance(self, context, share_instance, host,
|
||||
request_spec, filter_properties,
|
||||
@ -166,6 +168,24 @@ class ShareAPI(object):
|
||||
'delete_snapshot',
|
||||
snapshot_id=snapshot['id'])
|
||||
|
||||
def create_replicated_snapshot(self, context, share, replicated_snapshot):
|
||||
host = utils.extract_host(share['instance']['host'])
|
||||
call_context = self.client.prepare(server=host, version='1.11')
|
||||
call_context.cast(context,
|
||||
'create_replicated_snapshot',
|
||||
snapshot_id=replicated_snapshot['id'],
|
||||
share_id=share['id'])
|
||||
|
||||
def delete_replicated_snapshot(self, context, replicated_snapshot, host,
|
||||
share_id=None, force=False):
|
||||
host = utils.extract_host(host)
|
||||
call_context = self.client.prepare(server=host, version='1.11')
|
||||
call_context.cast(context,
|
||||
'delete_replicated_snapshot',
|
||||
snapshot_id=replicated_snapshot['id'],
|
||||
share_id=share_id,
|
||||
force=force)
|
||||
|
||||
@staticmethod
|
||||
def _get_access_rules(access):
|
||||
if isinstance(access, list):
|
||||
|
@ -105,6 +105,7 @@ def stub_snapshot(id, **kwargs):
|
||||
'share_size': 1,
|
||||
'size': 1,
|
||||
'status': 'fakesnapstatus',
|
||||
'aggregate_status': 'fakesnapstatus',
|
||||
'share_name': 'fakesharename',
|
||||
'display_name': 'displaysnapname',
|
||||
'display_description': 'displaysnapdesc',
|
||||
|
@ -234,19 +234,22 @@ class ShareSnapshotAPITest(test.TestCase):
|
||||
{
|
||||
'id': 'id1',
|
||||
'display_name': 'n1',
|
||||
'status': 'fake_status',
|
||||
'status': 'fake_status_other',
|
||||
'aggregate_status': 'fake_status',
|
||||
'share_id': 'fake_share_id',
|
||||
},
|
||||
{
|
||||
'id': 'id2',
|
||||
'display_name': 'n2',
|
||||
'status': 'fake_status',
|
||||
'aggregate_status': 'fake_status',
|
||||
'share_id': 'fake_share_id',
|
||||
},
|
||||
{
|
||||
'id': 'id3',
|
||||
'display_name': 'n3',
|
||||
'status': 'fake_status',
|
||||
'status': 'fake_status_other',
|
||||
'aggregate_status': 'fake_status',
|
||||
'share_id': 'fake_share_id',
|
||||
},
|
||||
]
|
||||
@ -278,10 +281,10 @@ class ShareSnapshotAPITest(test.TestCase):
|
||||
self.assertEqual(
|
||||
snapshots[1]['share_id'], result['snapshots'][0]['share_id'])
|
||||
|
||||
def test_share_list_detail_with_search_opts_by_non_admin(self):
|
||||
def test_snapshot_list_detail_with_search_opts_by_non_admin(self):
|
||||
self._snapshot_list_detail_with_search_opts(use_admin_context=False)
|
||||
|
||||
def test_share_list_detail_with_search_opts_by_admin(self):
|
||||
def test_snapshot_list_detail_with_search_opts_by_admin(self):
|
||||
self._snapshot_list_detail_with_search_opts(use_admin_context=True)
|
||||
|
||||
def test_snapshot_list_detail(self):
|
||||
|
@ -226,14 +226,25 @@ class ShareSnapshotAPITest(test.TestCase):
|
||||
req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context)
|
||||
|
||||
snapshots = [
|
||||
{'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', },
|
||||
{
|
||||
'id': 'id1',
|
||||
'display_name': 'n1',
|
||||
'status': 'fake_status',
|
||||
'aggregate_status': 'fake_status',
|
||||
},
|
||||
{
|
||||
'id': 'id2',
|
||||
'display_name': 'n2',
|
||||
'status': 'fake_status',
|
||||
'status': 'someotherstatus',
|
||||
'aggregate_status': 'fake_status',
|
||||
'share_id': 'fake_share_id',
|
||||
},
|
||||
{'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', },
|
||||
{
|
||||
'id': 'id3',
|
||||
'display_name': 'n3',
|
||||
'status': 'fake_status',
|
||||
'aggregate_status': 'fake_status',
|
||||
},
|
||||
]
|
||||
|
||||
self.mock_object(share_api.API, 'get_all_snapshots',
|
||||
@ -259,14 +270,14 @@ class ShareSnapshotAPITest(test.TestCase):
|
||||
self.assertEqual(
|
||||
snapshots[1]['display_name'], result['snapshots'][0]['name'])
|
||||
self.assertEqual(
|
||||
snapshots[1]['status'], result['snapshots'][0]['status'])
|
||||
snapshots[1]['aggregate_status'], result['snapshots'][0]['status'])
|
||||
self.assertEqual(
|
||||
snapshots[1]['share_id'], result['snapshots'][0]['share_id'])
|
||||
|
||||
def test_share_list_detail_with_search_opts_by_non_admin(self):
|
||||
def test_snapshot_list_detail_with_search_opts_by_non_admin(self):
|
||||
self._snapshot_list_detail_with_search_opts(use_admin_context=False)
|
||||
|
||||
def test_share_list_detail_with_search_opts_by_admin(self):
|
||||
def test_snapshot_list_detail_with_search_opts_by_admin(self):
|
||||
self._snapshot_list_detail_with_search_opts(use_admin_context=True)
|
||||
|
||||
def test_snapshot_list_detail(self):
|
||||
|
@ -855,6 +855,7 @@ class ConsistencyGroupDatabaseAPITestCase(test.TestCase):
|
||||
self.assertEqual(constants.STATUS_AVAILABLE, member['status'])
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class ShareSnapshotDatabaseAPITestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
@ -862,6 +863,52 @@ class ShareSnapshotDatabaseAPITestCase(test.TestCase):
|
||||
super(ShareSnapshotDatabaseAPITestCase, self).setUp()
|
||||
self.ctxt = context.get_admin_context()
|
||||
|
||||
self.share_instances = [
|
||||
db_utils.create_share_instance(
|
||||
status=constants.STATUS_REPLICATION_CHANGE,
|
||||
share_id='fake_share_id_1'),
|
||||
db_utils.create_share_instance(
|
||||
status=constants.STATUS_AVAILABLE,
|
||||
share_id='fake_share_id_1'),
|
||||
db_utils.create_share_instance(
|
||||
status=constants.STATUS_ERROR_DELETING,
|
||||
share_id='fake_share_id_2'),
|
||||
db_utils.create_share_instance(
|
||||
status=constants.STATUS_MANAGING,
|
||||
share_id='fake_share_id_2'),
|
||||
]
|
||||
self.share_1 = db_utils.create_share(
|
||||
id='fake_share_id_1', instances=self.share_instances[0:2])
|
||||
self.share_2 = db_utils.create_share(
|
||||
id='fake_share_id_2', instances=self.share_instances[2:-1])
|
||||
self.snapshot_instances = [
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id_1',
|
||||
status=constants.STATUS_CREATING,
|
||||
share_instance_id=self.share_instances[0]['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id_1',
|
||||
status=constants.STATUS_ERROR,
|
||||
share_instance_id=self.share_instances[1]['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id_1',
|
||||
status=constants.STATUS_DELETING,
|
||||
share_instance_id=self.share_instances[2]['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id_2',
|
||||
status=constants.STATUS_AVAILABLE,
|
||||
id='fake_snapshot_instance_id',
|
||||
provider_location='hogsmeade:snapshot1',
|
||||
progress='87%',
|
||||
share_instance_id=self.share_instances[3]['id']),
|
||||
]
|
||||
self.snapshot_1 = db_utils.create_snapshot(
|
||||
id='fake_snapshot_id_1', share_id=self.share_1['id'],
|
||||
instances=self.snapshot_instances[0:3])
|
||||
self.snapshot_2 = db_utils.create_snapshot(
|
||||
id='fake_snapshot_id_2', share_id=self.share_2['id'],
|
||||
instances=self.snapshot_instances[3:4])
|
||||
|
||||
def test_create(self):
|
||||
share = db_utils.create_share(size=1)
|
||||
values = {
|
||||
@ -897,6 +944,114 @@ class ShareSnapshotDatabaseAPITestCase(test.TestCase):
|
||||
self.assertIn('share_name', instance_dict)
|
||||
self.assertIn('share_id', instance_dict)
|
||||
|
||||
@ddt.data(None, constants.STATUS_ERROR)
|
||||
def test_share_snapshot_instance_get_all_with_filters_some(self, status):
|
||||
expected_status = status or (constants.STATUS_CREATING,
|
||||
constants.STATUS_DELETING)
|
||||
expected_number = 1 if status else 3
|
||||
filters = {
|
||||
'snapshot_ids': 'fake_snapshot_id_1',
|
||||
'statuses': expected_status
|
||||
}
|
||||
instances = db_api.share_snapshot_instance_get_all_with_filters(
|
||||
self.ctxt, filters)
|
||||
|
||||
for instance in instances:
|
||||
self.assertEqual('fake_snapshot_id_1', instance['snapshot_id'])
|
||||
self.assertTrue(instance['status'] in filters['statuses'])
|
||||
|
||||
self.assertEqual(expected_number, len(instances))
|
||||
|
||||
def test_share_snapshot_instance_get_all_with_filters_all_filters(self):
|
||||
filters = {
|
||||
'snapshot_ids': 'fake_snapshot_id_2',
|
||||
'instance_ids': 'fake_snapshot_instance_id',
|
||||
'statuses': constants.STATUS_AVAILABLE,
|
||||
'share_instance_ids': self.share_instances[3]['id'],
|
||||
}
|
||||
instances = db_api.share_snapshot_instance_get_all_with_filters(
|
||||
self.ctxt, filters, with_share_data=True)
|
||||
self.assertEqual(1, len(instances))
|
||||
self.assertEqual('fake_snapshot_instance_id', instances[0]['id'])
|
||||
self.assertEqual(
|
||||
self.share_2['id'], instances[0]['share_instance']['share_id'])
|
||||
|
||||
def test_share_snapshot_instance_get_all_with_filters_wrong_filters(self):
|
||||
filters = {
|
||||
'some_key': 'some_value',
|
||||
'some_other_key': 'some_other_value',
|
||||
}
|
||||
instances = db_api.share_snapshot_instance_get_all_with_filters(
|
||||
self.ctxt, filters)
|
||||
self.assertEqual(6, len(instances))
|
||||
|
||||
def test_share_snapshot_instance_create(self):
|
||||
snapshot = db_utils.create_snapshot(with_share=True)
|
||||
share = snapshot['share']
|
||||
share_instance = db_utils.create_share_instance(share_id=share['id'])
|
||||
values = {
|
||||
'snapshot_id': snapshot['id'],
|
||||
'share_instance_id': share_instance['id'],
|
||||
'status': constants.STATUS_MANAGING,
|
||||
'progress': '88%',
|
||||
'provider_location': 'whomping_willow',
|
||||
}
|
||||
|
||||
actual_result = db_api.share_snapshot_instance_create(
|
||||
self.ctxt, snapshot['id'], values)
|
||||
|
||||
snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id'])
|
||||
|
||||
self.assertSubDictMatch(values, actual_result.to_dict())
|
||||
self.assertEqual(2, len(snapshot['instances']))
|
||||
|
||||
def test_share_snapshot_instance_update(self):
|
||||
snapshot = db_utils.create_snapshot(with_share=True)
|
||||
|
||||
values = {
|
||||
'snapshot_id': snapshot['id'],
|
||||
'status': constants.STATUS_ERROR,
|
||||
'progress': '18%',
|
||||
'provider_location': 'godrics_hollow',
|
||||
}
|
||||
|
||||
actual_result = db_api.share_snapshot_instance_update(
|
||||
self.ctxt, snapshot['instance']['id'], values)
|
||||
|
||||
self.assertSubDictMatch(values, actual_result.to_dict())
|
||||
|
||||
@ddt.data(2, 1)
|
||||
def test_share_snapshot_instance_delete(self, instances):
|
||||
snapshot = db_utils.create_snapshot(with_share=True)
|
||||
first_instance_id = snapshot['instance']['id']
|
||||
if instances > 1:
|
||||
instance = db_utils.create_snapshot_instance(
|
||||
snapshot['id'],
|
||||
share_instance_id=snapshot['share']['instance']['id'])
|
||||
else:
|
||||
instance = snapshot['instance']
|
||||
|
||||
retval = db_api.share_snapshot_instance_delete(
|
||||
self.ctxt, instance['id'])
|
||||
|
||||
self.assertIsNone(retval)
|
||||
if instances == 1:
|
||||
self.assertRaises(exception.ShareSnapshotNotFound,
|
||||
db_api.share_snapshot_get,
|
||||
self.ctxt, snapshot['id'])
|
||||
else:
|
||||
snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id'])
|
||||
self.assertEqual(1, len(snapshot['instances']))
|
||||
self.assertEqual(first_instance_id, snapshot['instance']['id'])
|
||||
|
||||
def test_share_snapshot_destroy_has_instances(self):
|
||||
snapshot = db_utils.create_snapshot(with_share=True)
|
||||
|
||||
self.assertRaises(exception.InvalidShareSnapshot,
|
||||
db_api.share_snapshot_destroy,
|
||||
context.get_admin_context(),
|
||||
snapshot['id'])
|
||||
|
||||
|
||||
class ShareExportLocationsDatabaseAPITestCase(test.TestCase):
|
||||
|
||||
|
@ -148,3 +148,62 @@ class ShareTestCase(test.TestCase):
|
||||
share = db_utils.create_share(instances=instances)
|
||||
|
||||
self.assertEqual(access_status, share.access_rules_status)
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class ShareSnapshotTestCase(test.TestCase):
|
||||
"""Testing of SQLAlchemy ShareSnapshot model class."""
|
||||
|
||||
def test_instance_and_proxified_properties(self):
|
||||
|
||||
in_sync_replica_instance = db_utils.create_share_instance(
|
||||
status=constants.STATUS_AVAILABLE, share_id='fake_id',
|
||||
replica_state=constants.REPLICA_STATE_IN_SYNC)
|
||||
active_replica_instance = db_utils.create_share_instance(
|
||||
status=constants.STATUS_AVAILABLE, share_id='fake_id',
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
out_of_sync_replica_instance = db_utils.create_share_instance(
|
||||
status=constants.STATUS_ERROR, share_id='fake_id',
|
||||
replica_state=constants.REPLICA_STATE_OUT_OF_SYNC)
|
||||
non_replica_instance = db_utils.create_share_instance(
|
||||
status=constants.STATUS_CREATING, share_id='fake_id')
|
||||
share_instances = [
|
||||
in_sync_replica_instance, active_replica_instance,
|
||||
out_of_sync_replica_instance, non_replica_instance,
|
||||
]
|
||||
share = db_utils.create_share(instances=share_instances)
|
||||
snapshot_instance_list = [
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id',
|
||||
status=constants.STATUS_CREATING,
|
||||
share_instance_id=out_of_sync_replica_instance['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id',
|
||||
status=constants.STATUS_ERROR,
|
||||
share_instance_id=in_sync_replica_instance['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id',
|
||||
status=constants.STATUS_AVAILABLE,
|
||||
provider_location='hogsmeade:snapshot1',
|
||||
progress='87%',
|
||||
share_instance_id=active_replica_instance['id']),
|
||||
db_utils.create_snapshot_instance(
|
||||
'fake_snapshot_id',
|
||||
status=constants.STATUS_MANAGING,
|
||||
share_instance_id=non_replica_instance['id']),
|
||||
]
|
||||
snapshot = db_utils.create_snapshot(
|
||||
id='fake_snapshot_id', share_id=share['id'],
|
||||
instances=snapshot_instance_list)
|
||||
|
||||
# Proxified properties
|
||||
self.assertEqual(constants.STATUS_AVAILABLE, snapshot['status'])
|
||||
self.assertEqual(constants.STATUS_ERROR, snapshot['aggregate_status'])
|
||||
self.assertEqual('hogsmeade:snapshot1', snapshot['provider_location'])
|
||||
self.assertEqual('87%', snapshot['progress'])
|
||||
|
||||
# Snapshot properties
|
||||
expected_share_name = '-'.join(['share', share['id']])
|
||||
self.assertEqual(expected_share_name, snapshot['share_name'])
|
||||
self.assertEqual(active_replica_instance['id'],
|
||||
snapshot['instance']['share_instance_id'])
|
||||
|
@ -132,7 +132,22 @@ def create_snapshot(**kwargs):
|
||||
'status': 'creating',
|
||||
'provider_location': 'fake',
|
||||
}
|
||||
return _create_db_row(db.share_snapshot_create, snapshot, kwargs)
|
||||
snapshot.update(kwargs)
|
||||
return db.share_snapshot_create(context.get_admin_context(), snapshot)
|
||||
|
||||
|
||||
def create_snapshot_instance(snapshot_id, **kwargs):
|
||||
"""Create a share snapshot instance object."""
|
||||
|
||||
snapshot_instance = {
|
||||
'provider_location': 'fake_provider_location',
|
||||
'progress': '0%',
|
||||
'status': constants.STATUS_CREATING,
|
||||
}
|
||||
|
||||
snapshot_instance.update(kwargs)
|
||||
return db.share_snapshot_instance_create(
|
||||
context.get_admin_context(), snapshot_id, snapshot_instance)
|
||||
|
||||
|
||||
def create_access(**kwargs):
|
||||
|
@ -16,11 +16,13 @@
|
||||
import datetime
|
||||
import uuid
|
||||
|
||||
from manila.common import constants
|
||||
from manila.db.sqlalchemy import models
|
||||
from manila.tests.db import fakes as db_fakes
|
||||
|
||||
|
||||
def fake_share(**kwargs):
|
||||
|
||||
share = {
|
||||
'id': 'fakeid',
|
||||
'name': 'fakename',
|
||||
@ -36,6 +38,7 @@ def fake_share(**kwargs):
|
||||
'replication_type': None,
|
||||
'is_busy': False,
|
||||
'consistency_group_id': 'fakecgid',
|
||||
'instance': {'host': 'fakehost'},
|
||||
}
|
||||
share.update(kwargs)
|
||||
return db_fakes.FakeModel(share)
|
||||
@ -59,7 +62,42 @@ def fake_share_instance(base_share=None, **kwargs):
|
||||
return db_fakes.FakeModel(share_instance)
|
||||
|
||||
|
||||
def fake_snapshot(**kwargs):
|
||||
def fake_share_type(**kwargs):
|
||||
|
||||
share_type = {
|
||||
'id': "fakesharetype",
|
||||
'name': "fakesharetypename",
|
||||
'is_public': False,
|
||||
'extra_specs': {
|
||||
'driver_handles_share_servers': 'False',
|
||||
'snapshot_support': 'True',
|
||||
}
|
||||
}
|
||||
|
||||
extra_specs = kwargs.pop('extra_specs', {})
|
||||
|
||||
for key, value in extra_specs.items():
|
||||
share_type['extra_specs'][key] = value
|
||||
|
||||
share_type.update(kwargs)
|
||||
|
||||
return db_fakes.FakeModel(share_type)
|
||||
|
||||
|
||||
def fake_snapshot(create_instance=False, **kwargs):
|
||||
|
||||
instance_keys = ('instance_id', 'snapshot_id', 'share_instance_id',
|
||||
'status', 'progress', 'provider_location')
|
||||
snapshot_keys = ('id', 'share_name', 'share_id', 'name', 'share_size',
|
||||
'share_proto', 'instance', 'aggregate_status')
|
||||
|
||||
instance_kwargs = {k: kwargs.get(k) for k in instance_keys if k in kwargs}
|
||||
snapshot_kwargs = {k: kwargs.get(k) for k in snapshot_keys if k in kwargs}
|
||||
|
||||
aggregate_status = snapshot_kwargs.get(
|
||||
'aggregate_status', instance_kwargs.get(
|
||||
'status', constants.STATUS_CREATING))
|
||||
|
||||
snapshot = {
|
||||
'id': 'fakesnapshotid',
|
||||
'share_name': 'fakename',
|
||||
@ -67,12 +105,47 @@ def fake_snapshot(**kwargs):
|
||||
'name': 'fakesnapshotname',
|
||||
'share_size': 1,
|
||||
'share_proto': 'fake_proto',
|
||||
'export_location': 'fake_location:/fake_share',
|
||||
'instance': None,
|
||||
'share': 'fake_share',
|
||||
'aggregate_status': aggregate_status,
|
||||
}
|
||||
snapshot.update(kwargs)
|
||||
snapshot.update(snapshot_kwargs)
|
||||
if create_instance:
|
||||
if 'instance_id' in instance_kwargs:
|
||||
instance_kwargs['id'] = instance_kwargs.pop('instance_id')
|
||||
snapshot['instance'] = fake_snapshot_instance(
|
||||
base_snapshot=snapshot, **instance_kwargs)
|
||||
snapshot['status'] = snapshot['instance']['status']
|
||||
snapshot['provider_location'] = (
|
||||
snapshot['instance']['provider_location']
|
||||
)
|
||||
snapshot['progress'] = snapshot['instance']['progress']
|
||||
else:
|
||||
snapshot['status'] = constants.STATUS_AVAILABLE
|
||||
snapshot['progress'] = '0%'
|
||||
snapshot['provider_location'] = 'fake'
|
||||
snapshot.update(instance_kwargs)
|
||||
|
||||
return db_fakes.FakeModel(snapshot)
|
||||
|
||||
|
||||
def fake_snapshot_instance(base_snapshot=None, **kwargs):
|
||||
if base_snapshot is None:
|
||||
base_snapshot = fake_snapshot()
|
||||
snapshot_instance = {
|
||||
'id': 'fakesnapshotinstanceid',
|
||||
'snapshot_id': base_snapshot['id'],
|
||||
'status': constants.STATUS_CREATING,
|
||||
'progress': '0%',
|
||||
'provider_location': 'i_live_here_actually',
|
||||
'share_name': 'fakename',
|
||||
'share_id': 'fakeshareinstanceid',
|
||||
'share_instance_id': 'fakeshareinstanceid',
|
||||
}
|
||||
snapshot_instance.update(kwargs)
|
||||
return db_fakes.FakeModel(snapshot_instance)
|
||||
|
||||
|
||||
def expected_snapshot(id='fake_snapshot_id', **kwargs):
|
||||
self_link = 'http://localhost/v1/fake/snapshots/%s' % id
|
||||
bookmark_link = 'http://localhost/fake/snapshots/%s' % id
|
||||
|
@ -246,6 +246,9 @@ class SchedulerManagerTestCase(test.TestCase):
|
||||
def test_create_share_replica_exception_path(self):
|
||||
"""Test 'raisable' exceptions for create_share_replica."""
|
||||
db_update = self.mock_object(db, 'share_replica_update')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=[{'id': '123'}]))
|
||||
snap_update = self.mock_object(db, 'share_snapshot_instance_update')
|
||||
request_spec = fakes.fake_replica_request_spec()
|
||||
replica_id = request_spec.get('share_instance_properties').get('id')
|
||||
expected_updates = {
|
||||
@ -262,6 +265,8 @@ class SchedulerManagerTestCase(test.TestCase):
|
||||
filter_properties={})
|
||||
db_update.assert_called_once_with(
|
||||
self.context, replica_id, expected_updates)
|
||||
snap_update.assert_called_once_with(
|
||||
self.context, '123', {'status': constants.STATUS_ERROR})
|
||||
|
||||
def test_create_share_replica_no_valid_host(self):
|
||||
"""Test the NoValidHost exception for create_share_replica."""
|
||||
|
@ -3191,7 +3191,7 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
|
||||
@ddt.data({
|
||||
'api_response_xml': fake.NO_RECORDS_RESPONSE,
|
||||
'raised_exception': exception.SnapshotNotFound,
|
||||
'raised_exception': exception.SnapshotResourceNotFound,
|
||||
}, {
|
||||
'api_response_xml': fake.SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE,
|
||||
'raised_exception': exception.NetAppException,
|
||||
|
@ -1170,7 +1170,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
vserver_client)))
|
||||
mock_delete_snapshot = self.mock_object(
|
||||
self.library, '_delete_snapshot',
|
||||
mock.Mock(side_effect=exception.SnapshotNotFound(
|
||||
mock.Mock(side_effect=exception.SnapshotResourceNotFound(
|
||||
name=fake.SNAPSHOT_NAME)))
|
||||
|
||||
self.library.delete_snapshot(self.context,
|
||||
@ -1757,7 +1757,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
mock.Mock(return_value=(fake.VSERVER1, vserver_client)))
|
||||
mock_delete_snapshot = self.mock_object(
|
||||
self.library, '_delete_snapshot',
|
||||
mock.Mock(side_effect=exception.SnapshotNotFound(name='fake')))
|
||||
mock.Mock(side_effect=exception.SnapshotResourceNotFound(
|
||||
name='fake')))
|
||||
|
||||
result = self.library.delete_cgsnapshot(
|
||||
self.context,
|
||||
|
@ -297,7 +297,7 @@ SHARE_FOR_CG1 = {
|
||||
'id': SHARE_ID,
|
||||
'host': '%(host)s@%(backend)s#%(pool)s' % {
|
||||
'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME},
|
||||
'name': 'share1',
|
||||
'name': 'share_1',
|
||||
'share_proto': 'NFS',
|
||||
'source_cgsnapshot_member_id': None,
|
||||
}
|
||||
@ -306,7 +306,7 @@ SHARE_FOR_CG2 = {
|
||||
'id': SHARE_ID2,
|
||||
'host': '%(host)s@%(backend)s#%(pool)s' % {
|
||||
'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME},
|
||||
'name': 'share2',
|
||||
'name': 'share_2',
|
||||
'share_proto': 'NFS',
|
||||
'source_cgsnapshot_member_id': None,
|
||||
}
|
||||
|
@ -1126,7 +1126,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
mock_utcnow.return_value.isoformat.return_value = 'some_time'
|
||||
|
||||
result = self.driver.create_replica(
|
||||
'fake_context', replica_list, new_replica, access_rules)
|
||||
'fake_context', replica_list, new_replica, access_rules, [])
|
||||
|
||||
expected = {
|
||||
'export_locations': (
|
||||
@ -1169,6 +1169,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
pool_name = 'foo_pool'
|
||||
replica = {'id': 'fake_replica_id'}
|
||||
replica_list = [replica]
|
||||
replica_snapshots = []
|
||||
self.mock_object(
|
||||
self.driver, '_get_dataset_name',
|
||||
mock.Mock(return_value=dataset_name))
|
||||
@ -1183,7 +1184,8 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
self.driver.private_storage.update(
|
||||
replica['id'], {'pool_name': pool_name})
|
||||
|
||||
self.driver.delete_replica('fake_context', replica_list, replica)
|
||||
self.driver.delete_replica('fake_context', replica_list,
|
||||
replica_snapshots, replica)
|
||||
|
||||
zfs_driver.LOG.warning.assert_called_once_with(
|
||||
mock.ANY, {'id': replica['id'], 'name': dataset_name})
|
||||
@ -1224,7 +1226,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
replica['id'],
|
||||
{'pool_name': pool_name, 'dataset_name': dataset_name})
|
||||
|
||||
self.driver.delete_replica('fake_context', replica_list, replica)
|
||||
self.driver.delete_replica('fake_context', replica_list, [], replica)
|
||||
|
||||
self.assertEqual(0, zfs_driver.LOG.warning.call_count)
|
||||
self.assertEqual(0, self.driver._get_dataset_name.call_count)
|
||||
@ -1257,6 +1259,7 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
'replica_state': None,
|
||||
}
|
||||
replica_list = [replica, active_replica]
|
||||
replica_snapshots = []
|
||||
dst_dataset_name = (
|
||||
'bar/subbar/fake_dataset_name_prefix%s' % replica['id'])
|
||||
src_dataset_name = (
|
||||
@ -1306,7 +1309,8 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
|
||||
self.driver, '_delete_dataset_or_snapshot_with_retry')
|
||||
|
||||
result = self.driver.update_replica_state(
|
||||
'fake_context', replica_list, replica, access_rules)
|
||||
'fake_context', replica_list, replica, access_rules,
|
||||
replica_snapshots)
|
||||
|
||||
self.assertEqual(zfs_driver.constants.REPLICA_STATE_IN_SYNC, result)
|
||||
mock_helper.assert_called_once_with('NFS')
|
||||
|
@ -190,7 +190,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
share = db_utils.create_share(
|
||||
user_id=self.context.user_id,
|
||||
project_id=self.context.project_id,
|
||||
share_type_id='fake',
|
||||
share_type_id=kwargs.pop('share_type_id', 'fake'),
|
||||
**kwargs
|
||||
)
|
||||
share_data = {
|
||||
@ -234,12 +234,15 @@ class ShareAPITestCase(test.TestCase):
|
||||
CONF.set_default("use_scheduler_creating_share_from_snapshot",
|
||||
use_scheduler)
|
||||
|
||||
share_type = fakes.fake_share_type()
|
||||
|
||||
original_share = db_utils.create_share(
|
||||
user_id=self.context.user_id,
|
||||
project_id=self.context.project_id,
|
||||
status=constants.STATUS_AVAILABLE,
|
||||
host=host if host else 'fake',
|
||||
size=1
|
||||
size=1,
|
||||
share_type_id=share_type['id'],
|
||||
)
|
||||
snapshot = db_utils.create_snapshot(
|
||||
share_id=original_share['id'],
|
||||
@ -248,7 +251,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
)
|
||||
|
||||
share, share_data = self._setup_create_mocks(
|
||||
snapshot_id=snapshot['id'])
|
||||
snapshot_id=snapshot['id'], share_type_id=share_type['id'])
|
||||
|
||||
request_spec = {
|
||||
'share_properties': share.to_dict(),
|
||||
@ -261,7 +264,8 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.mock_object(quota.QUOTAS, 'reserve',
|
||||
mock.Mock(return_value='reservation'))
|
||||
self.mock_object(quota.QUOTAS, 'commit')
|
||||
self.mock_object(share_types, 'get_share_type')
|
||||
self.mock_object(
|
||||
share_types, 'get_share_type', mock.Mock(return_value=share_type))
|
||||
|
||||
return snapshot, share, share_data, request_spec
|
||||
|
||||
@ -879,6 +883,48 @@ class ShareAPITestCase(test.TestCase):
|
||||
db_api.share_snapshot_create.assert_called_once_with(
|
||||
self.context, options)
|
||||
|
||||
def test_create_snapshot_for_replicated_share(self):
|
||||
share = fakes.fake_share(
|
||||
has_replicas=True, status=constants.STATUS_AVAILABLE)
|
||||
snapshot = fakes.fake_snapshot(
|
||||
create_instance=True, share_instance_id='id2')
|
||||
replicas = [
|
||||
fakes.fake_replica(
|
||||
id='id1', replica_state=constants.REPLICA_STATE_ACTIVE),
|
||||
fakes.fake_replica(
|
||||
id='id2', replica_state=constants.REPLICA_STATE_IN_SYNC)
|
||||
]
|
||||
self.mock_object(share_api.policy, 'check_policy')
|
||||
self.mock_object(quota.QUOTAS, 'reserve',
|
||||
mock.Mock(return_value='reservation'))
|
||||
self.mock_object(
|
||||
db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(db_api, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(quota.QUOTAS, 'commit')
|
||||
mock_instance_create_call = self.mock_object(
|
||||
db_api, 'share_snapshot_instance_create')
|
||||
mock_snapshot_rpc_call = self.mock_object(
|
||||
self.share_rpcapi, 'create_snapshot')
|
||||
mock_replicated_snapshot_rpc_call = self.mock_object(
|
||||
self.share_rpcapi, 'create_replicated_snapshot')
|
||||
snapshot_instance_args = {
|
||||
'status': constants.STATUS_CREATING,
|
||||
'progress': '0%',
|
||||
'share_instance_id': 'id1',
|
||||
}
|
||||
|
||||
retval = self.api.create_snapshot(
|
||||
self.context, share, 'fake_name', 'fake_description')
|
||||
|
||||
self.assertEqual(snapshot['id'], retval['id'])
|
||||
mock_instance_create_call.assert_called_once_with(
|
||||
self.context, snapshot['id'], snapshot_instance_args)
|
||||
self.assertFalse(mock_snapshot_rpc_call.called)
|
||||
self.assertTrue(mock_replicated_snapshot_rpc_call.called)
|
||||
|
||||
@mock.patch.object(db_api, 'share_instances_get_all_by_share_server',
|
||||
mock.Mock(return_value=[]))
|
||||
@mock.patch.object(db_api, 'consistency_group_get_all_by_share_server',
|
||||
@ -927,7 +973,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
db_api.consistency_group_get_all_by_share_server.\
|
||||
assert_called_once_with(self.context, server['id'])
|
||||
|
||||
@mock.patch.object(db_api, 'share_snapshot_update', mock.Mock())
|
||||
@mock.patch.object(db_api, 'share_snapshot_instance_update', mock.Mock())
|
||||
def test_delete_snapshot(self):
|
||||
snapshot = db_utils.create_snapshot(
|
||||
with_share=True, status=constants.STATUS_AVAILABLE)
|
||||
@ -940,9 +986,9 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.context, snapshot, share['host'])
|
||||
share_api.policy.check_policy.assert_called_once_with(
|
||||
self.context, 'share', 'delete_snapshot', snapshot)
|
||||
db_api.share_snapshot_update.assert_called_once_with(
|
||||
db_api.share_snapshot_instance_update.assert_called_once_with(
|
||||
self.context,
|
||||
snapshot['id'],
|
||||
snapshot['instance']['id'],
|
||||
{'status': constants.STATUS_DELETING})
|
||||
db_api.share_get.assert_called_once_with(
|
||||
self.context, snapshot['share_id'])
|
||||
@ -958,6 +1004,39 @@ class ShareAPITestCase(test.TestCase):
|
||||
share_api.policy.check_policy.assert_called_once_with(
|
||||
self.context, 'share', 'delete_snapshot', snapshot)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_delete_snapshot_replicated_snapshot(self, force):
|
||||
share = fakes.fake_share(has_replicas=True)
|
||||
snapshot = fakes.fake_snapshot(
|
||||
create_instance=True, share_id=share['id'],
|
||||
status=constants.STATUS_ERROR)
|
||||
snapshot_instance = fakes.fake_snapshot_instance(
|
||||
base_snapshot=snapshot)
|
||||
expected_update_calls = [
|
||||
mock.call(self.context, x, {'status': constants.STATUS_DELETING})
|
||||
for x in (snapshot['instance']['id'], snapshot_instance['id'])
|
||||
]
|
||||
self.mock_object(db_api, 'share_get', mock.Mock(return_value=share))
|
||||
self.mock_object(
|
||||
db_api, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=[snapshot['instance'], snapshot_instance]))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db_api, 'share_snapshot_instance_update')
|
||||
mock_snapshot_rpc_call = self.mock_object(
|
||||
self.share_rpcapi, 'delete_snapshot')
|
||||
mock_replicated_snapshot_rpc_call = self.mock_object(
|
||||
self.share_rpcapi, 'delete_replicated_snapshot')
|
||||
|
||||
retval = self.api.delete_snapshot(self.context, snapshot, force=force)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(2, mock_db_update_call.call_count)
|
||||
mock_db_update_call.assert_has_calls(expected_update_calls)
|
||||
mock_replicated_snapshot_rpc_call.assert_called_once_with(
|
||||
self.context, snapshot, share['instance']['host'],
|
||||
share_id=share['id'], force=force)
|
||||
self.assertFalse(mock_snapshot_rpc_call.called)
|
||||
|
||||
def test_create_snapshot_if_share_not_available(self):
|
||||
share = db_utils.create_share(status=constants.STATUS_ERROR)
|
||||
self.assertRaises(exception.InvalidShare,
|
||||
@ -990,6 +1069,10 @@ class ShareAPITestCase(test.TestCase):
|
||||
self._setup_create_from_snapshot_mocks(
|
||||
use_scheduler=use_scheduler, host=valid_host)
|
||||
)
|
||||
share_type = fakes.fake_share_type()
|
||||
|
||||
mock_get_share_type_call = self.mock_object(
|
||||
share_types, 'get_share_type', mock.Mock(return_value=share_type))
|
||||
az = share_data.pop('availability_zone')
|
||||
|
||||
self.api.create(
|
||||
@ -998,11 +1081,12 @@ class ShareAPITestCase(test.TestCase):
|
||||
None, # NOTE(u_glide): Get share size from snapshot
|
||||
share_data['display_name'],
|
||||
share_data['display_description'],
|
||||
snapshot=snapshot,
|
||||
snapshot_id=snapshot['id'],
|
||||
availability_zone=az
|
||||
)
|
||||
|
||||
self.assertEqual(0, share_types.get_share_type.call_count)
|
||||
mock_get_share_type_call.assert_called_once_with(
|
||||
self.context, share['share_type_id'])
|
||||
self.assertSubDictMatch(share_data,
|
||||
db_api.share_create.call_args[0][1])
|
||||
self.api.create_instance.assert_called_once_with(
|
||||
@ -1010,8 +1094,9 @@ class ShareAPITestCase(test.TestCase):
|
||||
host=valid_host,
|
||||
availability_zone=snapshot['share']['availability_zone'],
|
||||
consistency_group=None, cgsnapshot_member=None)
|
||||
share_api.policy.check_policy.assert_called_once_with(
|
||||
self.context, 'share', 'create')
|
||||
share_api.policy.check_policy.assert_has_calls([
|
||||
mock.call(self.context, 'share', 'create'),
|
||||
mock.call(self.context, 'share_snapshot', 'get_snapshot')])
|
||||
quota.QUOTAS.reserve.assert_called_once_with(
|
||||
self.context, gigabytes=1, shares=1)
|
||||
quota.QUOTAS.commit.assert_called_once_with(
|
||||
@ -1029,7 +1114,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
share_data['size'],
|
||||
share_data['display_name'],
|
||||
share_data['display_description'],
|
||||
snapshot=snapshot,
|
||||
snapshot_id=snapshot['id'],
|
||||
availability_zone=share_data['availability_zone'],
|
||||
share_type=share_type)
|
||||
|
||||
@ -1049,7 +1134,7 @@ class ShareAPITestCase(test.TestCase):
|
||||
with_share=True, status=constants.STATUS_ERROR)
|
||||
self.assertRaises(exception.InvalidShareSnapshot, self.api.create,
|
||||
self.context, 'nfs', '1', 'fakename',
|
||||
'fakedesc', snapshot=snapshot,
|
||||
'fakedesc', snapshot_id=snapshot['id'],
|
||||
availability_zone='fakeaz')
|
||||
|
||||
def test_create_from_snapshot_larger_size(self):
|
||||
@ -1057,7 +1142,8 @@ class ShareAPITestCase(test.TestCase):
|
||||
size=100, status=constants.STATUS_AVAILABLE, with_share=True)
|
||||
self.assertRaises(exception.InvalidInput, self.api.create,
|
||||
self.context, 'nfs', 1, 'fakename', 'fakedesc',
|
||||
availability_zone='fakeaz', snapshot=snapshot)
|
||||
availability_zone='fakeaz',
|
||||
snapshot_id=snapshot['id'])
|
||||
|
||||
def test_create_share_wrong_size_0(self):
|
||||
self.assertRaises(exception.InvalidInput, self.api.create,
|
||||
@ -1876,12 +1962,17 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
self.assertFalse(mock_scheduler_rpcapi_call.called)
|
||||
|
||||
def test_create_share_replica(self):
|
||||
@ddt.data(True, False)
|
||||
def test_create_share_replica(self, has_snapshots):
|
||||
request_spec = fakes.fake_replica_request_spec()
|
||||
replica = request_spec['share_instance_properties']
|
||||
share = fakes.fake_share(
|
||||
id=replica['share_id'], replication_type='dr')
|
||||
fake_replica = fakes.fake_replica(replica['id'])
|
||||
snapshots = (
|
||||
[fakes.fake_snapshot(), fakes.fake_snapshot()]
|
||||
if has_snapshots else []
|
||||
)
|
||||
fake_replica = fakes.fake_replica(id=replica['id'])
|
||||
fake_request_spec = fakes.fake_replica_request_spec()
|
||||
self.mock_object(db_api, 'share_replicas_get_available_active_replica',
|
||||
mock.Mock(return_value={'host': 'fake_ar_host'}))
|
||||
@ -1891,12 +1982,22 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.mock_object(db_api, 'share_replica_update')
|
||||
mock_sched_rpcapi_call = self.mock_object(
|
||||
self.api.scheduler_rpcapi, 'create_share_replica')
|
||||
mock_snapshot_get_all_call = self.mock_object(
|
||||
db_api, 'share_snapshot_get_all_for_share',
|
||||
mock.Mock(return_value=snapshots))
|
||||
mock_snapshot_instance_create_call = self.mock_object(
|
||||
db_api, 'share_snapshot_instance_create')
|
||||
expected_snap_instance_create_call_count = 2 if has_snapshots else 0
|
||||
|
||||
result = self.api.create_share_replica(
|
||||
self.context, share, availability_zone='FAKE_AZ')
|
||||
|
||||
self.assertTrue(mock_sched_rpcapi_call.called)
|
||||
self.assertEqual(replica, result)
|
||||
mock_snapshot_get_all_call.assert_called_once_with(
|
||||
self.context, fake_replica['share_id'])
|
||||
self.assertEqual(expected_snap_instance_create_call_count,
|
||||
mock_snapshot_instance_create_call.call_count)
|
||||
|
||||
def test_delete_last_active_replica(self):
|
||||
fake_replica = fakes.fake_replica(
|
||||
@ -1911,13 +2012,21 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.context, fake_replica)
|
||||
self.assertFalse(mock_log.called)
|
||||
|
||||
def test_delete_share_replica_no_host(self):
|
||||
@ddt.data(True, False)
|
||||
def test_delete_share_replica_no_host(self, has_snapshots):
|
||||
snapshots = [{'id': 'xyz'}, {'id': 'abc'}, {'id': 'pqr'}]
|
||||
snapshots = snapshots if has_snapshots else []
|
||||
replica = fakes.fake_replica('FAKE_ID', host='')
|
||||
mock_sched_rpcapi_call = self.mock_object(
|
||||
self.share_rpcapi, 'delete_share_replica')
|
||||
mock_db_replica_delete_call = self.mock_object(
|
||||
db_api, 'share_replica_delete')
|
||||
mock_db_update_call = self.mock_object(db_api, 'share_replica_update')
|
||||
mock_snapshot_get_call = self.mock_object(
|
||||
db_api, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshots))
|
||||
mock_snapshot_instance_delete_call = self.mock_object(
|
||||
db_api, 'share_snapshot_instance_delete')
|
||||
|
||||
self.api.delete_share_replica(self.context, replica)
|
||||
|
||||
@ -1926,7 +2035,11 @@ class ShareAPITestCase(test.TestCase):
|
||||
self.context, replica['id'])
|
||||
mock_db_update_call.assert_called_once_with(
|
||||
self.context, replica['id'],
|
||||
{'terminated_at': mock.ANY})
|
||||
{'status': constants.STATUS_DELETING, 'terminated_at': mock.ANY})
|
||||
mock_snapshot_get_call.assert_called_once_with(
|
||||
self.context, {'share_instance_ids': replica['id']})
|
||||
self.assertEqual(
|
||||
len(snapshots), mock_snapshot_instance_delete_call.call_count)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_delete_share_replica(self, force):
|
||||
|
@ -539,14 +539,14 @@ class ShareDriverTestCase(test.TestCase):
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.create_replica,
|
||||
'fake_context', ['r1', 'r2'],
|
||||
'fake_new_replica', [])
|
||||
'fake_new_replica', [], [])
|
||||
|
||||
def test_delete_replica(self):
|
||||
share_driver = self._instantiate_share_driver(None, True)
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.delete_replica,
|
||||
'fake_context', ['r1', 'r2'],
|
||||
'fake_replica')
|
||||
'fake_replica', [])
|
||||
|
||||
def test_promote_replica(self):
|
||||
share_driver = self._instantiate_share_driver(None, True)
|
||||
@ -558,4 +558,23 @@ class ShareDriverTestCase(test.TestCase):
|
||||
share_driver = self._instantiate_share_driver(None, True)
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.update_replica_state,
|
||||
'fake_context', ['r1', 'r2'], 'fake_replica', [])
|
||||
'fake_context', ['r1', 'r2'], 'fake_replica', [], [])
|
||||
|
||||
def test_create_replicated_snapshot(self):
|
||||
share_driver = self._instantiate_share_driver(None, False)
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.create_replicated_snapshot,
|
||||
'fake_context', ['r1', 'r2'], ['s1', 's2'])
|
||||
|
||||
def test_delete_replicated_snapshot(self):
|
||||
share_driver = self._instantiate_share_driver(None, False)
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.delete_replicated_snapshot,
|
||||
'fake_context', ['r1', 'r2'], ['s1', 's2'])
|
||||
|
||||
def test_update_replicated_snapshot(self):
|
||||
share_driver = self._instantiate_share_driver(None, False)
|
||||
self.assertRaises(NotImplementedError,
|
||||
share_driver.update_replicated_snapshot,
|
||||
'fake_context', ['r1', 'r2'], 'r1',
|
||||
['s1', 's2'], 's1')
|
||||
|
@ -210,6 +210,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
"promote_share_replica",
|
||||
"periodic_share_replica_update",
|
||||
"update_share_replica",
|
||||
"create_replicated_snapshot",
|
||||
"delete_replicated_snapshot",
|
||||
"periodic_share_replica_snapshot_update",
|
||||
)
|
||||
def test_call_driver_when_its_init_failed(self, method_name):
|
||||
self.mock_object(self.share_manager.driver, 'do_setup',
|
||||
@ -621,6 +624,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager,
|
||||
'_provide_share_server_for_share',
|
||||
mock.Mock(return_value=('FAKE_SERVER', replica)))
|
||||
self.mock_object(self.share_manager,
|
||||
'_get_replica_snapshots_for_snapshot',
|
||||
mock.Mock(return_value=[]))
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_export_locs_update_call = self.mock_object(
|
||||
db, 'share_export_locations_update')
|
||||
@ -668,6 +674,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=('FAKE_SERVER', replica)))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager,
|
||||
'_get_replica_snapshots_for_snapshot',
|
||||
mock.Mock(return_value=[]))
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_export_locs_update_call = self.mock_object(
|
||||
db, 'share_export_locations_update')
|
||||
@ -716,6 +725,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager,
|
||||
'_provide_share_server_for_share',
|
||||
mock.Mock(return_value=('FAKE_SERVER', replica)))
|
||||
self.mock_object(self.share_manager,
|
||||
'_get_replica_snapshots_for_snapshot',
|
||||
mock.Mock(return_value=[]))
|
||||
mock_replica_update_call = self.mock_object(
|
||||
db, 'share_replica_update', mock.Mock(return_value=replica))
|
||||
mock_calls = [
|
||||
@ -750,22 +762,35 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertFalse(mock_log_error.called)
|
||||
self.assertTrue(driver_call.called)
|
||||
|
||||
def test_create_share_replica(self):
|
||||
@ddt.data(True, False)
|
||||
def test_create_share_replica(self, has_snapshots):
|
||||
replica = fake_replica(
|
||||
share_network='', replica_state=constants.REPLICA_STATE_IN_SYNC)
|
||||
replica_2 = fake_replica(id='fake2')
|
||||
snapshots = ([fakes.fake_snapshot(create_instance=True)]
|
||||
if has_snapshots else [])
|
||||
snapshot_instances = [
|
||||
fakes.fake_snapshot_instance(share_instance_id=replica['id']),
|
||||
fakes.fake_snapshot_instance(share_instance_id='fake2'),
|
||||
]
|
||||
fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_instance_access_copy',
|
||||
mock.Mock(return_value=fake_access_rules))
|
||||
self.mock_object(db, 'share_replicas_get_available_active_replica',
|
||||
mock.Mock(return_value=fake_replica(id='fake2')))
|
||||
mock.Mock(return_value=replica_2))
|
||||
self.mock_object(self.share_manager,
|
||||
'_provide_share_server_for_share',
|
||||
mock.Mock(return_value=('FAKE_SERVER', replica)))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica, replica_2]))
|
||||
self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock(
|
||||
return_value=snapshots))
|
||||
mock_instance_get_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_export_locs_update_call = self.mock_object(
|
||||
db, 'share_export_locations_update')
|
||||
@ -795,18 +820,27 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertTrue(driver_call.called)
|
||||
call_args = driver_call.call_args_list[0][0]
|
||||
replica_list_arg = call_args[1]
|
||||
snapshot_list_arg = call_args[4]
|
||||
r_ids = [r['id'] for r in replica_list_arg]
|
||||
for r in (replica, replica_2):
|
||||
self.assertIn(r['id'], r_ids)
|
||||
self.assertEqual(2, len(r_ids))
|
||||
if has_snapshots:
|
||||
for snapshot_dict in snapshot_list_arg:
|
||||
self.assertTrue('active_replica_snapshot' in snapshot_dict)
|
||||
self.assertTrue('share_replica_snapshot' in snapshot_dict)
|
||||
else:
|
||||
self.assertFalse(mock_instance_get_call.called)
|
||||
|
||||
def test_delete_share_replica_access_rules_exception(self):
|
||||
replica = fake_replica()
|
||||
replica_2 = fake_replica(id='fake_2')
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica, replica_2]))
|
||||
active_replica = fake_replica(id='Current_active_replica')
|
||||
mock_error_log = self.mock_object(manager.LOG, 'error')
|
||||
active_replica = fake_replica(
|
||||
id='Current_active_replica',
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
mock_exception_log = self.mock_object(manager.LOG, 'exception')
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_replicas_get_available_active_replica',
|
||||
@ -824,17 +858,18 @@ class ShareManagerTestCase(test.TestCase):
|
||||
|
||||
self.assertRaises(exception.ManilaException,
|
||||
self.share_manager.delete_share_replica,
|
||||
self.context, replica, share_id=replica['share_id'])
|
||||
self.context, replica['id'],
|
||||
share_id=replica['share_id'])
|
||||
mock_replica_update_call.assert_called_once_with(
|
||||
mock.ANY, replica['id'], {'status': constants.STATUS_ERROR})
|
||||
self.assertFalse(mock_drv_delete_replica_call.called)
|
||||
self.assertFalse(mock_replica_delete_call.called)
|
||||
self.assertFalse(mock_error_log.called)
|
||||
self.assertFalse(mock_exception_log.called)
|
||||
|
||||
def test_delete_share_replica_drv_misbehavior_ignored_with_the_force(self):
|
||||
replica = fake_replica()
|
||||
active_replica = fake_replica(id='Current_active_replica')
|
||||
mock_error_log = self.mock_object(manager.LOG, 'error')
|
||||
mock_exception_log = self.mock_object(manager.LOG, 'exception')
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
@ -845,6 +880,11 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.access_helper,
|
||||
'update_access_rules')
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=[]))
|
||||
mock_snap_instance_delete = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
|
||||
mock_drv_delete_replica_call = self.mock_object(
|
||||
@ -854,12 +894,14 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.share_manager.access_helper, 'update_access_rules')
|
||||
|
||||
self.share_manager.delete_share_replica(
|
||||
self.context, replica, share_id=replica['share_id'], force=True)
|
||||
self.context, replica['id'], share_id=replica['share_id'],
|
||||
force=True)
|
||||
|
||||
self.assertFalse(mock_replica_update_call.called)
|
||||
self.assertTrue(mock_replica_delete_call.called)
|
||||
self.assertEqual(1, mock_error_log.call_count)
|
||||
self.assertEqual(1, mock_exception_log.call_count)
|
||||
self.assertTrue(mock_drv_delete_replica_call.called)
|
||||
self.assertFalse(mock_snap_instance_delete.called)
|
||||
|
||||
def test_delete_share_replica_driver_exception(self):
|
||||
replica = fake_replica()
|
||||
@ -872,6 +914,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=active_replica))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
mock_snapshot_get_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=[]))
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
|
||||
self.mock_object(
|
||||
@ -882,23 +927,37 @@ class ShareManagerTestCase(test.TestCase):
|
||||
|
||||
self.assertRaises(exception.ManilaException,
|
||||
self.share_manager.delete_share_replica,
|
||||
self.context, replica)
|
||||
self.context, replica['id'],
|
||||
share_id=replica['share_id'])
|
||||
self.assertTrue(mock_replica_update_call.called)
|
||||
self.assertFalse(mock_replica_delete_call.called)
|
||||
self.assertTrue(mock_drv_delete_replica_call.called)
|
||||
self.assertTrue(mock_snapshot_get_call.called)
|
||||
|
||||
def test_delete_share_replica_both_exceptions_ignored_with_the_force(self):
|
||||
replica = fake_replica()
|
||||
active_replica = fake_replica(id='Current_active_replica')
|
||||
snapshots = [
|
||||
fakes.fake_snapshot(share_id=replica['id'],
|
||||
status=constants.STATUS_AVAILABLE),
|
||||
fakes.fake_snapshot(share_id=replica['id'],
|
||||
id='test_creating_to_err',
|
||||
status=constants.STATUS_CREATING)
|
||||
]
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
mock_error_log = self.mock_object(manager.LOG, 'error')
|
||||
mock_exception_log = self.mock_object(manager.LOG, 'exception')
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_replicas_get_available_active_replica',
|
||||
mock.Mock(return_value=active_replica))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshots))
|
||||
mock_snapshot_instance_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
|
||||
self.mock_object(
|
||||
@ -909,17 +968,29 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(side_effect=exception.ManilaException))
|
||||
|
||||
self.share_manager.delete_share_replica(
|
||||
self.context, replica, share_id=replica['share_id'], force=True)
|
||||
self.context, replica['id'], share_id=replica['share_id'],
|
||||
force=True)
|
||||
|
||||
mock_replica_update_call.assert_called_once_with(
|
||||
mock.ANY, replica['id'], {'status': constants.STATUS_ERROR})
|
||||
self.assertTrue(mock_replica_delete_call.called)
|
||||
self.assertEqual(2, mock_error_log.call_count)
|
||||
self.assertEqual(2, mock_exception_log.call_count)
|
||||
self.assertTrue(mock_drv_delete_replica_call.called)
|
||||
self.assertEqual(2, mock_snapshot_instance_delete_call.call_count)
|
||||
|
||||
def test_delete_share_replica(self):
|
||||
replica = fake_replica()
|
||||
active_replica = fake_replica(id='current_active_replica')
|
||||
snapshots = [
|
||||
fakes.fake_snapshot(share_id=replica['share_id'],
|
||||
status=constants.STATUS_AVAILABLE),
|
||||
fakes.fake_snapshot(share_id=replica['share_id'],
|
||||
id='test_creating_to_err',
|
||||
status=constants.STATUS_CREATING)
|
||||
]
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshots))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
@ -929,6 +1000,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
mock_info_log = self.mock_object(manager.LOG, 'info')
|
||||
mock_snapshot_instance_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_replica_update_call = self.mock_object(db, 'share_replica_update')
|
||||
mock_replica_delete_call = self.mock_object(db, 'share_replica_delete')
|
||||
self.mock_object(
|
||||
@ -942,6 +1015,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertTrue(mock_replica_delete_call.called)
|
||||
self.assertTrue(mock_info_log.called)
|
||||
self.assertTrue(mock_drv_delete_replica_call.called)
|
||||
self.assertEqual(2, mock_snapshot_instance_delete_call.call_count)
|
||||
|
||||
def test_promote_share_replica_no_active_replica(self):
|
||||
replica = fake_replica()
|
||||
@ -993,11 +1067,21 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertFalse(mock_info_log.called)
|
||||
|
||||
@ddt.data([], None)
|
||||
def test_promote_share_replica_driver_updates_nothing(self, retval):
|
||||
def test_promote_share_replica_driver_update_nothing_has_snaps(self,
|
||||
retval):
|
||||
replica = fake_replica()
|
||||
active_replica = fake_replica(
|
||||
id='current_active_replica',
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
snapshots_instances = [
|
||||
fakes.fake_snapshot(create_instance=True,
|
||||
share_id=replica['share_id'],
|
||||
status=constants.STATUS_AVAILABLE),
|
||||
fakes.fake_snapshot(create_instance=True,
|
||||
share_id=replica['share_id'],
|
||||
id='test_creating_to_err',
|
||||
status=constants.STATUS_CREATING)
|
||||
]
|
||||
replica_list = [replica, active_replica]
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
@ -1006,9 +1090,14 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replica_list))
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshots_instances))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'promote_replica',
|
||||
mock.Mock(return_value=retval))
|
||||
mock_snap_instance_update = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_info_log = self.mock_object(manager.LOG, 'info')
|
||||
mock_export_locs_update = self.mock_object(
|
||||
db, 'share_export_locations_update')
|
||||
@ -1026,7 +1115,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertFalse(mock_export_locs_update.called)
|
||||
mock_replica_update.assert_has_calls(expected_update_calls,
|
||||
any_order=True)
|
||||
self.assertTrue(mock_info_log.called)
|
||||
mock_snap_instance_update.assert_called_once_with(
|
||||
mock.ANY, 'test_creating_to_err',
|
||||
{'status': constants.STATUS_ERROR})
|
||||
self.assertEqual(2, mock_info_log.call_count)
|
||||
|
||||
def test_promote_share_replica_driver_updates_replica_list(self):
|
||||
replica = fake_replica()
|
||||
@ -1052,11 +1144,16 @@ class ShareManagerTestCase(test.TestCase):
|
||||
]
|
||||
self.mock_object(db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=[]))
|
||||
self.mock_object(db, 'share_access_get_all_for_share',
|
||||
mock.Mock(return_value=[]))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replica_list))
|
||||
mock_snap_instance_update = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'promote_replica',
|
||||
mock.Mock(return_value=updated_replica_list))
|
||||
@ -1075,6 +1172,7 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertTrue(
|
||||
reset_replication_change_call in mock_replica_update.mock_calls)
|
||||
self.assertTrue(mock_info_log.called)
|
||||
self.assertFalse(mock_snap_instance_update.called)
|
||||
|
||||
@ddt.data('openstack1@watson#_pool0', 'openstack1@newton#_pool0')
|
||||
def test_periodic_share_replica_update(self, host):
|
||||
@ -1103,8 +1201,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
def test__share_replica_update_driver_exception(self, replica_state):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
replica = fake_replica(replica_state=replica_state)
|
||||
active_replica = fake_replica(
|
||||
replica_state=constants.REPLICA_STATE_ACTIVE)
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
self.mock_object(self.share_manager.db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_server_get',
|
||||
@ -1127,8 +1227,9 @@ class ShareManagerTestCase(test.TestCase):
|
||||
def test__share_replica_update_driver_exception_ignored(self):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
replica = fake_replica(replica_state=constants.STATUS_ERROR)
|
||||
active_replica = fake_replica(replica_state=constants.STATUS_ACTIVE)
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
self.mock_object(self.share_manager.db, 'share_replica_get',
|
||||
mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_server_get',
|
||||
@ -1195,9 +1296,17 @@ class ShareManagerTestCase(test.TestCase):
|
||||
constants.REPLICA_STATE_OUT_OF_SYNC]
|
||||
replica = fake_replica(replica_state=random.choice(replica_states),
|
||||
share_server='fake_share_server')
|
||||
active_replica = fake_replica(
|
||||
id='fake2', replica_state=constants.STATUS_ACTIVE)
|
||||
snapshots = [fakes.fake_snapshot(
|
||||
create_instance=True, aggregate_status=constants.STATUS_AVAILABLE)]
|
||||
snapshot_instances = [
|
||||
fakes.fake_snapshot_instance(share_instance_id=replica['id']),
|
||||
fakes.fake_snapshot_instance(share_instance_id='fake2'),
|
||||
]
|
||||
del replica['availability_zone']
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
mock.Mock(return_value=[replica, active_replica]))
|
||||
self.mock_object(db, 'share_server_get',
|
||||
mock.Mock(return_value='fake_share_server'))
|
||||
mock_db_update_calls = []
|
||||
@ -1208,6 +1317,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
mock.Mock(return_value=retval))
|
||||
mock_db_update_call = self.mock_object(
|
||||
self.share_manager.db, 'share_replica_update')
|
||||
self.mock_object(db, 'share_snapshot_get_all_for_share',
|
||||
mock.Mock(return_value=snapshots))
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
|
||||
self.share_manager._share_replica_update(
|
||||
self.context, replica, share_id=replica['share_id'])
|
||||
@ -1216,9 +1329,10 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.assertEqual(1, mock_warning_log.call_count)
|
||||
elif retval:
|
||||
self.assertEqual(0, mock_warning_log.call_count)
|
||||
mock_driver_call.assert_called_once_with(
|
||||
self.context, [replica], replica, [],
|
||||
share_server='fake_share_server')
|
||||
self.assertTrue(mock_driver_call.called)
|
||||
snapshot_list_arg = mock_driver_call.call_args[0][4]
|
||||
self.assertTrue('active_replica_snapshot' in snapshot_list_arg[0])
|
||||
self.assertTrue('share_replica_snapshot' in snapshot_list_arg[0])
|
||||
mock_db_update_call.assert_has_calls(mock_db_update_calls)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
|
||||
@ -4059,7 +4173,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
self.share_manager,
|
||||
'_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.db, 'share_snapshot_destroy')
|
||||
mock_snapshot_instance_destroy_call = self.mock_object(
|
||||
self.share_manager.db, 'share_snapshot_instance_delete')
|
||||
share = db_utils.create_share()
|
||||
snapshot = db_utils.create_snapshot(share_id=share['id'])
|
||||
mock_get = self.mock_object(self.share_manager.db,
|
||||
@ -4070,8 +4185,8 @@ class ShareManagerTestCase(test.TestCase):
|
||||
|
||||
self.share_manager.driver.unmanage_snapshot.assert_called_once_with(
|
||||
mock.ANY)
|
||||
self.share_manager.db.share_snapshot_destroy.assert_called_once_with(
|
||||
mock.ANY, snapshot['id'])
|
||||
mock_snapshot_instance_destroy_call.assert_called_once_with(
|
||||
mock.ANY, snapshot['instance']['id'])
|
||||
mock_get.assert_called_once_with(
|
||||
utils.IsAMatcher(context.RequestContext), snapshot['id'])
|
||||
mock_get_share_server.assert_called_once_with(
|
||||
@ -4079,6 +4194,478 @@ class ShareManagerTestCase(test.TestCase):
|
||||
if quota_error:
|
||||
self.assertTrue(mock_log_warning.called)
|
||||
|
||||
def _setup_crud_replicated_snapshot_data(self):
|
||||
snapshot = fakes.fake_snapshot(create_instance=True)
|
||||
snapshot_instance = fakes.fake_snapshot_instance(
|
||||
base_snapshot=snapshot)
|
||||
snapshot_instances = [snapshot['instance'], snapshot_instance]
|
||||
replicas = [fake_replica(), fake_replica()]
|
||||
return snapshot, snapshot_instances, replicas
|
||||
|
||||
def test_create_replicated_snapshot_driver_exception(self):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'create_replicated_snapshot',
|
||||
mock.Mock(side_effect=exception.ManilaException))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
self.assertRaises(exception.ManilaException,
|
||||
self.share_manager.create_replicated_snapshot,
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
mock_db_update_call.assert_has_calls([
|
||||
mock.call(
|
||||
self.context, snapshot['instance']['id'],
|
||||
{'status': constants.STATUS_ERROR}),
|
||||
mock.call(
|
||||
self.context, snapshot_instances[1]['id'],
|
||||
{'status': constants.STATUS_ERROR}),
|
||||
])
|
||||
|
||||
@ddt.data(None, [])
|
||||
def test_create_replicated_snapshot_driver_updates_nothing(self, retval):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'create_replicated_snapshot',
|
||||
mock.Mock(return_value=retval))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
return_value = self.share_manager.create_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
|
||||
self.assertIsNone(return_value)
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
def test_create_replicated_snapshot_driver_updates_snapshot(self):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
snapshot_dict = {
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
'provider_location': 'spinners_end',
|
||||
'progress': '100%',
|
||||
'id': snapshot['instance']['id'],
|
||||
}
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'create_replicated_snapshot',
|
||||
mock.Mock(return_value=[snapshot_dict]))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
return_value = self.share_manager.create_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
|
||||
self.assertIsNone(return_value)
|
||||
mock_db_update_call.assert_called_once_with(
|
||||
self.context, snapshot['instance']['id'], snapshot_dict)
|
||||
|
||||
def delete_replicated_snapshot_driver_exception(self):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'delete_replicated_snapshot',
|
||||
mock.Mock(side_effect=exception.ManilaException))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
|
||||
self.assertRaises(exception.ManilaException,
|
||||
self.share_manager.delete_replicated_snapshot,
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
mock_db_update_call.assert_has_calls([
|
||||
mock.call(
|
||||
self.context, snapshot['instance']['id'],
|
||||
{'status': constants.STATUS_ERROR_DELETING}),
|
||||
mock.call(
|
||||
self.context, snapshot_instances[1]['id'],
|
||||
{'status': constants.STATUS_ERROR_DELETING}),
|
||||
])
|
||||
self.assertFalse(mock_db_delete_call.called)
|
||||
|
||||
def delete_replicated_snapshot_driver_exception_ignored_with_force(self):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'delete_replicated_snapshot',
|
||||
mock.Mock(side_effect=exception.ManilaException))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
|
||||
retval = self.share_manager.delete_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
|
||||
self.assertIsNone(retval)
|
||||
mock_db_delete_call.assert_has_calls([
|
||||
mock.call(
|
||||
self.context, snapshot['instance']['id']),
|
||||
mock.call(
|
||||
self.context, snapshot_instances[1]['id']),
|
||||
])
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
@ddt.data(None, [])
|
||||
def delete_replicated_snapshot_driver_updates_nothing(self, retval):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'delete_replicated_snapshot',
|
||||
mock.Mock(return_value=retval))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
|
||||
return_value = self.share_manager.delete_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
|
||||
self.assertIsNone(return_value)
|
||||
self.assertFalse(mock_db_delete_call.called)
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
def delete_replicated_snapshot_driver_deletes_snapshots(self):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
retval = [{
|
||||
'status': constants.STATUS_DELETED,
|
||||
'id': snapshot['instance']['id'],
|
||||
}]
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'delete_replicated_snapshot',
|
||||
mock.Mock(return_value=retval))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
|
||||
return_value = self.share_manager.delete_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share')
|
||||
|
||||
self.assertIsNone(return_value)
|
||||
mock_db_delete_call.assert_called_once_with(
|
||||
self.context, snapshot['instance']['id'])
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def delete_replicated_snapshot_drv_del_and_updates_snapshots(self, force):
|
||||
snapshot, snapshot_instances, replicas = (
|
||||
self._setup_crud_replicated_snapshot_data()
|
||||
)
|
||||
updated_instance_details = {
|
||||
'status': constants.STATUS_ERROR,
|
||||
'id': snapshot_instances[1]['id'],
|
||||
'provider_location': 'azkaban',
|
||||
}
|
||||
retval = [
|
||||
{
|
||||
'status': constants.STATUS_DELETED,
|
||||
'id': snapshot['instance']['id'],
|
||||
},
|
||||
]
|
||||
retval.append(updated_instance_details)
|
||||
self.mock_object(
|
||||
db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
|
||||
self.mock_object(self.share_manager, '_get_share_server')
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=replicas))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'delete_replicated_snapshot',
|
||||
mock.Mock(return_value=retval))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
|
||||
return_value = self.share_manager.delete_replicated_snapshot(
|
||||
self.context, snapshot['id'], share_id='fake_share', force=force)
|
||||
|
||||
self.assertIsNone(return_value)
|
||||
if force:
|
||||
self.assertTrue(2, mock_db_delete_call.call_count)
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
else:
|
||||
mock_db_delete_call.assert_called_once_with(
|
||||
self.context, snapshot['instance']['id'])
|
||||
mock_db_update_call.assert_called_once_with(
|
||||
self.context, snapshot_instances[1]['id'],
|
||||
updated_instance_details)
|
||||
|
||||
def test_periodic_share_replica_snapshot_update(self):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
replicas = 3 * [
|
||||
fake_replica(host='malfoy@manor#_pool0',
|
||||
replica_state=constants.REPLICA_STATE_IN_SYNC)
|
||||
]
|
||||
replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE))
|
||||
snapshot = fakes.fake_snapshot(create_instance=True,
|
||||
status=constants.STATUS_DELETING)
|
||||
snapshot_instances = 3 * [
|
||||
fakes.fake_snapshot_instance(base_snapshot=snapshot)
|
||||
]
|
||||
self.mock_object(
|
||||
db, 'share_replicas_get_all', mock.Mock(return_value=replicas))
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(return_value=snapshot_instances))
|
||||
mock_snapshot_update_call = self.mock_object(
|
||||
self.share_manager, '_update_replica_snapshot')
|
||||
|
||||
retval = self.share_manager.periodic_share_replica_snapshot_update(
|
||||
self.context)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
self.assertEqual(0, mock_snapshot_update_call.call_count)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test_periodic_share_replica_snapshot_update_nothing_to_update(
|
||||
self, has_instances):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
replicas = 3 * [
|
||||
fake_replica(host='malfoy@manor#_pool0',
|
||||
replica_state=constants.REPLICA_STATE_IN_SYNC)
|
||||
]
|
||||
replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE))
|
||||
snapshot = fakes.fake_snapshot(create_instance=True,
|
||||
status=constants.STATUS_DELETING)
|
||||
snapshot_instances = 3 * [
|
||||
fakes.fake_snapshot_instance(base_snapshot=snapshot)
|
||||
]
|
||||
self.mock_object(db, 'share_replicas_get_all',
|
||||
mock.Mock(side_effect=[[], replicas]))
|
||||
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
|
||||
mock.Mock(side_effect=[snapshot_instances, []]))
|
||||
mock_snapshot_update_call = self.mock_object(
|
||||
self.share_manager, '_update_replica_snapshot')
|
||||
|
||||
retval = self.share_manager.periodic_share_replica_snapshot_update(
|
||||
self.context)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
self.assertEqual(0, mock_snapshot_update_call.call_count)
|
||||
|
||||
def test__update_replica_snapshot_replica_deleted_from_database(self):
|
||||
replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy')
|
||||
self.mock_object(db, 'share_replica_get', mock.Mock(
|
||||
side_effect=replica_not_found))
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_driver_update_call = self.mock_object(
|
||||
self.share_manager.driver, 'update_replicated_snapshot')
|
||||
snaphot_instance = fakes.fake_snapshot_instance()
|
||||
|
||||
retval = self.share_manager._update_replica_snapshot(
|
||||
self.context, snaphot_instance)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
mock_db_delete_call.assert_called_once_with(
|
||||
self.context, snaphot_instance['id'])
|
||||
self.assertFalse(mock_driver_update_call.called)
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
def test__update_replica_snapshot_both_deleted_from_database(self):
|
||||
replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy')
|
||||
instance_not_found = exception.ShareSnapshotInstanceNotFound(
|
||||
instance_id='spoon!')
|
||||
self.mock_object(db, 'share_replica_get', mock.Mock(
|
||||
side_effect=replica_not_found))
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete', mock.Mock(
|
||||
side_effect=instance_not_found))
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
mock_driver_update_call = self.mock_object(
|
||||
self.share_manager.driver, 'update_replicated_snapshot')
|
||||
snapshot_instance = fakes.fake_snapshot_instance()
|
||||
|
||||
retval = self.share_manager._update_replica_snapshot(
|
||||
self.context, snapshot_instance)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
mock_db_delete_call.assert_called_once_with(
|
||||
self.context, snapshot_instance['id'])
|
||||
self.assertFalse(mock_driver_update_call.called)
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
def test__update_replica_snapshot_driver_raises_Not_Found_exception(self):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
replica = fake_replica()
|
||||
snapshot_instance = fakes.fake_snapshot_instance(
|
||||
status=constants.STATUS_DELETING)
|
||||
self.mock_object(
|
||||
db, 'share_replica_get', mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(
|
||||
self.share_manager.driver, 'update_replicated_snapshot',
|
||||
mock.Mock(
|
||||
side_effect=exception.SnapshotResourceNotFound(name='abc')))
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
retval = self.share_manager._update_replica_snapshot(
|
||||
self.context, snapshot_instance, replica_snapshots=None)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
mock_db_delete_call.assert_called_once_with(
|
||||
self.context, snapshot_instance['id'])
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
|
||||
@ddt.data(exception.NotFound, exception.ManilaException)
|
||||
def test__update_replica_snapshot_driver_raises_other_exception(self, exc):
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
mock_info_log = self.mock_object(manager.LOG, 'info')
|
||||
mock_exception_log = self.mock_object(manager.LOG, 'exception')
|
||||
replica = fake_replica()
|
||||
snapshot_instance = fakes.fake_snapshot_instance(
|
||||
status=constants.STATUS_CREATING)
|
||||
self.mock_object(
|
||||
db, 'share_replica_get', mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.driver,
|
||||
'update_replicated_snapshot',
|
||||
mock.Mock(side_effect=exc))
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
retval = self.share_manager._update_replica_snapshot(
|
||||
self.context, snapshot_instance)
|
||||
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(1, mock_exception_log.call_count)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
self.assertFalse(mock_info_log.called)
|
||||
mock_db_update_call.assert_called_once_with(
|
||||
self.context, snapshot_instance['id'], {'status': 'error'})
|
||||
self.assertFalse(mock_db_delete_call.called)
|
||||
|
||||
@ddt.data(True, False)
|
||||
def test__update_replica_snapshot_driver_updates_replica(self, update):
|
||||
replica = fake_replica()
|
||||
snapshot_instance = fakes.fake_snapshot_instance()
|
||||
driver_update = {}
|
||||
if update:
|
||||
driver_update = {
|
||||
'id': snapshot_instance['id'],
|
||||
'provider_location': 'knockturn_alley',
|
||||
'status': constants.STATUS_AVAILABLE,
|
||||
}
|
||||
mock_debug_log = self.mock_object(manager.LOG, 'debug')
|
||||
mock_info_log = self.mock_object(manager.LOG, 'info')
|
||||
self.mock_object(
|
||||
db, 'share_replica_get', mock.Mock(return_value=replica))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_snapshot_instance_get',
|
||||
mock.Mock(return_value=snapshot_instance))
|
||||
self.mock_object(db, 'share_replicas_get_all_by_share',
|
||||
mock.Mock(return_value=[replica]))
|
||||
self.mock_object(self.share_manager, '_get_share_server',
|
||||
mock.Mock(return_value=None))
|
||||
self.mock_object(self.share_manager.driver,
|
||||
'update_replicated_snapshot',
|
||||
mock.Mock(return_value=driver_update))
|
||||
mock_db_delete_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_delete')
|
||||
mock_db_update_call = self.mock_object(
|
||||
db, 'share_snapshot_instance_update')
|
||||
|
||||
retval = self.share_manager._update_replica_snapshot(
|
||||
self.context, snapshot_instance, replica_snapshots=None)
|
||||
|
||||
driver_update['progress'] = '100%'
|
||||
self.assertIsNone(retval)
|
||||
self.assertEqual(1, mock_debug_log.call_count)
|
||||
self.assertFalse(mock_info_log.called)
|
||||
if update:
|
||||
mock_db_update_call.assert_called_once_with(
|
||||
self.context, snapshot_instance['id'], driver_update)
|
||||
else:
|
||||
self.assertFalse(mock_db_update_call.called)
|
||||
self.assertFalse(mock_db_delete_call.called)
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class HookWrapperTestCase(test.TestCase):
|
||||
|
@ -106,6 +106,10 @@ class ShareRpcAPITestCase(test.TestCase):
|
||||
share_replica = expected_msg.pop('share_replica', None)
|
||||
expected_msg['share_replica_id'] = share_replica['id']
|
||||
expected_msg['share_id'] = share_replica['share_id']
|
||||
if 'replicated_snapshot' in expected_msg:
|
||||
snapshot = expected_msg.pop('replicated_snapshot', None)
|
||||
expected_msg['snapshot_id'] = snapshot['id']
|
||||
expected_msg['share_id'] = snapshot['share_id']
|
||||
|
||||
if 'host' in kwargs:
|
||||
host = kwargs['host']
|
||||
@ -117,6 +121,8 @@ class ShareRpcAPITestCase(test.TestCase):
|
||||
host = kwargs['share_server']['host']
|
||||
elif 'share_replica' in kwargs:
|
||||
host = kwargs['share_replica']['host']
|
||||
elif 'replicated_snapshot' in kwargs:
|
||||
host = kwargs['share']['instance']['host']
|
||||
else:
|
||||
host = kwargs['share']['host']
|
||||
target['server'] = host
|
||||
@ -315,6 +321,22 @@ class ShareRpcAPITestCase(test.TestCase):
|
||||
snapshot=self.fake_snapshot,
|
||||
host='fake_host')
|
||||
|
||||
def test_create_replicated_snapshot(self):
|
||||
self._test_share_api('create_replicated_snapshot',
|
||||
rpc_method='cast',
|
||||
version='1.11',
|
||||
replicated_snapshot=self.fake_snapshot,
|
||||
share=self.fake_share)
|
||||
|
||||
def test_delete_replicated_snapshot(self):
|
||||
self._test_share_api('delete_replicated_snapshot',
|
||||
rpc_method='cast',
|
||||
version='1.11',
|
||||
replicated_snapshot=self.fake_snapshot,
|
||||
share_id=self.fake_snapshot['share_id'],
|
||||
force=False,
|
||||
host='fake_host')
|
||||
|
||||
class Desthost(object):
|
||||
host = 'fake_host'
|
||||
capabilities = 1
|
||||
|
@ -483,13 +483,20 @@ class ManilaExceptionResponseCode404(test.TestCase):
|
||||
self.assertEqual(404, e.code)
|
||||
self.assertIn(name, e.msg)
|
||||
|
||||
def test_snapshot_not_found(self):
|
||||
# verify response code for exception.SnapshotNotFound
|
||||
def test_snapshot_resource_not_found(self):
|
||||
# verify response code for exception.SnapshotResourceNotFound
|
||||
name = "fake_name"
|
||||
e = exception.SnapshotNotFound(name=name)
|
||||
e = exception.SnapshotResourceNotFound(name=name)
|
||||
self.assertEqual(404, e.code)
|
||||
self.assertIn(name, e.msg)
|
||||
|
||||
def test_snapshot_instance_not_found(self):
|
||||
# verify response code for exception.ShareSnapshotInstanceNotFound
|
||||
instance_id = 'fake_instance_id'
|
||||
e = exception.ShareSnapshotInstanceNotFound(instance_id=instance_id)
|
||||
self.assertEqual(404, e.code)
|
||||
self.assertIn(instance_id, e.msg)
|
||||
|
||||
def test_export_location_not_found(self):
|
||||
# verify response code for exception.ExportLocationNotFound
|
||||
uuid = "fake-export-location-uuid"
|
||||
|
Loading…
x
Reference in New Issue
Block a user