Deleted manila.volume
This commit is contained in:
parent
dc4ce932ed
commit
9b20ae70ce
manila/volume
@ -1,25 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Importing full names to not pollute the namespace and cause possible
|
||||
# collisions with use of 'from manila.volume import <foo>' elsewhere.
|
||||
import manila.flags
|
||||
import manila.openstack.common.importutils
|
||||
|
||||
API = manila.openstack.common.importutils.import_class(
|
||||
manila.flags.FLAGS.volume_api_class)
|
@ -1,765 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Handles all requests relating to volumes.
|
||||
"""
|
||||
|
||||
import functools
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila.db import base
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.image import glance
|
||||
from manila.openstack.common import excutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import timeutils
|
||||
import manila.policy
|
||||
from manila import quota
|
||||
from manila.scheduler import rpcapi as scheduler_rpcapi
|
||||
from manila.volume import rpcapi as volume_rpcapi
|
||||
from manila.volume import volume_types
|
||||
|
||||
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
|
||||
default=True,
|
||||
help='Create volume from snapshot at the host '
|
||||
'where snapshot resides')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.register_opt(volume_host_opt)
|
||||
flags.DECLARE('storage_availability_zone', 'manila.volume.manager')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
GB = 1048576 * 1024
|
||||
QUOTAS = quota.QUOTAS
|
||||
|
||||
|
||||
def wrap_check_policy(func):
|
||||
"""Check policy corresponding to the wrapped methods prior to execution
|
||||
|
||||
This decorator requires the first 3 args of the wrapped function
|
||||
to be (self, context, volume)
|
||||
"""
|
||||
@functools.wraps(func)
|
||||
def wrapped(self, context, target_obj, *args, **kwargs):
|
||||
check_policy(context, func.__name__, target_obj)
|
||||
return func(self, context, target_obj, *args, **kwargs)
|
||||
|
||||
return wrapped
|
||||
|
||||
|
||||
def check_policy(context, action, target_obj=None):
|
||||
target = {
|
||||
'project_id': context.project_id,
|
||||
'user_id': context.user_id,
|
||||
}
|
||||
target.update(target_obj or {})
|
||||
_action = 'volume:%s' % action
|
||||
manila.policy.enforce(context, _action, target)
|
||||
|
||||
|
||||
class API(base.Base):
|
||||
"""API for interacting with the volume manager."""
|
||||
|
||||
def __init__(self, db_driver=None, image_service=None):
|
||||
self.image_service = (image_service or
|
||||
glance.get_default_image_service())
|
||||
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
|
||||
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
|
||||
super(API, self).__init__(db_driver)
|
||||
|
||||
def create(self, context, size, name, description, snapshot=None,
|
||||
image_id=None, volume_type=None, metadata=None,
|
||||
availability_zone=None, source_volume=None):
|
||||
|
||||
exclusive_options = (snapshot, image_id, source_volume)
|
||||
exclusive_options_set = sum(1 for option in
|
||||
exclusive_options if option is not None)
|
||||
if exclusive_options_set > 1:
|
||||
msg = (_("May specify only one of snapshot, imageRef "
|
||||
"or source volume"))
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
check_policy(context, 'create')
|
||||
if snapshot is not None:
|
||||
if snapshot['status'] != "available":
|
||||
msg = _("status must be available")
|
||||
raise exception.InvalidSnapshot(reason=msg)
|
||||
if not size:
|
||||
size = snapshot['volume_size']
|
||||
elif size < snapshot['volume_size']:
|
||||
msg = _("Volume size cannot be lesser than"
|
||||
" the Snapshot size")
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
snapshot_id = snapshot['id']
|
||||
else:
|
||||
snapshot_id = None
|
||||
|
||||
if source_volume is not None:
|
||||
if source_volume['status'] == "error":
|
||||
msg = _("Unable to clone volumes that are in an error state")
|
||||
raise exception.InvalidSourceVolume(reason=msg)
|
||||
if not size:
|
||||
size = source_volume['size']
|
||||
else:
|
||||
if size < source_volume['size']:
|
||||
msg = _("Clones currently must be "
|
||||
">= original volume size.")
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
source_volid = source_volume['id']
|
||||
else:
|
||||
source_volid = None
|
||||
|
||||
def as_int(s):
|
||||
try:
|
||||
return int(s)
|
||||
except (ValueError, TypeError):
|
||||
return s
|
||||
|
||||
# tolerate size as stringified int
|
||||
size = as_int(size)
|
||||
|
||||
if not isinstance(size, int) or size <= 0:
|
||||
msg = (_("Volume size '%s' must be an integer and greater than 0")
|
||||
% size)
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
if (image_id and not (source_volume or snapshot)):
|
||||
# check image existence
|
||||
image_meta = self.image_service.show(context, image_id)
|
||||
image_size_in_gb = (int(image_meta['size']) + GB - 1) / GB
|
||||
#check image size is not larger than volume size.
|
||||
if image_size_in_gb > size:
|
||||
msg = _('Size of specified image is larger than volume size.')
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
# Check image minDisk requirement is met for the particular volume
|
||||
if size < image_meta.get('min_disk', 0):
|
||||
msg = _('Image minDisk size is larger than the volume size.')
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
try:
|
||||
reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
|
||||
except exception.OverQuota as e:
|
||||
overs = e.kwargs['overs']
|
||||
usages = e.kwargs['usages']
|
||||
quotas = e.kwargs['quotas']
|
||||
|
||||
def _consumed(name):
|
||||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'gigabytes' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
's_size': size,
|
||||
'd_consumed': _consumed('gigabytes'),
|
||||
'd_quota': quotas['gigabytes']})
|
||||
raise exception.VolumeSizeExceedsAvailableQuota()
|
||||
elif 'volumes' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"volume (%(d_consumed)d volumes "
|
||||
"already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
'd_consumed': _consumed('volumes')})
|
||||
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
|
||||
|
||||
if availability_zone is None:
|
||||
availability_zone = FLAGS.storage_availability_zone
|
||||
|
||||
if not volume_type and not source_volume:
|
||||
volume_type = volume_types.get_default_volume_type()
|
||||
|
||||
if not volume_type and source_volume:
|
||||
volume_type_id = source_volume['volume_type_id']
|
||||
else:
|
||||
volume_type_id = volume_type.get('id')
|
||||
|
||||
self._check_metadata_properties(context, metadata)
|
||||
options = {'size': size,
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'snapshot_id': snapshot_id,
|
||||
'availability_zone': availability_zone,
|
||||
'status': "creating",
|
||||
'attach_status': "detached",
|
||||
'display_name': name,
|
||||
'display_description': description,
|
||||
'volume_type_id': volume_type_id,
|
||||
'metadata': metadata,
|
||||
'source_volid': source_volid}
|
||||
|
||||
try:
|
||||
volume = self.db.volume_create(context, options)
|
||||
QUOTAS.commit(context, reservations)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
self.db.volume_destroy(context, volume['id'])
|
||||
finally:
|
||||
QUOTAS.rollback(context, reservations)
|
||||
|
||||
request_spec = {'volume_properties': options,
|
||||
'volume_type': volume_type,
|
||||
'volume_id': volume['id'],
|
||||
'snapshot_id': volume['snapshot_id'],
|
||||
'image_id': image_id,
|
||||
'source_volid': volume['source_volid']}
|
||||
|
||||
filter_properties = {}
|
||||
|
||||
self._cast_create_volume(context, request_spec, filter_properties)
|
||||
|
||||
return volume
|
||||
|
||||
def _cast_create_volume(self, context, request_spec, filter_properties):
|
||||
|
||||
# NOTE(Rongze Zhu): It is a simple solution for bug 1008866
|
||||
# If snapshot_id is set, make the call create volume directly to
|
||||
# the volume host where the snapshot resides instead of passing it
|
||||
# through the scheduler. So snapshot can be copy to new volume.
|
||||
|
||||
source_volid = request_spec['source_volid']
|
||||
volume_id = request_spec['volume_id']
|
||||
snapshot_id = request_spec['snapshot_id']
|
||||
image_id = request_spec['image_id']
|
||||
|
||||
if snapshot_id and FLAGS.snapshot_same_host:
|
||||
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
|
||||
source_volume_ref = self.db.volume_get(context,
|
||||
snapshot_ref['volume_id'])
|
||||
now = timeutils.utcnow()
|
||||
values = {'host': source_volume_ref['host'], 'scheduled_at': now}
|
||||
volume_ref = self.db.volume_update(context, volume_id, values)
|
||||
|
||||
# bypass scheduler and send request directly to volume
|
||||
self.volume_rpcapi.create_volume(
|
||||
context,
|
||||
volume_ref,
|
||||
volume_ref['host'],
|
||||
request_spec=request_spec,
|
||||
filter_properties=filter_properties,
|
||||
allow_reschedule=False,
|
||||
snapshot_id=snapshot_id,
|
||||
image_id=image_id)
|
||||
elif source_volid:
|
||||
source_volume_ref = self.db.volume_get(context,
|
||||
source_volid)
|
||||
now = timeutils.utcnow()
|
||||
values = {'host': source_volume_ref['host'], 'scheduled_at': now}
|
||||
volume_ref = self.db.volume_update(context, volume_id, values)
|
||||
|
||||
# bypass scheduler and send request directly to volume
|
||||
self.volume_rpcapi.create_volume(
|
||||
context,
|
||||
volume_ref,
|
||||
volume_ref['host'],
|
||||
request_spec=request_spec,
|
||||
filter_properties=filter_properties,
|
||||
allow_reschedule=False,
|
||||
snapshot_id=snapshot_id,
|
||||
image_id=image_id,
|
||||
source_volid=source_volid)
|
||||
else:
|
||||
self.scheduler_rpcapi.create_volume(
|
||||
context,
|
||||
FLAGS.volume_topic,
|
||||
volume_id,
|
||||
snapshot_id,
|
||||
image_id,
|
||||
request_spec=request_spec,
|
||||
filter_properties=filter_properties)
|
||||
|
||||
@wrap_check_policy
|
||||
def delete(self, context, volume, force=False):
|
||||
if context.is_admin and context.project_id != volume['project_id']:
|
||||
project_id = volume['project_id']
|
||||
else:
|
||||
project_id = context.project_id
|
||||
|
||||
volume_id = volume['id']
|
||||
if not volume['host']:
|
||||
# NOTE(vish): scheduling failed, so delete it
|
||||
# Note(zhiteng): update volume quota reservation
|
||||
try:
|
||||
reservations = QUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
volumes=-1,
|
||||
gigabytes=-volume['size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update quota for deleting volume"))
|
||||
self.db.volume_destroy(context.elevated(), volume_id)
|
||||
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
return
|
||||
if not force and volume['status'] not in ["available", "error",
|
||||
"error_restoring"]:
|
||||
msg = _("Volume status must be available or error")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
|
||||
if len(snapshots):
|
||||
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
now = timeutils.utcnow()
|
||||
self.db.volume_update(context, volume_id, {'status': 'deleting',
|
||||
'terminated_at': now})
|
||||
|
||||
self.volume_rpcapi.delete_volume(context, volume)
|
||||
|
||||
@wrap_check_policy
|
||||
def update(self, context, volume, fields):
|
||||
self.db.volume_update(context, volume['id'], fields)
|
||||
|
||||
def get(self, context, volume_id):
|
||||
rv = self.db.volume_get(context, volume_id)
|
||||
glance_meta = rv.get('volume_glance_metadata', None)
|
||||
volume = dict(rv.iteritems())
|
||||
check_policy(context, 'get', volume)
|
||||
|
||||
# NOTE(jdg): As per bug 1115629 iteritems doesn't pick
|
||||
# up the glance_meta dependency, add it explicitly if
|
||||
# it exists in the rv
|
||||
if glance_meta:
|
||||
volume['volume_glance_metadata'] = glance_meta
|
||||
|
||||
return volume
|
||||
|
||||
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
|
||||
sort_dir='desc', filters={}):
|
||||
check_policy(context, 'get_all')
|
||||
|
||||
try:
|
||||
if limit is not None:
|
||||
limit = int(limit)
|
||||
if limit < 0:
|
||||
msg = _('limit param must be positive')
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
except ValueError:
|
||||
msg = _('limit param must be an integer')
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
if (context.is_admin and 'all_tenants' in filters):
|
||||
# Need to remove all_tenants to pass the filtering below.
|
||||
del filters['all_tenants']
|
||||
volumes = self.db.volume_get_all(context, marker, limit, sort_key,
|
||||
sort_dir)
|
||||
else:
|
||||
volumes = self.db.volume_get_all_by_project(context,
|
||||
context.project_id,
|
||||
marker, limit,
|
||||
sort_key, sort_dir)
|
||||
|
||||
if filters:
|
||||
LOG.debug(_("Searching by: %s") % str(filters))
|
||||
|
||||
def _check_metadata_match(volume, searchdict):
|
||||
volume_metadata = {}
|
||||
for i in volume.get('volume_metadata'):
|
||||
volume_metadata[i['key']] = i['value']
|
||||
|
||||
for k, v in searchdict.iteritems():
|
||||
if (k not in volume_metadata.keys() or
|
||||
volume_metadata[k] != v):
|
||||
return False
|
||||
return True
|
||||
|
||||
# search_option to filter_name mapping.
|
||||
filter_mapping = {'metadata': _check_metadata_match}
|
||||
|
||||
result = []
|
||||
not_found = object()
|
||||
for volume in volumes:
|
||||
# go over all filters in the list
|
||||
for opt, values in filters.iteritems():
|
||||
try:
|
||||
filter_func = filter_mapping[opt]
|
||||
except KeyError:
|
||||
def filter_func(volume, value):
|
||||
return volume.get(opt, not_found) == value
|
||||
if not filter_func(volume, values):
|
||||
break # volume doesn't match this filter
|
||||
else: # did not break out loop
|
||||
result.append(volume) # volume matches all filters
|
||||
volumes = result
|
||||
|
||||
return volumes
|
||||
|
||||
def get_snapshot(self, context, snapshot_id):
|
||||
check_policy(context, 'get_snapshot')
|
||||
rv = self.db.snapshot_get(context, snapshot_id)
|
||||
return dict(rv.iteritems())
|
||||
|
||||
def get_volume(self, context, volume_id):
|
||||
check_policy(context, 'get_volume')
|
||||
rv = self.db.volume_get(context, volume_id)
|
||||
return dict(rv.iteritems())
|
||||
|
||||
def get_all_snapshots(self, context, search_opts=None):
|
||||
check_policy(context, 'get_all_snapshots')
|
||||
|
||||
search_opts = search_opts or {}
|
||||
|
||||
if (context.is_admin and 'all_tenants' in search_opts):
|
||||
# Need to remove all_tenants to pass the filtering below.
|
||||
del search_opts['all_tenants']
|
||||
snapshots = self.db.snapshot_get_all(context)
|
||||
else:
|
||||
snapshots = self.db.snapshot_get_all_by_project(
|
||||
context, context.project_id)
|
||||
|
||||
if search_opts:
|
||||
LOG.debug(_("Searching by: %s") % str(search_opts))
|
||||
|
||||
results = []
|
||||
not_found = object()
|
||||
for snapshot in snapshots:
|
||||
for opt, value in search_opts.iteritems():
|
||||
if snapshot.get(opt, not_found) != value:
|
||||
break
|
||||
else:
|
||||
results.append(snapshot)
|
||||
snapshots = results
|
||||
return snapshots
|
||||
|
||||
@wrap_check_policy
|
||||
def check_attach(self, context, volume):
|
||||
# TODO(vish): abstract status checking?
|
||||
if volume['status'] != "available":
|
||||
msg = _("status must be available")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
if volume['attach_status'] == "attached":
|
||||
msg = _("already attached")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
@wrap_check_policy
|
||||
def check_detach(self, context, volume):
|
||||
# TODO(vish): abstract status checking?
|
||||
if volume['status'] == "available":
|
||||
msg = _("already detached")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
@wrap_check_policy
|
||||
def reserve_volume(self, context, volume):
|
||||
#NOTE(jdg): check for Race condition bug 1096983
|
||||
#explicitly get updated ref and check
|
||||
volume = self.db.volume_get(context, volume['id'])
|
||||
if volume['status'] == 'available':
|
||||
self.update(context, volume, {"status": "attaching"})
|
||||
else:
|
||||
msg = _("Volume status must be available to reserve")
|
||||
LOG.error(msg)
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
@wrap_check_policy
|
||||
def unreserve_volume(self, context, volume):
|
||||
if volume['status'] == "attaching":
|
||||
self.update(context, volume, {"status": "available"})
|
||||
|
||||
@wrap_check_policy
|
||||
def begin_detaching(self, context, volume):
|
||||
self.update(context, volume, {"status": "detaching"})
|
||||
|
||||
@wrap_check_policy
|
||||
def roll_detaching(self, context, volume):
|
||||
if volume['status'] == "detaching":
|
||||
self.update(context, volume, {"status": "in-use"})
|
||||
|
||||
@wrap_check_policy
|
||||
def attach(self, context, volume, instance_uuid, mountpoint):
|
||||
return self.volume_rpcapi.attach_volume(context,
|
||||
volume,
|
||||
instance_uuid,
|
||||
mountpoint)
|
||||
|
||||
@wrap_check_policy
|
||||
def detach(self, context, volume):
|
||||
return self.volume_rpcapi.detach_volume(context, volume)
|
||||
|
||||
@wrap_check_policy
|
||||
def initialize_connection(self, context, volume, connector):
|
||||
return self.volume_rpcapi.initialize_connection(context,
|
||||
volume,
|
||||
connector)
|
||||
|
||||
@wrap_check_policy
|
||||
def terminate_connection(self, context, volume, connector, force=False):
|
||||
self.unreserve_volume(context, volume)
|
||||
return self.volume_rpcapi.terminate_connection(context,
|
||||
volume,
|
||||
connector,
|
||||
force)
|
||||
|
||||
def _create_snapshot(self, context,
|
||||
volume, name, description,
|
||||
force=False, metadata=None):
|
||||
check_policy(context, 'create_snapshot', volume)
|
||||
|
||||
if ((not force) and (volume['status'] != "available")):
|
||||
msg = _("must be available")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
try:
|
||||
if FLAGS.no_snapshot_gb_quota:
|
||||
reservations = QUOTAS.reserve(context, snapshots=1)
|
||||
else:
|
||||
reservations = QUOTAS.reserve(context, snapshots=1,
|
||||
gigabytes=volume['size'])
|
||||
except exception.OverQuota as e:
|
||||
overs = e.kwargs['overs']
|
||||
usages = e.kwargs['usages']
|
||||
quotas = e.kwargs['quotas']
|
||||
|
||||
def _consumed(name):
|
||||
return (usages[name]['reserved'] + usages[name]['in_use'])
|
||||
|
||||
if 'gigabytes' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"%(s_size)sG snapshot (%(d_consumed)dG of "
|
||||
"%(d_quota)dG already consumed)")
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
's_size': volume['size'],
|
||||
'd_consumed': _consumed('gigabytes'),
|
||||
'd_quota': quotas['gigabytes']})
|
||||
raise exception.VolumeSizeExceedsAvailableQuota()
|
||||
elif 'snapshots' in overs:
|
||||
msg = _("Quota exceeded for %(s_pid)s, tried to create "
|
||||
"snapshot (%(d_consumed)d snapshots "
|
||||
"already consumed)")
|
||||
|
||||
LOG.warn(msg % {'s_pid': context.project_id,
|
||||
'd_consumed': _consumed('snapshots')})
|
||||
raise exception.SnapshotLimitExceeded(
|
||||
allowed=quotas['snapshots'])
|
||||
|
||||
self._check_metadata_properties(context, metadata)
|
||||
options = {'volume_id': volume['id'],
|
||||
'user_id': context.user_id,
|
||||
'project_id': context.project_id,
|
||||
'status': "creating",
|
||||
'progress': '0%',
|
||||
'volume_size': volume['size'],
|
||||
'display_name': name,
|
||||
'display_description': description,
|
||||
'metadata': metadata}
|
||||
|
||||
try:
|
||||
snapshot = self.db.snapshot_create(context, options)
|
||||
QUOTAS.commit(context, reservations)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
self.db.snapshot_destroy(context, volume['id'])
|
||||
finally:
|
||||
QUOTAS.rollback(context, reservations)
|
||||
|
||||
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
|
||||
|
||||
return snapshot
|
||||
|
||||
def create_snapshot(self, context,
|
||||
volume, name,
|
||||
description, metadata=None):
|
||||
return self._create_snapshot(context, volume, name, description,
|
||||
False, metadata)
|
||||
|
||||
def create_snapshot_force(self, context,
|
||||
volume, name,
|
||||
description, metadata=None):
|
||||
return self._create_snapshot(context, volume, name, description,
|
||||
True, metadata)
|
||||
|
||||
@wrap_check_policy
|
||||
def delete_snapshot(self, context, snapshot, force=False):
|
||||
if not force and snapshot['status'] not in ["available", "error"]:
|
||||
msg = _("Volume Snapshot status must be available or error")
|
||||
raise exception.InvalidSnapshot(reason=msg)
|
||||
self.db.snapshot_update(context, snapshot['id'],
|
||||
{'status': 'deleting'})
|
||||
volume = self.db.volume_get(context, snapshot['volume_id'])
|
||||
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
|
||||
|
||||
@wrap_check_policy
|
||||
def update_snapshot(self, context, snapshot, fields):
|
||||
self.db.snapshot_update(context, snapshot['id'], fields)
|
||||
|
||||
@wrap_check_policy
|
||||
def get_volume_metadata(self, context, volume):
|
||||
"""Get all metadata associated with a volume."""
|
||||
rv = self.db.volume_metadata_get(context, volume['id'])
|
||||
return dict(rv.iteritems())
|
||||
|
||||
@wrap_check_policy
|
||||
def delete_volume_metadata(self, context, volume, key):
|
||||
"""Delete the given metadata item from a volume."""
|
||||
self.db.volume_metadata_delete(context, volume['id'], key)
|
||||
|
||||
def _check_metadata_properties(self, context, metadata=None):
|
||||
if not metadata:
|
||||
metadata = {}
|
||||
|
||||
for k, v in metadata.iteritems():
|
||||
if len(k) == 0:
|
||||
msg = _("Metadata property key blank")
|
||||
LOG.warn(msg)
|
||||
raise exception.InvalidVolumeMetadata(reason=msg)
|
||||
if len(k) > 255:
|
||||
msg = _("Metadata property key greater than 255 characters")
|
||||
LOG.warn(msg)
|
||||
raise exception.InvalidVolumeMetadataSize(reason=msg)
|
||||
if len(v) > 255:
|
||||
msg = _("Metadata property value greater than 255 characters")
|
||||
LOG.warn(msg)
|
||||
raise exception.InvalidVolumeMetadataSize(reason=msg)
|
||||
|
||||
@wrap_check_policy
|
||||
def update_volume_metadata(self, context, volume, metadata, delete=False):
|
||||
"""Updates or creates volume metadata.
|
||||
|
||||
If delete is True, metadata items that are not specified in the
|
||||
`metadata` argument will be deleted.
|
||||
|
||||
"""
|
||||
orig_meta = self.get_volume_metadata(context, volume)
|
||||
if delete:
|
||||
_metadata = metadata
|
||||
else:
|
||||
_metadata = orig_meta.copy()
|
||||
_metadata.update(metadata)
|
||||
|
||||
self._check_metadata_properties(context, _metadata)
|
||||
|
||||
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
|
||||
|
||||
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
||||
|
||||
return _metadata
|
||||
|
||||
def get_volume_metadata_value(self, volume, key):
|
||||
"""Get value of particular metadata key."""
|
||||
metadata = volume.get('volume_metadata')
|
||||
if metadata:
|
||||
for i in volume['volume_metadata']:
|
||||
if i['key'] == key:
|
||||
return i['value']
|
||||
return None
|
||||
|
||||
def get_snapshot_metadata(self, context, snapshot):
|
||||
"""Get all metadata associated with a snapshot."""
|
||||
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
|
||||
return dict(rv.iteritems())
|
||||
|
||||
def delete_snapshot_metadata(self, context, snapshot, key):
|
||||
"""Delete the given metadata item from a snapshot."""
|
||||
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
|
||||
|
||||
def update_snapshot_metadata(self, context,
|
||||
snapshot, metadata,
|
||||
delete=False):
|
||||
"""Updates or creates snapshot metadata.
|
||||
|
||||
If delete is True, metadata items that are not specified in the
|
||||
`metadata` argument will be deleted.
|
||||
|
||||
"""
|
||||
orig_meta = self.get_snapshot_metadata(context, snapshot)
|
||||
if delete:
|
||||
_metadata = metadata
|
||||
else:
|
||||
_metadata = orig_meta.copy()
|
||||
_metadata.update(metadata)
|
||||
|
||||
self._check_metadata_properties(context, _metadata)
|
||||
|
||||
self.db.snapshot_metadata_update(context,
|
||||
snapshot['id'],
|
||||
_metadata,
|
||||
True)
|
||||
|
||||
# TODO(jdg): Implement an RPC call for drivers that may use this info
|
||||
|
||||
return _metadata
|
||||
|
||||
def get_snapshot_metadata_value(self, snapshot, key):
|
||||
pass
|
||||
|
||||
@wrap_check_policy
|
||||
def get_volume_image_metadata(self, context, volume):
|
||||
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
|
||||
return dict(
|
||||
(meta_entry.key, meta_entry.value) for meta_entry in db_data
|
||||
)
|
||||
|
||||
def _check_volume_availability(self, context, volume, force):
|
||||
"""Check if the volume can be used."""
|
||||
if volume['status'] not in ['available', 'in-use']:
|
||||
msg = _('Volume status must be available/in-use.')
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
if not force and 'in-use' == volume['status']:
|
||||
msg = _('Volume status is in-use.')
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
@wrap_check_policy
|
||||
def copy_volume_to_image(self, context, volume, metadata, force):
|
||||
"""Create a new image from the specified volume."""
|
||||
self._check_volume_availability(context, volume, force)
|
||||
|
||||
recv_metadata = self.image_service.create(context, metadata)
|
||||
self.update(context, volume, {'status': 'uploading'})
|
||||
self.volume_rpcapi.copy_volume_to_image(context,
|
||||
volume,
|
||||
recv_metadata)
|
||||
|
||||
response = {"id": volume['id'],
|
||||
"updated_at": volume['updated_at'],
|
||||
"status": 'uploading',
|
||||
"display_description": volume['display_description'],
|
||||
"size": volume['size'],
|
||||
"volume_type": volume['volume_type'],
|
||||
"image_id": recv_metadata['id'],
|
||||
"container_format": recv_metadata['container_format'],
|
||||
"disk_format": recv_metadata['disk_format'],
|
||||
"image_name": recv_metadata.get('name', None)}
|
||||
return response
|
||||
|
||||
|
||||
class HostAPI(base.Base):
|
||||
def __init__(self):
|
||||
super(HostAPI, self).__init__()
|
||||
|
||||
"""Sub-set of the Volume Manager API for managing host operations."""
|
||||
def set_host_enabled(self, context, host, enabled):
|
||||
"""Sets the specified host's ability to accept new volumes."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_host_uptime(self, context, host):
|
||||
"""Returns the result of calling "uptime" on the target host."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def host_power_action(self, context, host, action):
|
||||
raise NotImplementedError()
|
||||
|
||||
def set_host_maintenance(self, context, host, mode):
|
||||
"""Start/Stop host maintenance window. On start, it triggers
|
||||
volume evacuation."""
|
||||
raise NotImplementedError()
|
@ -1,83 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright (c) 2012 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Configuration support for all drivers.
|
||||
|
||||
This module allows support for setting configurations either from default
|
||||
or from a particular FLAGS group, to be able to set multiple configurations
|
||||
for a given set of values.
|
||||
|
||||
For instance, two lvm configurations can be set by naming them in groups as
|
||||
|
||||
[lvm1]
|
||||
volume_group=lvm-group-1
|
||||
...
|
||||
|
||||
[lvm2]
|
||||
volume_group=lvm-group-2
|
||||
...
|
||||
|
||||
And the configuration group name will be passed in so that all calls to
|
||||
configuration.volume_group within that instance will be mapped to the proper
|
||||
named group.
|
||||
|
||||
This class also ensures the implementation's configuration is grafted into the
|
||||
option group. This is due to the way cfg works. All cfg options must be defined
|
||||
and registered in the group in which they are used.
|
||||
"""
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import flags
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Configuration(object):
|
||||
|
||||
def __init__(self, volume_opts, config_group=None):
|
||||
"""This takes care of grafting the implementation's config
|
||||
values into the config group"""
|
||||
self.config_group = config_group
|
||||
|
||||
# set the local conf so that __call__'s know what to use
|
||||
if self.config_group:
|
||||
self._ensure_config_values(volume_opts)
|
||||
self.local_conf = FLAGS._get(self.config_group)
|
||||
else:
|
||||
self.local_conf = FLAGS
|
||||
|
||||
def _ensure_config_values(self, volume_opts):
|
||||
FLAGS.register_opts(volume_opts,
|
||||
group=self.config_group)
|
||||
|
||||
def append_config_values(self, volume_opts):
|
||||
self._ensure_config_values(volume_opts)
|
||||
|
||||
def safe_get(self, value):
|
||||
try:
|
||||
return self.__getattr__(value)
|
||||
except cfg.NoSuchOptError:
|
||||
return None
|
||||
|
||||
def __getattr__(self, value):
|
||||
return getattr(self.local_conf, value)
|
@ -1,551 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Drivers for volumes.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila.image import image_utils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
volume_opts = [
|
||||
cfg.IntOpt('num_shell_tries',
|
||||
default=3,
|
||||
help='number of times to attempt to run flakey shell commands'),
|
||||
cfg.IntOpt('reserved_percentage',
|
||||
default=0,
|
||||
help='The percentage of backend capacity is reserved'),
|
||||
cfg.IntOpt('num_iscsi_scan_tries',
|
||||
default=3,
|
||||
help='number of times to rescan iSCSI target to find volume'),
|
||||
cfg.IntOpt('iscsi_num_targets',
|
||||
default=100,
|
||||
help='Number of iscsi target ids per host'),
|
||||
cfg.StrOpt('iscsi_target_prefix',
|
||||
default='iqn.2010-10.org.openstack:',
|
||||
help='prefix for iscsi volumes'),
|
||||
cfg.StrOpt('iscsi_ip_address',
|
||||
default='$my_ip',
|
||||
help='The port that the iSCSI daemon is listening on'),
|
||||
cfg.IntOpt('iscsi_port',
|
||||
default=3260,
|
||||
help='The port that the iSCSI daemon is listening on'),
|
||||
cfg.StrOpt('volume_backend_name',
|
||||
default=None,
|
||||
help='The backend name for a given driver implementation'), ]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(volume_opts)
|
||||
CONF.import_opt('iscsi_helper', 'manila.brick.iscsi.iscsi')
|
||||
|
||||
|
||||
class VolumeDriver(object):
|
||||
"""Executes commands relating to Volumes."""
|
||||
def __init__(self, execute=utils.execute, *args, **kwargs):
|
||||
# NOTE(vish): db is set by Manager
|
||||
self.db = None
|
||||
self.configuration = kwargs.get('configuration', None)
|
||||
if self.configuration:
|
||||
self.configuration.append_config_values(volume_opts)
|
||||
self.set_execute(execute)
|
||||
self._stats = {}
|
||||
|
||||
def set_execute(self, execute):
|
||||
self._execute = execute
|
||||
|
||||
def _try_execute(self, *command, **kwargs):
|
||||
# NOTE(vish): Volume commands can partially fail due to timing, but
|
||||
# running them a second time on failure will usually
|
||||
# recover nicely.
|
||||
tries = 0
|
||||
while True:
|
||||
try:
|
||||
self._execute(*command, **kwargs)
|
||||
return True
|
||||
except exception.ProcessExecutionError:
|
||||
tries = tries + 1
|
||||
if tries >= self.configuration.num_shell_tries:
|
||||
raise
|
||||
LOG.exception(_("Recovering from a failed execute. "
|
||||
"Try number %s"), tries)
|
||||
time.sleep(tries ** 2)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a volume. Can optionally return a Dictionary of
|
||||
changes to the volume object to be persisted."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Creates a volume from a snapshot."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a clone of the specified volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a snapshot."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def local_path(self, volume):
|
||||
raise NotImplementedError()
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Synchronously recreates an export for a volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Exports the volume. Can optionally return a Dictionary of changes
|
||||
to the volume object to be persisted."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Removes an export for a volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Allow connection to connector and return connection info."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def terminate_connection(self, volume, connector, force=False, **kwargs):
|
||||
"""Disallow connection from connector"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
|
||||
""" Callback for volume attached to instance."""
|
||||
pass
|
||||
|
||||
def detach_volume(self, context, volume_id):
|
||||
""" Callback for volume detached."""
|
||||
pass
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Return the current state of the volume service. If 'refresh' is
|
||||
True, run the update first."""
|
||||
return None
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Any initialization the volume driver does while starting"""
|
||||
pass
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
"""Fetch the image from image_service and write it to the volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
"""Copy the volume to the specified image."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def clone_image(self, volume, image_location):
|
||||
"""Create a volume efficiently from an existing image.
|
||||
|
||||
image_location is a string whose format depends on the
|
||||
image service backend in use. The driver should use it
|
||||
to determine whether cloning is possible.
|
||||
|
||||
Returns a boolean indicating whether cloning occurred
|
||||
"""
|
||||
return False
|
||||
|
||||
def backup_volume(self, context, backup, backup_service):
|
||||
"""Create a new backup from an existing volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def restore_backup(self, context, backup, volume, backup_service):
|
||||
"""Restore an existing backup to a new or existing volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def clear_download(self, context, volume):
|
||||
"""Clean up after an interrupted image copy."""
|
||||
pass
|
||||
|
||||
|
||||
class ISCSIDriver(VolumeDriver):
|
||||
"""Executes commands relating to ISCSI volumes.
|
||||
|
||||
We make use of model provider properties as follows:
|
||||
|
||||
``provider_location``
|
||||
if present, contains the iSCSI target information in the same
|
||||
format as an ietadm discovery
|
||||
i.e. '<ip>:<port>,<portal> <target IQN>'
|
||||
|
||||
``provider_auth``
|
||||
if present, contains a space-separated triple:
|
||||
'<auth method> <auth username> <auth password>'.
|
||||
`CHAP` is the only auth_method in use at the moment.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(ISCSIDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def _do_iscsi_discovery(self, volume):
|
||||
#TODO(justinsb): Deprecate discovery and use stored info
|
||||
#NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
|
||||
LOG.warn(_("ISCSI provider_location not stored, using discovery"))
|
||||
|
||||
volume_name = volume['name']
|
||||
|
||||
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
|
||||
'-t', 'sendtargets', '-p', volume['host'],
|
||||
run_as_root=True)
|
||||
for target in out.splitlines():
|
||||
if (self.configuration.iscsi_ip_address in target
|
||||
and volume_name in target):
|
||||
return target
|
||||
return None
|
||||
|
||||
def _get_iscsi_properties(self, volume):
|
||||
"""Gets iscsi configuration
|
||||
|
||||
We ideally get saved information in the volume entity, but fall back
|
||||
to discovery if need be. Discovery may be completely removed in future
|
||||
The properties are:
|
||||
|
||||
:target_discovered: boolean indicating whether discovery was used
|
||||
|
||||
:target_iqn: the IQN of the iSCSI target
|
||||
|
||||
:target_portal: the portal of the iSCSI target
|
||||
|
||||
:target_lun: the lun of the iSCSI target
|
||||
|
||||
:volume_id: the id of the volume (currently used by xen)
|
||||
|
||||
:auth_method:, :auth_username:, :auth_password:
|
||||
|
||||
the authentication details. Right now, either auth_method is not
|
||||
present meaning no authentication, or auth_method == `CHAP`
|
||||
meaning use CHAP with the specified credentials.
|
||||
"""
|
||||
|
||||
properties = {}
|
||||
|
||||
location = volume['provider_location']
|
||||
|
||||
if location:
|
||||
# provider_location is the same format as iSCSI discovery output
|
||||
properties['target_discovered'] = False
|
||||
else:
|
||||
location = self._do_iscsi_discovery(volume)
|
||||
|
||||
if not location:
|
||||
msg = (_("Could not find iSCSI export for volume %s") %
|
||||
(volume['name']))
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
|
||||
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
|
||||
properties['target_discovered'] = True
|
||||
|
||||
results = location.split(" ")
|
||||
properties['target_portal'] = results[0].split(",")[0]
|
||||
properties['target_iqn'] = results[1]
|
||||
try:
|
||||
properties['target_lun'] = int(results[2])
|
||||
except (IndexError, ValueError):
|
||||
if (self.configuration.volume_driver in
|
||||
['manila.volume.drivers.lvm.LVMISCSIDriver',
|
||||
'manila.volume.drivers.lvm.ThinLVMVolumeDriver'] and
|
||||
self.configuration.iscsi_helper == 'tgtadm'):
|
||||
properties['target_lun'] = 1
|
||||
else:
|
||||
properties['target_lun'] = 0
|
||||
|
||||
properties['volume_id'] = volume['id']
|
||||
|
||||
auth = volume['provider_auth']
|
||||
if auth:
|
||||
(auth_method, auth_username, auth_secret) = auth.split()
|
||||
|
||||
properties['auth_method'] = auth_method
|
||||
properties['auth_username'] = auth_username
|
||||
properties['auth_password'] = auth_secret
|
||||
|
||||
return properties
|
||||
|
||||
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
|
||||
check_exit_code = kwargs.pop('check_exit_code', 0)
|
||||
(out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
|
||||
iscsi_properties['target_iqn'],
|
||||
'-p', iscsi_properties['target_portal'],
|
||||
*iscsi_command, run_as_root=True,
|
||||
check_exit_code=check_exit_code)
|
||||
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
|
||||
(iscsi_command, out, err))
|
||||
return (out, err)
|
||||
|
||||
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
|
||||
**kwargs):
|
||||
iscsi_command = ('--op', 'update', '-n', property_key,
|
||||
'-v', property_value)
|
||||
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Initializes the connection and returns connection info.
|
||||
|
||||
The iscsi driver returns a driver_volume_type of 'iscsi'.
|
||||
The format of the driver data is defined in _get_iscsi_properties.
|
||||
Example return value::
|
||||
|
||||
{
|
||||
'driver_volume_type': 'iscsi'
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
|
||||
'target_portal': '127.0.0.0.1:3260',
|
||||
'volume_id': 1,
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
if CONF.iscsi_helper == 'lioadm':
|
||||
self.tgtadm.initialize_connection(volume, connector)
|
||||
|
||||
iscsi_properties = self._get_iscsi_properties(volume)
|
||||
return {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data': iscsi_properties
|
||||
}
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
pass
|
||||
|
||||
def _get_iscsi_initiator(self):
|
||||
"""Get iscsi initiator name for this machine"""
|
||||
# NOTE openiscsi stores initiator name in a file that
|
||||
# needs root permission to read.
|
||||
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
|
||||
for l in contents.split('\n'):
|
||||
if l.startswith('InitiatorName='):
|
||||
return l[l.index('=') + 1:].strip()
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
"""Fetch the image from image_service and write it to the volume."""
|
||||
LOG.debug(_('copy_image_to_volume %s.') % volume['name'])
|
||||
connector = {'initiator': self._get_iscsi_initiator(),
|
||||
'host': socket.gethostname()}
|
||||
|
||||
iscsi_properties, volume_path = self._attach_volume(
|
||||
context, volume, connector)
|
||||
|
||||
try:
|
||||
image_utils.fetch_to_raw(context,
|
||||
image_service,
|
||||
image_id,
|
||||
volume_path)
|
||||
finally:
|
||||
self.terminate_connection(volume, connector)
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
"""Copy the volume to the specified image."""
|
||||
LOG.debug(_('copy_volume_to_image %s.') % volume['name'])
|
||||
connector = {'initiator': self._get_iscsi_initiator(),
|
||||
'host': socket.gethostname()}
|
||||
|
||||
iscsi_properties, volume_path = self._attach_volume(
|
||||
context, volume, connector)
|
||||
|
||||
try:
|
||||
image_utils.upload_volume(context,
|
||||
image_service,
|
||||
image_meta,
|
||||
volume_path)
|
||||
finally:
|
||||
self.terminate_connection(volume, connector)
|
||||
|
||||
def _attach_volume(self, context, volume, connector):
|
||||
"""Attach the volume."""
|
||||
iscsi_properties = None
|
||||
host_device = None
|
||||
init_conn = self.initialize_connection(volume, connector)
|
||||
iscsi_properties = init_conn['data']
|
||||
|
||||
# code "inspired by" nova/virt/libvirt/volume.py
|
||||
try:
|
||||
self._run_iscsiadm(iscsi_properties, ())
|
||||
except exception.ProcessExecutionError as exc:
|
||||
# iscsiadm returns 21 for "No records found" after version 2.0-871
|
||||
if exc.exit_code in [21, 255]:
|
||||
self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
|
||||
else:
|
||||
raise
|
||||
|
||||
if iscsi_properties.get('auth_method'):
|
||||
self._iscsiadm_update(iscsi_properties,
|
||||
"node.session.auth.authmethod",
|
||||
iscsi_properties['auth_method'])
|
||||
self._iscsiadm_update(iscsi_properties,
|
||||
"node.session.auth.username",
|
||||
iscsi_properties['auth_username'])
|
||||
self._iscsiadm_update(iscsi_properties,
|
||||
"node.session.auth.password",
|
||||
iscsi_properties['auth_password'])
|
||||
|
||||
# NOTE(vish): If we have another lun on the same target, we may
|
||||
# have a duplicate login
|
||||
self._run_iscsiadm(iscsi_properties, ("--login",),
|
||||
check_exit_code=[0, 255])
|
||||
|
||||
self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
|
||||
|
||||
host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
|
||||
(iscsi_properties['target_portal'],
|
||||
iscsi_properties['target_iqn'],
|
||||
iscsi_properties.get('target_lun', 0)))
|
||||
|
||||
tries = 0
|
||||
while not os.path.exists(host_device):
|
||||
if tries >= self.configuration.num_iscsi_scan_tries:
|
||||
raise exception.CinderException(
|
||||
_("iSCSI device not found at %s") % (host_device))
|
||||
|
||||
LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
|
||||
"Will rescan & retry. Try number: %(tries)s") %
|
||||
locals())
|
||||
|
||||
# The rescan isn't documented as being necessary(?), but it helps
|
||||
self._run_iscsiadm(iscsi_properties, ("--rescan",))
|
||||
|
||||
tries = tries + 1
|
||||
if not os.path.exists(host_device):
|
||||
time.sleep(tries ** 2)
|
||||
|
||||
if tries != 0:
|
||||
LOG.debug(_("Found iSCSI node %(host_device)s "
|
||||
"(after %(tries)s rescans)") %
|
||||
locals())
|
||||
|
||||
return iscsi_properties, host_device
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume status.
|
||||
|
||||
If 'refresh' is True, run update the stats first."""
|
||||
if refresh:
|
||||
self._update_volume_status()
|
||||
|
||||
return self._stats
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
|
||||
LOG.debug(_("Updating volume status"))
|
||||
data = {}
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
|
||||
data["vendor_name"] = 'Open Source'
|
||||
data["driver_version"] = '1.0'
|
||||
data["storage_protocol"] = 'iSCSI'
|
||||
|
||||
data['total_capacity_gb'] = 'infinite'
|
||||
data['free_capacity_gb'] = 'infinite'
|
||||
data['reserved_percentage'] = 100
|
||||
data['QoS_support'] = False
|
||||
self._stats = data
|
||||
|
||||
|
||||
class FakeISCSIDriver(ISCSIDriver):
|
||||
"""Logs calls instead of executing."""
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
|
||||
*args, **kwargs)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""No setup necessary in fake mode."""
|
||||
pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
return {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data': {}
|
||||
}
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def fake_execute(cmd, *_args, **_kwargs):
|
||||
"""Execute that simply logs the command."""
|
||||
LOG.debug(_("FAKE ISCSI: %s"), cmd)
|
||||
return (None, None)
|
||||
|
||||
|
||||
class FibreChannelDriver(VolumeDriver):
|
||||
"""Executes commands relating to Fibre Channel volumes."""
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(FibreChannelDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Initializes the connection and returns connection info.
|
||||
|
||||
The driver returns a driver_volume_type of 'fibre_channel'.
|
||||
The target_wwn can be a single entry or a list of wwns that
|
||||
correspond to the list of remote wwn(s) that will export the volume.
|
||||
Example return values:
|
||||
|
||||
{
|
||||
'driver_volume_type': 'fibre_channel'
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'target_lun': 1,
|
||||
'target_wwn': '1234567890123',
|
||||
}
|
||||
}
|
||||
|
||||
or
|
||||
|
||||
{
|
||||
'driver_volume_type': 'fibre_channel'
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'target_lun': 1,
|
||||
'target_wwn': ['1234567890123', '0987654321321'],
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
msg = _("Driver must implement initialize_connection")
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
raise NotImplementedError()
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
raise NotImplementedError()
|
@ -1,22 +0,0 @@
|
||||
# Copyright 2012 OpenStack LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
:mod:`manila.volume.driver` -- Cinder Drivers
|
||||
=====================================================
|
||||
|
||||
.. automodule:: manila.volume.driver
|
||||
:platform: Unix
|
||||
:synopsis: Module containing all the Cinder drivers.
|
||||
"""
|
@ -1,424 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2012 Alyseo.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Desc : Driver to store volumes on Coraid Appliances.
|
||||
Require : Coraid EtherCloud ESM, Coraid VSX and Coraid SRX.
|
||||
Author : Jean-Baptiste RANSY <openstack@alyseo.com>
|
||||
Contrib : Larry Matter <support@coraid.com>
|
||||
"""
|
||||
|
||||
import cookielib
|
||||
import os
|
||||
import time
|
||||
import urllib2
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import jsonutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume import driver
|
||||
from manila.volume import volume_types
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
coraid_opts = [
|
||||
cfg.StrOpt('coraid_esm_address',
|
||||
default='',
|
||||
help='IP address of Coraid ESM'),
|
||||
cfg.StrOpt('coraid_user',
|
||||
default='admin',
|
||||
help='User name to connect to Coraid ESM'),
|
||||
cfg.StrOpt('coraid_group',
|
||||
default='admin',
|
||||
help='Name of group on Coraid ESM to which coraid_user belongs'
|
||||
' (must have admin privilege)'),
|
||||
cfg.StrOpt('coraid_password',
|
||||
default='password',
|
||||
help='Password to connect to Coraid ESM'),
|
||||
cfg.StrOpt('coraid_repository_key',
|
||||
default='coraid_repository',
|
||||
help='Volume Type key name to store ESM Repository Name'),
|
||||
]
|
||||
FLAGS.register_opts(coraid_opts)
|
||||
|
||||
|
||||
class CoraidException(Exception):
|
||||
def __init__(self, message=None, error=None):
|
||||
super(CoraidException, self).__init__(message, error)
|
||||
|
||||
def __str__(self):
|
||||
return '%s: %s' % self.args
|
||||
|
||||
|
||||
class CoraidRESTException(CoraidException):
|
||||
pass
|
||||
|
||||
|
||||
class CoraidESMException(CoraidException):
|
||||
pass
|
||||
|
||||
|
||||
class CoraidRESTClient(object):
|
||||
"""Executes volume driver commands on Coraid ESM EtherCloud Appliance."""
|
||||
|
||||
def __init__(self, ipaddress, user, group, password):
|
||||
self.url = "https://%s:8443/" % ipaddress
|
||||
self.user = user
|
||||
self.group = group
|
||||
self.password = password
|
||||
self.session = False
|
||||
self.cookiejar = cookielib.CookieJar()
|
||||
self.urlOpener = urllib2.build_opener(
|
||||
urllib2.HTTPCookieProcessor(self.cookiejar))
|
||||
LOG.debug(_('Running with CoraidDriver for ESM EtherCLoud'))
|
||||
|
||||
def _login(self):
|
||||
"""Login and Session Handler."""
|
||||
if not self.session or self.session < time.time():
|
||||
url = ('admin?op=login&username=%s&password=%s' %
|
||||
(self.user, self.password))
|
||||
data = 'Login'
|
||||
reply = self._admin_esm_cmd(url, data)
|
||||
if reply.get('state') == 'adminSucceed':
|
||||
self.session = time.time() + 1100
|
||||
msg = _('Update session cookie %(session)s')
|
||||
LOG.debug(msg % dict(session=self.session))
|
||||
self._set_group(reply)
|
||||
return True
|
||||
else:
|
||||
errmsg = reply.get('message', '')
|
||||
msg = _('Message : %(message)s')
|
||||
raise CoraidESMException(msg % dict(message=errmsg))
|
||||
return True
|
||||
|
||||
def _set_group(self, reply):
|
||||
"""Set effective group."""
|
||||
if self.group:
|
||||
group = self.group
|
||||
groupId = self._get_group_id(group, reply)
|
||||
if groupId:
|
||||
url = ('admin?op=setRbacGroup&groupId=%s' % (groupId))
|
||||
data = 'Group'
|
||||
reply = self._admin_esm_cmd(url, data)
|
||||
if reply.get('state') == 'adminSucceed':
|
||||
return True
|
||||
else:
|
||||
errmsg = reply.get('message', '')
|
||||
msg = _('Error while trying to set group: %(message)s')
|
||||
raise CoraidRESTException(msg % dict(message=errmsg))
|
||||
else:
|
||||
msg = _('Unable to find group: %(group)s')
|
||||
raise CoraidESMException(msg % dict(group=group))
|
||||
return True
|
||||
|
||||
def _get_group_id(self, groupName, loginResult):
|
||||
"""Map group name to group ID."""
|
||||
# NOTE(lmatter): All other groups are under the admin group
|
||||
fullName = "admin group:%s" % groupName
|
||||
groupId = False
|
||||
for kid in loginResult['values']:
|
||||
fullPath = kid['fullPath']
|
||||
if fullPath == fullName:
|
||||
return kid['groupId']
|
||||
return False
|
||||
|
||||
def _esm_cmd(self, url=False, data=None):
|
||||
self._login()
|
||||
return self._admin_esm_cmd(url, data)
|
||||
|
||||
def _admin_esm_cmd(self, url=False, data=None):
|
||||
"""
|
||||
_admin_esm_cmd represent the entry point to send requests to ESM
|
||||
Appliance. Send the HTTPS call, get response in JSON
|
||||
convert response into Python Object and return it.
|
||||
"""
|
||||
if url:
|
||||
url = self.url + url
|
||||
|
||||
req = urllib2.Request(url, data)
|
||||
|
||||
try:
|
||||
res = self.urlOpener.open(req).read()
|
||||
except Exception:
|
||||
raise CoraidRESTException(_('ESM urlOpen error'))
|
||||
|
||||
try:
|
||||
res_json = jsonutils.loads(res)
|
||||
except Exception:
|
||||
raise CoraidRESTException(_('JSON Error'))
|
||||
|
||||
return res_json
|
||||
else:
|
||||
raise CoraidRESTException(_('Request without URL'))
|
||||
|
||||
def _configure(self, data):
|
||||
"""In charge of all commands into 'configure'."""
|
||||
url = 'configure'
|
||||
LOG.debug(_('Configure data : %s'), data)
|
||||
response = self._esm_cmd(url, data)
|
||||
LOG.debug(_("Configure response : %s"), response)
|
||||
if response:
|
||||
if response.get('configState') == 'completedSuccessfully':
|
||||
return True
|
||||
else:
|
||||
errmsg = response.get('message', '')
|
||||
msg = _('Message : %(message)s')
|
||||
raise CoraidESMException(msg % dict(message=errmsg))
|
||||
return False
|
||||
|
||||
def _get_volume_info(self, volume_name):
|
||||
"""Retrive volume informations for a given volume name."""
|
||||
url = 'fetch?shelf=cms&orchStrRepo&lv=%s' % (volume_name)
|
||||
try:
|
||||
response = self._esm_cmd(url)
|
||||
info = response[0][1]['reply'][0]
|
||||
return {"pool": info['lv']['containingPool'],
|
||||
"repo": info['repoName'],
|
||||
"vsxidx": info['lv']['lunIndex'],
|
||||
"index": info['lv']['lvStatus']['exportedLun']['lun'],
|
||||
"shelf": info['lv']['lvStatus']['exportedLun']['shelf']}
|
||||
except Exception:
|
||||
msg = _('Unable to retrive volume infos for volume %(volname)s')
|
||||
raise CoraidESMException(msg % dict(volname=volume_name))
|
||||
|
||||
def _get_lun_address(self, volume_name):
|
||||
"""Return AoE Address for a given Volume."""
|
||||
volume_info = self._get_volume_info(volume_name)
|
||||
shelf = volume_info['shelf']
|
||||
lun = volume_info['index']
|
||||
return {'shelf': shelf, 'lun': lun}
|
||||
|
||||
def create_lun(self, volume_name, volume_size, repository):
|
||||
"""Create LUN on Coraid Backend Storage."""
|
||||
data = '[{"addr":"cms","data":"{' \
|
||||
'\\"servers\\":[\\"\\"],' \
|
||||
'\\"repoName\\":\\"%s\\",' \
|
||||
'\\"size\\":\\"%sG\\",' \
|
||||
'\\"lvName\\":\\"%s\\"}",' \
|
||||
'"op":"orchStrLun",' \
|
||||
'"args":"add"}]' % (repository, volume_size,
|
||||
volume_name)
|
||||
return self._configure(data)
|
||||
|
||||
def delete_lun(self, volume_name):
|
||||
"""Delete LUN."""
|
||||
volume_info = self._get_volume_info(volume_name)
|
||||
repository = volume_info['repo']
|
||||
data = '[{"addr":"cms","data":"{' \
|
||||
'\\"repoName\\":\\"%s\\",' \
|
||||
'\\"lvName\\":\\"%s\\"}",' \
|
||||
'"op":"orchStrLun/verified",' \
|
||||
'"args":"delete"}]' % (repository, volume_name)
|
||||
return self._configure(data)
|
||||
|
||||
def create_snapshot(self, volume_name, snapshot_name):
|
||||
"""Create Snapshot."""
|
||||
volume_info = self._get_volume_info(volume_name)
|
||||
repository = volume_info['repo']
|
||||
data = '[{"addr":"cms","data":"{' \
|
||||
'\\"repoName\\":\\"%s\\",' \
|
||||
'\\"lvName\\":\\"%s\\",' \
|
||||
'\\"newLvName\\":\\"%s\\"}",' \
|
||||
'"op":"orchStrLunMods",' \
|
||||
'"args":"addClSnap"}]' % (repository, volume_name,
|
||||
snapshot_name)
|
||||
return self._configure(data)
|
||||
|
||||
def delete_snapshot(self, snapshot_name):
|
||||
"""Delete Snapshot."""
|
||||
snapshot_info = self._get_volume_info(snapshot_name)
|
||||
repository = snapshot_info['repo']
|
||||
data = '[{"addr":"cms","data":"{' \
|
||||
'\\"repoName\\":\\"%s\\",' \
|
||||
'\\"lvName\\":\\"%s\\"}",' \
|
||||
'"op":"orchStrLunMods",' \
|
||||
'"args":"delClSnap"}]' % (repository, snapshot_name)
|
||||
return self._configure(data)
|
||||
|
||||
def create_volume_from_snapshot(self, snapshot_name,
|
||||
volume_name, repository):
|
||||
"""Create a LUN from a Snapshot."""
|
||||
snapshot_info = self._get_volume_info(snapshot_name)
|
||||
snapshot_repo = snapshot_info['repo']
|
||||
data = '[{"addr":"cms","data":"{' \
|
||||
'\\"lvName\\":\\"%s\\",' \
|
||||
'\\"repoName\\":\\"%s\\",' \
|
||||
'\\"newLvName\\":\\"%s\\",' \
|
||||
'\\"newRepoName\\":\\"%s\\"}",' \
|
||||
'"op":"orchStrLunMods",' \
|
||||
'"args":"addClone"}]' % (snapshot_name, snapshot_repo,
|
||||
volume_name, repository)
|
||||
return self._configure(data)
|
||||
|
||||
|
||||
class CoraidDriver(driver.VolumeDriver):
|
||||
"""This is the Class to set in manila.conf (volume_driver)."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CoraidDriver, self).__init__(*args, **kwargs)
|
||||
self.configuration.append_config_values(coraid_opts)
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Initialize the volume driver."""
|
||||
self.esm = CoraidRESTClient(self.configuration.coraid_esm_address,
|
||||
self.configuration.coraid_user,
|
||||
self.configuration.coraid_group,
|
||||
self.configuration.coraid_password)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Return an error if prerequisites aren't met."""
|
||||
if not self.esm._login():
|
||||
raise LookupError(_("Cannot login on Coraid ESM"))
|
||||
|
||||
def _get_repository(self, volume_type):
|
||||
"""
|
||||
Return the ESM Repository from the Volume Type.
|
||||
The ESM Repository is stored into a volume_type_extra_specs key.
|
||||
"""
|
||||
volume_type_id = volume_type['id']
|
||||
repository_key_name = self.configuration.coraid_repository_key
|
||||
repository = volume_types.get_volume_type_extra_specs(
|
||||
volume_type_id, repository_key_name)
|
||||
return repository
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create a Volume."""
|
||||
try:
|
||||
repository = self._get_repository(volume['volume_type'])
|
||||
self.esm.create_lun(volume['name'], volume['size'], repository)
|
||||
except Exception:
|
||||
msg = _('Fail to create volume %(volname)s')
|
||||
LOG.debug(msg % dict(volname=volume['name']))
|
||||
raise
|
||||
# NOTE(jbr_): The manager currently interprets any return as
|
||||
# being the model_update for provider location.
|
||||
# return None to not break it (thank to jgriffith and DuncanT)
|
||||
return
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Delete a Volume."""
|
||||
try:
|
||||
self.esm.delete_lun(volume['name'])
|
||||
except Exception:
|
||||
msg = _('Failed to delete volume %(volname)s')
|
||||
LOG.debug(msg % dict(volname=volume['name']))
|
||||
raise
|
||||
return
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Create a Snapshot."""
|
||||
try:
|
||||
volume_name = (FLAGS.volume_name_template
|
||||
% snapshot['volume_id'])
|
||||
snapshot_name = (FLAGS.snapshot_name_template
|
||||
% snapshot['id'])
|
||||
self.esm.create_snapshot(volume_name, snapshot_name)
|
||||
except Exception, e:
|
||||
msg = _('Failed to Create Snapshot %(snapname)s')
|
||||
LOG.debug(msg % dict(snapname=snapshot_name))
|
||||
raise
|
||||
return
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Delete a Snapshot."""
|
||||
try:
|
||||
snapshot_name = (FLAGS.snapshot_name_template
|
||||
% snapshot['id'])
|
||||
self.esm.delete_snapshot(snapshot_name)
|
||||
except Exception:
|
||||
msg = _('Failed to Delete Snapshot %(snapname)s')
|
||||
LOG.debug(msg % dict(snapname=snapshot_name))
|
||||
raise
|
||||
return
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Create a Volume from a Snapshot."""
|
||||
try:
|
||||
snapshot_name = (FLAGS.snapshot_name_template
|
||||
% snapshot['id'])
|
||||
repository = self._get_repository(volume['volume_type'])
|
||||
self.esm.create_volume_from_snapshot(snapshot_name,
|
||||
volume['name'],
|
||||
repository)
|
||||
except Exception:
|
||||
msg = _('Failed to Create Volume from Snapshot %(snapname)s')
|
||||
LOG.debug(msg % dict(snapname=snapshot_name))
|
||||
raise
|
||||
return
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Return connection information."""
|
||||
try:
|
||||
infos = self.esm._get_lun_address(volume['name'])
|
||||
shelf = infos['shelf']
|
||||
lun = infos['lun']
|
||||
|
||||
aoe_properties = {
|
||||
'target_shelf': shelf,
|
||||
'target_lun': lun,
|
||||
}
|
||||
return {
|
||||
'driver_volume_type': 'aoe',
|
||||
'data': aoe_properties,
|
||||
}
|
||||
except Exception:
|
||||
msg = _('Failed to Initialize Connection. '
|
||||
'Volume Name: %(volname)s '
|
||||
'Shelf: %(shelf)s, '
|
||||
'Lun: %(lun)s')
|
||||
LOG.debug(msg % dict(volname=volume['name'],
|
||||
shelf=shelf,
|
||||
lun=lun))
|
||||
raise
|
||||
return
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Return Volume Stats."""
|
||||
data = {'driver_version': '1.0',
|
||||
'free_capacity_gb': 'unknown',
|
||||
'reserved_percentage': 0,
|
||||
'storage_protocol': 'aoe',
|
||||
'total_capacity_gb': 'unknown',
|
||||
'vendor_name': 'Coraid'}
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data['volume_backend_name'] = backend_name or 'EtherCloud ESM'
|
||||
return data
|
||||
|
||||
def local_path(self, volume):
|
||||
pass
|
||||
|
||||
def create_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
pass
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
pass
|
||||
|
||||
def attach_volume(self, context, volume, instance_uuid, mountpoint):
|
||||
pass
|
||||
|
||||
def detach_volume(self, context, volume):
|
||||
pass
|
@ -1,12 +0,0 @@
|
||||
<?xml version='1.0' encoding='UTF-8'?>
|
||||
<EMC>
|
||||
<!--StorageType is a thin pool name-->
|
||||
<StorageType>gold</StorageType>
|
||||
<!--MaskingView is needed only for VMAX/VMAXe-->
|
||||
<MaskingView>openstack</MaskingView>
|
||||
<!--Credentials of ECOM packaged with SMI-S-->
|
||||
<EcomServerIp>x.x.x.x</EcomServerIp>
|
||||
<EcomServerPort>xxxx</EcomServerPort>
|
||||
<EcomUserName>xxxxxxxx</EcomUserName>
|
||||
<EcomPassword>xxxxxxxx</EcomPassword>
|
||||
</EMC>
|
File diff suppressed because it is too large
Load Diff
@ -1,246 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 EMC Corporation.
|
||||
# Copyright (c) 2012 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
from manila.volume import driver
|
||||
from manila.volume.drivers.emc import emc_smis_common
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class EMCSMISISCSIDriver(driver.ISCSIDriver):
|
||||
"""EMC ISCSI Drivers for VMAX and VNX using SMI-S."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
||||
super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
|
||||
self.common = emc_smis_common.EMCSMISCommon(
|
||||
'iSCSI',
|
||||
configuration=self.configuration)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
pass
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a EMC(VMAX/VNX) volume."""
|
||||
self.common.create_volume(volume)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Creates a volume from a snapshot."""
|
||||
self.common.create_volume_from_snapshot(volume, snapshot)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a cloned volume."""
|
||||
self.common.create_cloned_volume(volume, src_vref)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes an EMC volume."""
|
||||
self.common.delete_volume(volume)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot."""
|
||||
self.common.create_snapshot(snapshot)
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a snapshot."""
|
||||
self.common.delete_snapshot(snapshot)
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Driver entry point to get the export info for an existing volume."""
|
||||
pass
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Driver entry point to get the export info for a new volume."""
|
||||
return self.common.create_export(context, volume)
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Driver entry point to remove an export for a volume."""
|
||||
pass
|
||||
|
||||
def check_for_export(self, context, volume_id):
|
||||
"""Make sure volume is exported."""
|
||||
pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Initializes the connection and returns connection info.
|
||||
|
||||
The iscsi driver returns a driver_volume_type of 'iscsi'.
|
||||
the format of the driver data is defined in _get_iscsi_properties.
|
||||
Example return value::
|
||||
|
||||
{
|
||||
'driver_volume_type': 'iscsi'
|
||||
'data': {
|
||||
'target_discovered': True,
|
||||
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
|
||||
'target_portal': '127.0.0.0.1:3260',
|
||||
'volume_id': 1,
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
self.common.initialize_connection(volume, connector)
|
||||
|
||||
iscsi_properties = self._get_iscsi_properties(volume)
|
||||
return {
|
||||
'driver_volume_type': 'iscsi',
|
||||
'data': iscsi_properties
|
||||
}
|
||||
|
||||
def _do_iscsi_discovery(self, volume):
|
||||
|
||||
LOG.warn(_("ISCSI provider_location not stored, using discovery"))
|
||||
|
||||
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
|
||||
'-t', 'sendtargets', '-p',
|
||||
self.configuration.iscsi_ip_address,
|
||||
run_as_root=True)
|
||||
targets = []
|
||||
for target in out.splitlines():
|
||||
targets.append(target)
|
||||
|
||||
return targets
|
||||
|
||||
def _get_iscsi_properties(self, volume):
|
||||
"""Gets iscsi configuration.
|
||||
|
||||
We ideally get saved information in the volume entity, but fall back
|
||||
to discovery if need be. Discovery may be completely removed in future
|
||||
The properties are:
|
||||
|
||||
:target_discovered: boolean indicating whether discovery was used
|
||||
|
||||
:target_iqn: the IQN of the iSCSI target
|
||||
|
||||
:target_portal: the portal of the iSCSI target
|
||||
|
||||
:target_lun: the lun of the iSCSI target
|
||||
|
||||
:volume_id: the id of the volume (currently used by xen)
|
||||
|
||||
:auth_method:, :auth_username:, :auth_password:
|
||||
|
||||
the authentication details. Right now, either auth_method is not
|
||||
present meaning no authentication, or auth_method == `CHAP`
|
||||
meaning use CHAP with the specified credentials.
|
||||
"""
|
||||
properties = {}
|
||||
|
||||
location = self._do_iscsi_discovery(volume)
|
||||
if not location:
|
||||
raise exception.InvalidVolume(_("Could not find iSCSI export "
|
||||
" for volume %s") %
|
||||
(volume['name']))
|
||||
|
||||
LOG.debug(_("ISCSI Discovery: Found %s") % (location))
|
||||
properties['target_discovered'] = True
|
||||
|
||||
device_info = self.common.find_device_number(volume)
|
||||
if device_info is None or device_info['hostlunid'] is None:
|
||||
exception_message = (_("Cannot find device number for volume %s")
|
||||
% volume['name'])
|
||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||
|
||||
device_number = device_info['hostlunid']
|
||||
storage_system = device_info['storagesystem']
|
||||
|
||||
# sp is "SP_A" or "SP_B"
|
||||
sp = device_info['owningsp']
|
||||
endpoints = []
|
||||
if sp:
|
||||
# endpointss example:
|
||||
# [iqn.1992-04.com.emc:cx.apm00123907237.a8,
|
||||
# iqn.1992-04.com.emc:cx.apm00123907237.a9]
|
||||
endpoints = self.common._find_iscsi_protocol_endpoints(
|
||||
sp, storage_system)
|
||||
|
||||
foundEndpoint = False
|
||||
for loc in location:
|
||||
results = loc.split(" ")
|
||||
properties['target_portal'] = results[0].split(",")[0]
|
||||
properties['target_iqn'] = results[1]
|
||||
# owning sp is None for VMAX
|
||||
# for VNX, find the target_iqn that matches the endpoint
|
||||
# target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8
|
||||
# or iqn.1992-04.com.emc:cx.apm00123907237.b8
|
||||
if not sp:
|
||||
break
|
||||
for endpoint in endpoints:
|
||||
if properties['target_iqn'] == endpoint:
|
||||
LOG.debug(_("Found iSCSI endpoint: %s") % endpoint)
|
||||
foundEndpoint = True
|
||||
break
|
||||
if foundEndpoint:
|
||||
break
|
||||
|
||||
if sp and not foundEndpoint:
|
||||
LOG.warn(_("ISCSI endpoint not found for SP %(sp)s on "
|
||||
"storage system %(storage)s.")
|
||||
% {'sp': sp,
|
||||
'storage': storage_system})
|
||||
|
||||
properties['target_lun'] = device_number
|
||||
|
||||
properties['volume_id'] = volume['id']
|
||||
|
||||
auth = volume['provider_auth']
|
||||
if auth:
|
||||
(auth_method, auth_username, auth_secret) = auth.split()
|
||||
|
||||
properties['auth_method'] = auth_method
|
||||
properties['auth_username'] = auth_username
|
||||
properties['auth_password'] = auth_secret
|
||||
|
||||
LOG.debug(_("ISCSI properties: %s") % (properties))
|
||||
|
||||
return properties
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
"""Disallow connection from connector."""
|
||||
self.common.terminate_connection(volume, connector)
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume status.
|
||||
|
||||
If 'refresh' is True, run update the stats first.
|
||||
"""
|
||||
if refresh:
|
||||
self.update_volume_status()
|
||||
|
||||
return self._stats
|
||||
|
||||
def update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
LOG.debug(_("Updating volume status"))
|
||||
data = self.common.update_volume_status()
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data['volume_backend_name'] = backend_name or 'EMCSMISISCSIDriver'
|
||||
data['storage_protocol'] = 'iSCSI'
|
||||
self._stats = data
|
@ -1,283 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import errno
|
||||
import os
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume.drivers import nfs
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
volume_opts = [
|
||||
cfg.StrOpt('glusterfs_shares_config',
|
||||
default='/etc/manila/glusterfs_shares',
|
||||
help='File with the list of available gluster shares'),
|
||||
cfg.StrOpt('glusterfs_mount_point_base',
|
||||
default='$state_path/mnt',
|
||||
help='Base dir containing mount points for gluster shares'),
|
||||
cfg.StrOpt('glusterfs_disk_util',
|
||||
default='df',
|
||||
help='Use du or df for free space calculation'),
|
||||
cfg.BoolOpt('glusterfs_sparsed_volumes',
|
||||
default=True,
|
||||
help=('Create volumes as sparsed files which take no space.'
|
||||
'If set to False volume is created as regular file.'
|
||||
'In such case volume creation takes a lot of time.'))]
|
||||
VERSION = '1.0'
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.register_opts(volume_opts)
|
||||
|
||||
|
||||
class GlusterfsDriver(nfs.RemoteFsDriver):
|
||||
"""Gluster based manila driver. Creates file on Gluster share for using it
|
||||
as block device on hypervisor."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(GlusterfsDriver, self).__init__(*args, **kwargs)
|
||||
self.configuration.append_config_values(volume_opts)
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Any initialization the volume driver does while starting."""
|
||||
super(GlusterfsDriver, self).do_setup(context)
|
||||
|
||||
config = self.configuration.glusterfs_shares_config
|
||||
if not config:
|
||||
msg = (_("There's no Gluster config file configured (%s)") %
|
||||
'glusterfs_shares_config')
|
||||
LOG.warn(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
if not os.path.exists(config):
|
||||
msg = (_("Gluster config file at %(config)s doesn't exist") %
|
||||
locals())
|
||||
LOG.warn(msg)
|
||||
raise exception.GlusterfsException(msg)
|
||||
|
||||
try:
|
||||
self._execute('mount.glusterfs', check_exit_code=False)
|
||||
except OSError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
raise exception.GlusterfsException(
|
||||
_('mount.glusterfs is not installed'))
|
||||
else:
|
||||
raise
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Just to override parent behavior."""
|
||||
pass
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a volume."""
|
||||
|
||||
self._ensure_shares_mounted()
|
||||
|
||||
volume['provider_location'] = self._find_share(volume['size'])
|
||||
|
||||
LOG.info(_('casted to %s') % volume['provider_location'])
|
||||
|
||||
self._do_create_volume(volume)
|
||||
|
||||
return {'provider_location': volume['provider_location']}
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a logical volume."""
|
||||
|
||||
if not volume['provider_location']:
|
||||
LOG.warn(_('Volume %s does not have provider_location specified, '
|
||||
'skipping'), volume['name'])
|
||||
return
|
||||
|
||||
self._ensure_share_mounted(volume['provider_location'])
|
||||
|
||||
mounted_path = self.local_path(volume)
|
||||
|
||||
self._execute('rm', '-f', mounted_path, run_as_root=True)
|
||||
|
||||
def ensure_export(self, ctx, volume):
|
||||
"""Synchronously recreates an export for a logical volume."""
|
||||
self._ensure_share_mounted(volume['provider_location'])
|
||||
|
||||
def create_export(self, ctx, volume):
|
||||
"""Exports the volume. Can optionally return a Dictionary of changes
|
||||
to the volume object to be persisted."""
|
||||
pass
|
||||
|
||||
def remove_export(self, ctx, volume):
|
||||
"""Removes an export for a logical volume."""
|
||||
pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Allow connection to connector and return connection info."""
|
||||
data = {'export': volume['provider_location'],
|
||||
'name': volume['name']}
|
||||
return {
|
||||
'driver_volume_type': 'glusterfs',
|
||||
'data': data
|
||||
}
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
"""Disallow connection from connector."""
|
||||
pass
|
||||
|
||||
def _do_create_volume(self, volume):
|
||||
"""Create a volume on given glusterfs_share.
|
||||
:param volume: volume reference
|
||||
"""
|
||||
volume_path = self.local_path(volume)
|
||||
volume_size = volume['size']
|
||||
|
||||
if self.configuration.glusterfs_sparsed_volumes:
|
||||
self._create_sparsed_file(volume_path, volume_size)
|
||||
else:
|
||||
self._create_regular_file(volume_path, volume_size)
|
||||
|
||||
self._set_rw_permissions_for_all(volume_path)
|
||||
|
||||
def _ensure_shares_mounted(self):
|
||||
"""Look for GlusterFS shares in the flags and try to mount them
|
||||
locally."""
|
||||
self._mounted_shares = []
|
||||
|
||||
for share in self._load_shares_config():
|
||||
try:
|
||||
self._ensure_share_mounted(share)
|
||||
self._mounted_shares.append(share)
|
||||
except Exception, exc:
|
||||
LOG.warning(_('Exception during mounting %s') % (exc,))
|
||||
|
||||
LOG.debug('Available shares %s' % str(self._mounted_shares))
|
||||
|
||||
def _load_shares_config(self):
|
||||
return [share.strip() for share
|
||||
in open(self.configuration.glusterfs_shares_config)
|
||||
if share and not share.startswith('#')]
|
||||
|
||||
def _ensure_share_mounted(self, glusterfs_share):
|
||||
"""Mount GlusterFS share.
|
||||
:param glusterfs_share:
|
||||
"""
|
||||
mount_path = self._get_mount_point_for_share(glusterfs_share)
|
||||
self._mount_glusterfs(glusterfs_share, mount_path, ensure=True)
|
||||
|
||||
def _find_share(self, volume_size_for):
|
||||
"""Choose GlusterFS share among available ones for given volume size.
|
||||
Current implementation looks for greatest capacity.
|
||||
:param volume_size_for: int size in GB
|
||||
"""
|
||||
|
||||
if not self._mounted_shares:
|
||||
raise exception.GlusterfsNoSharesMounted()
|
||||
|
||||
greatest_size = 0
|
||||
greatest_share = None
|
||||
|
||||
for glusterfs_share in self._mounted_shares:
|
||||
capacity = self._get_available_capacity(glusterfs_share)[0]
|
||||
if capacity > greatest_size:
|
||||
greatest_share = glusterfs_share
|
||||
greatest_size = capacity
|
||||
|
||||
if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
|
||||
raise exception.GlusterfsNoSuitableShareFound(
|
||||
volume_size=volume_size_for)
|
||||
return greatest_share
|
||||
|
||||
def _get_mount_point_for_share(self, glusterfs_share):
|
||||
"""Return mount point for share.
|
||||
:param glusterfs_share: example 172.18.194.100:/var/glusterfs
|
||||
"""
|
||||
return os.path.join(self.configuration.glusterfs_mount_point_base,
|
||||
self._get_hash_str(glusterfs_share))
|
||||
|
||||
def _get_available_capacity(self, glusterfs_share):
|
||||
"""Calculate available space on the GlusterFS share.
|
||||
:param glusterfs_share: example 172.18.194.100:/var/glusterfs
|
||||
"""
|
||||
mount_point = self._get_mount_point_for_share(glusterfs_share)
|
||||
|
||||
out, _ = self._execute('df', '--portability', '--block-size', '1',
|
||||
mount_point, run_as_root=True)
|
||||
out = out.splitlines()[1]
|
||||
|
||||
available = 0
|
||||
|
||||
size = int(out.split()[1])
|
||||
if self.configuration.glusterfs_disk_util == 'df':
|
||||
available = int(out.split()[3])
|
||||
else:
|
||||
out, _ = self._execute('du', '-sb', '--apparent-size',
|
||||
'--exclude', '*snapshot*', mount_point,
|
||||
run_as_root=True)
|
||||
used = int(out.split()[0])
|
||||
available = size - used
|
||||
|
||||
return available, size
|
||||
|
||||
def _mount_glusterfs(self, glusterfs_share, mount_path, ensure=False):
|
||||
"""Mount GlusterFS share to mount path."""
|
||||
self._execute('mkdir', '-p', mount_path)
|
||||
|
||||
try:
|
||||
self._execute('mount', '-t', 'glusterfs', glusterfs_share,
|
||||
mount_path, run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
if ensure and 'already mounted' in exc.stderr:
|
||||
LOG.warn(_("%s is already mounted"), glusterfs_share)
|
||||
else:
|
||||
raise
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume stats.
|
||||
|
||||
If 'refresh' is True, update the stats first."""
|
||||
if refresh or not self._stats:
|
||||
self._update_volume_stats()
|
||||
|
||||
return self._stats
|
||||
|
||||
def _update_volume_stats(self):
|
||||
"""Retrieve stats info from volume group."""
|
||||
|
||||
data = {}
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data['volume_backend_name'] = backend_name or 'GlusterFS'
|
||||
data['vendor_name'] = 'Open Source'
|
||||
data['driver_version'] = VERSION
|
||||
data['storage_protocol'] = 'glusterfs'
|
||||
|
||||
self._ensure_shares_mounted()
|
||||
|
||||
global_capacity = 0
|
||||
global_free = 0
|
||||
for nfs_share in self._mounted_shares:
|
||||
free, capacity = self._get_available_capacity(nfs_share)
|
||||
global_capacity += capacity
|
||||
global_free += free
|
||||
|
||||
data['total_capacity_gb'] = global_capacity / 1024.0 ** 3
|
||||
data['free_capacity_gb'] = global_free / 1024.0 ** 3
|
||||
data['reserved_percentage'] = 0
|
||||
data['QoS_support'] = False
|
||||
self._stats = data
|
@ -1,15 +0,0 @@
|
||||
# Copyright (c) 2012 Huawei Technologies Co., Ltd.
|
||||
# Copyright (c) 2012 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
@ -1,34 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<config>
|
||||
<Storage>
|
||||
<ControllerIP0>x.x.x.x</ControllerIP0>
|
||||
<ControllerIP1>x.x.x.x</ControllerIP1>
|
||||
<UserName>xxxxxx</UserName>
|
||||
<UserPassword>xxxxxx</UserPassword>
|
||||
</Storage>
|
||||
<LUN>
|
||||
<!--LUN Type: Thick, or Thin. Default: Thick-->
|
||||
<LUNType>Thick</LUNType>
|
||||
<!--The stripe size can be 4, 8, 16, 32, 64, 128, 256, and 512 in the unit of KB.Default: 64-->
|
||||
<StripUnitSize>64</StripUnitSize>
|
||||
<!--The write cache policy of the LUN:-->
|
||||
<!--1 specifies write back, 2 specifies write through, 3 specifies write back mandatorily.Default: 1-->
|
||||
<WriteType>1</WriteType>
|
||||
<!--Enables or disbles cahce mirroring: 0 Disable, or 1 Enable. Default: Enable-->
|
||||
<MirrorSwitch>1</MirrorSwitch>
|
||||
<!--The prefetch policy of the reading cache:-->
|
||||
<!--prefetch type 0 specifies non-preftch and prefetch value is 0,-->
|
||||
<!--prefetch type 1 specifies constant prefetch and prefetch value ranges from 0 to 1024 in the unit of KB,-->
|
||||
<!--prefetch type 2 specifies variable prefetch and value specifies cache prefetch multiple ranges from 0 to 65535,-->
|
||||
<!--prefetch type 3 specifies intelligent prefetch Intelligent and Vaule is 0,-->
|
||||
<!--Default: prefetch type 0 and prefetch value 0-->
|
||||
<Prefetch Type="0" Value="0"/>
|
||||
<StoragePool Name="xxxxxx"/>
|
||||
<StoragePool Name="xxxxxx"/>
|
||||
</LUN>
|
||||
<iSCSI>
|
||||
<DefaultTargetIP>x.x.x.x</DefaultTargetIP>
|
||||
<Initiator Name="xxxxxx" TargetIP="x.x.x.x"/>
|
||||
<Initiator Name="xxxxxx" TargetIP="x.x.x.x"/>
|
||||
</iSCSI>
|
||||
</config>
|
File diff suppressed because it is too large
Load Diff
@ -1,688 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Driver for Linux servers running LVM.
|
||||
|
||||
"""
|
||||
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila.brick.iscsi import iscsi
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.image import image_utils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
from manila.volume import driver
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
volume_opts = [
|
||||
cfg.StrOpt('volume_group',
|
||||
default='manila-volumes',
|
||||
help='Name for the VG that will contain exported volumes'),
|
||||
cfg.StrOpt('volume_clear',
|
||||
default='zero',
|
||||
help='Method used to wipe old volumes (valid options are: '
|
||||
'none, zero, shred)'),
|
||||
cfg.IntOpt('volume_clear_size',
|
||||
default=0,
|
||||
help='Size in MiB to wipe at start of old volumes. 0 => all'),
|
||||
cfg.StrOpt('volume_dd_blocksize',
|
||||
default='1M',
|
||||
help='The default block size used when clearing volumes'),
|
||||
cfg.StrOpt('pool_size',
|
||||
default=None,
|
||||
help='Size of thin provisioning pool '
|
||||
'(None uses entire manila VG)'),
|
||||
cfg.IntOpt('lvm_mirrors',
|
||||
default=0,
|
||||
help='If set, create lvms with multiple mirrors. Note that '
|
||||
'this requires lvm_mirrors + 2 pvs with available space'),
|
||||
]
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.register_opts(volume_opts)
|
||||
|
||||
|
||||
class LVMVolumeDriver(driver.VolumeDriver):
|
||||
"""Executes commands relating to Volumes."""
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
|
||||
self.configuration.append_config_values(volume_opts)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met"""
|
||||
out, err = self._execute('vgs', '--noheadings', '-o', 'name',
|
||||
run_as_root=True)
|
||||
volume_groups = out.split()
|
||||
if self.configuration.volume_group not in volume_groups:
|
||||
exception_message = (_("volume group %s doesn't exist")
|
||||
% self.configuration.volume_group)
|
||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||
|
||||
def _create_volume(self, volume_name, sizestr):
|
||||
cmd = ['lvcreate', '-L', sizestr, '-n', volume_name,
|
||||
self.configuration.volume_group]
|
||||
if self.configuration.lvm_mirrors:
|
||||
cmd += ['-m', self.configuration.lvm_mirrors, '--nosync']
|
||||
terras = int(sizestr[:-1]) / 1024.0
|
||||
if terras >= 1.5:
|
||||
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
|
||||
# NOTE(vish): Next power of two for region size. See:
|
||||
# http://red.ht/U2BPOD
|
||||
cmd += ['-R', str(rsize)]
|
||||
|
||||
self._try_execute(*cmd, run_as_root=True)
|
||||
|
||||
def _copy_volume(self, srcstr, deststr, size_in_g, clearing=False):
|
||||
# Use O_DIRECT to avoid thrashing the system buffer cache
|
||||
extra_flags = ['iflag=direct', 'oflag=direct']
|
||||
|
||||
# Check whether O_DIRECT is supported
|
||||
try:
|
||||
self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
|
||||
*extra_flags, run_as_root=True)
|
||||
except exception.ProcessExecutionError:
|
||||
extra_flags = []
|
||||
|
||||
# If the volume is being unprovisioned then
|
||||
# request the data is persisted before returning,
|
||||
# so that it's not discarded from the cache.
|
||||
if clearing and not extra_flags:
|
||||
extra_flags.append('conv=fdatasync')
|
||||
|
||||
# Perform the copy
|
||||
self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
|
||||
'count=%d' % (size_in_g * 1024),
|
||||
'bs=%s' % self.configuration.volume_dd_blocksize,
|
||||
*extra_flags, run_as_root=True)
|
||||
|
||||
def _volume_not_present(self, volume_name):
|
||||
path_name = '%s/%s' % (self.configuration.volume_group, volume_name)
|
||||
try:
|
||||
self._try_execute('lvdisplay', path_name, run_as_root=True)
|
||||
except Exception as e:
|
||||
# If the volume isn't present
|
||||
return True
|
||||
return False
|
||||
|
||||
def _delete_volume(self, volume, size_in_g):
|
||||
"""Deletes a logical volume."""
|
||||
# zero out old volumes to prevent data leaking between users
|
||||
# TODO(ja): reclaiming space should be done lazy and low priority
|
||||
dev_path = self.local_path(volume)
|
||||
if os.path.exists(dev_path):
|
||||
self.clear_volume(volume)
|
||||
|
||||
self._try_execute('lvremove', '-f', "%s/%s" %
|
||||
(self.configuration.volume_group,
|
||||
self._escape_snapshot(volume['name'])),
|
||||
run_as_root=True)
|
||||
|
||||
def _sizestr(self, size_in_g):
|
||||
if int(size_in_g) == 0:
|
||||
return '100M'
|
||||
return '%sG' % size_in_g
|
||||
|
||||
# Linux LVM reserves name that starts with snapshot, so that
|
||||
# such volume name can't be created. Mangle it.
|
||||
def _escape_snapshot(self, snapshot_name):
|
||||
if not snapshot_name.startswith('snapshot'):
|
||||
return snapshot_name
|
||||
return '_' + snapshot_name
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a logical volume. Can optionally return a Dictionary of
|
||||
changes to the volume object to be persisted."""
|
||||
self._create_volume(volume['name'], self._sizestr(volume['size']))
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Creates a volume from a snapshot."""
|
||||
self._create_volume(volume['name'], self._sizestr(volume['size']))
|
||||
self._copy_volume(self.local_path(snapshot), self.local_path(volume),
|
||||
snapshot['volume_size'])
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a logical volume."""
|
||||
if self._volume_not_present(volume['name']):
|
||||
# If the volume isn't present, then don't attempt to delete
|
||||
return True
|
||||
|
||||
# TODO(yamahata): lvm can't delete origin volume only without
|
||||
# deleting derived snapshots. Can we do something fancy?
|
||||
out, err = self._execute('lvdisplay', '--noheading',
|
||||
'-C', '-o', 'Attr',
|
||||
'%s/%s' % (self.configuration.volume_group,
|
||||
volume['name']),
|
||||
run_as_root=True)
|
||||
# fake_execute returns None resulting unit test error
|
||||
if out:
|
||||
out = out.strip()
|
||||
if (out[0] == 'o') or (out[0] == 'O'):
|
||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||
|
||||
self._delete_volume(volume, volume['size'])
|
||||
|
||||
def clear_volume(self, volume):
|
||||
"""unprovision old volumes to prevent data leaking between users."""
|
||||
|
||||
vol_path = self.local_path(volume)
|
||||
size_in_g = volume.get('size')
|
||||
size_in_m = self.configuration.volume_clear_size
|
||||
|
||||
if not size_in_g:
|
||||
LOG.warning(_("Size for volume: %s not found, "
|
||||
"skipping secure delete.") % volume['name'])
|
||||
return
|
||||
|
||||
if self.configuration.volume_clear == 'none':
|
||||
return
|
||||
|
||||
LOG.info(_("Performing secure delete on volume: %s") % volume['id'])
|
||||
|
||||
if self.configuration.volume_clear == 'zero':
|
||||
if size_in_m == 0:
|
||||
return self._copy_volume('/dev/zero',
|
||||
vol_path, size_in_g,
|
||||
clearing=True)
|
||||
else:
|
||||
clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % size_in_m]
|
||||
elif self.configuration.volume_clear == 'shred':
|
||||
clear_cmd = ['shred', '-n3']
|
||||
if size_in_m:
|
||||
clear_cmd.append('-s%dMiB' % size_in_m)
|
||||
else:
|
||||
LOG.error(_("Error unrecognized volume_clear option: %s"),
|
||||
self.configuration.volume_clear)
|
||||
return
|
||||
|
||||
clear_cmd.append(vol_path)
|
||||
self._execute(*clear_cmd, run_as_root=True)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot."""
|
||||
orig_lv_name = "%s/%s" % (self.configuration.volume_group,
|
||||
snapshot['volume_name'])
|
||||
self._try_execute('lvcreate', '-L',
|
||||
self._sizestr(snapshot['volume_size']),
|
||||
'--name', self._escape_snapshot(snapshot['name']),
|
||||
'--snapshot', orig_lv_name, run_as_root=True)
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a snapshot."""
|
||||
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
|
||||
# If the snapshot isn't present, then don't attempt to delete
|
||||
LOG.warning(_("snapshot: %s not found, "
|
||||
"skipping delete operations") % snapshot['name'])
|
||||
return True
|
||||
|
||||
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
|
||||
# it's quite slow.
|
||||
self._delete_volume(snapshot, snapshot['volume_size'])
|
||||
|
||||
def local_path(self, volume):
|
||||
# NOTE(vish): stops deprecation warning
|
||||
escaped_group = self.configuration.volume_group.replace('-', '--')
|
||||
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
|
||||
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
"""Fetch the image from image_service and write it to the volume."""
|
||||
image_utils.fetch_to_raw(context,
|
||||
image_service,
|
||||
image_id,
|
||||
self.local_path(volume))
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
"""Copy the volume to the specified image."""
|
||||
image_utils.upload_volume(context,
|
||||
image_service,
|
||||
image_meta,
|
||||
self.local_path(volume))
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a clone of the specified volume."""
|
||||
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
|
||||
volume_name = FLAGS.volume_name_template % src_vref['id']
|
||||
temp_id = 'tmp-snap-%s' % src_vref['id']
|
||||
temp_snapshot = {'volume_name': volume_name,
|
||||
'size': src_vref['size'],
|
||||
'volume_size': src_vref['size'],
|
||||
'name': 'clone-snap-%s' % src_vref['id'],
|
||||
'id': temp_id}
|
||||
self.create_snapshot(temp_snapshot)
|
||||
self._create_volume(volume['name'], self._sizestr(volume['size']))
|
||||
try:
|
||||
self._copy_volume(self.local_path(temp_snapshot),
|
||||
self.local_path(volume),
|
||||
src_vref['size'])
|
||||
finally:
|
||||
self.delete_snapshot(temp_snapshot)
|
||||
|
||||
def clone_image(self, volume, image_location):
|
||||
return False
|
||||
|
||||
def backup_volume(self, context, backup, backup_service):
|
||||
"""Create a new backup from an existing volume."""
|
||||
volume = self.db.volume_get(context, backup['volume_id'])
|
||||
volume_path = self.local_path(volume)
|
||||
with utils.temporary_chown(volume_path):
|
||||
with utils.file_open(volume_path) as volume_file:
|
||||
backup_service.backup(backup, volume_file)
|
||||
|
||||
def restore_backup(self, context, backup, volume, backup_service):
|
||||
"""Restore an existing backup to a new or existing volume."""
|
||||
volume_path = self.local_path(volume)
|
||||
with utils.temporary_chown(volume_path):
|
||||
with utils.file_open(volume_path, 'wb') as volume_file:
|
||||
backup_service.restore(backup, volume['id'], volume_file)
|
||||
|
||||
|
||||
class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
|
||||
"""Executes commands relating to ISCSI volumes.
|
||||
|
||||
We make use of model provider properties as follows:
|
||||
|
||||
``provider_location``
|
||||
if present, contains the iSCSI target information in the same
|
||||
format as an ietadm discovery
|
||||
i.e. '<ip>:<port>,<portal> <target IQN>'
|
||||
|
||||
``provider_auth``
|
||||
if present, contains a space-separated triple:
|
||||
'<auth method> <auth username> <auth password>'.
|
||||
`CHAP` is the only auth_method in use at the moment.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.tgtadm = iscsi.get_target_admin()
|
||||
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def set_execute(self, execute):
|
||||
super(LVMISCSIDriver, self).set_execute(execute)
|
||||
self.tgtadm.set_execute(execute)
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Synchronously recreates an export for a logical volume."""
|
||||
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
|
||||
# TODO(jdg): In the future move all of the dependent stuff into the
|
||||
# cooresponding target admin class
|
||||
|
||||
if isinstance(self.tgtadm, iscsi.LioAdm):
|
||||
try:
|
||||
volume_info = self.db.volume_get(context, volume['id'])
|
||||
(auth_method,
|
||||
auth_user,
|
||||
auth_pass) = volume_info['provider_auth'].split(' ', 3)
|
||||
chap_auth = self._iscsi_authentication(auth_method,
|
||||
auth_user,
|
||||
auth_pass)
|
||||
except exception.NotFound:
|
||||
LOG.debug("volume_info:", volume_info)
|
||||
LOG.info(_("Skipping ensure_export. No iscsi_target "
|
||||
"provision for volume: %s"), volume['id'])
|
||||
return
|
||||
|
||||
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
|
||||
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
|
||||
iscsi_target = 1
|
||||
|
||||
self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
|
||||
0, volume_path, chap_auth,
|
||||
check_exit_code=False)
|
||||
return
|
||||
|
||||
if not isinstance(self.tgtadm, iscsi.TgtAdm):
|
||||
try:
|
||||
iscsi_target = self.db.volume_get_iscsi_target_num(
|
||||
context,
|
||||
volume['id'])
|
||||
except exception.NotFound:
|
||||
LOG.info(_("Skipping ensure_export. No iscsi_target "
|
||||
"provisioned for volume: %s"), volume['id'])
|
||||
return
|
||||
else:
|
||||
iscsi_target = 1 # dummy value when using TgtAdm
|
||||
|
||||
chap_auth = None
|
||||
|
||||
# Check for https://bugs.launchpad.net/manila/+bug/1065702
|
||||
old_name = None
|
||||
volume_name = volume['name']
|
||||
if (volume['provider_location'] is not None and
|
||||
volume['name'] not in volume['provider_location']):
|
||||
|
||||
msg = _('Detected inconsistency in provider_location id')
|
||||
LOG.debug(msg)
|
||||
old_name = self._fix_id_migration(context, volume)
|
||||
if 'in-use' in volume['status']:
|
||||
volume_name = old_name
|
||||
old_name = None
|
||||
|
||||
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
|
||||
volume_name)
|
||||
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
|
||||
volume_name)
|
||||
|
||||
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
|
||||
# should clean this all up at some point in the future
|
||||
self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
|
||||
0, volume_path, chap_auth,
|
||||
check_exit_code=False,
|
||||
old_name=old_name)
|
||||
|
||||
def _fix_id_migration(self, context, volume):
|
||||
"""Fix provider_location and dev files to address bug 1065702.
|
||||
|
||||
For volumes that the provider_location has NOT been updated
|
||||
and are not currently in-use we'll create a new iscsi target
|
||||
and remove the persist file.
|
||||
|
||||
If the volume is in-use, we'll just stick with the old name
|
||||
and when detach is called we'll feed back into ensure_export
|
||||
again if necessary and fix things up then.
|
||||
|
||||
Details at: https://bugs.launchpad.net/manila/+bug/1065702
|
||||
"""
|
||||
|
||||
model_update = {}
|
||||
pattern = re.compile(r":|\s")
|
||||
fields = pattern.split(volume['provider_location'])
|
||||
old_name = fields[3]
|
||||
|
||||
volume['provider_location'] = \
|
||||
volume['provider_location'].replace(old_name, volume['name'])
|
||||
model_update['provider_location'] = volume['provider_location']
|
||||
|
||||
self.db.volume_update(context, volume['id'], model_update)
|
||||
|
||||
start = os.getcwd()
|
||||
os.chdir('/dev/%s' % self.configuration.volume_group)
|
||||
|
||||
try:
|
||||
(out, err) = self._execute('readlink', old_name)
|
||||
except exception.ProcessExecutionError:
|
||||
link_path = '/dev/%s/%s' % (self.configuration.volume_group,
|
||||
old_name)
|
||||
LOG.debug(_('Symbolic link %s not found') % link_path)
|
||||
os.chdir(start)
|
||||
return
|
||||
|
||||
rel_path = out.rstrip()
|
||||
self._execute('ln',
|
||||
'-s',
|
||||
rel_path, volume['name'],
|
||||
run_as_root=True)
|
||||
os.chdir(start)
|
||||
return old_name
|
||||
|
||||
def _ensure_iscsi_targets(self, context, host):
|
||||
"""Ensure that target ids have been created in datastore."""
|
||||
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
|
||||
# TODO(jdg): In the future move all of the dependent stuff into the
|
||||
# cooresponding target admin class
|
||||
if not isinstance(self.tgtadm, iscsi.TgtAdm):
|
||||
host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
|
||||
host)
|
||||
if host_iscsi_targets >= self.configuration.iscsi_num_targets:
|
||||
return
|
||||
|
||||
# NOTE(vish): Target ids start at 1, not 0.
|
||||
target_end = self.configuration.iscsi_num_targets + 1
|
||||
for target_num in xrange(1, target_end):
|
||||
target = {'host': host, 'target_num': target_num}
|
||||
self.db.iscsi_target_create_safe(context, target)
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Creates an export for a logical volume."""
|
||||
|
||||
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
|
||||
volume['name'])
|
||||
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
|
||||
volume['name'])
|
||||
model_update = {}
|
||||
|
||||
# TODO(jdg): In the future move all of the dependent stuff into the
|
||||
# cooresponding target admin class
|
||||
if not isinstance(self.tgtadm, iscsi.TgtAdm):
|
||||
lun = 0
|
||||
self._ensure_iscsi_targets(context, volume['host'])
|
||||
iscsi_target = self.db.volume_allocate_iscsi_target(context,
|
||||
volume['id'],
|
||||
volume['host'])
|
||||
else:
|
||||
lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1
|
||||
iscsi_target = 0 # NOTE(jdg): Not used by tgtadm
|
||||
|
||||
# Use the same method to generate the username and the password.
|
||||
chap_username = utils.generate_username()
|
||||
chap_password = utils.generate_password()
|
||||
chap_auth = self._iscsi_authentication('IncomingUser', chap_username,
|
||||
chap_password)
|
||||
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
|
||||
# should clean this all up at some point in the future
|
||||
tid = self.tgtadm.create_iscsi_target(iscsi_name,
|
||||
iscsi_target,
|
||||
0,
|
||||
volume_path,
|
||||
chap_auth)
|
||||
model_update['provider_location'] = self._iscsi_location(
|
||||
self.configuration.iscsi_ip_address, tid, iscsi_name, lun)
|
||||
model_update['provider_auth'] = self._iscsi_authentication(
|
||||
'CHAP', chap_username, chap_password)
|
||||
return model_update
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Removes an export for a logical volume."""
|
||||
# NOTE(jdg): tgtadm doesn't use the iscsi_targets table
|
||||
# TODO(jdg): In the future move all of the dependent stuff into the
|
||||
# cooresponding target admin class
|
||||
|
||||
if isinstance(self.tgtadm, iscsi.LioAdm):
|
||||
try:
|
||||
iscsi_target = self.db.volume_get_iscsi_target_num(
|
||||
context,
|
||||
volume['id'])
|
||||
except exception.NotFound:
|
||||
LOG.info(_("Skipping remove_export. No iscsi_target "
|
||||
"provisioned for volume: %s"), volume['id'])
|
||||
return
|
||||
|
||||
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
|
||||
|
||||
return
|
||||
|
||||
elif not isinstance(self.tgtadm, iscsi.TgtAdm):
|
||||
try:
|
||||
iscsi_target = self.db.volume_get_iscsi_target_num(
|
||||
context,
|
||||
volume['id'])
|
||||
except exception.NotFound:
|
||||
LOG.info(_("Skipping remove_export. No iscsi_target "
|
||||
"provisioned for volume: %s"), volume['id'])
|
||||
return
|
||||
else:
|
||||
iscsi_target = 0
|
||||
|
||||
try:
|
||||
|
||||
# NOTE: provider_location may be unset if the volume hasn't
|
||||
# been exported
|
||||
location = volume['provider_location'].split(' ')
|
||||
iqn = location[1]
|
||||
|
||||
# ietadm show will exit with an error
|
||||
# this export has already been removed
|
||||
self.tgtadm.show_target(iscsi_target, iqn=iqn)
|
||||
|
||||
except Exception as e:
|
||||
LOG.info(_("Skipping remove_export. No iscsi_target "
|
||||
"is presently exported for volume: %s"), volume['id'])
|
||||
return
|
||||
|
||||
self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume status.
|
||||
|
||||
If 'refresh' is True, run update the stats first."""
|
||||
if refresh:
|
||||
self._update_volume_status()
|
||||
|
||||
return self._stats
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
|
||||
LOG.debug(_("Updating volume status"))
|
||||
data = {}
|
||||
|
||||
# Note(zhiteng): These information are driver/backend specific,
|
||||
# each driver may define these values in its own config options
|
||||
# or fetch from driver specific configuration file.
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data["volume_backend_name"] = backend_name or 'LVM_iSCSI'
|
||||
data["vendor_name"] = 'Open Source'
|
||||
data["driver_version"] = self.VERSION
|
||||
data["storage_protocol"] = 'iSCSI'
|
||||
|
||||
data['total_capacity_gb'] = 0
|
||||
data['free_capacity_gb'] = 0
|
||||
data['reserved_percentage'] = self.configuration.reserved_percentage
|
||||
data['QoS_support'] = False
|
||||
|
||||
try:
|
||||
out, err = self._execute('vgs', '--noheadings', '--nosuffix',
|
||||
'--unit=G', '-o', 'name,size,free',
|
||||
self.configuration.volume_group,
|
||||
run_as_root=True)
|
||||
except exception.ProcessExecutionError as exc:
|
||||
LOG.error(_("Error retrieving volume status: "), exc.stderr)
|
||||
out = False
|
||||
|
||||
if out:
|
||||
volume = out.split()
|
||||
data['total_capacity_gb'] = float(volume[1].replace(',', '.'))
|
||||
data['free_capacity_gb'] = float(volume[2].replace(',', '.'))
|
||||
|
||||
self._stats = data
|
||||
|
||||
def _iscsi_location(self, ip, target, iqn, lun=None):
|
||||
return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port,
|
||||
target, iqn, lun)
|
||||
|
||||
def _iscsi_authentication(self, chap, name, password):
|
||||
return "%s %s %s" % (chap, name, password)
|
||||
|
||||
|
||||
class ThinLVMVolumeDriver(LVMISCSIDriver):
|
||||
"""Subclass for thin provisioned LVM's."""
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(ThinLVMVolumeDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met"""
|
||||
out, err = self._execute('lvs', '--option',
|
||||
'name', '--noheadings',
|
||||
run_as_root=True)
|
||||
pool_name = "%s-pool" % FLAGS.volume_group
|
||||
if pool_name not in out:
|
||||
if not FLAGS.pool_size:
|
||||
out, err = self._execute('vgs', FLAGS.volume_group,
|
||||
'--noheadings', '--options',
|
||||
'name,size', run_as_root=True)
|
||||
size = re.sub(r'[\.][\d][\d]', '', out.split()[1])
|
||||
else:
|
||||
size = "%s" % FLAGS.pool_size
|
||||
|
||||
pool_path = '%s/%s' % (FLAGS.volume_group, pool_name)
|
||||
out, err = self._execute('lvcreate', '-T', '-L', size,
|
||||
pool_path, run_as_root=True)
|
||||
|
||||
def _do_lvm_snapshot(self, src_lvm_name, dest_vref, is_cinder_snap=True):
|
||||
if is_cinder_snap:
|
||||
new_name = self._escape_snapshot(dest_vref['name'])
|
||||
else:
|
||||
new_name = dest_vref['name']
|
||||
|
||||
self._try_execute('lvcreate', '-s', '-n', new_name,
|
||||
src_lvm_name, run_as_root=True)
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a logical volume. Can optionally return a Dictionary of
|
||||
changes to the volume object to be persisted."""
|
||||
sizestr = self._sizestr(volume['size'])
|
||||
vg_name = ("%s/%s-pool" % (FLAGS.volume_group, FLAGS.volume_group))
|
||||
self._try_execute('lvcreate', '-T', '-V', sizestr, '-n',
|
||||
volume['name'], vg_name, run_as_root=True)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a logical volume."""
|
||||
if self._volume_not_present(volume['name']):
|
||||
return True
|
||||
self._try_execute('lvremove', '-f', "%s/%s" %
|
||||
(FLAGS.volume_group,
|
||||
self._escape_snapshot(volume['name'])),
|
||||
run_as_root=True)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a clone of the specified volume."""
|
||||
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
|
||||
orig_lv_name = "%s/%s" % (FLAGS.volume_group, src_vref['name'])
|
||||
self._do_lvm_snapshot(orig_lv_name, volume, False)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot of a volume."""
|
||||
orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
|
||||
self._do_lvm_snapshot(orig_lv_name, snapshot)
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume status.
|
||||
If 'refresh' is True, run update the stats first."""
|
||||
if refresh:
|
||||
self._update_volume_status()
|
||||
|
||||
return self._stats
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
|
||||
LOG.debug(_("Updating volume status"))
|
||||
data = {}
|
||||
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
data["volume_backend_name"] = backend_name or self.__class__.__name__
|
||||
data["vendor_name"] = 'Open Source'
|
||||
data["driver_version"] = self.VERSION
|
||||
data["storage_protocol"] = 'iSCSI'
|
||||
data['reserved_percentage'] = self.configuration.reserved_percentage
|
||||
data['QoS_support'] = False
|
||||
data['total_capacity_gb'] = 'infinite'
|
||||
data['free_capacity_gb'] = 'infinite'
|
||||
self._stats = data
|
@ -1,410 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 NetApp, Inc.
|
||||
# Copyright (c) 2012 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
NetApp api for ONTAP and OnCommand DFM.
|
||||
|
||||
Contains classes required to issue api calls to ONTAP and OnCommand DFM.
|
||||
"""
|
||||
|
||||
from lxml import etree
|
||||
import urllib2
|
||||
|
||||
from manila.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NaServer(object):
|
||||
"""Encapsulates server connection logic."""
|
||||
|
||||
TRANSPORT_TYPE_HTTP = 'http'
|
||||
TRANSPORT_TYPE_HTTPS = 'https'
|
||||
SERVER_TYPE_FILER = 'filer'
|
||||
SERVER_TYPE_DFM = 'dfm'
|
||||
URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer'
|
||||
URL_DFM = 'apis/XMLrequest'
|
||||
NETAPP_NS = 'http://www.netapp.com/filer/admin'
|
||||
STYLE_LOGIN_PASSWORD = 'basic_auth'
|
||||
STYLE_CERTIFICATE = 'certificate_auth'
|
||||
|
||||
def __init__(self, host, server_type=SERVER_TYPE_FILER,
|
||||
transport_type=TRANSPORT_TYPE_HTTP,
|
||||
style=STYLE_LOGIN_PASSWORD, username=None,
|
||||
password=None):
|
||||
self._host = host
|
||||
self.set_server_type(server_type)
|
||||
self.set_transport_type(transport_type)
|
||||
self.set_style(style)
|
||||
self._username = username
|
||||
self._password = password
|
||||
self._refresh_conn = True
|
||||
|
||||
def get_transport_type(self):
|
||||
"""Get the transport type protocol."""
|
||||
return self._protocol
|
||||
|
||||
def set_transport_type(self, transport_type):
|
||||
"""Set the transport type protocol for api.
|
||||
|
||||
Supports http and https transport types.
|
||||
"""
|
||||
if transport_type.lower() not in (
|
||||
NaServer.TRANSPORT_TYPE_HTTP,
|
||||
NaServer.TRANSPORT_TYPE_HTTPS):
|
||||
raise ValueError('Unsupported transport type')
|
||||
self._protocol = transport_type.lower()
|
||||
if self._protocol == NaServer.TRANSPORT_TYPE_HTTP:
|
||||
if self._server_type == NaServer.SERVER_TYPE_FILER:
|
||||
self.set_port(80)
|
||||
else:
|
||||
self.set_port(8088)
|
||||
else:
|
||||
if self._server_type == NaServer.SERVER_TYPE_FILER:
|
||||
self.set_port(443)
|
||||
else:
|
||||
self.set_port(8488)
|
||||
self._refresh_conn = True
|
||||
|
||||
def get_style(self):
|
||||
"""Get the authorization style for communicating with the server."""
|
||||
return self._auth_style
|
||||
|
||||
def set_style(self, style):
|
||||
"""Set the authorization style for communicating with the server.
|
||||
|
||||
Supports basic_auth for now. Certificate_auth mode to be done.
|
||||
"""
|
||||
if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD,
|
||||
NaServer.STYLE_CERTIFICATE):
|
||||
raise ValueError('Unsupported authentication style')
|
||||
self._auth_style = style.lower()
|
||||
|
||||
def get_server_type(self):
|
||||
"""Get the target server type."""
|
||||
return self._server_type
|
||||
|
||||
def set_server_type(self, server_type):
|
||||
"""Set the target server type.
|
||||
|
||||
Supports filer and dfm server types.
|
||||
"""
|
||||
if server_type.lower() not in (NaServer.SERVER_TYPE_FILER,
|
||||
NaServer.SERVER_TYPE_DFM):
|
||||
raise ValueError('Unsupported server type')
|
||||
self._server_type = server_type.lower()
|
||||
if self._server_type == NaServer.SERVER_TYPE_FILER:
|
||||
self._url = NaServer.URL_FILER
|
||||
else:
|
||||
self._url = NaServer.URL_DFM
|
||||
self._ns = NaServer.NETAPP_NS
|
||||
self._refresh_conn = True
|
||||
|
||||
def set_api_version(self, major, minor):
|
||||
"""Set the api version."""
|
||||
try:
|
||||
self._api_major_version = int(major)
|
||||
self._api_minor_version = int(minor)
|
||||
self._api_version = str(major) + "." + str(minor)
|
||||
except ValueError:
|
||||
raise ValueError('Major and minor versions must be integers')
|
||||
self._refresh_conn = True
|
||||
|
||||
def get_api_version(self):
|
||||
"""Gets the api version."""
|
||||
if hasattr(self, '_api_version'):
|
||||
return self._api_version
|
||||
return self._api_version
|
||||
|
||||
def set_port(self, port):
|
||||
"""Set the server communication port."""
|
||||
try:
|
||||
int(port)
|
||||
except ValueError:
|
||||
raise ValueError('Port must be integer')
|
||||
self._port = str(port)
|
||||
self._refresh_conn = True
|
||||
|
||||
def get_port(self):
|
||||
"""Get the server communication port."""
|
||||
return self._port
|
||||
|
||||
def set_timeout(self, seconds):
|
||||
"""Sets the timeout in seconds."""
|
||||
try:
|
||||
self._timeout = int(seconds)
|
||||
except ValueError:
|
||||
raise ValueError('timeout in seconds must be integer')
|
||||
|
||||
def get_timeout(self):
|
||||
"""Gets the timeout in seconds if set."""
|
||||
if hasattr(self, '_timeout'):
|
||||
return self._timeout
|
||||
return None
|
||||
|
||||
def get_vfiler(self):
|
||||
"""Get the vfiler to use in tunneling."""
|
||||
return self._vfiler
|
||||
|
||||
def set_vfiler(self, vfiler):
|
||||
"""Set the vfiler to use if tunneling gets enabled."""
|
||||
self._vfiler = vfiler
|
||||
|
||||
def get_vserver(self):
|
||||
"""Get the vserver to use in tunneling."""
|
||||
return self._vserver
|
||||
|
||||
def set_vserver(self, vserver):
|
||||
"""Set the vserver to use if tunneling gets enabled."""
|
||||
self._vserver = vserver
|
||||
|
||||
def set_username(self, username):
|
||||
"""Set the user name for authentication."""
|
||||
self._username = username
|
||||
self._refresh_conn = True
|
||||
|
||||
def set_password(self, password):
|
||||
"""Set the password for authentication."""
|
||||
self._password = password
|
||||
self._refresh_conn = True
|
||||
|
||||
def invoke_elem(self, na_element, enable_tunneling=False):
|
||||
"""Invoke the api on the server."""
|
||||
if na_element and not isinstance(na_element, NaElement):
|
||||
ValueError('NaElement must be supplied to invoke api')
|
||||
request = self._create_request(na_element, enable_tunneling)
|
||||
if not hasattr(self, '_opener') or not self._opener \
|
||||
or self._refresh_conn:
|
||||
self._build_opener()
|
||||
try:
|
||||
if hasattr(self, '_timeout'):
|
||||
response = self._opener.open(request, timeout=self._timeout)
|
||||
else:
|
||||
response = self._opener.open(request)
|
||||
except urllib2.HTTPError as e:
|
||||
raise NaApiError(e.code, e.msg)
|
||||
except Exception as e:
|
||||
raise NaApiError('Unexpected error', e)
|
||||
xml = response.read()
|
||||
return self._get_result(xml)
|
||||
|
||||
def invoke_successfully(self, na_element, enable_tunneling=False):
|
||||
"""Invokes api and checks execution status as success.
|
||||
|
||||
Need to set enable_tunneling to True explicitly to achieve it.
|
||||
This helps to use same connection instance to enable or disable
|
||||
tunneling. The vserver or vfiler should be set before this call
|
||||
otherwise tunneling remains disabled.
|
||||
"""
|
||||
result = self.invoke_elem(na_element, enable_tunneling)
|
||||
if result.has_attr('status') and result.get_attr('status') == 'passed':
|
||||
return result
|
||||
code = result.get_attr('errno')\
|
||||
or result.get_child_content('errorno')\
|
||||
or 'ESTATUSFAILED'
|
||||
msg = result.get_attr('reason')\
|
||||
or result.get_child_content('reason')\
|
||||
or 'Execution status is failed due to unknown reason'
|
||||
raise NaApiError(code, msg)
|
||||
|
||||
def _create_request(self, na_element, enable_tunneling=False):
|
||||
"""Creates request in the desired format."""
|
||||
netapp_elem = NaElement('netapp')
|
||||
netapp_elem.add_attr('xmlns', self._ns)
|
||||
if hasattr(self, '_api_version'):
|
||||
netapp_elem.add_attr('version', self._api_version)
|
||||
if enable_tunneling:
|
||||
self._enable_tunnel_request(netapp_elem)
|
||||
netapp_elem.add_child_elem(na_element)
|
||||
request_d = netapp_elem.to_string()
|
||||
request = urllib2.Request(
|
||||
self._get_url(), data=request_d,
|
||||
headers={'Content-Type': 'text/xml', 'charset': 'utf-8'})
|
||||
return request
|
||||
|
||||
def _enable_tunnel_request(self, netapp_elem):
|
||||
"""Enables vserver or vfiler tunneling."""
|
||||
if hasattr(self, '_vfiler') and self._vfiler:
|
||||
if hasattr(self, '_api_major_version') and \
|
||||
hasattr(self, '_api_minor_version') and \
|
||||
self._api_major_version >= 1 and \
|
||||
self._api_minor_version >= 7:
|
||||
netapp_elem.add_attr('vfiler', self._vfiler)
|
||||
else:
|
||||
raise ValueError('ontapi version has to be atleast 1.7'
|
||||
' to send request to vfiler')
|
||||
if hasattr(self, '_vserver') and self._vserver:
|
||||
if hasattr(self, '_api_major_version') and \
|
||||
hasattr(self, '_api_minor_version') and \
|
||||
self._api_major_version >= 1 and \
|
||||
self._api_minor_version >= 15:
|
||||
netapp_elem.add_attr('vfiler', self._vserver)
|
||||
else:
|
||||
raise ValueError('ontapi version has to be atleast 1.15'
|
||||
' to send request to vserver')
|
||||
|
||||
def _parse_response(self, response):
|
||||
"""Get the NaElement for the response."""
|
||||
if not response:
|
||||
raise NaApiError('No response received')
|
||||
xml = etree.XML(response)
|
||||
return NaElement(xml)
|
||||
|
||||
def _get_result(self, response):
|
||||
"""Gets the call result."""
|
||||
processed_response = self._parse_response(response)
|
||||
return processed_response.get_child_by_name('results')
|
||||
|
||||
def _get_url(self):
|
||||
return '%s://%s:%s/%s' % (self._protocol, self._host, self._port,
|
||||
self._url)
|
||||
|
||||
def _build_opener(self):
|
||||
if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD:
|
||||
auth_handler = self._create_basic_auth_handler()
|
||||
else:
|
||||
auth_handler = self._create_certificate_auth_handler()
|
||||
opener = urllib2.build_opener(auth_handler)
|
||||
self._opener = opener
|
||||
|
||||
def _create_basic_auth_handler(self):
|
||||
password_man = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
password_man.add_password(None, self._get_url(), self._username,
|
||||
self._password)
|
||||
auth_handler = urllib2.HTTPBasicAuthHandler(password_man)
|
||||
return auth_handler
|
||||
|
||||
def _create_certificate_auth_handler(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class NaElement(object):
|
||||
"""Class wraps basic building block for NetApp api request."""
|
||||
|
||||
def __init__(self, name):
|
||||
"""Name of the element or etree.Element."""
|
||||
if isinstance(name, etree._Element):
|
||||
self._element = name
|
||||
else:
|
||||
self._element = etree.Element(name)
|
||||
|
||||
def get_name(self):
|
||||
"""Returns the tag name of the element."""
|
||||
return self._element.tag
|
||||
|
||||
def set_content(self, text):
|
||||
"""Set the text for the element."""
|
||||
self._element.text = text
|
||||
|
||||
def get_content(self):
|
||||
"""Get the text for the element."""
|
||||
return self._element.text
|
||||
|
||||
def add_attr(self, name, value):
|
||||
"""Add the attribute to the element."""
|
||||
self._element.set(name, value)
|
||||
|
||||
def add_attrs(self, **attrs):
|
||||
"""Add multiple attributes to the element."""
|
||||
for attr in attrs.keys():
|
||||
self._element.set(attr, attrs.get(attr))
|
||||
|
||||
def add_child_elem(self, na_element):
|
||||
"""Add the child element to the element."""
|
||||
if isinstance(na_element, NaElement):
|
||||
self._element.append(na_element._element)
|
||||
return
|
||||
raise
|
||||
|
||||
def get_child_by_name(self, name):
|
||||
"""Get the child element by the tag name."""
|
||||
for child in self._element.iterchildren():
|
||||
if child.tag == name or etree.QName(child.tag).localname == name:
|
||||
return NaElement(child)
|
||||
return None
|
||||
|
||||
def get_child_content(self, name):
|
||||
"""Get the content of the child."""
|
||||
for child in self._element.iterchildren():
|
||||
if child.tag == name or etree.QName(child.tag).localname == name:
|
||||
return child.text
|
||||
return None
|
||||
|
||||
def get_children(self):
|
||||
"""Get the children for the element."""
|
||||
return [NaElement(el) for el in self._element.iterchildren()]
|
||||
|
||||
def has_attr(self, name):
|
||||
"""Checks whether element has attribute."""
|
||||
attributes = self._element.attrib or {}
|
||||
return name in attributes.keys()
|
||||
|
||||
def get_attr(self, name):
|
||||
"""Get the attribute with the given name."""
|
||||
attributes = self._element.attrib or {}
|
||||
return attributes.get(name)
|
||||
|
||||
def get_attr_names(self):
|
||||
"""Returns the list of attribute names."""
|
||||
attributes = self._element.attrib or {}
|
||||
return attributes.keys()
|
||||
|
||||
def add_new_child(self, name, content, convert=False):
|
||||
"""Add child with tag name and context.
|
||||
|
||||
Convert replaces entity refs to chars."""
|
||||
child = NaElement(name)
|
||||
if convert:
|
||||
content = NaElement._convert_entity_refs(content)
|
||||
child.set_content(content)
|
||||
self.add_child_elem(child)
|
||||
|
||||
@staticmethod
|
||||
def _convert_entity_refs(text):
|
||||
"""Converts entity refs to chars to handle etree auto conversions."""
|
||||
text = text.replace("<", "<")
|
||||
text = text.replace(">", ">")
|
||||
return text
|
||||
|
||||
@staticmethod
|
||||
def create_node_with_children(node, **children):
|
||||
"""Creates and returns named node with children."""
|
||||
parent = NaElement(node)
|
||||
for child in children.keys():
|
||||
parent.add_new_child(child, children.get(child, None))
|
||||
return parent
|
||||
|
||||
def add_node_with_children(self, node, **children):
|
||||
"""Creates named node with children."""
|
||||
parent = NaElement.create_node_with_children(node, **children)
|
||||
self.add_child_elem(parent)
|
||||
|
||||
def to_string(self, pretty=False, method='xml', encoding='UTF-8'):
|
||||
"""Prints the element to string."""
|
||||
return etree.tostring(self._element, method=method, encoding=encoding,
|
||||
pretty_print=pretty)
|
||||
|
||||
|
||||
class NaApiError(Exception):
|
||||
"""Base exception class for NetApp api errors."""
|
||||
|
||||
def __init__(self, code='unknown', message='unknown'):
|
||||
self.code = code
|
||||
self.message = message
|
||||
|
||||
def __str__(self, *args, **kwargs):
|
||||
return 'NetApp api failed. Reason - %s:%s' % (self.code, self.message)
|
File diff suppressed because it is too large
Load Diff
@ -1,624 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 NetApp, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Volume driver for NetApp NFS storage.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import os
|
||||
import time
|
||||
|
||||
from oslo.config import cfg
|
||||
import suds
|
||||
from suds.sax import text
|
||||
|
||||
from manila import exception
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume.drivers.netapp.api import NaApiError
|
||||
from manila.volume.drivers.netapp.api import NaElement
|
||||
from manila.volume.drivers.netapp.api import NaServer
|
||||
from manila.volume.drivers.netapp.iscsi import netapp_opts
|
||||
from manila.volume.drivers import nfs
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
netapp_nfs_opts = [
|
||||
cfg.IntOpt('synchronous_snapshot_create',
|
||||
default=0,
|
||||
help='Does snapshot creation call returns immediately')]
|
||||
|
||||
|
||||
class NetAppNFSDriver(nfs.NfsDriver):
|
||||
"""Executes commands relating to Volumes."""
|
||||
def __init__(self, *args, **kwargs):
|
||||
# NOTE(vish): db is set by Manager
|
||||
self._execute = None
|
||||
self._context = None
|
||||
super(NetAppNFSDriver, self).__init__(*args, **kwargs)
|
||||
self.configuration.append_config_values(netapp_opts)
|
||||
self.configuration.append_config_values(netapp_nfs_opts)
|
||||
|
||||
def set_execute(self, execute):
|
||||
self._execute = execute
|
||||
|
||||
def do_setup(self, context):
|
||||
self._context = context
|
||||
self.check_for_setup_error()
|
||||
self._client = self._get_client()
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met."""
|
||||
self._check_dfm_flags()
|
||||
super(NetAppNFSDriver, self).check_for_setup_error()
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Creates a volume from a snapshot."""
|
||||
vol_size = volume.size
|
||||
snap_size = snapshot.volume_size
|
||||
|
||||
if vol_size != snap_size:
|
||||
msg = _('Cannot create volume of size %(vol_size)s from '
|
||||
'snapshot of size %(snap_size)s')
|
||||
raise exception.CinderException(msg % locals())
|
||||
|
||||
self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
|
||||
share = self._get_volume_location(snapshot.volume_id)
|
||||
|
||||
return {'provider_location': share}
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot."""
|
||||
self._clone_volume(snapshot['volume_name'],
|
||||
snapshot['name'],
|
||||
snapshot['volume_id'])
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a snapshot."""
|
||||
nfs_mount = self._get_provider_location(snapshot.volume_id)
|
||||
|
||||
if self._volume_not_present(nfs_mount, snapshot.name):
|
||||
return True
|
||||
|
||||
self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
|
||||
run_as_root=True)
|
||||
|
||||
def _check_dfm_flags(self):
|
||||
"""Raises error if any required configuration flag for OnCommand proxy
|
||||
is missing."""
|
||||
required_flags = ['netapp_wsdl_url',
|
||||
'netapp_login',
|
||||
'netapp_password',
|
||||
'netapp_server_hostname',
|
||||
'netapp_server_port']
|
||||
for flag in required_flags:
|
||||
if not getattr(self.configuration, flag, None):
|
||||
raise exception.CinderException(_('%s is not set') % flag)
|
||||
|
||||
def _get_client(self):
|
||||
"""Creates SOAP _client for ONTAP-7 DataFabric Service."""
|
||||
client = suds.client.Client(
|
||||
self.configuration.netapp_wsdl_url,
|
||||
username=self.configuration.netapp_login,
|
||||
password=self.configuration.netapp_password)
|
||||
soap_url = 'http://%s:%s/apis/soap/v1' % (
|
||||
self.configuration.netapp_server_hostname,
|
||||
self.configuration.netapp_server_port)
|
||||
client.set_options(location=soap_url)
|
||||
|
||||
return client
|
||||
|
||||
def _get_volume_location(self, volume_id):
|
||||
"""Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>"""
|
||||
nfs_server_ip = self._get_host_ip(volume_id)
|
||||
export_path = self._get_export_path(volume_id)
|
||||
return (nfs_server_ip + ':' + export_path)
|
||||
|
||||
def _clone_volume(self, volume_name, clone_name, volume_id):
|
||||
"""Clones mounted volume with OnCommand proxy API."""
|
||||
host_id = self._get_host_id(volume_id)
|
||||
export_path = self._get_full_export_path(volume_id, host_id)
|
||||
|
||||
request = self._client.factory.create('Request')
|
||||
request.Name = 'clone-start'
|
||||
|
||||
clone_start_args = ('<source-path>%s/%s</source-path>'
|
||||
'<destination-path>%s/%s</destination-path>')
|
||||
|
||||
request.Args = text.Raw(clone_start_args % (export_path,
|
||||
volume_name,
|
||||
export_path,
|
||||
clone_name))
|
||||
|
||||
resp = self._client.service.ApiProxy(Target=host_id,
|
||||
Request=request)
|
||||
|
||||
if (resp.Status == 'passed' and
|
||||
self.configuration.synchronous_snapshot_create):
|
||||
clone_id = resp.Results['clone-id'][0]
|
||||
clone_id_info = clone_id['clone-id-info'][0]
|
||||
clone_operation_id = int(clone_id_info['clone-op-id'][0])
|
||||
|
||||
self._wait_for_clone_finished(clone_operation_id, host_id)
|
||||
elif resp.Status == 'failed':
|
||||
raise exception.CinderException(resp.Reason)
|
||||
|
||||
def _wait_for_clone_finished(self, clone_operation_id, host_id):
|
||||
"""
|
||||
Polls ONTAP7 for clone status. Returns once clone is finished.
|
||||
:param clone_operation_id: Identifier of ONTAP clone operation
|
||||
"""
|
||||
clone_list_options = ('<clone-id>'
|
||||
'<clone-id-info>'
|
||||
'<clone-op-id>%d</clone-op-id>'
|
||||
'<volume-uuid></volume-uuid>'
|
||||
'</clone-id>'
|
||||
'</clone-id-info>')
|
||||
|
||||
request = self._client.factory.create('Request')
|
||||
request.Name = 'clone-list-status'
|
||||
request.Args = text.Raw(clone_list_options % clone_operation_id)
|
||||
|
||||
resp = self._client.service.ApiProxy(Target=host_id, Request=request)
|
||||
|
||||
while resp.Status != 'passed':
|
||||
time.sleep(1)
|
||||
resp = self._client.service.ApiProxy(Target=host_id,
|
||||
Request=request)
|
||||
|
||||
def _get_provider_location(self, volume_id):
|
||||
"""
|
||||
Returns provider location for given volume
|
||||
:param volume_id:
|
||||
"""
|
||||
volume = self.db.volume_get(self._context, volume_id)
|
||||
return volume.provider_location
|
||||
|
||||
def _get_host_ip(self, volume_id):
|
||||
"""Returns IP address for the given volume."""
|
||||
return self._get_provider_location(volume_id).split(':')[0]
|
||||
|
||||
def _get_export_path(self, volume_id):
|
||||
"""Returns NFS export path for the given volume."""
|
||||
return self._get_provider_location(volume_id).split(':')[1]
|
||||
|
||||
def _get_host_id(self, volume_id):
|
||||
"""Returns ID of the ONTAP-7 host."""
|
||||
host_ip = self._get_host_ip(volume_id)
|
||||
server = self._client.service
|
||||
|
||||
resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip)
|
||||
tag = resp.Tag
|
||||
|
||||
try:
|
||||
res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
|
||||
if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
|
||||
return res.Hosts.HostInfo[0].HostId
|
||||
finally:
|
||||
server.HostListInfoIterEnd(Tag=tag)
|
||||
|
||||
def _get_full_export_path(self, volume_id, host_id):
|
||||
"""Returns full path to the NFS share, e.g. /vol/vol0/home."""
|
||||
export_path = self._get_export_path(volume_id)
|
||||
command_args = '<pathname>%s</pathname>'
|
||||
|
||||
request = self._client.factory.create('Request')
|
||||
request.Name = 'nfs-exportfs-storage-path'
|
||||
request.Args = text.Raw(command_args % export_path)
|
||||
|
||||
resp = self._client.service.ApiProxy(Target=host_id,
|
||||
Request=request)
|
||||
|
||||
if resp.Status == 'passed':
|
||||
return resp.Results['actual-pathname'][0]
|
||||
elif resp.Status == 'failed':
|
||||
raise exception.CinderException(resp.Reason)
|
||||
|
||||
def _volume_not_present(self, nfs_mount, volume_name):
|
||||
"""Check if volume exists."""
|
||||
try:
|
||||
self._try_execute('ls', self._get_volume_path(nfs_mount,
|
||||
volume_name))
|
||||
except exception.ProcessExecutionError:
|
||||
# If the volume isn't present
|
||||
return True
|
||||
return False
|
||||
|
||||
def _try_execute(self, *command, **kwargs):
|
||||
# NOTE(vish): Volume commands can partially fail due to timing, but
|
||||
# running them a second time on failure will usually
|
||||
# recover nicely.
|
||||
tries = 0
|
||||
while True:
|
||||
try:
|
||||
self._execute(*command, **kwargs)
|
||||
return True
|
||||
except exception.ProcessExecutionError:
|
||||
tries = tries + 1
|
||||
if tries >= self.configuration.num_shell_tries:
|
||||
raise
|
||||
LOG.exception(_("Recovering from a failed execute. "
|
||||
"Try number %s"), tries)
|
||||
time.sleep(tries ** 2)
|
||||
|
||||
def _get_volume_path(self, nfs_share, volume_name):
|
||||
"""Get volume path (local fs path) for given volume name on given nfs
|
||||
share
|
||||
@param nfs_share string, example 172.18.194.100:/var/nfs
|
||||
@param volume_name string,
|
||||
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
|
||||
"""
|
||||
return os.path.join(self._get_mount_point_for_share(nfs_share),
|
||||
volume_name)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a clone of the specified volume."""
|
||||
vol_size = volume.size
|
||||
src_vol_size = src_vref.size
|
||||
|
||||
if vol_size != src_vol_size:
|
||||
msg = _('Cannot create clone of size %(vol_size)s from '
|
||||
'volume of size %(src_vol_size)s')
|
||||
raise exception.CinderException(msg % locals())
|
||||
|
||||
self._clone_volume(src_vref.name, volume.name, src_vref.id)
|
||||
share = self._get_volume_location(src_vref.id)
|
||||
|
||||
return {'provider_location': share}
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
super(NetAppNFSDriver, self)._update_volume_status()
|
||||
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
self._stats["volume_backend_name"] = (backend_name or
|
||||
'NetApp_NFS_7mode')
|
||||
self._stats["vendor_name"] = 'NetApp'
|
||||
self._stats["driver_version"] = '1.0'
|
||||
|
||||
|
||||
class NetAppCmodeNfsDriver (NetAppNFSDriver):
|
||||
"""Executes commands related to volumes on c mode."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def do_setup(self, context):
|
||||
self._context = context
|
||||
self.check_for_setup_error()
|
||||
self._client = self._get_client()
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met."""
|
||||
self._check_flags()
|
||||
|
||||
def _clone_volume(self, volume_name, clone_name, volume_id):
|
||||
"""Clones mounted volume with NetApp Cloud Services."""
|
||||
host_ip = self._get_host_ip(volume_id)
|
||||
export_path = self._get_export_path(volume_id)
|
||||
LOG.debug(_("""Cloning with params ip %(host_ip)s, exp_path
|
||||
%(export_path)s, vol %(volume_name)s,
|
||||
clone_name %(clone_name)s""") % locals())
|
||||
self._client.service.CloneNasFile(host_ip, export_path,
|
||||
volume_name, clone_name)
|
||||
|
||||
def _check_flags(self):
|
||||
"""Raises error if any required configuration flag for NetApp Cloud
|
||||
Webservices is missing."""
|
||||
required_flags = ['netapp_wsdl_url',
|
||||
'netapp_login',
|
||||
'netapp_password',
|
||||
'netapp_server_hostname',
|
||||
'netapp_server_port']
|
||||
for flag in required_flags:
|
||||
if not getattr(self.configuration, flag, None):
|
||||
raise exception.CinderException(_('%s is not set') % flag)
|
||||
|
||||
def _get_client(self):
|
||||
"""Creates SOAP _client for NetApp Cloud service."""
|
||||
client = suds.client.Client(
|
||||
self.configuration.netapp_wsdl_url,
|
||||
username=self.configuration.netapp_login,
|
||||
password=self.configuration.netapp_password)
|
||||
return client
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
super(NetAppCmodeNfsDriver, self)._update_volume_status()
|
||||
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
self._stats["volume_backend_name"] = (backend_name or
|
||||
'NetApp_NFS_Cluster')
|
||||
self._stats["vendor_name"] = 'NetApp'
|
||||
self._stats["driver_version"] = '1.0'
|
||||
|
||||
|
||||
class NetAppDirectNfsDriver (NetAppNFSDriver):
|
||||
"""Executes commands related to volumes on NetApp filer."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetAppDirectNfsDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def do_setup(self, context):
|
||||
self._context = context
|
||||
self.check_for_setup_error()
|
||||
self._client = self._get_client()
|
||||
self._do_custom_setup(self._client)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met."""
|
||||
self._check_flags()
|
||||
|
||||
def _clone_volume(self, volume_name, clone_name, volume_id):
|
||||
"""Clones mounted volume on NetApp filer."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _check_flags(self):
|
||||
"""Raises error if any required configuration flag for NetApp
|
||||
filer is missing."""
|
||||
required_flags = ['netapp_login',
|
||||
'netapp_password',
|
||||
'netapp_server_hostname',
|
||||
'netapp_server_port',
|
||||
'netapp_transport_type']
|
||||
for flag in required_flags:
|
||||
if not getattr(self.configuration, flag, None):
|
||||
raise exception.CinderException(_('%s is not set') % flag)
|
||||
|
||||
def _get_client(self):
|
||||
"""Creates NetApp api client."""
|
||||
client = NaServer(
|
||||
host=self.configuration.netapp_server_hostname,
|
||||
server_type=NaServer.SERVER_TYPE_FILER,
|
||||
transport_type=self.configuration.netapp_transport_type,
|
||||
style=NaServer.STYLE_LOGIN_PASSWORD,
|
||||
username=self.configuration.netapp_login,
|
||||
password=self.configuration.netapp_password)
|
||||
return client
|
||||
|
||||
def _do_custom_setup(self, client):
|
||||
"""Do the customized set up on client if any for different types."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _is_naelement(self, elem):
|
||||
"""Checks if element is NetApp element."""
|
||||
if not isinstance(elem, NaElement):
|
||||
raise ValueError('Expects NaElement')
|
||||
|
||||
def _invoke_successfully(self, na_element, vserver=None):
|
||||
"""Invoke the api for successful result.
|
||||
|
||||
If vserver is present then invokes vserver/vfiler api
|
||||
else filer/Cluster api.
|
||||
:param vserver: vserver/vfiler name.
|
||||
"""
|
||||
self._is_naelement(na_element)
|
||||
server = copy.copy(self._client)
|
||||
if vserver:
|
||||
server.set_vserver(vserver)
|
||||
else:
|
||||
server.set_vserver(None)
|
||||
result = server.invoke_successfully(na_element, True)
|
||||
return result
|
||||
|
||||
def _get_ontapi_version(self):
|
||||
"""Gets the supported ontapi version."""
|
||||
ontapi_version = NaElement('system-get-ontapi-version')
|
||||
res = self._invoke_successfully(ontapi_version, False)
|
||||
major = res.get_child_content('major-version')
|
||||
minor = res.get_child_content('minor-version')
|
||||
return (major, minor)
|
||||
|
||||
|
||||
class NetAppDirectCmodeNfsDriver (NetAppDirectNfsDriver):
|
||||
"""Executes commands related to volumes on c mode."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetAppDirectCmodeNfsDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def _do_custom_setup(self, client):
|
||||
"""Do the customized set up on client for cluster mode."""
|
||||
# Default values to run first api
|
||||
client.set_api_version(1, 15)
|
||||
(major, minor) = self._get_ontapi_version()
|
||||
client.set_api_version(major, minor)
|
||||
|
||||
def _clone_volume(self, volume_name, clone_name, volume_id):
|
||||
"""Clones mounted volume on NetApp Cluster."""
|
||||
host_ip = self._get_host_ip(volume_id)
|
||||
export_path = self._get_export_path(volume_id)
|
||||
ifs = self._get_if_info_by_ip(host_ip)
|
||||
vserver = ifs[0].get_child_content('vserver')
|
||||
exp_volume = self._get_vol_by_junc_vserver(vserver, export_path)
|
||||
self._clone_file(exp_volume, volume_name, clone_name, vserver)
|
||||
|
||||
def _get_if_info_by_ip(self, ip):
|
||||
"""Gets the network interface info by ip."""
|
||||
net_if_iter = NaElement('net-interface-get-iter')
|
||||
net_if_iter.add_new_child('max-records', '10')
|
||||
query = NaElement('query')
|
||||
net_if_iter.add_child_elem(query)
|
||||
query.add_node_with_children('net-interface-info', **{'address': ip})
|
||||
result = self._invoke_successfully(net_if_iter)
|
||||
if result.get_child_content('num-records') and\
|
||||
int(result.get_child_content('num-records')) >= 1:
|
||||
attr_list = result.get_child_by_name('attributes-list')
|
||||
return attr_list.get_children()
|
||||
raise exception.NotFound(
|
||||
_('No interface found on cluster for ip %s')
|
||||
% (ip))
|
||||
|
||||
def _get_vol_by_junc_vserver(self, vserver, junction):
|
||||
"""Gets the volume by junction path and vserver."""
|
||||
vol_iter = NaElement('volume-get-iter')
|
||||
vol_iter.add_new_child('max-records', '10')
|
||||
query = NaElement('query')
|
||||
vol_iter.add_child_elem(query)
|
||||
vol_attrs = NaElement('volume-attributes')
|
||||
query.add_child_elem(vol_attrs)
|
||||
vol_attrs.add_node_with_children(
|
||||
'volume-id-attributes',
|
||||
**{'junction-path': junction,
|
||||
'owning-vserver-name': vserver})
|
||||
des_attrs = NaElement('desired-attributes')
|
||||
des_attrs.add_node_with_children('volume-attributes',
|
||||
**{'volume-id-attributes': None})
|
||||
vol_iter.add_child_elem(des_attrs)
|
||||
result = self._invoke_successfully(vol_iter, vserver)
|
||||
if result.get_child_content('num-records') and\
|
||||
int(result.get_child_content('num-records')) >= 1:
|
||||
attr_list = result.get_child_by_name('attributes-list')
|
||||
vols = attr_list.get_children()
|
||||
vol_id = vols[0].get_child_by_name('volume-id-attributes')
|
||||
return vol_id.get_child_content('name')
|
||||
raise exception.NotFound(_("""No volume on cluster with vserver
|
||||
%(vserver)s and junction path %(junction)s
|
||||
""") % locals())
|
||||
|
||||
def _clone_file(self, volume, src_path, dest_path, vserver=None):
|
||||
"""Clones file on vserver."""
|
||||
LOG.debug(_("""Cloning with params volume %(volume)s,src %(src_path)s,
|
||||
dest %(dest_path)s, vserver %(vserver)s""")
|
||||
% locals())
|
||||
clone_create = NaElement.create_node_with_children(
|
||||
'clone-create',
|
||||
**{'volume': volume, 'source-path': src_path,
|
||||
'destination-path': dest_path})
|
||||
self._invoke_successfully(clone_create, vserver)
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
super(NetAppDirectCmodeNfsDriver, self)._update_volume_status()
|
||||
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
self._stats["volume_backend_name"] = (backend_name or
|
||||
'NetApp_NFS_cluster_direct')
|
||||
self._stats["vendor_name"] = 'NetApp'
|
||||
self._stats["driver_version"] = '1.0'
|
||||
|
||||
|
||||
class NetAppDirect7modeNfsDriver (NetAppDirectNfsDriver):
|
||||
"""Executes commands related to volumes on 7 mode."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NetAppDirect7modeNfsDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def _do_custom_setup(self, client):
|
||||
"""Do the customized set up on client if any for 7 mode."""
|
||||
(major, minor) = self._get_ontapi_version()
|
||||
client.set_api_version(major, minor)
|
||||
|
||||
def _clone_volume(self, volume_name, clone_name, volume_id):
|
||||
"""Clones mounted volume with NetApp filer."""
|
||||
export_path = self._get_export_path(volume_id)
|
||||
storage_path = self._get_actual_path_for_export(export_path)
|
||||
target_path = '%s/%s' % (storage_path, clone_name)
|
||||
(clone_id, vol_uuid) = self._start_clone('%s/%s' % (storage_path,
|
||||
volume_name),
|
||||
target_path)
|
||||
if vol_uuid:
|
||||
try:
|
||||
self._wait_for_clone_finish(clone_id, vol_uuid)
|
||||
except NaApiError as e:
|
||||
if e.code != 'UnknownCloneId':
|
||||
self._clear_clone(clone_id)
|
||||
raise e
|
||||
|
||||
def _get_actual_path_for_export(self, export_path):
|
||||
"""Gets the actual path on the filer for export path."""
|
||||
storage_path = NaElement.create_node_with_children(
|
||||
'nfs-exportfs-storage-path', **{'pathname': export_path})
|
||||
result = self._invoke_successfully(storage_path, None)
|
||||
if result.get_child_content('actual-pathname'):
|
||||
return result.get_child_content('actual-pathname')
|
||||
raise exception.NotFound(_('No storage path found for export path %s')
|
||||
% (export_path))
|
||||
|
||||
def _start_clone(self, src_path, dest_path):
|
||||
"""Starts the clone operation.
|
||||
|
||||
:returns: clone-id
|
||||
"""
|
||||
LOG.debug(_("""Cloning with src %(src_path)s, dest %(dest_path)s""")
|
||||
% locals())
|
||||
clone_start = NaElement.create_node_with_children(
|
||||
'clone-start',
|
||||
**{'source-path': src_path,
|
||||
'destination-path': dest_path,
|
||||
'no-snap': 'true'})
|
||||
result = self._invoke_successfully(clone_start, None)
|
||||
clone_id_el = result.get_child_by_name('clone-id')
|
||||
cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
|
||||
vol_uuid = cl_id_info.get_child_content('volume-uuid')
|
||||
clone_id = cl_id_info.get_child_content('clone-op-id')
|
||||
return (clone_id, vol_uuid)
|
||||
|
||||
def _wait_for_clone_finish(self, clone_op_id, vol_uuid):
|
||||
"""Waits till a clone operation is complete or errored out."""
|
||||
clone_ls_st = NaElement('clone-list-status')
|
||||
clone_id = NaElement('clone-id')
|
||||
clone_ls_st.add_child_elem(clone_id)
|
||||
clone_id.add_node_with_children('clone-id-info',
|
||||
**{'clone-op-id': clone_op_id,
|
||||
'volume-uuid': vol_uuid})
|
||||
task_running = True
|
||||
while task_running:
|
||||
result = self._invoke_successfully(clone_ls_st, None)
|
||||
status = result.get_child_by_name('status')
|
||||
ops_info = status.get_children()
|
||||
if ops_info:
|
||||
state = ops_info[0].get_child_content('clone-state')
|
||||
if state == 'completed':
|
||||
task_running = False
|
||||
elif state == 'failed':
|
||||
code = ops_info[0].get_child_content('error')
|
||||
reason = ops_info[0].get_child_content('reason')
|
||||
raise NaApiError(code, reason)
|
||||
else:
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise NaApiError(
|
||||
'UnknownCloneId',
|
||||
'No clone operation for clone id %s found on the filer'
|
||||
% (clone_id))
|
||||
|
||||
def _clear_clone(self, clone_id):
|
||||
"""Clear the clone information.
|
||||
|
||||
Invoke this in case of failed clone.
|
||||
"""
|
||||
clone_clear = NaElement.create_node_with_children(
|
||||
'clone-clear',
|
||||
**{'clone-id': clone_id})
|
||||
retry = 3
|
||||
while retry:
|
||||
try:
|
||||
self._invoke_successfully(clone_clear, None)
|
||||
break
|
||||
except Exception as e:
|
||||
# Filer might be rebooting
|
||||
time.sleep(5)
|
||||
retry = retry - 1
|
||||
|
||||
def _update_volume_status(self):
|
||||
"""Retrieve status info from volume group."""
|
||||
super(NetAppDirect7modeNfsDriver, self)._update_volume_status()
|
||||
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
self._stats["volume_backend_name"] = (backend_name or
|
||||
'NetApp_NFS_7mode_direct')
|
||||
self._stats["vendor_name"] = 'NetApp'
|
||||
self._stats["driver_version"] = '1.0'
|
@ -1,33 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2011 Nexenta Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
:mod:`nexenta` -- Package contains Nexenta-specific modules
|
||||
=====================================================================
|
||||
|
||||
.. automodule:: nexenta
|
||||
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
|
||||
"""
|
||||
|
||||
|
||||
class NexentaException(Exception):
|
||||
MESSAGE = _('Nexenta SA returned the error')
|
||||
|
||||
def __init__(self, error=None):
|
||||
super(NexentaException, self).__init__(self.message, error)
|
||||
|
||||
def __str__(self):
|
||||
return '%s: %s' % self.args
|
@ -1,84 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2011 Nexenta Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
|
||||
=====================================================================
|
||||
|
||||
.. automodule:: nexenta.jsonrpc
|
||||
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
|
||||
"""
|
||||
|
||||
import urllib2
|
||||
|
||||
from manila.openstack.common import jsonutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume.drivers import nexenta
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NexentaJSONException(nexenta.NexentaException):
|
||||
pass
|
||||
|
||||
|
||||
class NexentaJSONProxy(object):
|
||||
def __init__(self, url, user, password, auto=False, obj=None, method=None):
|
||||
self.url = url
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.auto = auto
|
||||
self.obj = obj
|
||||
self.method = method
|
||||
|
||||
def __getattr__(self, name):
|
||||
if not self.obj:
|
||||
obj, method = name, None
|
||||
elif not self.method:
|
||||
obj, method = self.obj, name
|
||||
else:
|
||||
obj, method = '%s.%s' % (self.obj, self.method), name
|
||||
return NexentaJSONProxy(self.url, self.user, self.password, self.auto,
|
||||
obj, method)
|
||||
|
||||
def __call__(self, *args):
|
||||
data = jsonutils.dumps({'object': self.obj,
|
||||
'method': self.method,
|
||||
'params': args})
|
||||
auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1]
|
||||
headers = {'Content-Type': 'application/json',
|
||||
'Authorization': 'Basic %s' % (auth,)}
|
||||
LOG.debug(_('Sending JSON data: %s'), data)
|
||||
request = urllib2.Request(self.url, data, headers)
|
||||
response_obj = urllib2.urlopen(request)
|
||||
if response_obj.info().status == 'EOF in headers':
|
||||
if self.auto and self.url.startswith('http://'):
|
||||
LOG.info(_('Auto switching to HTTPS connection to %s'),
|
||||
self.url)
|
||||
self.url = 'https' + self.url[4:]
|
||||
request = urllib2.Request(self.url, data, headers)
|
||||
response_obj = urllib2.urlopen(request)
|
||||
else:
|
||||
LOG.error(_('No headers in server response'))
|
||||
raise NexentaJSONException(_('Bad response from server'))
|
||||
|
||||
response_data = response_obj.read()
|
||||
LOG.debug(_('Got response: %s'), response_data)
|
||||
response = jsonutils.loads(response_data)
|
||||
if response.get('error') is not None:
|
||||
raise NexentaJSONException(response['error'].get('message', ''))
|
||||
else:
|
||||
return response.get('result')
|
@ -1,353 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2011 Nexenta Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
|
||||
=====================================================================
|
||||
|
||||
.. automodule:: nexenta.volume
|
||||
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
|
||||
"""
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume import driver
|
||||
from manila.volume.drivers import nexenta
|
||||
from manila.volume.drivers.nexenta import jsonrpc
|
||||
|
||||
VERSION = '1.0'
|
||||
LOG = logging.getLogger(__name__)
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
nexenta_opts = [
|
||||
cfg.StrOpt('nexenta_host',
|
||||
default='',
|
||||
help='IP address of Nexenta SA'),
|
||||
cfg.IntOpt('nexenta_rest_port',
|
||||
default=2000,
|
||||
help='HTTP port to connect to Nexenta REST API server'),
|
||||
cfg.StrOpt('nexenta_rest_protocol',
|
||||
default='auto',
|
||||
help='Use http or https for REST connection (default auto)'),
|
||||
cfg.StrOpt('nexenta_user',
|
||||
default='admin',
|
||||
help='User name to connect to Nexenta SA'),
|
||||
cfg.StrOpt('nexenta_password',
|
||||
default='nexenta',
|
||||
help='Password to connect to Nexenta SA',
|
||||
secret=True),
|
||||
cfg.IntOpt('nexenta_iscsi_target_portal_port',
|
||||
default=3260,
|
||||
help='Nexenta target portal port'),
|
||||
cfg.StrOpt('nexenta_volume',
|
||||
default='manila',
|
||||
help='pool on SA that will hold all volumes'),
|
||||
cfg.StrOpt('nexenta_target_prefix',
|
||||
default='iqn.1986-03.com.sun:02:manila-',
|
||||
help='IQN prefix for iSCSI targets'),
|
||||
cfg.StrOpt('nexenta_target_group_prefix',
|
||||
default='manila/',
|
||||
help='prefix for iSCSI target groups on SA'),
|
||||
cfg.StrOpt('nexenta_blocksize',
|
||||
default='',
|
||||
help='block size for volumes (blank=default,8KB)'),
|
||||
cfg.BoolOpt('nexenta_sparse',
|
||||
default=False,
|
||||
help='flag to create sparse volumes'),
|
||||
]
|
||||
FLAGS.register_opts(nexenta_opts)
|
||||
|
||||
|
||||
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
|
||||
"""Executes volume driver commands on Nexenta Appliance."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NexentaDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def do_setup(self, context):
|
||||
protocol = FLAGS.nexenta_rest_protocol
|
||||
auto = protocol == 'auto'
|
||||
if auto:
|
||||
protocol = 'http'
|
||||
self.nms = jsonrpc.NexentaJSONProxy(
|
||||
'%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
|
||||
FLAGS.nexenta_rest_port),
|
||||
FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Verify that the volume for our zvols exists.
|
||||
|
||||
:raise: :py:exc:`LookupError`
|
||||
"""
|
||||
if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
|
||||
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
|
||||
FLAGS.nexenta_volume)
|
||||
|
||||
@staticmethod
|
||||
def _get_zvol_name(volume_name):
|
||||
"""Return zvol name that corresponds given volume name."""
|
||||
return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
|
||||
|
||||
@staticmethod
|
||||
def _get_target_name(volume_name):
|
||||
"""Return iSCSI target name to access volume."""
|
||||
return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
|
||||
|
||||
@staticmethod
|
||||
def _get_target_group_name(volume_name):
|
||||
"""Return Nexenta iSCSI target group name for volume."""
|
||||
return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create a zvol on appliance.
|
||||
|
||||
:param volume: volume reference
|
||||
"""
|
||||
self.nms.zvol.create(
|
||||
self._get_zvol_name(volume['name']),
|
||||
'%sG' % (volume['size'],),
|
||||
FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Destroy a zvol on appliance.
|
||||
|
||||
:param volume: volume reference
|
||||
"""
|
||||
try:
|
||||
self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
|
||||
except nexenta.NexentaException as exc:
|
||||
if "zvol has children" in exc.args[1]:
|
||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||
else:
|
||||
raise
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Create snapshot of existing zvol on appliance.
|
||||
|
||||
:param snapshot: shapshot reference
|
||||
"""
|
||||
self.nms.zvol.create_snapshot(
|
||||
self._get_zvol_name(snapshot['volume_name']),
|
||||
snapshot['name'], '')
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Create new volume from other's snapshot on appliance.
|
||||
|
||||
:param volume: reference of volume to be created
|
||||
:param snapshot: reference of source snapshot
|
||||
"""
|
||||
self.nms.zvol.clone(
|
||||
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
|
||||
snapshot['name']),
|
||||
self._get_zvol_name(volume['name']))
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Delete volume's snapshot on appliance.
|
||||
|
||||
:param snapshot: shapshot reference
|
||||
"""
|
||||
try:
|
||||
self.nms.snapshot.destroy(
|
||||
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
|
||||
snapshot['name']),
|
||||
'')
|
||||
except nexenta.NexentaException as exc:
|
||||
if "snapshot has dependent clones" in exc.args[1]:
|
||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||
else:
|
||||
raise
|
||||
|
||||
def local_path(self, volume):
|
||||
"""Return local path to existing local volume.
|
||||
|
||||
We never have local volumes, so it raises NotImplementedError.
|
||||
|
||||
:raise: :py:exc:`NotImplementedError`
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def _do_export(self, _ctx, volume, ensure=False):
|
||||
"""Do all steps to get zvol exported as LUN 0 at separate target.
|
||||
|
||||
:param volume: reference of volume to be exported
|
||||
:param ensure: if True, ignore errors caused by already existing
|
||||
resources
|
||||
:return: iscsiadm-formatted provider location string
|
||||
"""
|
||||
zvol_name = self._get_zvol_name(volume['name'])
|
||||
target_name = self._get_target_name(volume['name'])
|
||||
target_group_name = self._get_target_group_name(volume['name'])
|
||||
|
||||
try:
|
||||
self.nms.iscsitarget.create_target({'target_name': target_name})
|
||||
except nexenta.NexentaException as exc:
|
||||
if not ensure or 'already configured' not in exc.args[1]:
|
||||
raise
|
||||
else:
|
||||
LOG.info(_('Ignored target creation error "%s"'
|
||||
' while ensuring export'), exc)
|
||||
try:
|
||||
self.nms.stmf.create_targetgroup(target_group_name)
|
||||
except nexenta.NexentaException as exc:
|
||||
if not ensure or 'already exists' not in exc.args[1]:
|
||||
raise
|
||||
else:
|
||||
LOG.info(_('Ignored target group creation error "%s"'
|
||||
' while ensuring export'), exc)
|
||||
try:
|
||||
self.nms.stmf.add_targetgroup_member(target_group_name,
|
||||
target_name)
|
||||
except nexenta.NexentaException as exc:
|
||||
if not ensure or 'already exists' not in exc.args[1]:
|
||||
raise
|
||||
else:
|
||||
LOG.info(_('Ignored target group member addition error "%s"'
|
||||
' while ensuring export'), exc)
|
||||
try:
|
||||
self.nms.scsidisk.create_lu(zvol_name, {})
|
||||
except nexenta.NexentaException as exc:
|
||||
if not ensure or 'in use' not in exc.args[1]:
|
||||
raise
|
||||
else:
|
||||
LOG.info(_('Ignored LU creation error "%s"'
|
||||
' while ensuring export'), exc)
|
||||
try:
|
||||
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
|
||||
'target_group': target_group_name,
|
||||
'lun': '0'})
|
||||
except nexenta.NexentaException as exc:
|
||||
if not ensure or 'view entry exists' not in exc.args[1]:
|
||||
raise
|
||||
else:
|
||||
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
|
||||
' while ensuring export'), exc)
|
||||
return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
|
||||
FLAGS.nexenta_iscsi_target_portal_port,
|
||||
target_name)

    def create_export(self, _ctx, volume):
        """Create new export for zvol.

        :param volume: reference of volume to be exported
        :return: iscsiadm-formatted provider location string
        """
        loc = self._do_export(_ctx, volume, ensure=False)
        return {'provider_location': loc}

    def ensure_export(self, _ctx, volume):
        """Recreate parts of export if necessary.

        :param volume: reference of volume to be exported
        """
        self._do_export(_ctx, volume, ensure=True)

    def remove_export(self, _ctx, volume):
        """Destroy all resources created to export zvol.

        :param volume: reference of volume to be unexported
        """
        zvol_name = self._get_zvol_name(volume['name'])
        target_name = self._get_target_name(volume['name'])
        target_group_name = self._get_target_group_name(volume['name'])
        self.nms.scsidisk.delete_lu(zvol_name)

        try:
            self.nms.stmf.destroy_targetgroup(target_group_name)
        except nexenta.NexentaException as exc:
            # We assume that the target group is already gone
            LOG.warn(_('Got error trying to destroy target group'
                       ' %(target_group)s, assuming it is '
                       'already gone: %(exc)s'),
                     {'target_group': target_group_name, 'exc': exc})
        try:
            self.nms.iscsitarget.delete_target(target_name)
        except nexenta.NexentaException as exc:
            # We assume that the target is gone as well
            LOG.warn(_('Got error trying to delete target %(target)s,'
                       ' assuming it is already gone: %(exc)s'),
                     {'target': target_name, 'exc': exc})

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        raise NotImplementedError()

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        raise NotImplementedError()

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        raise NotImplementedError()

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, update the stats first.
        """
        if refresh:
            self._update_volume_status()

        return self._stats

    def _update_volume_status(self):
        """Retrieve status info for the Nexenta device."""

        # NOTE(jdg): Aimon Bustardo was kind enough to point out the
        # info he had regarding Nexenta capabilities; ideally it would
        # be great if somebody from Nexenta looked this over at some point
        KB = 1024
        MB = KB ** 2

        LOG.debug(_("Updating volume status"))
        data = {}
        backend_name = self.__class__.__name__
        if self.configuration:
            backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or self.__class__.__name__
        data["vendor_name"] = 'Nexenta'
        data["driver_version"] = VERSION
        data["storage_protocol"] = 'iSCSI'

        stats = self.nms.volume.get_child_props(FLAGS.nexenta_volume,
                                                'health|size|used|available')
        # The last character of the reported size strings is the unit.
        total_unit = stats['size'][-1]
        total_amount = float(stats['size'][:-1])
        free_unit = stats['available'][-1]
        free_amount = float(stats['available'][:-1])

        if total_unit == "T":
            total_amount = total_amount * KB
        elif total_unit == "M":
            total_amount = total_amount / KB
        elif total_unit == "B":
            total_amount = total_amount / MB

        if free_unit == "T":
            free_amount = free_amount * KB
        elif free_unit == "M":
            free_amount = free_amount / KB
        elif free_unit == "B":
            free_amount = free_amount / MB

        data['total_capacity_gb'] = total_amount
        data['free_capacity_gb'] = free_amount

        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        self._stats = data
@@ -1,357 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import errno
import hashlib
import os

from oslo.config import cfg

from manila import exception
from manila.image import image_utils
from manila.openstack.common import log as logging
from manila.volume import driver

LOG = logging.getLogger(__name__)

volume_opts = [
    cfg.StrOpt('nfs_shares_config',
               default='/etc/manila/nfs_shares',
               help='File with the list of available nfs shares'),
    cfg.StrOpt('nfs_mount_point_base',
               default='$state_path/mnt',
               help='Base dir containing mount points for nfs shares'),
    cfg.StrOpt('nfs_disk_util',
               default='df',
               help='Use du or df for free space calculation'),
    cfg.BoolOpt('nfs_sparsed_volumes',
                default=True,
                help=('Create volumes as sparsed files which take no space. '
                      'If set to False, a volume is created as a regular '
                      'file. In that case volume creation takes a lot of '
                      'time.')),
    cfg.StrOpt('nfs_mount_options',
               default=None,
               help='Mount options passed to the nfs client. See the nfs '
                    'man page for details'),
]

VERSION = '1.0'


class RemoteFsDriver(driver.VolumeDriver):
    """Common base for drivers that work like NFS."""

    def check_for_setup_error(self):
        """Just to override parent behavior."""
        pass

    def create_volume(self, volume):
        raise NotImplementedError()

    def delete_volume(self, volume):
        raise NotImplementedError()

    def delete_snapshot(self, snapshot):
        """Do nothing for this driver, but allow the manager to handle
        deletion of a snapshot in error state."""
        pass

    def ensure_export(self, ctx, volume):
        raise NotImplementedError()

    def _create_sparsed_file(self, path, size):
        """Creates a file with 0 disk usage."""
        self._execute('truncate', '-s', '%sG' % size,
                      path, run_as_root=True)

    def _create_regular_file(self, path, size):
        """Creates a regular file of given size.

        Takes a lot of time for large files."""
        KB = 1024
        MB = KB * 1024
        GB = MB * 1024

        block_size_mb = 1
        block_count = size * GB / (block_size_mb * MB)

        self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
                      'bs=%dM' % block_size_mb,
                      'count=%d' % block_count,
                      run_as_root=True)
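
    # Worked example (illustrative, not from the original source): for a
    # 2 GB volume with the default 1 MB block size above, block_count is
    # 2 * 1024 ** 3 / (1 * 1024 ** 2) = 2048, so the invocation becomes
    # roughly: dd if=/dev/zero of=<path> bs=1M count=2048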

    def _set_rw_permissions_for_all(self, path):
        """Sets 666 permissions for the path."""
        self._execute('chmod', 'ugo+rw', path, run_as_root=True)

    def local_path(self, volume):
        """Get volume path (mounted locally fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            volume['name'])

    def _get_hash_str(self, base_str):
        """Returns a string that represents the hash of base_str
        (in hex format)."""
        return hashlib.md5(base_str).hexdigest()

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume))

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        image_utils.upload_volume(context,
                                  image_service,
                                  image_meta,
                                  self.local_path(volume))


class NfsDriver(RemoteFsDriver):
    """NFS based manila driver.

    Creates a file on an NFS share to use as a block device on the
    hypervisor."""

    def __init__(self, *args, **kwargs):
        super(NfsDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(volume_opts)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(NfsDriver, self).do_setup(context)

        config = self.configuration.nfs_shares_config
        if not config:
            msg = (_("There's no NFS config file configured (%s)") %
                   'nfs_shares_config')
            LOG.warn(msg)
            raise exception.NfsException(msg)
        if not os.path.exists(config):
            msg = _("NFS config file at %(config)s doesn't exist") % locals()
            LOG.warn(msg)
            raise exception.NfsException(msg)

        try:
            self._execute('mount.nfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.NfsException('mount.nfs is not installed')
            else:
                raise

    def create_cloned_volume(self, volume, src_vref):
        raise NotImplementedError()

    def create_volume(self, volume):
        """Creates a volume."""

        self._ensure_shares_mounted()

        volume['provider_location'] = self._find_share(volume['size'])

        LOG.info(_('casted to %s') % volume['provider_location'])

        self._do_create_volume(volume)

        return {'provider_location': volume['provider_location']}

    def delete_volume(self, volume):
        """Deletes a logical volume."""

        if not volume['provider_location']:
            LOG.warn(_('Volume %s does not have provider_location specified, '
                       'skipping'), volume['name'])
            return

        self._ensure_share_mounted(volume['provider_location'])

        mounted_path = self.local_path(volume)

        self._execute('rm', '-f', mounted_path, run_as_root=True)

    def ensure_export(self, ctx, volume):
        """Synchronously recreates an export for a logical volume."""
        self._ensure_share_mounted(volume['provider_location'])

    def create_export(self, ctx, volume):
        """Exports the volume. Can optionally return a dictionary of changes
        to the volume object to be persisted."""
        pass

    def remove_export(self, ctx, volume):
        """Removes an export for a logical volume."""
        pass

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        data = {'export': volume['provider_location'],
                'name': volume['name']}
        return {
            'driver_volume_type': 'nfs',
            'data': data
        }

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        pass

    def _do_create_volume(self, volume):
        """Create a volume on the given nfs_share.

        :param volume: volume reference
        """
        volume_path = self.local_path(volume)
        volume_size = volume['size']

        if self.configuration.nfs_sparsed_volumes:
            self._create_sparsed_file(volume_path, volume_size)
        else:
            self._create_regular_file(volume_path, volume_size)

        self._set_rw_permissions_for_all(volume_path)

    def _ensure_shares_mounted(self):
        """Look for NFS shares in the flags and try to mount them locally."""
        self._mounted_shares = []

        for share in self._load_shares_config():
            try:
                self._ensure_share_mounted(share)
                self._mounted_shares.append(share)
            except Exception as exc:
                LOG.warning(_('Exception during mounting %s') % (exc,))

        LOG.debug('Available shares %s' % str(self._mounted_shares))

    def _load_shares_config(self):
        return [share.strip() for share in
                open(self.configuration.nfs_shares_config)
                if share and not share.startswith('#')]

    def _ensure_share_mounted(self, nfs_share):
        """Mount an NFS share.

        :param nfs_share: string with the share address
        """
        mount_path = self._get_mount_point_for_share(nfs_share)
        self._mount_nfs(nfs_share, mount_path, ensure=True)

    def _find_share(self, volume_size_for):
        """Choose an NFS share among the available ones for the given volume
        size. The current implementation looks for the greatest capacity.

        :param volume_size_for: int size in GB
        """

        if not self._mounted_shares:
            raise exception.NfsNoSharesMounted()

        greatest_size = 0
        greatest_share = None

        for nfs_share in self._mounted_shares:
            capacity = self._get_available_capacity(nfs_share)[0]
            if capacity > greatest_size:
                greatest_share = nfs_share
                greatest_size = capacity

        if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
            raise exception.NfsNoSuitableShareFound(
                volume_size=volume_size_for)
        return greatest_share

    def _get_mount_point_for_share(self, nfs_share):
        """Return the local mount point for a share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self.configuration.nfs_mount_point_base,
                            self._get_hash_str(nfs_share))

    def _get_available_capacity(self, nfs_share):
        """Calculate available space on the NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        mount_point = self._get_mount_point_for_share(nfs_share)

        out, _ = self._execute('df', '-P', '-B', '1', mount_point,
                               run_as_root=True)
        out = out.splitlines()[1]

        available = 0

        size = int(out.split()[1])
        if self.configuration.nfs_disk_util == 'df':
            available = int(out.split()[3])
        else:
            out, _ = self._execute('du', '-sb', '--apparent-size',
                                   '--exclude', '*snapshot*', mount_point,
                                   run_as_root=True)
            used = int(out.split()[0])
            available = size - used

        return available, size
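
    # Illustrative sketch (values are made up): 'df -P -B 1' prints a
    # header followed by one data line, e.g.
    #
    #   Filesystem      1-blocks       Used  Available Capacity Mounted on
    #   host:/var/nfs 1073741824  104857600  968884224      10% /mnt/abc
    #
    # out.split()[1] is the total size in bytes and out.split()[3] is the
    # available byte count used above.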

    def _mount_nfs(self, nfs_share, mount_path, ensure=False):
        """Mount an NFS share to the mount path."""
        self._execute('mkdir', '-p', mount_path)

        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if self.configuration.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', self.configuration.nfs_mount_options])
        nfs_cmd.extend([nfs_share, mount_path])

        try:
            self._execute(*nfs_cmd, run_as_root=True)
        except exception.ProcessExecutionError as exc:
            if ensure and 'already mounted' in exc.stderr:
                LOG.warn(_("%s is already mounted"), nfs_share)
            else:
                raise

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, update the stats first.
        """
        if refresh or not self._stats:
            self._update_volume_status()

        return self._stats

    def _update_volume_status(self):
        """Retrieve status info from the volume group."""

        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_NFS'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = VERSION
        data["storage_protocol"] = 'nfs'

        self._ensure_shares_mounted()

        global_capacity = 0
        global_free = 0
        for nfs_share in self._mounted_shares:
            free, capacity = self._get_available_capacity(nfs_share)
            global_capacity += capacity
            global_free += free

        data['total_capacity_gb'] = global_capacity / 1024.0 ** 3
        data['free_capacity_gb'] = global_free / 1024.0 ** 3
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        self._stats = data
@@ -1,306 +0,0 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RADOS Block Device Driver
"""

import json
import os
import tempfile
import urllib

from oslo.config import cfg

from manila import exception
from manila.image import image_utils
from manila.openstack.common import log as logging
from manila import utils
from manila.volume import driver

LOG = logging.getLogger(__name__)

rbd_opts = [
    cfg.StrOpt('rbd_pool',
               default='rbd',
               help='the RADOS pool in which rbd volumes are stored'),
    cfg.StrOpt('rbd_user',
               default=None,
               help='the RADOS client name for accessing rbd volumes'),
    cfg.StrOpt('rbd_secret_uuid',
               default=None,
               help='the libvirt uuid of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('volume_tmp_dir',
               default=None,
               help='where to store temporary image files if the volume '
                    'driver does not write them directly to the volume'), ]

VERSION = '1.0'


class RBDDriver(driver.VolumeDriver):
    """Implements RADOS block device (RBD) volume commands."""

    def __init__(self, *args, **kwargs):
        super(RBDDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(rbd_opts)
        self._stats = {}

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        (stdout, stderr) = self._execute('rados', 'lspools')
        pools = stdout.split("\n")
        if self.configuration.rbd_pool not in pools:
            exception_message = (_("rbd has no pool %s") %
                                 self.configuration.rbd_pool)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _update_volume_stats(self):
        stats = {'vendor_name': 'Open Source',
                 'driver_version': VERSION,
                 'storage_protocol': 'ceph',
                 'total_capacity_gb': 'unknown',
                 'free_capacity_gb': 'unknown',
                 'reserved_percentage': 0}
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or 'RBD'

        try:
            stdout, _err = self._execute('rados', 'df', '--format', 'json')
            new_stats = json.loads(stdout)
            total = int(new_stats['total_space']) / 1024 ** 2
            free = int(new_stats['total_avail']) / 1024 ** 2
            stats['total_capacity_gb'] = total
            stats['free_capacity_gb'] = free
        except exception.ProcessExecutionError:
            # just log and return unknown capacities
            LOG.exception(_('error refreshing volume stats'))
        self._stats = stats

    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        If 'refresh' is True, run the update first."""
        if refresh:
            self._update_volume_stats()
        return self._stats

    def _supports_layering(self):
        stdout, _ = self._execute('rbd', '--help')
        return 'clone' in stdout

    def create_cloned_volume(self, volume, src_vref):
        """Clone a logical volume."""
        self._try_execute('rbd', 'cp',
                          '--pool', self.configuration.rbd_pool,
                          '--image', src_vref['name'],
                          '--dest-pool', self.configuration.rbd_pool,
                          '--dest', volume['name'])

    def create_volume(self, volume):
        """Creates a logical volume."""
        if int(volume['size']) == 0:
            size = 100
        else:
            size = int(volume['size']) * 1024
        args = ['rbd', 'create',
                '--pool', self.configuration.rbd_pool,
                '--size', size,
                volume['name']]
        if self._supports_layering():
            args += ['--new-format']
        self._try_execute(*args)

    def _clone(self, volume, src_pool, src_image, src_snap):
        self._try_execute('rbd', 'clone',
                          '--pool', src_pool,
                          '--image', src_image,
                          '--snap', src_snap,
                          '--dest-pool', self.configuration.rbd_pool,
                          '--dest', volume['name'])

    def _resize(self, volume):
        size = int(volume['size']) * 1024
        self._try_execute('rbd', 'resize',
                          '--pool', self.configuration.rbd_pool,
                          '--image', volume['name'],
                          '--size', size)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._clone(volume, self.configuration.rbd_pool,
                    snapshot['volume_name'], snapshot['name'])
        if int(volume['size']):
            self._resize(volume)

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        stdout, _ = self._execute('rbd', 'snap', 'ls',
                                  '--pool', self.configuration.rbd_pool,
                                  volume['name'])
        if stdout.count('\n') > 1:
            raise exception.VolumeIsBusy(volume_name=volume['name'])
        self._try_execute('rbd', 'rm',
                          '--pool', self.configuration.rbd_pool,
                          volume['name'])

    def create_snapshot(self, snapshot):
        """Creates an rbd snapshot."""
        self._try_execute('rbd', 'snap', 'create',
                          '--pool', self.configuration.rbd_pool,
                          '--snap', snapshot['name'],
                          snapshot['volume_name'])
        if self._supports_layering():
            self._try_execute('rbd', 'snap', 'protect',
                              '--pool', self.configuration.rbd_pool,
                              '--snap', snapshot['name'],
                              snapshot['volume_name'])

    def delete_snapshot(self, snapshot):
        """Deletes an rbd snapshot."""
        if self._supports_layering():
            try:
                self._try_execute('rbd', 'snap', 'unprotect',
                                  '--pool', self.configuration.rbd_pool,
                                  '--snap', snapshot['name'],
                                  snapshot['volume_name'])
            except exception.ProcessExecutionError:
                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
        self._try_execute('rbd', 'snap', 'rm',
                          '--pool', self.configuration.rbd_pool,
                          '--snap', snapshot['name'],
                          snapshot['volume_name'])

    def local_path(self, volume):
        """Returns the path of the rbd volume."""
        # This is the same as the remote path
        # since qemu accesses it directly.
        return "rbd:%s/%s" % (self.configuration.rbd_pool, volume['name'])

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def initialize_connection(self, volume, connector):
        return {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % (self.configuration.rbd_pool,
                                   volume['name']),
                'auth_enabled': (self.configuration.rbd_secret_uuid
                                 is not None),
                'auth_username': self.configuration.rbd_user,
                'secret_type': 'ceph',
                'secret_uuid': self.configuration.rbd_secret_uuid, }
        }

    def terminate_connection(self, volume, connector, **kwargs):
        pass

    def _parse_location(self, location):
        prefix = 'rbd://'
        if not location.startswith(prefix):
            reason = _('Not stored in rbd')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        pieces = map(urllib.unquote, location[len(prefix):].split('/'))
        if any(map(lambda p: p == '', pieces)):
            reason = _('Blank components')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        if len(pieces) != 4:
            reason = _('Not an rbd snapshot')
            raise exception.ImageUnacceptable(image_id=location, reason=reason)
        return pieces
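
    # Illustrative example (made-up identifiers): an image location such as
    #
    #   rbd://d7aeb72a-6f0b-4d3f-ae3e-4ea45a25ae3c/images/<image-id>/snap
    #
    # parses into pieces = [fsid, pool, image, snapshot]; any other shape
    # is rejected above as not being a cloneable rbd snapshot URI.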

    def _get_fsid(self):
        stdout, _ = self._execute('ceph', 'fsid')
        return stdout.rstrip('\n')

    def _is_cloneable(self, image_location):
        try:
            fsid, pool, image, snapshot = self._parse_location(image_location)
        except exception.ImageUnacceptable:
            return False

        if self._get_fsid() != fsid:
            reason = _('%s is in a different ceph cluster') % image_location
            LOG.debug(reason)
            return False

        # check that we can read the image
        try:
            self._execute('rbd', 'info',
                          '--pool', pool,
                          '--image', image,
                          '--snap', snapshot)
        except exception.ProcessExecutionError:
            LOG.debug(_('Unable to read image %s') % image_location)
            return False

        return True

    def clone_image(self, volume, image_location):
        if image_location is None or not self._is_cloneable(image_location):
            return False
        _, pool, image, snapshot = self._parse_location(image_location)
        self._clone(volume, pool, image, snapshot)
        self._resize(volume)
        return True

    def _ensure_tmp_exists(self):
        tmp_dir = self.configuration.volume_tmp_dir
        if tmp_dir and not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        # TODO(jdurgin): replace with librbd
        # this is a temporary hack, since rewriting this driver
        # to use librbd would take too long
        self._ensure_tmp_exists()
        tmp_dir = self.configuration.volume_tmp_dir

        with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
            image_utils.fetch_to_raw(context, image_service, image_id,
                                     tmp.name)
            # import creates the image, so we must remove it first
            self._try_execute('rbd', 'rm',
                              '--pool', self.configuration.rbd_pool,
                              volume['name'])

            args = ['rbd', 'import',
                    '--pool', self.configuration.rbd_pool,
                    tmp.name, volume['name']]
            if self._supports_layering():
                args += ['--new-format']
            self._try_execute(*args)
        self._resize(volume)

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        self._ensure_tmp_exists()

        tmp_dir = self.configuration.volume_tmp_dir or '/tmp'
        tmp_file = os.path.join(tmp_dir,
                                volume['name'] + '-' + image_meta['id'])
        with utils.remove_path_on_error(tmp_file):
            self._try_execute('rbd', 'export',
                              '--pool', self.configuration.rbd_pool,
                              volume['name'], tmp_file)
            image_utils.upload_volume(context, image_service,
                                      image_meta, tmp_file)
        os.unlink(tmp_file)
@@ -1,27 +0,0 @@
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`manila.volume.san` -- Cinder San Drivers
=====================================================

.. automodule:: manila.volume.san
   :platform: Unix
   :synopsis: Module containing all the Cinder San drivers.
"""

# Adding imports for backwards compatibility in loading volume_driver.
from hp_lefthand import HpSanISCSIDriver
from san import SanISCSIDriver
from solaris import SolarisISCSIDriver
@@ -1,742 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for the HP 3PAR storage array.
The 3PAR drivers require 3.1.2 firmware on the 3PAR array.

You will need to install the python hp3parclient:
sudo pip install hp3parclient

The drivers use both the REST service and the SSH
command line to operate correctly. Since the
SSH credentials and the REST credentials can be different,
we need to have settings for both.

The drivers require the san_ip, san_login and
san_password settings for SSH connections into the 3PAR
array. They also require hp3par_api_url, hp3par_username
and hp3par_password for credentials to talk to the REST
service on the 3PAR array.
"""

import base64
import json
import paramiko
import pprint
from random import randint
import time
import uuid

from eventlet import greenthread
from hp3parclient import exceptions as hpexceptions
from oslo.config import cfg

from manila import context
from manila import exception
from manila.openstack.common import log as logging
from manila import utils
from manila.volume import volume_types

LOG = logging.getLogger(__name__)

hp3par_opts = [
    cfg.StrOpt('hp3par_api_url',
               default='',
               help="3PAR WSAPI Server Url like "
                    "https://<3par ip>:8080/api/v1"),
    cfg.StrOpt('hp3par_username',
               default='',
               help="3PAR Super user username"),
    cfg.StrOpt('hp3par_password',
               default='',
               help="3PAR Super user password",
               secret=True),
    cfg.StrOpt('hp3par_domain',
               default="OpenStack",
               help="The 3par domain name to use"),
    cfg.StrOpt('hp3par_cpg',
               default="OpenStack",
               help="The CPG to use for volume creation"),
    cfg.StrOpt('hp3par_cpg_snap',
               default="",
               help="The CPG to use for snapshots of volumes. "
                    "If empty hp3par_cpg will be used"),
    cfg.StrOpt('hp3par_snapshot_retention',
               default="",
               help="The time in hours to retain a snapshot. "
                    "You can't delete it before this expires."),
    cfg.StrOpt('hp3par_snapshot_expiration',
               default="",
               help="The time in hours when a snapshot expires "
                    "and is deleted. This must be larger than the "
                    "retention time."),
    cfg.BoolOpt('hp3par_debug',
                default=False,
                help="Enable HTTP debugging to 3PAR")
]


class HP3PARCommon(object):

    stats = {}

    # Valid values for volume type extra specs
    # The first value in the list is the default value
    valid_prov_values = ['thin', 'full']
    valid_persona_values = ['1 - Generic',
                            '2 - Generic-ALUA',
                            '6 - Generic-legacy',
                            '7 - HPUX-legacy',
                            '8 - AIX-legacy',
                            '9 - EGENERA',
                            '10 - ONTAP-legacy',
                            '11 - VMware']

    def __init__(self, config):
        self.sshpool = None
        self.config = config

    def check_flags(self, options, required_flags):
        for flag in required_flags:
            if not getattr(options, flag, None):
                raise exception.InvalidInput(reason=_('%s is not set') % flag)

    def _get_3par_vol_name(self, volume_id):
        """Converts the OpenStack volume id from
        ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
        to
        osv-7P.DD5jLTPWF7tcwnMF80g

        We convert the 128 bits of the uuid into a 24 character long
        base64 encoded string to ensure we don't exceed the maximum
        allowed 31 character name limit on 3PAR.

        We strip the padding '=' and replace + with .
        and / with -
        """
        volume_name = self._encode_name(volume_id)
        return "osv-%s" % volume_name

    def _get_3par_snap_name(self, snapshot_id):
        snapshot_name = self._encode_name(snapshot_id)
        return "oss-%s" % snapshot_name

    def _encode_name(self, name):
        uuid_str = name.replace("-", "")
        vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
        vol_encoded = base64.b64encode(vol_uuid.bytes)

        # 3par doesn't allow +, nor /
        vol_encoded = vol_encoded.replace('+', '.')
        vol_encoded = vol_encoded.replace('/', '-')
        # strip off the == as 3par doesn't like those.
        vol_encoded = vol_encoded.replace('=', '')
        return vol_encoded
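
    # Standalone sketch of the same encoding (standard library only; the
    # uuid is the illustrative one from the docstring above):
    #
    #   import base64, uuid
    #   vol_uuid = uuid.UUID('ecffc30f-98cb-4cf5-85ee-d7309cc17cd2')
    #   encoded = base64.b64encode(vol_uuid.bytes)
    #   encoded = encoded.replace('+', '.').replace('/', '-').rstrip('=')
    #
    # The 16 raw uuid bytes always encode to 22 characters once the two
    # '=' padding characters are stripped, so 'osv-' + encoded stays under
    # the 31 character 3PAR name limit.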

    def _capacity_from_size(self, vol_size):

        # because 3PAR volume sizes are in
        # Mebibytes, Gibibytes, not Megabytes.
        MB = 1000L
        MiB = 1.048576

        if int(vol_size) == 0:
            capacity = MB  # default: 1GB
        else:
            capacity = vol_size * MB

        capacity = int(round(capacity / MiB))
        return capacity
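
    # Worked example (illustrative): for vol_size = 10 (GB), capacity is
    # first 10 * 1000 = 10000, then int(round(10000 / 1.048576)) = 9537,
    # i.e. roughly 10 GB expressed in the MiB units the 3PAR expects.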

    def _cli_run(self, verb, cli_args):
        """Runs a CLI command over SSH, without doing any result parsing."""
        cli_arg_strings = []
        if cli_args:
            for k, v in cli_args.items():
                if k == '':
                    cli_arg_strings.append(" %s" % v)
                else:
                    cli_arg_strings.append(" %s=%s" % (k, v))

        cmd = verb + ''.join(cli_arg_strings)
        LOG.debug("SSH CMD = %s " % cmd)

        (stdout, stderr) = self._run_ssh(cmd, False)

        # we have to strip out the input and exit lines
        tmp = stdout.split("\r\n")
        out = tmp[5:len(tmp) - 2]
        return out

    def _ssh_execute(self, ssh, cmd,
                     check_exit_code=True):
        """We have to do this in order to get CSV output
        from the CLI command. We first have to issue
        a command to tell the CLI that we want the output
        to be formatted in CSV, then we issue the real
        command.
        """
        LOG.debug(_('Running cmd (SSH): %s'), cmd)

        channel = ssh.invoke_shell()
        stdin_stream = channel.makefile('wb')
        stdout_stream = channel.makefile('rb')
        stderr_stream = channel.makefile('rb')

        stdin_stream.write('''setclienv csvtable 1
%s
exit
''' % cmd)

        # stdin.write('process_input would go here')
        # stdin.flush()

        # NOTE(justinsb): This seems suspicious...
        # ...other SSH clients have buffering issues with this approach
        stdout = stdout_stream.read()
        stderr = stderr_stream.read()
        stdin_stream.close()
        stdout_stream.close()
        stderr_stream.close()

        exit_status = channel.recv_exit_status()

        # exit_status == -1 if no exit code was returned
        if exit_status != -1:
            LOG.debug(_('Result was %s') % exit_status)
            if check_exit_code and exit_status != 0:
                raise exception.ProcessExecutionError(exit_code=exit_status,
                                                      stdout=stdout,
                                                      stderr=stderr,
                                                      cmd=cmd)
        channel.close()
        return (stdout, stderr)

    def _run_ssh(self, command, check_exit=True, attempts=1):
        if not self.sshpool:
            self.sshpool = utils.SSHPool(self.config.san_ip,
                                         self.config.san_ssh_port,
                                         self.config.ssh_conn_timeout,
                                         self.config.san_login,
                                         password=self.config.san_password,
                                         privatekey=
                                         self.config.san_private_key,
                                         min_size=
                                         self.config.ssh_min_pool_conn,
                                         max_size=
                                         self.config.ssh_max_pool_conn)
        try:
            total_attempts = attempts
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return self._ssh_execute(ssh, command,
                                                 check_exit_code=check_exit)
                    except Exception as e:
                        LOG.error(e)
                        # back off 0.2 to 5 seconds before retrying
                        greenthread.sleep(randint(20, 500) / 100.0)
                raise paramiko.SSHException(_("SSH Command failed after "
                                              "'%(total_attempts)r' attempts"
                                              ": '%(command)s'") % locals())
        except Exception as e:
            LOG.error(_("Error running ssh command: %s") % command)
            raise e

    def _delete_3par_host(self, hostname):
        self._cli_run('removehost %s' % hostname, None)

    def _create_3par_vlun(self, volume, hostname):
        out = self._cli_run('createvlun %s auto %s' % (volume, hostname), None)
        if out and len(out) > 1:
            if "must be in the same domain" in out[0]:
                err = out[0].strip()
                err = err + " " + out[1].strip()
                raise exception.Invalid3PARDomain(err=err)

    def _safe_hostname(self, hostname):
        """We have to use a safe hostname length
        for 3PAR host names.
        """
        try:
            index = hostname.index('.')
        except ValueError:
            # couldn't find it
            index = len(hostname)

        # we'll just chop this off for now.
        if index > 23:
            index = 23

        return hostname[:index]
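
    # Illustrative behavior (made-up hostnames): 'node1.example.com' maps
    # to 'node1', and a dotless name is truncated to at most 23 characters
    # so the derived 3PAR host name stays within the array's limits.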

    def _get_3par_host(self, hostname):
        out = self._cli_run('showhost -verbose %s' % (hostname), None)
        LOG.debug("OUTPUT = \n%s" % (pprint.pformat(out)))
        host = {'id': None, 'name': None,
                'domain': None,
                'descriptors': {},
                'iSCSIPaths': [],
                'FCPaths': []}

        if out:
            err = out[0]
            if err == 'no hosts listed':
                msg = {'code': 'NON_EXISTENT_HOST',
                       'desc': "HOST '%s' was not found" % hostname}
                raise hpexceptions.HTTPNotFound(msg)

            # start parsing the lines after the header line
            for line in out[1:]:
                if line == '':
                    break
                tmp = line.split(',')
                paths = {}

                LOG.debug("line = %s" % (pprint.pformat(tmp)))
                host['id'] = tmp[0]
                host['name'] = tmp[1]

                portPos = tmp[4]
                LOG.debug("portPos = %s" % (pprint.pformat(portPos)))
                if portPos == '---':
                    portPos = None
                else:
                    port = portPos.split(':')
                    portPos = {'node': int(port[0]), 'slot': int(port[1]),
                               'cardPort': int(port[2])}

                paths['portPos'] = portPos

                # If FC entry
                if tmp[5] == 'n/a':
                    paths['wwn'] = tmp[3]
                    host['FCPaths'].append(paths)
                # else iSCSI entry
                else:
                    paths['name'] = tmp[3]
                    paths['ipAddr'] = tmp[5]
                    host['iSCSIPaths'].append(paths)

            # find the offset to the description stuff
            offset = 0
            for line in out:
                if line[:15] == '---------- Host':
                    break
                else:
                    offset += 1

            info = out[offset + 2]
            tmp = info.split(':')
            host['domain'] = tmp[1]

            info = out[offset + 4]
            tmp = info.split(':')
            host['descriptors']['location'] = tmp[1]

            info = out[offset + 5]
            tmp = info.split(':')
            host['descriptors']['ipAddr'] = tmp[1]

            info = out[offset + 6]
            tmp = info.split(':')
            host['descriptors']['os'] = tmp[1]

            info = out[offset + 7]
            tmp = info.split(':')
            host['descriptors']['model'] = tmp[1]

            info = out[offset + 8]
            tmp = info.split(':')
            host['descriptors']['contact'] = tmp[1]

            info = out[offset + 9]
            tmp = info.split(':')
            host['descriptors']['comment'] = tmp[1]

        return host

    def get_ports(self):
        # First get the active FC ports
        out = self._cli_run('showport', None)

        # strip out header
        # N:S:P,Mode,State,----Node_WWN----,-Port_WWN/HW_Addr-,Type,
        # Protocol,Label,Partner,FailoverState
        out = out[1:len(out) - 2]

        ports = {'FC': [], 'iSCSI': []}
        for line in out:
            tmp = line.split(',')

            if tmp:
                if tmp[1] == 'target' and tmp[2] == 'ready':
                    if tmp[6] == 'FC':
                        ports['FC'].append(tmp[4])

        # now get the active iSCSI ports
        out = self._cli_run('showport -iscsi', None)

        # strip out header
        # N:S:P,State,IPAddr,Netmask,Gateway,
        # TPGT,MTU,Rate,DHCP,iSNS_Addr,iSNS_Port
        out = out[1:len(out) - 2]
        for line in out:
            tmp = line.split(',')

            if tmp:
                if tmp[1] == 'ready':
                    ports['iSCSI'].append(tmp[2])

        LOG.debug("PORTS = %s" % pprint.pformat(ports))
        return ports
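
    # Illustrative 'showport' CSV line (made-up WWNs), matching the header
    # documented above and the indexes used in the loop:
    #
    #   1:0:1,target,ready,2FF70002AC000121,21210002AC000121,host,FC,-,-,-
    #
    # tmp[1] == 'target' and tmp[2] == 'ready' select active target ports,
    # tmp[6] == 'FC' filters by protocol, and tmp[4] (the port WWN) is what
    # gets collected into ports['FC'].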

    def get_volume_stats(self, refresh, client):
        if refresh:
            self._update_volume_stats(client)

        return self.stats

    def _update_volume_stats(self, client):
        # const to convert MiB to GB
        const = 0.0009765625

        # storage_protocol and volume_backend_name are
        # set in the child classes
        stats = {'driver_version': '1.0',
                 'free_capacity_gb': 'unknown',
                 'reserved_percentage': 0,
                 'storage_protocol': None,
                 'total_capacity_gb': 'unknown',
                 'vendor_name': 'Hewlett-Packard',
                 'volume_backend_name': None}

        try:
            cpg = client.getCPG(self.config.hp3par_cpg)
            if 'limitMiB' not in cpg['SDGrowth']:
                total_capacity = 'infinite'
                free_capacity = 'infinite'
            else:
                total_capacity = int(cpg['SDGrowth']['limitMiB'] * const)
                free_capacity = int((cpg['SDGrowth']['limitMiB'] -
                                     cpg['UsrUsage']['usedMiB']) * const)

            stats['total_capacity_gb'] = total_capacity
            stats['free_capacity_gb'] = free_capacity
        except hpexceptions.HTTPNotFound:
            err = (_("CPG (%s) doesn't exist on array")
                   % self.config.hp3par_cpg)
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        self.stats = stats

    def create_vlun(self, volume, host, client):
        """In order to export a volume on a 3PAR box, we have to
        create a VLUN.
        """
        volume_name = self._get_3par_vol_name(volume['id'])
        self._create_3par_vlun(volume_name, host['name'])
        return client.getVLUN(volume_name)

    def delete_vlun(self, volume, connector, client):
        hostname = self._safe_hostname(connector['host'])

        volume_name = self._get_3par_vol_name(volume['id'])
        vlun = client.getVLUN(volume_name)
        client.deleteVLUN(volume_name, vlun['lun'], hostname)
        self._delete_3par_host(hostname)

    def _get_volume_type(self, type_id):
        ctxt = context.get_admin_context()
        return volume_types.get_volume_type(ctxt, type_id)

    def _get_volume_type_value(self, volume_type, key, default=None):
        if volume_type is not None:
            specs = volume_type.get('extra_specs')
            if key in specs:
                return specs[key]
            else:
                return default
        else:
            return default

    def get_persona_type(self, volume):
        default_persona = self.valid_persona_values[0]
        type_id = volume.get('volume_type_id', None)
        volume_type = None
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
        persona_value = self._get_volume_type_value(volume_type, 'persona',
                                                    default_persona)
        if persona_value not in self.valid_persona_values:
            err = (_("Must specify a valid persona %(valid)s, "
                     "value '%(persona)s' is invalid.") %
                   {'valid': self.valid_persona_values,
                    'persona': persona_value})
            raise exception.InvalidInput(reason=err)
        # persona is set by the id, so remove the text and return the id,
        # i.e. for persona '1 - Generic' return 1
        persona_id = persona_value.split(' ')
        return persona_id[0]

    def create_volume(self, volume, client):
        LOG.debug("CREATE VOLUME (%s : %s %s)" %
                  (volume['display_name'], volume['name'],
                   self._get_3par_vol_name(volume['id'])))
        try:
            comments = {'volume_id': volume['id'],
                        'name': volume['name'],
                        'type': 'OpenStack'}

            name = volume.get('display_name', None)
            if name:
                comments['display_name'] = name

            # get the options supported by volume types
            volume_type = None
            type_id = volume.get('volume_type_id', None)
            if type_id is not None:
                volume_type = self._get_volume_type(type_id)

            cpg = self._get_volume_type_value(volume_type, 'cpg',
                                              self.config.hp3par_cpg)

            # if provisioning is not set, use thin
            default_prov = self.valid_prov_values[0]
            prov_value = self._get_volume_type_value(volume_type,
                                                     'provisioning',
                                                     default_prov)
            # check for valid provisioning type
            if prov_value not in self.valid_prov_values:
                err = (_("Must specify a valid provisioning type %(valid)s, "
                         "value '%(prov)s' is invalid.") %
                       {'valid': self.valid_prov_values,
                        'prov': prov_value})
                raise exception.InvalidInput(reason=err)

            ttpv = True
            if prov_value == "full":
                ttpv = False

            # default to hp3par_cpg if hp3par_cpg_snap is not set.
            if self.config.hp3par_cpg_snap == "":
                snap_default = self.config.hp3par_cpg
            else:
                snap_default = self.config.hp3par_cpg_snap
            snap_cpg = self._get_volume_type_value(volume_type,
                                                   'snap_cpg',
                                                   snap_default)

            # check for a valid persona even though we don't use it until
            # attach time; this gives the end user notice that the
            # persona type is invalid at volume creation time
            self.get_persona_type(volume)

            if type_id is not None:
                comments['volume_type_name'] = volume_type.get('name')
                comments['volume_type_id'] = type_id

            extras = {'comment': json.dumps(comments),
                      'snapCPG': snap_cpg,
                      'tpvv': ttpv}

            capacity = self._capacity_from_size(volume['size'])
            volume_name = self._get_3par_vol_name(volume['id'])
            client.createVolume(volume_name, cpg, capacity, extras)

        except hpexceptions.HTTPConflict:
            raise exception.Duplicate(_("Volume (%s) already exists on array")
                                      % volume_name)
        except hpexceptions.HTTPBadRequest as ex:
            LOG.error(str(ex))
            raise exception.Invalid(ex.get_description())
        except exception.InvalidInput as ex:
            LOG.error(str(ex))
            raise ex
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex.get_description())

        metadata = {'3ParName': volume_name, 'CPG': self.config.hp3par_cpg,
                    'snapCPG': extras['snapCPG']}
        return metadata

    def _copy_volume(self, src_name, dest_name):
        self._cli_run('createvvcopy -p %s %s' % (src_name, dest_name), None)

    def _get_volume_state(self, vol_name):
        out = self._cli_run('showvv -state %s' % vol_name, None)
        status = None
        if out:
            # out[0] is the header
            info = out[1].split(',')
            status = info[5]

        return status

    @utils.synchronized('3parclone', external=True)
    def create_cloned_volume(self, volume, src_vref, client):

        try:
            orig_name = self._get_3par_vol_name(volume['source_volid'])
            vol_name = self._get_3par_vol_name(volume['id'])
            # We need to create a new volume first. Otherwise you
            # can't delete the original
            new_vol = self.create_volume(volume, client)

            # make the 3PAR copy the contents.
            # can't delete the original until the copy is done.
            self._copy_volume(orig_name, vol_name)

            # this can take a long time to complete
            done = False
            while not done:
                status = self._get_volume_state(vol_name)
                if status == 'normal':
                    done = True
                elif status == 'copy_target':
                    LOG.debug("3PAR still copying %s => %s"
                              % (orig_name, vol_name))
                else:
                    msg = _("Unexpected state while cloning %s") % status
                    LOG.warn(msg)
                    raise exception.CinderException(msg)

                if not done:
                    # wait 5 seconds between tests
                    time.sleep(5)

            return new_vol
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound:
            raise exception.NotFound()
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex)

        return None

    def delete_volume(self, volume, client):
        try:
            volume_name = self._get_3par_vol_name(volume['id'])
            client.deleteVolume(volume_name)
        except hpexceptions.HTTPNotFound as ex:
            # We'll let this act as if it worked;
            # it helps clean up the manila entries.
            LOG.error(str(ex))
        except hpexceptions.HTTPForbidden as ex:
            LOG.error(str(ex))
            raise exception.NotAuthorized(ex.get_description())
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex.get_description())

    def create_volume_from_snapshot(self, volume, snapshot, client):
        """Creates a volume from a snapshot.

        TODO: support using the size from the user.
        """
        LOG.debug("Create Volume from Snapshot\n%s\n%s" %
                  (pprint.pformat(volume['display_name']),
                   pprint.pformat(snapshot['display_name'])))

        if snapshot['volume_size'] != volume['size']:
            err = ("You cannot change the size of the volume. It must "
                   "be the same as the snapshot.")
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            vol_name = self._get_3par_vol_name(volume['id'])

            extra = {'volume_id': volume['id'],
                     'snapshot_id': snapshot['id']}
            name = snapshot.get('display_name', None)
            if name:
                extra['name'] = name

            description = snapshot.get('display_description', None)
            if description:
                extra['description'] = description

            optional = {'comment': json.dumps(extra),
                        'readOnly': False}

            client.createSnapshot(vol_name, snap_name, optional)
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound:
            raise exception.NotFound()

    def create_snapshot(self, snapshot, client):
        LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            vol_name = self._get_3par_vol_name(snapshot['volume_id'])

            extra = {'volume_name': snapshot['volume_name']}
            vol_id = snapshot.get('volume_id', None)
            if vol_id:
                extra['volume_id'] = vol_id

            try:
                extra['name'] = snapshot['display_name']
            except AttributeError:
                pass

            try:
                extra['description'] = snapshot['display_description']
            except AttributeError:
                pass

            optional = {'comment': json.dumps(extra),
                        'readOnly': True}
            if self.config.hp3par_snapshot_expiration:
                optional['expirationHours'] = (
                    self.config.hp3par_snapshot_expiration)

            if self.config.hp3par_snapshot_retention:
                optional['retentionHours'] = (
                    self.config.hp3par_snapshot_retention)

            client.createSnapshot(snap_name, vol_name, optional)
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound:
            raise exception.NotFound()

    def delete_snapshot(self, snapshot, client):
        LOG.debug("Delete Snapshot\n%s" % pprint.pformat(snapshot))

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            client.deleteVolume(snap_name)
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound as ex:
            LOG.error(str(ex))
@@ -1,259 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for the HP 3PAR storage array. This driver requires 3.1.2
firmware on the 3PAR array.

You will need to install the python hp3parclient:
sudo pip install hp3parclient

Set the following in the manila.conf file to enable the
3PAR Fibre Channel Driver along with the required flags:

volume_driver=manila.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
"""

from hp3parclient import client
from hp3parclient import exceptions as hpexceptions
from oslo.config import cfg

from manila import exception
from manila.openstack.common import log as logging
from manila import utils
import manila.volume.driver
from manila.volume.drivers.san.hp import hp_3par_common as hpcommon
from manila.volume.drivers.san import san

VERSION = 1.0
LOG = logging.getLogger(__name__)


class HP3PARFCDriver(manila.volume.driver.FibreChannelDriver):
    """OpenStack Fibre Channel driver to enable 3PAR storage array.

    Version history:
        1.0 - Initial driver

    """

    def __init__(self, *args, **kwargs):
        super(HP3PARFCDriver, self).__init__(*args, **kwargs)
        self.client = None
        self.common = None
        self.configuration.append_config_values(hpcommon.hp3par_opts)
        self.configuration.append_config_values(san.san_opts)

    def _init_common(self):
        return hpcommon.HP3PARCommon(self.configuration)

    def _check_flags(self):
        """Sanity check to ensure we have required options set."""
        required_flags = ['hp3par_api_url', 'hp3par_username',
                          'hp3par_password',
                          'san_ip', 'san_login', 'san_password']
        self.common.check_flags(self.configuration, required_flags)

    def _create_client(self):
        return client.HP3ParClient(self.configuration.hp3par_api_url)

    def get_volume_stats(self, refresh):
        stats = self.common.get_volume_stats(refresh, self.client)
        stats['storage_protocol'] = 'FC'
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or self.__class__.__name__
        return stats

    def do_setup(self, context):
        self.common = self._init_common()
        self._check_flags()
        self.client = self._create_client()
        if self.configuration.hp3par_debug:
            self.client.debug_rest(True)

        try:
            LOG.debug("Connecting to 3PAR")
            self.client.login(self.configuration.hp3par_username,
                              self.configuration.hp3par_password)
        except hpexceptions.HTTPUnauthorized as ex:
            LOG.warning("Failed to connect to 3PAR (%s) because %s" %
                        (self.configuration.hp3par_api_url, str(ex)))
            msg = _("Login to 3PAR array invalid")
            raise exception.InvalidInput(reason=msg)

        # make sure the CPG exists
        try:
            cpg = self.client.getCPG(self.configuration.hp3par_cpg)
        except hpexceptions.HTTPNotFound:
            err = (_("CPG (%s) doesn't exist on array")
                   % self.configuration.hp3par_cpg)
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        # NOTE: the original condition used 'and' here, which could never
        # be true and would raise KeyError for CPGs without a domain; the
        # intent is to reject a CPG whose domain does not match the config.
        if ('domain' not in cpg or
                cpg['domain'] != self.configuration.hp3par_domain):
            err = (_("CPG's domain '%s' and config option hp3par_domain '%s' "
                     "must be the same") %
                   (cpg.get('domain'), self.configuration.hp3par_domain))
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        self._check_flags()

    @utils.synchronized('3par-vol', external=True)
    def create_volume(self, volume):
        metadata = self.common.create_volume(volume, self.client)
        return {'metadata': metadata}

    def create_cloned_volume(self, volume, src_vref):
        new_vol = self.common.create_cloned_volume(volume, src_vref,
                                                   self.client)
        return {'metadata': new_vol}

    @utils.synchronized('3par-vol', external=True)
    def delete_volume(self, volume):
        self.common.delete_volume(volume, self.client)

    @utils.synchronized('3par-vol', external=True)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        TODO: support using the size from the user.
        """
        self.common.create_volume_from_snapshot(volume, snapshot, self.client)

    @utils.synchronized('3par-snap', external=True)
    def create_snapshot(self, snapshot):
        self.common.create_snapshot(snapshot, self.client)

    @utils.synchronized('3par-snap', external=True)
    def delete_snapshot(self, snapshot):
        self.common.delete_snapshot(snapshot, self.client)

    @utils.synchronized('3par-attach', external=True)
    def initialize_connection(self, volume, connector):
        """Assigns the volume to a server.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        Example return values:

            {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '1234567890123',
                }
            }

        or

            {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['1234567890123', '0987654321321'],
                }
            }

        Steps to export a volume on 3PAR
          * Create a host on the 3par with the target wwn
          * Create a VLUN for that HOST with the volume we want to export.

        """
        # we have to make sure we have a host
        host = self._create_host(volume, connector)

        # now that we have a host, create the VLUN
        vlun = self.common.create_vlun(volume, host, self.client)

        ports = self.common.get_ports()

        info = {'driver_volume_type': 'fibre_channel',
                'data': {'target_lun': vlun['lun'],
                         'target_discovered': True,
                         'target_wwn': ports['FC']}}
        return info

    @utils.synchronized('3par-attach', external=True)
    def terminate_connection(self, volume, connector, force):
        """Driver entry point to unattach a volume from an instance."""
        self.common.delete_vlun(volume, connector, self.client)

    def _create_3par_fibrechan_host(self, hostname, wwn, domain, persona_id):
        out = self.common._cli_run('createhost -persona %s -domain %s %s %s'
                                   % (persona_id, domain,
                                      hostname, " ".join(wwn)), None)
        if out and len(out) > 1:
            if "already used by host" in out[1]:
                err = out[1].strip()
                info = _("The hostname must be called '%s'") % hostname
                raise exception.Duplicate3PARHost(err=err, info=info)

    def _modify_3par_fibrechan_host(self, hostname, wwn):
        # when using -add, you can not send the persona or domain options
        self.common._cli_run('createhost -add %s %s'
                             % (hostname, " ".join(wwn)), None)

    def _create_host(self, volume, connector):
        """Creates or updates the 3PAR host entry used to export volumes
        via active VLUNs.
        """
        host = None
        hostname = self.common._safe_hostname(connector['host'])
        try:
            host = self.common._get_3par_host(hostname)
            if not host['FCPaths']:
                self._modify_3par_fibrechan_host(hostname, connector['wwpns'])
                host = self.common._get_3par_host(hostname)
        except hpexceptions.HTTPNotFound:
            # get persona from the volume type extra specs
            persona_id = self.common.get_persona_type(volume)
            # host doesn't exist, we have to create it
            self._create_3par_fibrechan_host(hostname, connector['wwpns'],
                                             self.configuration.hp3par_domain,
                                             persona_id)
            host = self.common._get_3par_host(hostname)

        return host

    @utils.synchronized('3par-exp', external=True)
    def create_export(self, context, volume):
        pass

    @utils.synchronized('3par-exp', external=True)
    def ensure_export(self, context, volume):
        pass

    @utils.synchronized('3par-exp', external=True)
    def remove_export(self, context, volume):
        pass
@ -1,279 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP 3PAR Storage array. This driver requires 3.1.2 firmware
on the 3PAR array.

You will need to install the python hp3parclient module:

    sudo pip install hp3parclient

Set the following in the manila.conf file to enable the
3PAR iSCSI Driver along with the required flags:

    volume_driver=manila.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver
"""

from hp3parclient import client
from hp3parclient import exceptions as hpexceptions

from manila import exception
from manila.openstack.common import log as logging
from manila import utils
import manila.volume.driver
from manila.volume.drivers.san.hp import hp_3par_common as hpcommon
from manila.volume.drivers.san import san

VERSION = 1.0
LOG = logging.getLogger(__name__)


class HP3PARISCSIDriver(manila.volume.driver.ISCSIDriver):
    """OpenStack iSCSI driver to enable 3PAR storage array.

    Version history:
        1.0 - Initial driver

    """

    def __init__(self, *args, **kwargs):
        super(HP3PARISCSIDriver, self).__init__(*args, **kwargs)
        self.client = None
        self.common = None
        self.configuration.append_config_values(hpcommon.hp3par_opts)
        self.configuration.append_config_values(san.san_opts)

    def _init_common(self):
        return hpcommon.HP3PARCommon(self.configuration)

    def _check_flags(self):
        """Sanity check to ensure we have required options set."""
        required_flags = ['hp3par_api_url', 'hp3par_username',
                          'hp3par_password', 'iscsi_ip_address',
                          'iscsi_port', 'san_ip', 'san_login',
                          'san_password']
        self.common.check_flags(self.configuration, required_flags)

    def _create_client(self):
        return client.HP3ParClient(self.configuration.hp3par_api_url)

    def get_volume_stats(self, refresh):
        stats = self.common.get_volume_stats(refresh, self.client)
        stats['storage_protocol'] = 'iSCSI'
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or self.__class__.__name__
        return stats

    def do_setup(self, context):
        self.common = self._init_common()
        self._check_flags()
        self.client = self._create_client()
        if self.configuration.hp3par_debug:
            self.client.debug_rest(True)

        try:
            LOG.debug("Connecting to 3PAR")
            self.client.login(self.configuration.hp3par_username,
                              self.configuration.hp3par_password)
        except hpexceptions.HTTPUnauthorized as ex:
            LOG.warning("Failed to connect to 3PAR (%s) because %s" %
                        (self.configuration.hp3par_api_url, str(ex)))
            msg = _("Login to 3PAR array invalid")
            raise exception.InvalidInput(reason=msg)

        # make sure the CPG exists
        try:
            cpg = self.client.getCPG(self.configuration.hp3par_cpg)
        except hpexceptions.HTTPNotFound:
            err = (_("CPG (%s) doesn't exist on array")
                   % self.configuration.hp3par_cpg)
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        # NOTE: the original condition used 'and', which could never be true
        # and would raise KeyError for CPGs without a domain; the intent is
        # to reject a CPG whose domain does not match the config.
        if ('domain' not in cpg or
                cpg['domain'] != self.configuration.hp3par_domain):
            err = (_("CPG's domain '%s' and config option hp3par_domain '%s' "
                     "must be the same") %
                   (cpg.get('domain'), self.configuration.hp3par_domain))
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        # make sure ssh works.
        self._iscsi_discover_target_iqn(self.configuration.iscsi_ip_address)

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        self._check_flags()

    @utils.synchronized('3par-vol', external=True)
    def create_volume(self, volume):
        metadata = self.common.create_volume(volume, self.client)

        return {'provider_location': "%s:%s" %
                (self.configuration.iscsi_ip_address,
                 self.configuration.iscsi_port),
                'metadata': metadata}

    def create_cloned_volume(self, volume, src_vref):
        """Clone an existing volume."""
        new_vol = self.common.create_cloned_volume(volume, src_vref,
                                                   self.client)
        return {'provider_location': "%s:%s" %
                (self.configuration.iscsi_ip_address,
                 self.configuration.iscsi_port),
                'metadata': new_vol}

    @utils.synchronized('3par-vol', external=True)
    def delete_volume(self, volume):
        self.common.delete_volume(volume, self.client)

    @utils.synchronized('3par-vol', external=True)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        TODO: support using the size from the user.
        """
        self.common.create_volume_from_snapshot(volume, snapshot, self.client)

    @utils.synchronized('3par-snap', external=True)
    def create_snapshot(self, snapshot):
        self.common.create_snapshot(snapshot, self.client)

    @utils.synchronized('3par-snap', external=True)
    def delete_snapshot(self, snapshot):
        self.common.delete_snapshot(snapshot, self.client)

    @utils.synchronized('3par-attach', external=True)
    def initialize_connection(self, volume, connector):
        """Assigns the volume to a server.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        This driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value:

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        Steps to export a volume on 3PAR
          * Get the 3PAR iSCSI iqn
          * Create a host on the 3par
          * create vlun on the 3par
        """
        # get the target_iqn on the 3par interface.
        target_iqn = self._iscsi_discover_target_iqn(
            self.configuration.iscsi_ip_address)

        # we have to make sure we have a host
        host = self._create_host(volume, connector)

        # now that we have a host, create the VLUN
        vlun = self.common.create_vlun(volume, host, self.client)

        info = {'driver_volume_type': 'iscsi',
                'data': {'target_portal': "%s:%s" %
                         (self.configuration.iscsi_ip_address,
                          self.configuration.iscsi_port),
                         'target_iqn': target_iqn,
                         'target_lun': vlun['lun'],
                         'target_discovered': True
                         }
                }
        return info

    @utils.synchronized('3par-attach', external=True)
    def terminate_connection(self, volume, connector, force):
        """Driver entry point to unattach a volume from an instance."""
        self.common.delete_vlun(volume, connector, self.client)

    def _iscsi_discover_target_iqn(self, remote_ip):
        result = self.common._cli_run('showport -ids', None)

        iqn = None
        if result:
            # first line is header
            result = result[1:]
            for line in result:
                info = line.split(",")
                if info and len(info) > 2:
                    if info[1] == remote_ip:
                        iqn = info[2]

        return iqn
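
    # NOTE: a hedged sketch of the CSV-style 'showport -ids' output the
    # parser above assumes -- the real header and column layout may differ
    # by firmware version:
    #
    #     N:S:P,IPAddr,iSCSI_Name
    #     0:1:1,10.10.22.200,iqn.2000-05.com.3pardata:21210002ac000123
    #
    # With that output, _iscsi_discover_target_iqn('10.10.22.200') returns
    # the iqn from column three of the matching row.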

    def _create_3par_iscsi_host(self, hostname, iscsi_iqn, domain, persona_id):
        cmd = ('createhost -iscsi -persona %s -domain %s %s %s' %
               (persona_id, domain, hostname, iscsi_iqn))
        out = self.common._cli_run(cmd, None)
        if out and len(out) > 1:
            if "already used by host" in out[1]:
                err = out[1].strip()
                info = _("The hostname must be called '%s'") % hostname
                raise exception.Duplicate3PARHost(err=err, info=info)

    def _modify_3par_iscsi_host(self, hostname, iscsi_iqn):
        # when using -add, you can not send the persona or domain options
        self.common._cli_run('createhost -iscsi -add %s %s'
                             % (hostname, iscsi_iqn), None)

    def _create_host(self, volume, connector):
        """Creates or updates the 3PAR host entry used to export volumes
        via active VLUNs.
        """
        # make sure we don't have the host already
        host = None
        hostname = self.common._safe_hostname(connector['host'])
        try:
            host = self.common._get_3par_host(hostname)
            if not host['iSCSIPaths']:
                self._modify_3par_iscsi_host(hostname, connector['initiator'])
                host = self.common._get_3par_host(hostname)
        except hpexceptions.HTTPNotFound:
            # get persona from the volume type extra specs
            persona_id = self.common.get_persona_type(volume)
            # host doesn't exist, we have to create it
            self._create_3par_iscsi_host(hostname, connector['initiator'],
                                         self.configuration.hp3par_domain,
                                         persona_id)
            host = self.common._get_3par_host(hostname)

        return host

    @utils.synchronized('3par-exp', external=True)
    def create_export(self, context, volume):
        pass

    @utils.synchronized('3par-exp', external=True)
    def ensure_export(self, context, volume):
        pass

    @utils.synchronized('3par-exp', external=True)
    def remove_export(self, context, volume):
        pass
@ -1,314 +0,0 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
HP LeftHand SAN iSCSI Driver.

The driver communicates with the backend (aka CLIQ) via SSH to perform all
the operations on the SAN.
"""
from lxml import etree

from manila import exception
from manila.openstack.common import log as logging
from manila.volume.drivers.san.san import SanISCSIDriver


LOG = logging.getLogger(__name__)


class HpSanISCSIDriver(SanISCSIDriver):
    """Executes commands relating to HP/LeftHand SAN iSCSI volumes.

    We use the CLIQ interface, over SSH.

    Rough overview of CLIQ commands used:

    :createVolume:     (creates the volume)
    :getVolumeInfo:    (to discover the IQN etc)
    :getClusterInfo:   (to discover the iSCSI target IP address)
    :assignVolumeChap: (exports it with CHAP security)

    The 'trick' here is that the HP SAN enforces security by default, so
    normally a volume mount would need both to configure the SAN in the volume
    layer and do the mount on the compute layer. Multi-layer operations are
    not catered for at the moment in the manila architecture, so instead we
    share the volume using CHAP at volume creation time. Then the mount need
    only use those CHAP credentials, so can take place exclusively in the
    compute layer.
    """

    device_stats = {}

    def __init__(self, *args, **kwargs):
        super(HpSanISCSIDriver, self).__init__(*args, **kwargs)
        self.cluster_vip = None

    def _cliq_run(self, verb, cliq_args, check_exit_code=True):
        """Runs a CLIQ command over SSH, without doing any result parsing."""
        cliq_arg_strings = []
        for k, v in cliq_args.items():
            cliq_arg_strings.append(" %s=%s" % (k, v))
        cmd = verb + ''.join(cliq_arg_strings)

        return self._run_ssh(cmd, check_exit_code)

    def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
        """Runs a CLIQ command over SSH, parsing and checking the output."""
        cliq_args['output'] = 'XML'
        (out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result)

        LOG.debug(_("CLIQ command returned %s"), out)

        result_xml = etree.fromstring(out)
        if check_cliq_result:
            response_node = result_xml.find("response")
            if response_node is None:
                msg = (_("Malformed response to CLIQ command "
                         "%(verb)s %(cliq_args)s. Result=%(out)s") %
                       locals())
                raise exception.VolumeBackendAPIException(data=msg)

            result_code = response_node.attrib.get("result")

            if result_code != "0":
                msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
                         " Result=%(out)s") %
                       locals())
                raise exception.VolumeBackendAPIException(data=msg)

        return result_xml

    def _cliq_get_cluster_info(self, cluster_name):
        """Queries for info about the cluster (including IP)."""
        cliq_args = {}
        cliq_args['clusterName'] = cluster_name
        cliq_args['searchDepth'] = '1'
        cliq_args['verbose'] = '0'

        result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)

        return result_xml

    def _cliq_get_cluster_vip(self, cluster_name):
        """Gets the IP on which a cluster shares iSCSI volumes."""
        cluster_xml = self._cliq_get_cluster_info(cluster_name)

        vips = []
        for vip in cluster_xml.findall("response/cluster/vip"):
            vips.append(vip.attrib.get('ipAddress'))

        if len(vips) == 1:
            return vips[0]

        _xml = etree.tostring(cluster_xml)
        msg = (_("Unexpected number of virtual ips for cluster "
                 " %(cluster_name)s. Result=%(_xml)s") %
               locals())
        raise exception.VolumeBackendAPIException(data=msg)

    def _cliq_get_volume_info(self, volume_name):
        """Gets the volume info, including IQN."""
        cliq_args = {}
        cliq_args['volumeName'] = volume_name
        result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)

        # Result looks like this:
        # <gauche version="1.0">
        #   <response description="Operation succeeded." name="CliqSuccess"
        #     processingTime="87" result="0">
        #     <volume autogrowPages="4" availability="online" blockSize="1024"
        #       bytesWritten="0" checkSum="false" clusterName="Cluster01"
        #       created="2011-02-08T19:56:53Z" deleting="false" description=""
        #       groupName="Group01" initialQuota="536870912" isPrimary="true"
        #       iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
        #       maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
        #       minReplication="1" name="vol-b" parity="0" replication="2"
        #       reserveQuota="536870912" scratchQuota="4194304"
        #       serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
        #       size="1073741824" stridePages="32" thinProvision="true">
        #       <status description="OK" value="2"/>
        #       <permission access="rw"
        #         authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
        #         chapName="chapusername" chapRequired="true" id="25369"
        #         initiatorSecret="" iqn="" iscsiEnabled="true"
        #         loadBalance="true" targetSecret="supersecret"/>
        #     </volume>
        #   </response>
        # </gauche>

        # Flatten the nodes into a dictionary; use prefixes to avoid collisions
        volume_attributes = {}

        volume_node = result_xml.find("response/volume")
        for k, v in volume_node.attrib.items():
            volume_attributes["volume." + k] = v

        status_node = volume_node.find("status")
        if status_node is not None:
            for k, v in status_node.attrib.items():
                volume_attributes["status." + k] = v

        # We only consider the first permission node
        # NOTE: the original code iterated status_node here, which dropped
        # the permission attributes; it must iterate permission_node.
        permission_node = volume_node.find("permission")
        if permission_node is not None:
            for k, v in permission_node.attrib.items():
                volume_attributes["permission." + k] = v

        LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
                  locals())
        return volume_attributes

    def create_volume(self, volume):
        """Creates a volume."""
        cliq_args = {}
        cliq_args['clusterName'] = self.configuration.san_clustername

        if self.configuration.san_thin_provision:
            cliq_args['thinProvision'] = '1'
        else:
            cliq_args['thinProvision'] = '0'

        cliq_args['volumeName'] = volume['name']
        if int(volume['size']) == 0:
            cliq_args['size'] = '100MB'
        else:
            cliq_args['size'] = '%sGB' % volume['size']

        self._cliq_run_xml("createVolume", cliq_args)

        volume_info = self._cliq_get_volume_info(volume['name'])
        cluster_name = volume_info['volume.clusterName']
        iscsi_iqn = volume_info['volume.iscsiIqn']

        # TODO(justinsb): Is this always 1? Does it matter?
        cluster_interface = '1'

        if not self.cluster_vip:
            self.cluster_vip = self._cliq_get_cluster_vip(cluster_name)
        iscsi_portal = self.cluster_vip + ":3260," + cluster_interface

        model_update = {}

        # NOTE(jdg): LH volumes always at lun 0 ?
        model_update['provider_location'] = ("%s %s %s" %
                                             (iscsi_portal,
                                              iscsi_iqn,
                                              0))

        return model_update

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        raise NotImplementedError()

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        raise NotImplementedError()

    def delete_volume(self, volume):
        """Deletes a volume."""
        cliq_args = {}
        cliq_args['volumeName'] = volume['name']
        cliq_args['prompt'] = 'false'  # Don't confirm
        try:
            # make sure the volume exists before trying to delete it
            self._cliq_get_volume_info(volume['name'])
        except exception.ProcessExecutionError:
            LOG.error("Volume did not exist. It will not be deleted")
            return
        self._cliq_run_xml("deleteVolume", cliq_args)

    def local_path(self, volume):
        msg = _("local_path not supported")
        raise exception.VolumeBackendAPIException(data=msg)

    def initialize_connection(self, volume, connector):
        """Assigns the volume to a server.

        Assign any created volume to a compute node/host so that it can be
        used from that host. HP VSA requires a volume to be assigned
        to a server.

        This driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value:

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        """
        self._create_server(connector)
        cliq_args = {}
        cliq_args['volumeName'] = volume['name']
        cliq_args['serverName'] = connector['host']
        self._cliq_run_xml("assignVolumeToServer", cliq_args)

        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type': 'iscsi',
            'data': iscsi_properties
        }

    def _create_server(self, connector):
        cliq_args = {}
        cliq_args['serverName'] = connector['host']
        out = self._cliq_run_xml("getServerInfo", cliq_args, False)
        response = out.find("response")
        result = response.attrib.get("result")
        if result != '0':
            cliq_args = {}
            cliq_args['serverName'] = connector['host']
            cliq_args['initiator'] = connector['initiator']
            self._cliq_run_xml("createServer", cliq_args)

    def terminate_connection(self, volume, connector, **kwargs):
        """Unassign the volume from the host."""
        cliq_args = {}
        cliq_args['volumeName'] = volume['name']
        cliq_args['serverName'] = connector['host']
        self._cliq_run_xml("unassignVolumeToServer", cliq_args)

    def get_volume_stats(self, refresh):
        if refresh:
            self._update_backend_status()

        return self.device_stats

    def _update_backend_status(self):
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['driver_version'] = '1.0'
        data['reserved_percentage'] = 0
        data['storage_protocol'] = 'iSCSI'
        data['vendor_name'] = 'Hewlett-Packard'

        result_xml = self._cliq_run_xml("getClusterInfo", {})
        cluster_node = result_xml.find("response/cluster")
        total_capacity = cluster_node.attrib.get("spaceTotal")
        free_capacity = cluster_node.attrib.get("unprovisionedSpace")
        GB = 1073741824

        data['total_capacity_gb'] = int(total_capacity) / GB
        data['free_capacity_gb'] = int(free_capacity) / GB
        self.device_stats = data
@ -1,177 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Default Driver for san-stored volumes.

The unique thing about a SAN is that we don't expect that we can run the
volume controller on the SAN hardware. We expect to access it over SSH or
some API.
"""

import random

from eventlet import greenthread
from oslo.config import cfg

from manila import exception
from manila import flags
from manila.openstack.common import log as logging
from manila import utils
from manila.volume.driver import ISCSIDriver

LOG = logging.getLogger(__name__)

san_opts = [
    cfg.BoolOpt('san_thin_provision',
                default=True,
                help='Use thin provisioning for SAN volumes?'),
    cfg.StrOpt('san_ip',
               default='',
               help='IP address of SAN controller'),
    cfg.StrOpt('san_login',
               default='admin',
               help='Username for SAN controller'),
    cfg.StrOpt('san_password',
               default='',
               help='Password for SAN controller',
               secret=True),
    cfg.StrOpt('san_private_key',
               default='',
               help='Filename of private key to use for SSH authentication'),
    cfg.StrOpt('san_clustername',
               default='',
               help='Cluster name to use for creating volumes'),
    cfg.IntOpt('san_ssh_port',
               default=22,
               help='SSH port to use with SAN'),
    cfg.BoolOpt('san_is_local',
                default=False,
                help='Execute commands locally instead of over SSH; '
                     'use if the volume service is running on the SAN device'),
    cfg.IntOpt('ssh_conn_timeout',
               default=30,
               help="SSH connection timeout in seconds"),
    cfg.IntOpt('ssh_min_pool_conn',
               default=1,
               help='Minimum ssh connections in the pool'),
    cfg.IntOpt('ssh_max_pool_conn',
               default=5,
               help='Maximum ssh connections in the pool'),
]
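
# NOTE: a hedged manila.conf sketch wiring up the options above for an
# SSH-managed SAN backend; the IP and credentials are placeholders:
#
#     [DEFAULT]
#     san_ip=10.10.22.241
#     san_login=admin
#     san_password=<password>      # or san_private_key=/path/to/id_rsa
#     san_ssh_port=22
#     san_thin_provision=true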

FLAGS = flags.FLAGS
FLAGS.register_opts(san_opts)


class SanISCSIDriver(ISCSIDriver):
    """Base class for SAN-style storage volumes.

    A SAN-style storage volume is 'different' because the volume controller
    probably won't run on it, so we need to access it over SSH or another
    remote protocol.
    """

    def __init__(self, *args, **kwargs):
        super(SanISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(san_opts)
        self.run_local = self.configuration.san_is_local
        self.sshpool = None

    def _build_iscsi_target_name(self, volume):
        return "%s%s" % (self.configuration.iscsi_target_prefix,
                         volume['name'])

    def _execute(self, *cmd, **kwargs):
        if self.run_local:
            return utils.execute(*cmd, **kwargs)
        else:
            check_exit_code = kwargs.pop('check_exit_code', None)
            command = ' '.join(cmd)
            return self._run_ssh(command, check_exit_code)

    def _run_ssh(self, command, check_exit_code=True, attempts=1):
        if not self.sshpool:
            password = self.configuration.san_password
            privatekey = self.configuration.san_private_key
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = utils.SSHPool(self.configuration.san_ip,
                                         self.configuration.san_ssh_port,
                                         self.configuration.ssh_conn_timeout,
                                         self.configuration.san_login,
                                         password=password,
                                         privatekey=privatekey,
                                         min_size=min_size,
                                         max_size=max_size)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return utils.ssh_execute(
                            ssh,
                            command,
                            check_exit_code=check_exit_code)
                    except Exception as e:
                        LOG.error(e)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                try:
                    raise exception.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise exception.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)

        except Exception:
            LOG.error(_("Error running SSH command: %s") % command)
            raise

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        if not self.run_local:
            if not (self.configuration.san_password or
                    self.configuration.san_private_key):
                raise exception.InvalidInput(
                    reason=_('Specify san_password or san_private_key'))

        # The san_ip must always be set, because we use it for the target
        if not self.configuration.san_ip:
            raise exception.InvalidInput(reason=_("san_ip must be set"))

    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume."""
        raise NotImplementedError()
@ -1,285 +0,0 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from manila import exception
from manila import flags
from manila.openstack.common import log as logging
from manila.volume.drivers.san.san import SanISCSIDriver

LOG = logging.getLogger(__name__)

solaris_opts = [
    cfg.StrOpt('san_zfs_volume_base',
               default='rpool/',
               help='The ZFS path under which to create zvols for volumes.'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(solaris_opts)


class SolarisISCSIDriver(SanISCSIDriver):
    """Executes commands relating to Solaris-hosted iSCSI volumes.

    Basic setup for a Solaris iSCSI server:

        pkg install storage-server SUNWiscsit

        svcadm enable stmf

        svcadm enable -r svc:/network/iscsi/target:default

        pfexec itadm create-tpg e1000g0 ${MYIP}

        pfexec itadm create-target -t e1000g0

    Then grant the user that will be logging on lots of permissions.
    I'm not sure exactly which though:

        zfs allow justinsb create,mount,destroy rpool

        usermod -P'File System Management' justinsb

        usermod -P'Primary Administrator' justinsb

    Also make sure you can login using san_login & san_password/san_private_key
    """
    def __init__(self, *cmd, **kwargs):
        super(SolarisISCSIDriver, self).__init__(*cmd,
                                                 execute=self._execute,
                                                 **kwargs)

    def _execute(self, *cmd, **kwargs):
        new_cmd = ['pfexec']
        new_cmd.extend(cmd)
        return super(SolarisISCSIDriver, self)._execute(*new_cmd,
                                                        **kwargs)

    def _view_exists(self, luid):
        (out, _err) = self._execute('/usr/sbin/stmfadm',
                                    'list-view', '-l', luid,
                                    check_exit_code=False)
        if "no views found" in out:
            return False

        if "View Entry:" in out:
            return True
        msg = _("Cannot parse list-view output: %s") % out
        raise exception.VolumeBackendAPIException(data=msg)

    def _get_target_groups(self):
        """Gets list of target groups from host."""
        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
        matches = self._get_prefixed_values(out, 'Target group: ')
        LOG.debug("target_groups=%s" % matches)
        return matches

    def _target_group_exists(self, target_group_name):
        # NOTE: the original code returned 'not in' here, which inverted the
        # check; a target group exists when it appears in the list.
        return target_group_name in self._get_target_groups()

    def _get_target_group_members(self, target_group_name):
        (out, _err) = self._execute('/usr/sbin/stmfadm',
                                    'list-tg', '-v', target_group_name)
        matches = self._get_prefixed_values(out, 'Member: ')
        LOG.debug("members of %s=%s" % (target_group_name, matches))
        return matches

    def _is_target_group_member(self, target_group_name, iscsi_target_name):
        return iscsi_target_name in (
            self._get_target_group_members(target_group_name))

    def _get_iscsi_targets(self):
        (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
        matches = self._collect_lines(out)

        # Skip header
        if len(matches) != 0:
            assert 'TARGET NAME' in matches[0]
            matches = matches[1:]

        targets = []
        for line in matches:
            items = line.split()
            assert len(items) == 3
            targets.append(items[0])

        LOG.debug("_get_iscsi_targets=%s" % (targets))
        return targets

    def _iscsi_target_exists(self, iscsi_target_name):
        return iscsi_target_name in self._get_iscsi_targets()

    def _build_zfs_poolname(self, volume):
        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
        return zfs_poolname

    def create_volume(self, volume):
        """Creates a volume."""
        if int(volume['size']) == 0:
            sizestr = '100M'
        else:
            sizestr = '%sG' % volume['size']

        zfs_poolname = self._build_zfs_poolname(volume)

        # Create a zfs volume
        cmd = ['/usr/sbin/zfs', 'create']
        if FLAGS.san_thin_provision:
            cmd.append('-s')
        cmd.extend(['-V', sizestr])
        cmd.append(zfs_poolname)
        self._execute(*cmd)

    def _get_luid(self, volume):
        zfs_poolname = self._build_zfs_poolname(volume)
        zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname

        (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu')

        lines = self._collect_lines(out)

        # Strip headers
        if len(lines) >= 1:
            if lines[0] == '':
                lines = lines[1:]

        if len(lines) >= 4:
            assert 'Found' in lines[0]
            assert '' == lines[1]
            assert 'GUID' in lines[2]
            assert '------------------' in lines[3]

            lines = lines[4:]

        for line in lines:
            items = line.split()
            assert len(items) == 3
            if items[2] == zvol_name:
                luid = items[0].strip()
                return luid

        msg = _('LUID not found for %(zfs_poolname)s. '
                'Output=%(out)s') % locals()
        raise exception.VolumeBackendAPIException(data=msg)

    def _is_lu_created(self, volume):
        # NOTE: _get_luid() raises when no LU exists, so translate that into
        # a None return instead of letting the exception escape; the original
        # code could never actually return a falsy value here.
        try:
            return self._get_luid(volume)
        except exception.VolumeBackendAPIException:
            return None

    def delete_volume(self, volume):
        """Deletes a volume."""
        zfs_poolname = self._build_zfs_poolname(volume)
        self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname)

    def local_path(self, volume):
        # TODO(justinsb): Is this needed here?
        escaped_group = FLAGS.volume_group.replace('-', '--')
        escaped_name = volume['name'].replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # TODO(justinsb): On bootup, this is called for every volume.
        # It then runs ~5 SSH commands for each volume,
        # most of which fetch the same info each time
        # This makes initial start stupid-slow
        return self._do_export(volume, force_create=False)

    def create_export(self, context, volume):
        return self._do_export(volume, force_create=True)

    def _do_export(self, volume, force_create):
        # Create a Logical Unit (LU) backed by the zfs volume
        zfs_poolname = self._build_zfs_poolname(volume)

        if force_create or not self._is_lu_created(volume):
            zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
            self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name)

        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        # Create an iSCSI target, mapped to just this volume
        if force_create or not self._target_group_exists(target_group_name):
            self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name)

        # Yes, we add the initiator before we create it!
        # Otherwise, it complains that the target is already active
        if force_create or not self._is_target_group_member(target_group_name,
                                                            iscsi_name):
            self._execute('/usr/sbin/stmfadm',
                          'add-tg-member', '-g', target_group_name, iscsi_name)

        if force_create or not self._iscsi_target_exists(iscsi_name):
            self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name)

        if force_create or not self._view_exists(luid):
            self._execute('/usr/sbin/stmfadm',
                          'add-view', '-t', target_group_name, luid)

        # TODO(justinsb): Is this always 1? Does it matter?
        iscsi_portal_interface = '1'
        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface

        db_update = {}
        db_update['provider_location'] = ("%s %s" %
                                          (iscsi_portal,
                                           iscsi_name))

        return db_update
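
    # NOTE: with san_ip=10.10.22.241 and a target prefix such as the usual
    # 'iqn.2010-10.org.openstack:', the provider_location built above would
    # look roughly like the following (values are illustrative only):
    #
    #     "10.10.22.241:3260,1 iqn.2010-10.org.openstack:volume-00000001"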

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""

        # This is the reverse of _do_export
        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        if self._view_exists(luid):
            self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a')

        if self._iscsi_target_exists(iscsi_name):
            self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name)
            self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name)

        # We don't delete the tg-member; we delete the whole tg!

        if self._target_group_exists(target_group_name):
            self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name)

        if self._is_lu_created(volume):
            self._execute('/usr/sbin/sbdadm', 'delete-lu', luid)

    def _collect_lines(self, data):
        """Split lines from data into an array, trimming them."""
        matches = []
        for line in data.splitlines():
            match = line.strip()
            matches.append(match)
        return matches

    def _get_prefixed_values(self, data, prefix):
        """Collect lines which start with prefix; with trimming."""
        matches = []
        for line in data.splitlines():
            line = line.strip()
            if line.startswith(prefix):
                match = line[len(prefix):]
                match = match.strip()
                matches.append(match)
        return matches
@ -1,261 +0,0 @@
# Copyright (c) 2013 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Scality SOFS Volume Driver.
"""

import errno
import os
import urllib2
import urlparse

from oslo.config import cfg

from manila import exception
from manila import flags
from manila.image import image_utils
from manila.openstack.common import log as logging
from manila.volume import driver

LOG = logging.getLogger(__name__)

volume_opts = [
    cfg.StrOpt('scality_sofs_config',
               default=None,
               help='Path or URL to Scality SOFS configuration file'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted'),
    cfg.StrOpt('scality_sofs_volume_dir',
               default='manila/volumes',
               help='Path from Scality SOFS root to volume dir'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(volume_opts)


class ScalityDriver(driver.VolumeDriver):
    """Scality SOFS manila driver.

    Creates sparse files on SOFS for hypervisors to use as block
    devices.
    """

    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS."""

        # config is mandatory
        config = FLAGS.scality_sofs_config
        if not config:
            msg = _("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            urllib2.urlopen(config, timeout=5).close()
        except urllib2.URLError as e:
            msg = _("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.VolumeBackendAPIException(data=msg)
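
    # NOTE: both of these hypothetical settings would pass the check above,
    # since a bare path is turned into a file:// URL before being probed:
    #
    #     scality_sofs_config=/etc/sfused.conf
    #     scality_sofs_config=http://config.example.com/sfused.conf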

    def _makedirs(self, path):
        try:
            os.makedirs(path)
        except OSError as e:
            # the directory already existing is fine
            if e.errno != errno.EEXIST:
                raise

    def _mount_sofs(self):
        config = FLAGS.scality_sofs_config
        mount_path = FLAGS.scality_sofs_mount_point
        sysdir = os.path.join(mount_path, 'sys')

        self._makedirs(mount_path)
        if not os.path.isdir(sysdir):
            self._execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        if not os.path.isdir(sysdir):
            msg = _("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warn(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def _size_bytes(self, size_in_g):
        if int(size_in_g) == 0:
            return 100 * 1024 * 1024
        return int(size_in_g) * 1024 * 1024 * 1024

    def _create_file(self, path, size):
        with open(path, "ab") as f:
            f.truncate(size)
        os.chmod(path, 0666)

    def _copy_file(self, src_path, dest_path):
        self._execute('dd', 'if=%s' % src_path, 'of=%s' % dest_path,
                      'bs=1M', 'conv=fsync,nocreat,notrunc',
                      run_as_root=True)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        self._check_prerequisites()
        self._mount_sofs()
        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
                              FLAGS.scality_sofs_volume_dir)
        if not os.path.isdir(voldir):
            self._makedirs(voldir)

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        self._check_prerequisites()
        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
                              FLAGS.scality_sofs_volume_dir)
        if not os.path.isdir(voldir):
            msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
            LOG.warn(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def create_volume(self, volume):
        """Creates a logical volume.

        Can optionally return a Dictionary of changes to the volume
        object to be persisted.
        """
        self._create_file(self.local_path(volume),
                          self._size_bytes(volume['size']))
        volume['provider_location'] = self._sofs_path(volume)
        return {'provider_location': volume['provider_location']}

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        changes = self.create_volume(volume)
        self._copy_file(self.local_path(snapshot),
                        self.local_path(volume))
        return changes

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        os.remove(self.local_path(volume))

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        volume_path = os.path.join(FLAGS.scality_sofs_mount_point,
                                   FLAGS.scality_sofs_volume_dir,
                                   snapshot['volume_name'])
        snapshot_path = self.local_path(snapshot)
        self._create_file(snapshot_path,
                          self._size_bytes(snapshot['volume_size']))
        self._copy_file(volume_path, snapshot_path)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        os.remove(self.local_path(snapshot))

    def _sofs_path(self, volume):
        return os.path.join(FLAGS.scality_sofs_volume_dir,
                            volume['name'])

    def local_path(self, volume):
        return os.path.join(FLAGS.scality_sofs_mount_point,
                            self._sofs_path(volume))

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass

    def create_export(self, context, volume):
        """Exports the volume.

        Can optionally return a Dictionary of changes to the volume
        object to be persisted.
        """
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        return {
            'driver_volume_type': 'scality',
            'data': {
                'sofs_path': self._sofs_path(volume),
            }
        }

    def terminate_connection(self, volume, connector, force=False, **kwargs):
        """Disallow connection from connector."""
        pass

    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
        """Callback for volume attached to instance."""
        pass

    def detach_volume(self, context, volume_id):
        """Callback for volume detached."""
        pass

    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        If 'refresh' is True, run the update first.
        """
        stats = {
            'vendor_name': 'Scality',
            'driver_version': '1.0',
            'storage_protocol': 'scality',
            'total_capacity_gb': 'infinite',
            'free_capacity_gb': 'infinite',
            'reserved_percentage': 0,
        }
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or 'Scality_SOFS'
        return stats

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume))
        self.create_volume(volume)

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        image_utils.upload_volume(context,
                                  image_service,
                                  image_meta,
                                  self.local_path(volume))

    def clone_image(self, volume, image_location):
        """Create a volume efficiently from an existing image.

        image_location is a string whose format depends on the
        image service backend in use. The driver should use it
        to determine whether cloning is possible.

        Returns a boolean indicating whether cloning occurred.
        """
        return False
@ -1,141 +0,0 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Sheepdog Volume Driver.
"""
import re

from manila import exception
from manila import flags
from manila.openstack.common import log as logging
from manila.volume import driver


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS


class SheepdogDriver(driver.VolumeDriver):
    """Executes commands relating to Sheepdog Volumes."""

    def __init__(self, *args, **kwargs):
        super(SheepdogDriver, self).__init__(*args, **kwargs)
        # NOTE: the original pattern ended with '(\d+)*', a stray repeated
        # group; the intent is to capture total then used as two numbers.
        self.stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)')
        self._stats = {}
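
    # NOTE: a hedged sketch of the 'collie node info -r' line the pattern
    # above is meant to match -- total then used, in bytes (the exact output
    # varies by sheepdog version):
    #
    #     Total 107374182400 5368709120 5% 21474836480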
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met"""
|
||||
try:
|
||||
#NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
|
||||
# gives short output, but for compatibility reason we won't
|
||||
# use it and just check if 'running' is in the output.
|
||||
(out, err) = self._execute('collie', 'cluster', 'info')
|
||||
if 'running' not in out.split():
|
||||
exception_message = (_("Sheepdog is not working: %s") % out)
|
||||
raise exception.VolumeBackendAPIException(
|
||||
data=exception_message)
|
||||
|
||||
except exception.ProcessExecutionError:
|
||||
exception_message = _("Sheepdog is not working")
|
||||
raise exception.VolumeBackendAPIException(data=exception_message)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Creates a sheepdog volume"""
|
||||
self._try_execute('qemu-img', 'create',
|
||||
"sheepdog:%s" % volume['name'],
|
||||
'%sG' % volume['size'])
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Creates a sheepdog volume from a snapshot."""
|
||||
self._try_execute('qemu-img', 'create', '-b',
|
||||
"sheepdog:%s:%s" % (snapshot['volume_name'],
|
||||
snapshot['name']),
|
||||
"sheepdog:%s" % volume['name'])
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Deletes a logical volume"""
|
||||
self._try_execute('collie', 'vdi', 'delete', volume['name'])
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a sheepdog snapshot"""
|
||||
self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
|
||||
"sheepdog:%s" % snapshot['volume_name'])
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a sheepdog snapshot"""
|
||||
self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
|
||||
'-s', snapshot['name'])
|
||||
|
||||
def local_path(self, volume):
|
||||
return "sheepdog:%s" % volume['name']
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Safely and synchronously recreates an export for a logical volume"""
|
||||
pass
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Exports the volume"""
|
||||
pass
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Removes an export for a logical volume"""
|
||||
pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
return {
|
||||
'driver_volume_type': 'sheepdog',
|
||||
'data': {
|
||||
'name': volume['name']
|
||||
}
|
||||
}
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
pass
|
||||
|
||||
def _update_volume_stats(self):
|
||||
stats = {}
|
||||
|
||||
backend_name = "sheepdog"
|
||||
if self.configuration:
|
||||
backend_name = self.configuration.safe_get('volume_backend_name')
|
||||
stats["volume_backend_name"] = backend_name or 'sheepdog'
|
||||
stats['vendor_name'] = 'Open Source'
|
||||
stats['dirver_version'] = '1.0'
|
||||
stats['storage_protocol'] = 'sheepdog'
|
||||
stats['total_capacity_gb'] = 'unknown'
|
||||
stats['free_capacity_gb'] = 'unknown'
|
||||
stats['reserved_percentage'] = 0
|
||||
stats['QoS_support'] = False
|
||||
|
||||
try:
|
||||
stdout, _err = self._execute('collie', 'node', 'info', '-r')
|
||||
m = self.stats_pattern.match(stdout)
|
||||
total = float(m.group(1))
|
||||
used = float(m.group(2))
|
||||
stats['total_capacity_gb'] = total / (1024 ** 3)
|
||||
stats['free_capacity_gb'] = (total - used) / (1024 ** 3)
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.exception(_('error refreshing volume stats'))
|
||||
|
||||
self._stats = stats
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
if refresh:
|
||||
self._update_volume_stats()
|
||||
return self._stats
|
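
Editor's note: a minimal, self-contained sketch of how the stats_pattern regex
above pulls capacity numbers out of 'collie node info -r' output. The sample
line is illustrative only, not captured from a real Sheepdog cluster.

import re

stats_pattern = re.compile(r'[\w\s%]*Total\s(\d+)\s(\d+)*')

# Assumed output shape: "Total <total-bytes> <used-bytes> ..."
sample = "Total 107374182400 21474836480 20%"
m = stats_pattern.match(sample)
total = float(m.group(1))  # first field after 'Total'
used = float(m.group(2))   # second field
print("total_gb=%s free_gb=%s" % (total / (1024 ** 3),
                                  (total - used) / (1024 ** 3)))
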
@ -1,590 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64
import httplib
import json
import math
import random
import socket
import string
import time
import uuid

from oslo.config import cfg

from manila import context
from manila import exception
from manila.openstack.common import log as logging
from manila.volume.drivers.san.san import SanISCSIDriver
from manila.volume import volume_types

VERSION = '1.2'
LOG = logging.getLogger(__name__)

sf_opts = [
    cfg.BoolOpt('sf_emulate_512',
                default=True,
                help='Set 512 byte emulation on volume creation'),

    cfg.BoolOpt('sf_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),

    cfg.StrOpt('sf_account_prefix',
               default=socket.gethostname(),
               help='Create SolidFire accounts with this prefix'),
]


class SolidFire(SanISCSIDriver):
    """OpenStack driver to enable SolidFire cluster.

    Version history:
        1.0 - Initial driver
        1.1 - Refactor, clone support, qos by type and minor bug fixes

    """

    sf_qos_dict = {'slow': {'minIOPS': 100,
                            'maxIOPS': 200,
                            'burstIOPS': 200},
                   'medium': {'minIOPS': 200,
                              'maxIOPS': 400,
                              'burstIOPS': 400},
                   'fast': {'minIOPS': 500,
                            'maxIOPS': 1000,
                            'burstIOPS': 1000},
                   'performant': {'minIOPS': 2000,
                                  'maxIOPS': 4000,
                                  'burstIOPS': 4000},
                   'off': None}

    sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
    cluster_stats = {}

    GB = math.pow(2, 30)

    def __init__(self, *args, **kwargs):
        super(SolidFire, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(sf_opts)
        self._update_cluster_status()

    def _issue_api_request(self, method_name, params):
        """All API requests to the SolidFire device go through this method.

        Simple JSON-RPC web-based API calls.
        Each call takes a set of parameters (dict)
        and returns results in a dict as well.

        """
        max_simultaneous_clones = ['xMaxSnapshotsPerVolumeExceeded',
                                   'xMaxClonesPerVolumeExceeded',
                                   'xMaxSnapshotsPerNodeExceeded',
                                   'xMaxClonesPerNodeExceeded']
        host = self.configuration.san_ip
        # For now 443 is the only port our server accepts requests on
        port = 443

        cluster_admin = self.configuration.san_login
        cluster_password = self.configuration.san_password

        # NOTE(jdg): We're wrapping the whole sequence in a retry loop for a
        # known xDB issue that shows up at very high request rates (ie create
        # 1000 volumes); the request_id can't be re-used, so we regenerate it
        # on every attempt.
        retry_count = 5
        while retry_count > 0:
            request_id = int(uuid.uuid4())  # just generate a random number
            command = {'method': method_name,
                       'id': request_id}

            if params is not None:
                command['params'] = params

            payload = json.dumps(command, ensure_ascii=False)
            payload = payload.encode('utf-8')
            header = {'Content-Type': 'application/json-rpc; charset=utf-8'}

            if cluster_password is not None:
                # base64.encodestring includes a newline character
                # in the result, make sure we strip it off
                auth_key = base64.encodestring('%s:%s' % (cluster_admin,
                                                          cluster_password))[:-1]
                header['Authorization'] = 'Basic %s' % auth_key

            LOG.debug(_("Payload for SolidFire API call: %s"), payload)

            connection = httplib.HTTPSConnection(host, port)
            connection.request('POST', '/json-rpc/1.0', payload, header)
            response = connection.getresponse()

            data = {}
            if response.status != 200:
                connection.close()
                raise exception.SolidFireAPIException(status=response.status)

            else:
                data = response.read()
                try:
                    data = json.loads(data)
                except (TypeError, ValueError) as exc:
                    connection.close()
                    msg = _("Call to json.loads() raised "
                            "an exception: %s") % exc
                    raise exception.SfJsonEncodeFailure(msg)

                connection.close()

            LOG.debug(_("Results of SolidFire API call: %s"), data)

            if 'error' in data:
                if data['error']['name'] in max_simultaneous_clones:
                    LOG.warning(_('Clone operation '
                                  'encountered: %s') % data['error']['name'])
                    LOG.warning(_(
                        'Waiting for outstanding operation '
                        'before retrying snapshot: %s') % params['name'])
                    time.sleep(5)
                    # Don't decrement the retry count for this one
                elif 'xDBVersionMismatch' in data['error']['name']:
                    LOG.warning(_('Detected xDBVersionMismatch, '
                                  'retry %s of 5') % (5 - retry_count))
                    time.sleep(1)
                    retry_count -= 1
                elif 'xUnknownAccount' in data['error']['name']:
                    retry_count = 0
                else:
                    msg = _("API response: %s") % data
                    raise exception.SolidFireAPIException(msg)
            else:
                retry_count = 0

        return data

    def _get_volumes_by_sfaccount(self, account_id):
        """Get all volumes on cluster for specified account."""
        params = {'accountID': account_id}
        data = self._issue_api_request('ListVolumesForAccount', params)
        if 'result' in data:
            return data['result']['volumes']

    def _get_sfaccount_by_name(self, sf_account_name):
        """Get SolidFire account object by name."""
        sfaccount = None
        params = {'username': sf_account_name}
        data = self._issue_api_request('GetAccountByName', params)
        if 'result' in data and 'account' in data['result']:
            LOG.debug(_('Found solidfire account: %s'), sf_account_name)
            sfaccount = data['result']['account']
        return sfaccount

    def _get_sf_account_name(self, project_id):
        """Build the SolidFire account name to use."""
        return '%s%s%s' % (self.configuration.sf_account_prefix,
                           '-' if self.configuration.sf_account_prefix else '',
                           project_id)

    def _get_sfaccount(self, project_id):
        sf_account_name = self._get_sf_account_name(project_id)
        sfaccount = self._get_sfaccount_by_name(sf_account_name)
        if sfaccount is None:
            raise exception.SfAccountNotFound(account_name=sf_account_name)

        return sfaccount

    def _create_sfaccount(self, project_id):
        """Create account on SolidFire device if it doesn't already exist.

        We're first going to check if the account already exists; if it does,
        just return it. If not, then create it.

        """
        sf_account_name = self._get_sf_account_name(project_id)
        sfaccount = self._get_sfaccount_by_name(sf_account_name)
        if sfaccount is None:
            LOG.debug(_('solidfire account: %s does not exist, create it...'),
                      sf_account_name)
            chap_secret = self._generate_random_string(12)
            params = {'username': sf_account_name,
                      'initiatorSecret': chap_secret,
                      'targetSecret': chap_secret,
                      'attributes': {}}
            data = self._issue_api_request('AddAccount', params)
            if 'result' in data:
                sfaccount = self._get_sfaccount_by_name(sf_account_name)

        return sfaccount

    def _get_cluster_info(self):
        """Query the SolidFire cluster for some property info."""
        params = {}
        data = self._issue_api_request('GetClusterInfo', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return data['result']

    def _do_export(self, volume):
        """Get the associated account, retrieve CHAP info and update."""
        sfaccount = self._get_sfaccount(volume['project_id'])

        model_update = {}
        model_update['provider_auth'] = ('CHAP %s %s'
                                         % (sfaccount['username'],
                                            sfaccount['targetSecret']))

        return model_update

    def _generate_random_string(self, length):
        """Generate a random string to use for the CHAP password."""
        char_set = string.ascii_uppercase + string.digits
        return ''.join(random.sample(char_set, length))

    def _get_model_info(self, sfaccount, sf_volume_id):
        """Get the connection info for specified account and volume."""
        cluster_info = self._get_cluster_info()
        iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
        chap_secret = sfaccount['targetSecret']

        found_volume = False
        iteration_count = 0
        while not found_volume and iteration_count < 10:
            volume_list = self._get_volumes_by_sfaccount(
                sfaccount['accountID'])
            iqn = None
            for v in volume_list:
                if v['volumeID'] == sf_volume_id:
                    iqn = v['iqn']
                    found_volume = True
                    break
            if not found_volume:
                time.sleep(2)
                iteration_count += 1

        if not found_volume:
            LOG.error(_('Failed to retrieve volume SolidFire-'
                        'ID: %s in get_by_account!') % sf_volume_id)
            raise exception.VolumeNotFound(volume_id=sf_volume_id)

        model_update = {}
        # NOTE(john-griffith): SF volumes are always at lun 0
        model_update['provider_location'] = ('%s %s %s'
                                             % (iscsi_portal, iqn, 0))
        model_update['provider_auth'] = ('CHAP %s %s'
                                         % (sfaccount['username'],
                                            chap_secret))
        return model_update

    def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
        """Create a clone of an existing volume.

        Currently snapshots are the same as clones on the SF cluster.
        Due to the way the SF cluster works there's no loss in efficiency
        or space usage between the two. The only thing different right
        now is the restore snapshot functionality which has not been
        implemented in the pre-release version of the SolidFire Cluster.

        """
        attributes = {}
        qos = {}

        sfaccount = self._get_sfaccount(src_project_id)
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(src_uuid, params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=src_uuid)

        if 'qos' in sf_vol:
            qos = sf_vol['qos']

        attributes = {'uuid': v_ref['id'],
                      'is_clone': 'True',
                      'src_uuid': src_uuid}

        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'volumeID': int(sf_vol['volumeID']),
                  'name': 'UUID-%s' % v_ref['id'],
                  'attributes': attributes,
                  'qos': qos}

        data = self._issue_api_request('CloneVolume', params)

        if (('result' not in data) or ('volumeID' not in data['result'])):
            raise exception.SolidFireAPIDataException(data=data)

        sf_volume_id = data['result']['volumeID']
        model_update = self._get_model_info(sfaccount, sf_volume_id)
        if model_update is None:
            mesg = _('Failed to get model update from clone')
            raise exception.SolidFireAPIDataException(mesg)

        return (data, sfaccount, model_update)

    def _do_volume_create(self, project_id, params):
        sfaccount = self._create_sfaccount(project_id)

        params['accountID'] = sfaccount['accountID']
        data = self._issue_api_request('CreateVolume', params)

        if (('result' not in data) or ('volumeID' not in data['result'])):
            raise exception.SolidFireAPIDataException(data=data)

        sf_volume_id = data['result']['volumeID']
        return self._get_model_info(sfaccount, sf_volume_id)

    def _set_qos_presets(self, volume):
        qos = {}
        valid_presets = self.sf_qos_dict.keys()

        # First look to see if they included a preset
        presets = [i.value for i in volume.get('volume_metadata')
                   if i.key == 'sf-qos' and i.value in valid_presets]
        if len(presets) > 0:
            if len(presets) > 1:
                LOG.warning(_('More than one valid preset was '
                              'detected, using %s') % presets[0])
            qos = self.sf_qos_dict[presets[0]]
        else:
            # Look for explicit settings
            for i in volume.get('volume_metadata'):
                if i.key in self.sf_qos_keys:
                    qos[i.key] = int(i.value)
        return qos

    def _set_qos_by_volume_type(self, ctxt, type_id):
        qos = {}
        volume_type = volume_types.get_volume_type(ctxt, type_id)
        specs = volume_type.get('extra_specs')
        for key, value in specs.iteritems():
            if ':' in key:
                fields = key.split(':')
                key = fields[1]
            if key in self.sf_qos_keys:
                qos[key] = int(value)
        return qos

    def _get_sf_volume(self, uuid, params):
        data = self._issue_api_request('ListVolumesForAccount', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        found_count = 0
        sf_volref = None
        for v in data['result']['volumes']:
            if uuid in v['name']:
                found_count += 1
                sf_volref = v
                LOG.debug(_("Mapped SolidFire volumeID %(sfid)s "
                            "to manila ID %(uuid)s.") %
                          {'sfid': v['volumeID'],
                           'uuid': uuid})

        if found_count == 0:
            # NOTE(jdg): Previously we would raise here, but there are cases
            # where this might be a cleanup for a failed delete.
            # Until we get better states we'll just log an error
            LOG.error(_("Volume %s, not found on SF Cluster."), uuid)

        if found_count > 1:
            LOG.error(_("Found %(count)s volumes mapped to id: %(uuid)s.") %
                      {'count': found_count,
                       'uuid': uuid})
            raise exception.DuplicateSfVolumeNames(vol_name=uuid)

        return sf_volref

    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported. Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume. We use a set algorithm to
        determine account info based on the passed-in manila volume object.
        First we check to see if the account already exists (and use it), or
        if it does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        attributes = {'uuid': volume['id'],
                      'is_clone': 'False'}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': 'UUID-%s' % volume['id'],
                  'accountID': None,
                  'sliceCount': slice_count,
                  'totalSize': int(volume['size'] * self.GB),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        return self._do_volume_create(volume['project_id'], params)

    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of an existing volume."""
        (data, sfaccount, model) = self._do_clone_volume(
            src_vref['id'],
            src_vref['project_id'],
            volume)

        return model

    def delete_volume(self, volume):
        """Delete SolidFire Volume from device.

        SolidFire allows multiple volumes with the same name;
        volumeID is what's guaranteed unique.

        """
        LOG.debug(_("Enter SolidFire delete_volume..."))

        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_("Account for Volume ID %s was not found on "
                        "the SolidFire Cluster!") % volume['id'])
            LOG.error(_("This usually means the volume was never "
                        "successfully created."))
            return

        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is not None:
            params = {'volumeID': sf_vol['volumeID']}
            data = self._issue_api_request('DeleteVolume', params)

            if 'result' not in data:
                raise exception.SolidFireAPIDataException(data=data)
        else:
            LOG.error(_("Volume ID %s was not found on "
                        "the SolidFire Cluster!"), volume['id'])

        LOG.debug(_("Leaving SolidFire delete_volume"))

    def ensure_export(self, context, volume):
        """Verify the iscsi export info."""
        LOG.debug(_("Executing SolidFire ensure_export..."))
        return self._do_export(volume)

    def create_export(self, context, volume):
        """Set up the iscsi export info."""
        LOG.debug(_("Executing SolidFire create_export..."))
        return self._do_export(volume)

    def delete_snapshot(self, snapshot):
        """Delete the specified snapshot from the SolidFire cluster."""
        self.delete_volume(snapshot)

    def create_snapshot(self, snapshot):
        """Create a snapshot of a volume on the SolidFire cluster.

        Note that for SolidFire Clusters currently there is no snapshot
        implementation. Due to the way SF does cloning there's no performance
        hit or extra space used. The only thing that's lacking from this is
        the ability to restore snaps.

        After GA a true snapshot implementation will be available with
        restore at which time we'll rework this appropriately.

        """
        (data, sfaccount, model) = self._do_clone_volume(
            snapshot['volume_id'],
            snapshot['project_id'],
            snapshot)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from the specified snapshot."""
        (data, sfaccount, model) = self._do_clone_volume(
            snapshot['id'],
            snapshot['project_id'],
            volume)

        return model

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, run the update first. The name is a bit
        misleading, as the majority of the data here is cluster data.

        """
        if refresh:
            self._update_cluster_status()

        return self.cluster_stats

    def _update_cluster_status(self):
        """Retrieve status info for the Cluster."""
        LOG.debug(_("Updating cluster status info"))

        params = {}

        # NOTE(jdg): The SF api provides an UNBELIEVABLE amount
        # of stats data, this is just one of the calls
        results = self._issue_api_request('GetClusterCapacity', params)
        if 'result' not in results:
            LOG.error(_('Failed to get updated stats'))

        results = results['result']['clusterCapacity']
        free_capacity = (
            results['maxProvisionedSpace'] - results['usedSpace'])

        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or self.__class__.__name__
        data["vendor_name"] = 'SolidFire Inc'
        data["driver_version"] = VERSION
        data["storage_protocol"] = 'iSCSI'

        data['total_capacity_gb'] = results['maxProvisionedSpace']
        data['free_capacity_gb'] = float(free_capacity)
        data['reserved_percentage'] = 0
        data['QoS_support'] = True
        data['compression_percent'] = results['compressionPercent']
        data['deduplication_percent'] = results['deDuplicationPercent']
        data['thin_provision_percent'] = results['thinProvisioningPercent']
        self.cluster_stats = data
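
Editor's note: for reference, a minimal sketch of the JSON-RPC request body
that _issue_api_request posts to /json-rpc/1.0. The method name and the
credentials below are placeholders, not a live SolidFire endpoint.

import base64
import json
import uuid

command = {'method': 'GetClusterInfo',   # any SolidFire API method name
           'id': int(uuid.uuid4()),      # fresh id per attempt; never reused
           'params': {}}
payload = json.dumps(command, ensure_ascii=False)

# HTTP Basic auth header; b64encode (unlike the deprecated encodestring used
# in the driver above) adds no trailing newline, so nothing to strip.
auth_key = base64.b64encode(b'admin:secret')
headers = {'Content-Type': 'application/json-rpc; charset=utf-8',
           'Authorization': b'Basic ' + auth_key}
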
File diff suppressed because it is too large
@ -1,246 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Windows Server 2012.

This driver requires the iSCSI Target Server role to be installed.

"""
import os
import sys

from oslo.config import cfg

from manila import exception
from manila import flags
from manila.openstack.common import log as logging
from manila.volume import driver

# Check needed for unit testing on Unix
if os.name == 'nt':
    import wmi


LOG = logging.getLogger(__name__)

FLAGS = flags.FLAGS

windows_opts = [
    cfg.StrOpt('windows_iscsi_lun_path',
               default=r'C:\iSCSIVirtualDisks',
               help='Path to store VHD backed volumes'),
]

FLAGS.register_opts(windows_opts)


class WindowsDriver(driver.ISCSIDriver):
    """Executes volume driver commands on a Windows Storage server."""

    def __init__(self, *args, **kwargs):
        super(WindowsDriver, self).__init__(*args, **kwargs)

    def do_setup(self, context):
        """Set up the Windows Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about.
        """
        # Set up the WMI connections
        self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
        self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        # Invoke the portal and check that it is listening
        wt_portal = self._conn_wmi.WT_Portal()[0]
        listen = wt_portal.Listen
        if not listen:
            raise exception.VolumeBackendAPIException()

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance."""
        initiator_name = connector['initiator']
        target_name = volume['provider_location']

        cl = self._conn_wmi.__getattr__("WT_IDMethod")
        wt_idmethod = cl.new()
        wt_idmethod.HostName = target_name
        wt_idmethod.Method = 4
        wt_idmethod.Value = initiator_name
        wt_idmethod.put()
        # Get the portal and port information
        wt_portal = self._conn_wmi.WT_Portal()[0]
        (address, port) = (wt_portal.Address, wt_portal.Port)
        # Get the host information
        hosts = self._conn_wmi.WT_Host(Hostname=target_name)
        host = hosts[0]

        properties = {}
        properties['target_discovered'] = False
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = host.TargetIQN
        properties['target_lun'] = 0
        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to detach a volume from an instance.

        Mask the LUN on the storage system so the given initiator can no
        longer access it.
        """
        initiator_name = connector['initiator']
        provider_location = volume['provider_location']
        # Unassign the target from the initiator
        wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=provider_location,
                                                 Method=4,
                                                 Value=initiator_name)[0]
        wt_idmethod.Delete_()

    def create_volume(self, volume):
        """Driver entry point for creating a new volume."""
        vhd_path = self._get_vhd_path(volume)
        vol_name = volume['name']
        # The WMI procedure returns a Generic failure
        cl = self._conn_wmi.__getattr__("WT_Disk")
        cl.NewWTDisk(DevicePath=vhd_path,
                     Description=vol_name,
                     SizeInMB=volume['size'] * 1024)

    def _get_vhd_path(self, volume):
        base_vhd_folder = FLAGS.windows_iscsi_lun_path
        if not os.path.exists(base_vhd_folder):
            LOG.debug(_('Creating folder %s '), base_vhd_folder)
            os.makedirs(base_vhd_folder)
        return os.path.join(base_vhd_folder, str(volume['name']) + ".vhd")

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        vol_name = volume['name']
        wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
        wt_disk.Delete_()
        vhdfiles = self._conn_cimv2.query(
            "Select * from CIM_DataFile where Name = '" +
            self._get_vhd_path(volume) + "'")
        if len(vhdfiles) > 0:
            vhdfiles[0].Delete()

    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot."""
        # Get the WT_Snapshot class
        vol_name = snapshot['volume_name']
        snapshot_name = snapshot['name']

        wt_disk = self._conn_wmi.WT_Disk(Description=vol_name)[0]
        # The API call gets a Generic Failure
        cl = self._conn_wmi.__getattr__("WT_Snapshot")
        disk_id = wt_disk.WTD
        out = cl.Create(WTD=disk_id)
        # Set the description since it is used as a KEY
        wt_snapshot_created = self._conn_wmi.WT_Snapshot(Id=out[0])[0]
        wt_snapshot_created.Description = snapshot_name
        wt_snapshot_created.put()

    def create_volume_from_snapshot(self, volume, snapshot):
        """Driver entry point for exporting snapshots as volumes."""
        snapshot_name = snapshot['name']
        wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0]
        disk_id = wt_snapshot.Export()[0]
        wt_disk = self._conn_wmi.WT_Disk(WTD=disk_id)[0]
        wt_disk.Description = volume['name']
        wt_disk.put()

    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        snapshot_name = snapshot['name']
        wt_snapshot = self._conn_wmi.WT_Snapshot(Description=snapshot_name)[0]
        wt_snapshot.Delete_()

    def _do_export(self, _ctx, volume, ensure=False):
        """Do all steps to get the disk exported as LUN 0 at a separate target.

        :param volume: reference of volume to be exported
        :param ensure: if True, ignore errors caused by already existing
                       resources
        :return: iscsiadm-formatted provider location string
        """
        target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        # Create the iSCSI target
        try:
            cl = self._conn_wmi.__getattr__("WT_Host")
            cl.NewHost(HostName=target_name)
        except Exception as exc:
            excep_info = exc.com_error.excepinfo[2]
            if not ensure or excep_info.find(u'The file exists') == -1:
                raise
            else:
                LOG.info(_('Ignored target creation error "%s"'
                           ' while ensuring export'), exc)
        # Get the disk to add
        vol_name = volume['name']
        q = self._conn_wmi.WT_Disk(Description=vol_name)
        if not len(q):
            LOG.debug(_('Disk not found: %s'), vol_name)
            return None
        wt_disk = q[0]
        wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0]
        wt_host.AddWTDisk(wt_disk.WTD)

        return target_name

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        self._do_export(context, volume, ensure=True)

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        loc = self._do_export(context, volume, ensure=False)
        return {'provider_location': loc}

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])

        # Get the iSCSI target
        wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0]
        wt_host.RemoveAllWTDisks()
        wt_host.Delete_()

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        raise NotImplementedError()

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        raise NotImplementedError()
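
Editor's note: a small illustration of the provider_auth convention that
initialize_connection above relies on — a single "CHAP <username> <secret>"
string stored on the volume. The sample values are made up.

auth = 'CHAP chap-user s3cr3tpass'
auth_method, auth_username, auth_secret = auth.split()
properties = {'auth_method': auth_method,      # 'CHAP'
              'auth_username': auth_username,  # 'chap-user'
              'auth_password': auth_secret}    # 's3cr3tpass'
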
@ -1,13 +0,0 @@
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,542 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import os
import pickle

from manila.volume.drivers.xenapi import tools


class XenAPIException(Exception):
    def __init__(self, original_exception):
        super(XenAPIException, self).__init__(str(original_exception))
        self.original_exception = original_exception


class OperationsBase(object):
    def __init__(self, xenapi_session):
        self.session = xenapi_session

    def call_xenapi(self, method, *args):
        return self.session.call_xenapi(method, *args)


class VMOperations(OperationsBase):
    def get_by_uuid(self, vm_uuid):
        return self.call_xenapi('VM.get_by_uuid', vm_uuid)

    def get_vbds(self, vm_uuid):
        return self.call_xenapi('VM.get_VBDs', vm_uuid)


class VBDOperations(OperationsBase):
    def create(self, vm_ref, vdi_ref, userdevice, bootable, mode, type,
               empty, other_config):
        vbd_rec = dict(
            VM=vm_ref,
            VDI=vdi_ref,
            userdevice=str(userdevice),
            bootable=bootable,
            mode=mode,
            type=type,
            empty=empty,
            other_config=other_config,
            qos_algorithm_type='',
            qos_algorithm_params=dict()
        )
        return self.call_xenapi('VBD.create', vbd_rec)

    def destroy(self, vbd_ref):
        self.call_xenapi('VBD.destroy', vbd_ref)

    def get_device(self, vbd_ref):
        return self.call_xenapi('VBD.get_device', vbd_ref)

    def plug(self, vbd_ref):
        return self.call_xenapi('VBD.plug', vbd_ref)

    def unplug(self, vbd_ref):
        return self.call_xenapi('VBD.unplug', vbd_ref)

    def get_vdi(self, vbd_ref):
        return self.call_xenapi('VBD.get_VDI', vbd_ref)


class PoolOperations(OperationsBase):
    def get_all(self):
        return self.call_xenapi('pool.get_all')

    def get_default_SR(self, pool_ref):
        return self.call_xenapi('pool.get_default_SR', pool_ref)


class PbdOperations(OperationsBase):
    def get_all(self):
        return self.call_xenapi('PBD.get_all')

    def unplug(self, pbd_ref):
        self.call_xenapi('PBD.unplug', pbd_ref)

    def create(self, host_ref, sr_ref, device_config):
        return self.call_xenapi(
            'PBD.create',
            dict(
                host=host_ref,
                SR=sr_ref,
                device_config=device_config
            )
        )

    def plug(self, pbd_ref):
        self.call_xenapi('PBD.plug', pbd_ref)


class SrOperations(OperationsBase):
    def get_all(self):
        return self.call_xenapi('SR.get_all')

    def get_record(self, sr_ref):
        return self.call_xenapi('SR.get_record', sr_ref)

    def forget(self, sr_ref):
        self.call_xenapi('SR.forget', sr_ref)

    def scan(self, sr_ref):
        self.call_xenapi('SR.scan', sr_ref)

    def create(self, host_ref, device_config, name_label, name_description,
               sr_type, physical_size=None, content_type=None,
               shared=False, sm_config=None):
        return self.call_xenapi(
            'SR.create',
            host_ref,
            device_config,
            physical_size or '0',
            name_label or '',
            name_description or '',
            sr_type,
            content_type or '',
            shared,
            sm_config or dict()
        )

    def introduce(self, sr_uuid, name_label, name_description, sr_type,
                  content_type=None, shared=False, sm_config=None):
        return self.call_xenapi(
            'SR.introduce',
            sr_uuid,
            name_label or '',
            name_description or '',
            sr_type,
            content_type or '',
            shared,
            sm_config or dict()
        )

    def get_uuid(self, sr_ref):
        return self.get_record(sr_ref)['uuid']

    def get_name_label(self, sr_ref):
        return self.get_record(sr_ref)['name_label']

    def get_name_description(self, sr_ref):
        return self.get_record(sr_ref)['name_description']

    def destroy(self, sr_ref):
        self.call_xenapi('SR.destroy', sr_ref)


class VdiOperations(OperationsBase):
    def get_all(self):
        return self.call_xenapi('VDI.get_all')

    def get_record(self, vdi_ref):
        return self.call_xenapi('VDI.get_record', vdi_ref)

    def get_by_uuid(self, vdi_uuid):
        return self.call_xenapi('VDI.get_by_uuid', vdi_uuid)

    def get_uuid(self, vdi_ref):
        return self.get_record(vdi_ref)['uuid']

    def create(self, sr_ref, size, vdi_type,
               sharable=False, read_only=False, other_config=None):
        return self.call_xenapi('VDI.create',
                                dict(SR=sr_ref,
                                     virtual_size=str(size),
                                     type=vdi_type,
                                     sharable=sharable,
                                     read_only=read_only,
                                     other_config=other_config or dict()))

    def destroy(self, vdi_ref):
        self.call_xenapi('VDI.destroy', vdi_ref)

    def copy(self, vdi_ref, sr_ref):
        return self.call_xenapi('VDI.copy', vdi_ref, sr_ref)

    def resize(self, vdi_ref, size):
        return self.call_xenapi('VDI.resize', vdi_ref, str(size))


class HostOperations(OperationsBase):
    def get_record(self, host_ref):
        return self.call_xenapi('host.get_record', host_ref)

    def get_uuid(self, host_ref):
        return self.get_record(host_ref)['uuid']


class XenAPISession(object):
    def __init__(self, session, exception_to_convert):
        self._session = session
        self._exception_to_convert = exception_to_convert
        self.handle = self._session.handle
        self.PBD = PbdOperations(self)
        self.SR = SrOperations(self)
        self.VDI = VdiOperations(self)
        self.host = HostOperations(self)
        self.pool = PoolOperations(self)
        self.VBD = VBDOperations(self)
        self.VM = VMOperations(self)

    def close(self):
        return self.call_xenapi('logout')

    @contextlib.contextmanager
    def exception_converter(self):
        try:
            yield None
        except self._exception_to_convert as e:
            raise XenAPIException(e)

    def call_xenapi(self, method, *args):
        with self.exception_converter():
            return self._session.xenapi_request(method, args)

    def call_plugin(self, host_ref, plugin, function, args):
        with self.exception_converter():
            return self._session.xenapi.host.call_plugin(
                host_ref, plugin, function, args)

    def get_pool(self):
        return self.call_xenapi('session.get_pool', self.handle)

    def get_this_host(self):
        return self.call_xenapi('session.get_this_host', self.handle)


class CompoundOperations(object):
    def unplug_pbds_from_sr(self, sr_ref):
        sr_rec = self.SR.get_record(sr_ref)
        for pbd_ref in sr_rec.get('PBDs', []):
            self.PBD.unplug(pbd_ref)

    def unplug_pbds_and_forget_sr(self, sr_ref):
        self.unplug_pbds_from_sr(sr_ref)
        self.SR.forget(sr_ref)

    def create_new_vdi(self, sr_ref, size_in_gigabytes):
        return self.VDI.create(sr_ref,
                               to_bytes(size_in_gigabytes),
                               'User')


def to_bytes(size_in_gigs):
    return size_in_gigs * 1024 * 1024 * 1024


class NFSOperationsMixIn(CompoundOperations):
    def is_nfs_sr(self, sr_ref):
        return self.SR.get_record(sr_ref).get('type') == 'nfs'

    @contextlib.contextmanager
    def new_sr_on_nfs(self, host_ref, server, serverpath,
                      name_label=None, name_description=None):

        device_config = dict(
            server=server,
            serverpath=serverpath
        )
        name_label = name_label or ''
        name_description = name_description or ''
        sr_type = 'nfs'

        sr_ref = self.SR.create(
            host_ref,
            device_config,
            name_label,
            name_description,
            sr_type,
        )
        yield sr_ref

        self.unplug_pbds_and_forget_sr(sr_ref)

    def plug_nfs_sr(self, host_ref, server, serverpath, sr_uuid,
                    name_label=None, name_description=None):

        device_config = dict(
            server=server,
            serverpath=serverpath
        )
        sr_type = 'nfs'

        sr_ref = self.SR.introduce(
            sr_uuid,
            name_label,
            name_description,
            sr_type,
        )

        pbd_ref = self.PBD.create(
            host_ref,
            sr_ref,
            device_config
        )

        self.PBD.plug(pbd_ref)

        return sr_ref

    def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid):
        host_ref = self.get_this_host()
        sr_ref = self.plug_nfs_sr(
            host_ref,
            server,
            serverpath,
            sr_uuid
        )
        self.SR.scan(sr_ref)
        vdi_ref = self.VDI.get_by_uuid(vdi_uuid)
        return dict(sr_ref=sr_ref, vdi_ref=vdi_ref)

    def copy_vdi_to_sr(self, vdi_ref, sr_ref):
        return self.VDI.copy(vdi_ref, sr_ref)


class ContextAwareSession(XenAPISession):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()


class OpenStackXenAPISession(ContextAwareSession,
                             NFSOperationsMixIn):
    pass


def connect(url, user, password):
    import XenAPI
    session = XenAPI.Session(url)
    session.login_with_password(user, password)
    return OpenStackXenAPISession(session, XenAPI.Failure)


class SessionFactory(object):
    def __init__(self, url, user, password):
        self.url = url
        self.user = user
        self.password = password

    def get_session(self):
        return connect(self.url, self.user, self.password)


class XapiPluginProxy(object):
    def __init__(self, session_factory, plugin_name):
        self._session_factory = session_factory
        self._plugin_name = plugin_name

    def call(self, function, *plugin_args, **plugin_kwargs):
        plugin_params = dict(args=plugin_args, kwargs=plugin_kwargs)
        args = dict(params=pickle.dumps(plugin_params))

        with self._session_factory.get_session() as session:
            host_ref = session.get_this_host()
            result = session.call_plugin(
                host_ref, self._plugin_name, function, args)

        return pickle.loads(result)


class GlancePluginProxy(XapiPluginProxy):
    def __init__(self, session_factory):
        super(GlancePluginProxy, self).__init__(session_factory, 'glance')

    def download_vhd(self, image_id, glance_host, glance_port, glance_use_ssl,
                     uuid_stack, sr_path, auth_token):
        return self.call(
            'download_vhd',
            image_id=image_id,
            glance_host=glance_host,
            glance_port=glance_port,
            glance_use_ssl=glance_use_ssl,
            uuid_stack=uuid_stack,
            sr_path=sr_path,
            auth_token=auth_token)

    def upload_vhd(self, vdi_uuids, image_id, glance_host, glance_port,
                   glance_use_ssl, sr_path, auth_token, properties):
        return self.call(
            'upload_vhd',
            vdi_uuids=vdi_uuids,
            image_id=image_id,
            glance_host=glance_host,
            glance_port=glance_port,
            glance_use_ssl=glance_use_ssl,
            sr_path=sr_path,
            auth_token=auth_token,
            properties=properties)


class NFSBasedVolumeOperations(object):
    def __init__(self, session_factory):
        self._session_factory = session_factory
        self.glance_plugin = GlancePluginProxy(session_factory)

    def create_volume(self, server, serverpath, size,
                      name=None, description=None):
        with self._session_factory.get_session() as session:
            host_ref = session.get_this_host()
            with session.new_sr_on_nfs(host_ref, server, serverpath,
                                       name, description) as sr_ref:
                vdi_ref = session.create_new_vdi(sr_ref, size)

                return dict(
                    sr_uuid=session.SR.get_uuid(sr_ref),
                    vdi_uuid=session.VDI.get_uuid(vdi_ref)
                )

    def delete_volume(self, server, serverpath, sr_uuid, vdi_uuid):
        with self._session_factory.get_session() as session:
            refs = session.connect_volume(
                server, serverpath, sr_uuid, vdi_uuid)

            session.VDI.destroy(refs['vdi_ref'])
            sr_ref = refs['sr_ref']
            session.unplug_pbds_from_sr(sr_ref)
            session.SR.destroy(sr_ref)

    def connect_volume(self, server, serverpath, sr_uuid, vdi_uuid):
        with self._session_factory.get_session() as session:
            refs = session.connect_volume(
                server, serverpath, sr_uuid, vdi_uuid)

            return session.VDI.get_uuid(refs['vdi_ref'])

    def disconnect_volume(self, vdi_uuid):
        with self._session_factory.get_session() as session:
            vdi_ref = session.VDI.get_by_uuid(vdi_uuid)
            vdi_rec = session.VDI.get_record(vdi_ref)
            sr_ref = vdi_rec['SR']
            session.unplug_pbds_and_forget_sr(sr_ref)

    def copy_volume(self, server, serverpath, sr_uuid, vdi_uuid,
                    name=None, description=None):
        with self._session_factory.get_session() as session:
            src_refs = session.connect_volume(
                server, serverpath, sr_uuid, vdi_uuid)
            try:
                host_ref = session.get_this_host()

                with session.new_sr_on_nfs(host_ref, server, serverpath,
                                           name, description) as target_sr_ref:
                    target_vdi_ref = session.copy_vdi_to_sr(
                        src_refs['vdi_ref'], target_sr_ref)

                    dst_refs = dict(
                        sr_uuid=session.SR.get_uuid(target_sr_ref),
                        vdi_uuid=session.VDI.get_uuid(target_vdi_ref)
                    )

            finally:
                session.unplug_pbds_and_forget_sr(src_refs['sr_ref'])

        return dst_refs

    def resize_volume(self, server, serverpath, sr_uuid, vdi_uuid,
                      size_in_gigabytes):
        self.connect_volume(server, serverpath, sr_uuid, vdi_uuid)

        try:
            with self._session_factory.get_session() as session:
                vdi_ref = session.VDI.get_by_uuid(vdi_uuid)
                session.VDI.resize(vdi_ref, to_bytes(size_in_gigabytes))
        finally:
            self.disconnect_volume(vdi_uuid)

    def use_glance_plugin_to_overwrite_volume(self, server, serverpath,
                                              sr_uuid, vdi_uuid, glance_server,
                                              image_id, auth_token,
                                              sr_base_path):
        self.connect_volume(server, serverpath, sr_uuid, vdi_uuid)

        uuid_stack = [vdi_uuid]
        glance_host, glance_port, glance_use_ssl = glance_server

        try:
            result = self.glance_plugin.download_vhd(
                image_id, glance_host, glance_port, glance_use_ssl, uuid_stack,
                os.path.join(sr_base_path, sr_uuid), auth_token)
        finally:
            self.disconnect_volume(vdi_uuid)

        if len(result) != 1 or result['root']['uuid'] != vdi_uuid:
            return False

        return True

    def use_glance_plugin_to_upload_volume(self, server, serverpath,
                                           sr_uuid, vdi_uuid, glance_server,
                                           image_id, auth_token, sr_base_path):
        self.connect_volume(server, serverpath, sr_uuid, vdi_uuid)

        vdi_uuids = [vdi_uuid]
        glance_host, glance_port, glance_use_ssl = glance_server

        try:
            result = self.glance_plugin.upload_vhd(
                vdi_uuids, image_id, glance_host, glance_port, glance_use_ssl,
                os.path.join(sr_base_path, sr_uuid), auth_token, dict())
        finally:
            self.disconnect_volume(vdi_uuid)

    @contextlib.contextmanager
    def volume_attached_here(self, server, serverpath, sr_uuid, vdi_uuid,
                             readonly=True):
        self.connect_volume(server, serverpath, sr_uuid, vdi_uuid)

        with self._session_factory.get_session() as session:
            vm_uuid = tools.get_this_vm_uuid()
            vm_ref = session.VM.get_by_uuid(vm_uuid)
            vdi_ref = session.VDI.get_by_uuid(vdi_uuid)
            vbd_ref = session.VBD.create(
                vm_ref, vdi_ref, userdevice='autodetect', bootable=False,
                mode='RO' if readonly else 'RW', type='disk', empty=False,
                other_config=dict())
            session.VBD.plug(vbd_ref)
            device = session.VBD.get_device(vbd_ref)
            try:
                yield "/dev/" + device
            finally:
                session.VBD.unplug(vbd_ref)
                session.VBD.destroy(vbd_ref)
                self.disconnect_volume(vdi_uuid)
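
Editor's note: a usage sketch for the helpers above, assuming a reachable
XenAPI host with the XenAPI Python bindings installed. The URL, credentials
and NFS export below are placeholders, not a real deployment.

factory = SessionFactory('https://xenserver.example.com', 'root', 'secret')
ops = NFSBasedVolumeOperations(factory)

# Create a 1 GB VDI on its own NFS SR; the SR is forgotten (not destroyed)
# when new_sr_on_nfs exits, so the data stays on the share.
refs = ops.create_volume('nfs.example.com', '/export/volumes', 1,
                         name='demo', description='demo volume')

# Reconnect later by the (sr_uuid, vdi_uuid) pair, then clean up.
ops.delete_volume('nfs.example.com', '/export/volumes',
                  refs['sr_uuid'], refs['vdi_uuid'])
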
@ -1,272 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from manila import exception
from manila import flags
from manila.image import glance
from manila.image import image_utils
from manila.openstack.common import log as logging
from manila.volume import driver
from manila.volume.drivers.xenapi import lib as xenapi_lib

LOG = logging.getLogger(__name__)

xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               default=None,
               help='URL for XenAPI connection'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for XenAPI connection'),
    cfg.StrOpt('xenapi_connection_password',
               default=None,
               help='Password for XenAPI connection',
               secret=True),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
]

xenapi_nfs_opts = [
    cfg.StrOpt('xenapi_nfs_server',
               default=None,
               help='NFS server to be used by XenAPINFSDriver'),
    cfg.StrOpt('xenapi_nfs_serverpath',
               default=None,
               help='Path of exported NFS, used by XenAPINFSDriver'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_opts)
FLAGS.register_opts(xenapi_nfs_opts)


class XenAPINFSDriver(driver.VolumeDriver):
    def __init__(self, *args, **kwargs):
        super(XenAPINFSDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(xenapi_opts)
        self.configuration.append_config_values(xenapi_nfs_opts)

    def do_setup(self, context):
        session_factory = xenapi_lib.SessionFactory(
            self.configuration.xenapi_connection_url,
            self.configuration.xenapi_connection_username,
            self.configuration.xenapi_connection_password
        )
        self.nfs_ops = xenapi_lib.NFSBasedVolumeOperations(session_factory)

    def create_cloned_volume(self, volume, src_vref):
        raise NotImplementedError()

    def create_volume(self, volume):
        volume_details = self.nfs_ops.create_volume(
            self.configuration.xenapi_nfs_server,
            self.configuration.xenapi_nfs_serverpath,
            volume['size'],
            volume['display_name'],
            volume['display_description']
        )
        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
        return dict(provider_location=location)

    def create_export(self, context, volume):
        pass

    def delete_volume(self, volume):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')

        self.nfs_ops.delete_volume(
            self.configuration.xenapi_nfs_server,
            self.configuration.xenapi_nfs_serverpath,
            sr_uuid,
            vdi_uuid
        )

    def remove_export(self, context, volume):
        pass

    def initialize_connection(self, volume, connector):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')

        return dict(
            driver_volume_type='xensm',
            data=dict(
                name_label=volume['display_name'] or '',
                name_description=volume['display_description'] or '',
                sr_uuid=sr_uuid,
                vdi_uuid=vdi_uuid,
                sr_type='nfs',
                server=self.configuration.xenapi_nfs_server,
                serverpath=self.configuration.xenapi_nfs_serverpath,
                introduce_sr_keys=['sr_type', 'server', 'serverpath']
            )
        )

    def terminate_connection(self, volume, connector, force=False, **kwargs):
        pass

    def check_for_setup_error(self):
        """Override the superclass' method."""

    def create_volume_from_snapshot(self, volume, snapshot):
        return self._copy_volume(
            snapshot, volume['display_name'], volume['display_description'])

    def create_snapshot(self, snapshot):
        volume = snapshot['volume']
        return self._copy_volume(
            volume, snapshot['display_name'], snapshot['display_description'])

    def _copy_volume(self, volume, target_name, target_desc):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')

        volume_details = self.nfs_ops.copy_volume(
            self.configuration.xenapi_nfs_server,
            self.configuration.xenapi_nfs_serverpath,
            sr_uuid,
            vdi_uuid,
            target_name,
            target_desc
        )
        location = "%(sr_uuid)s/%(vdi_uuid)s" % volume_details
        return dict(provider_location=location)

    def delete_snapshot(self, snapshot):
        self.delete_volume(snapshot)

    def ensure_export(self, context, volume):
        pass

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        if is_xenserver_image(context, image_service, image_id):
            return self._use_glance_plugin_to_copy_image_to_volume(
                context, volume, image_service, image_id)

        return self._use_image_utils_to_pipe_bytes_to_volume(
            context, volume, image_service, image_id)

    def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
                                                 image_service, image_id):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
                                               FLAGS.xenapi_nfs_serverpath,
                                               sr_uuid, vdi_uuid,
                                               False) as device:
            image_utils.fetch_to_raw(context,
                                     image_service,
                                     image_id,
                                     device)

    def _use_glance_plugin_to_copy_image_to_volume(self, context, volume,
                                                   image_service, image_id):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')

        api_servers = glance.get_api_servers()
        glance_server = api_servers.next()
        auth_token = context.auth_token

        overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
            FLAGS.xenapi_nfs_server,
            FLAGS.xenapi_nfs_serverpath,
            sr_uuid,
            vdi_uuid,
            glance_server,
            image_id,
            auth_token,
            FLAGS.xenapi_sr_base_path)

        if overwrite_result is False:
            raise exception.ImageCopyFailure()

        self.nfs_ops.resize_volume(
            FLAGS.xenapi_nfs_server,
            FLAGS.xenapi_nfs_serverpath,
            sr_uuid,
            vdi_uuid,
            volume['size'])

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        if is_xenserver_format(image_meta):
            return self._use_glance_plugin_to_upload_volume(
                context, volume, image_service, image_meta)

        return self._use_image_utils_to_upload_volume(
            context, volume, image_service, image_meta)

    def _use_image_utils_to_upload_volume(self, context, volume, image_service,
                                          image_meta):
        sr_uuid, vdi_uuid = volume['provider_location'].split('/')
        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
                                               FLAGS.xenapi_nfs_serverpath,
                                               sr_uuid, vdi_uuid,
                                               True) as device:
            image_utils.upload_volume(context,
                                      image_service,
                                      image_meta,
                                      device)

    def _use_glance_plugin_to_upload_volume(self, context, volume,
                                            image_service, image_meta):
        image_id = image_meta['id']

        sr_uuid, vdi_uuid = volume['provider_location'].split('/')

        api_servers = glance.get_api_servers()
        glance_server = api_servers.next()
        auth_token = context.auth_token

        self.nfs_ops.use_glance_plugin_to_upload_volume(
            FLAGS.xenapi_nfs_server,
            FLAGS.xenapi_nfs_serverpath,
            sr_uuid,
            vdi_uuid,
            glance_server,
            image_id,
            auth_token,
            FLAGS.xenapi_sr_base_path)

    def get_volume_stats(self, refresh=False):
        if refresh or not self._stats:
            data = {}

            backend_name = self.configuration.safe_get('volume_backend_name')
            data["volume_backend_name"] = backend_name or 'XenAPINFS'
            data['vendor_name'] = 'Open Source'
            data['driver_version'] = '1.0'
            data['storage_protocol'] = 'xensm'
            data['total_capacity_gb'] = 'unknown'
            data['free_capacity_gb'] = 'unknown'
            data['reserved_percentage'] = 0
            self._stats = data

        return self._stats


def is_xenserver_image(context, image_service, image_id):
    image_meta = image_service.show(context, image_id)
    return is_xenserver_format(image_meta)


def is_xenserver_format(image_meta):
    return (
        image_meta['disk_format'] == 'vhd'
        and image_meta['container_format'] == 'ovf'
    )
@ -1,7 +0,0 @@
|
||||
def _stripped_first_line_of(filename):
|
||||
with open(filename, 'rb') as f:
|
||||
return f.readline().strip()
|
||||
|
||||
|
||||
def get_this_vm_uuid():
|
||||
return _stripped_first_line_of('/sys/hypervisor/uuid')
|
@ -1,122 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 IBM Corp.
|
||||
# Copyright (c) 2012 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Authors:
|
||||
# Erik Zaadi <erikz@il.ibm.com>
|
||||
# Avishay Traeger <avishay@il.ibm.com>
|
||||
|
||||
"""
|
||||
Volume driver for IBM XIV storage systems.
|
||||
"""
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import importutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.volume.drivers.san import san
|
||||
|
||||
ibm_xiv_opts = [
|
||||
cfg.StrOpt('xiv_proxy',
|
||||
default='xiv_openstack.nova_proxy.XIVNovaProxy',
|
||||
help='Proxy driver'),
|
||||
]
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.register_opts(ibm_xiv_opts)
|
||||
|
||||
LOG = logging.getLogger('manila.volume.xiv')
|
||||
|
||||
|
||||
class XIVDriver(san.SanISCSIDriver):
|
||||
"""IBM XIV volume driver."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Initialize the driver."""
|
||||
|
||||
proxy = importutils.import_class(FLAGS.xiv_proxy)
|
||||
|
||||
self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
|
||||
"xiv_pass": FLAGS.san_password,
|
||||
"xiv_address": FLAGS.san_ip,
|
||||
"xiv_vol_pool": FLAGS.san_clustername},
|
||||
LOG,
|
||||
exception)
|
||||
san.SanISCSIDriver.__init__(self, *args, **kwargs)
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Setup and verify IBM XIV storage connection."""
|
||||
|
||||
self.xiv_proxy.setup(context)
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Ensure an export."""
|
||||
|
||||
return self.xiv_proxy.ensure_export(context, volume)
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Create an export."""
|
||||
|
||||
return self.xiv_proxy.create_export(context, volume)
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create a volume on the IBM XIV storage system."""
|
||||
|
||||
return self.xiv_proxy.create_volume(volume)
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""Delete a volume on the IBM XIV storage system."""
|
||||
|
||||
self.xiv_proxy.delete_volume(volume)
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Disconnect a volume from an attached instance."""
|
||||
|
||||
return self.xiv_proxy.remove_export(context, volume)
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""Map the created volume."""
|
||||
|
||||
return self.xiv_proxy.initialize_connection(volume, connector)
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
"""Terminate a connection to a volume."""
|
||||
|
||||
return self.xiv_proxy.terminate_connection(volume, connector)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
"""Create a volume from a snapshot."""
|
||||
|
||||
return self.xiv_proxy.create_volume_from_snapshot(volume,
|
||||
snapshot)
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Create a snapshot."""
|
||||
|
||||
return self.xiv_proxy.create_snapshot(snapshot)
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Delete a snapshot."""
|
||||
|
||||
return self.xiv_proxy.delete_snapshot(snapshot)
|
||||
|
||||
def get_volume_stats(self, refresh=False):
|
||||
"""Get volume stats."""
|
||||
|
||||
return self.xiv_proxy.get_volume_stats(refresh)
|
@ -1,491 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (c) 2012 Zadara Storage, Inc.
|
||||
# Copyright (c) 2012 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Volume driver for Zadara Virtual Private Storage Array (VPSA).
|
||||
|
||||
This driver requires VPSA with API ver.12.06 or higher.
|
||||
"""
|
||||
|
||||
import httplib
|
||||
|
||||
from lxml import etree
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import log as logging
|
||||
from manila import utils
|
||||
from manila.volume import driver
|
||||
|
||||
LOG = logging.getLogger("manila.volume.driver")
|
||||
|
||||
zadara_opts = [
|
||||
cfg.StrOpt('zadara_vpsa_ip',
|
||||
default=None,
|
||||
help='Management IP of Zadara VPSA'),
|
||||
cfg.StrOpt('zadara_vpsa_port',
|
||||
default=None,
|
||||
help='Zadara VPSA port number'),
|
||||
cfg.BoolOpt('zadara_vpsa_use_ssl',
|
||||
default=False,
|
||||
help='Use SSL connection'),
|
||||
cfg.StrOpt('zadara_user',
|
||||
default=None,
|
||||
help='User name for the VPSA'),
|
||||
cfg.StrOpt('zadara_password',
|
||||
default=None,
|
||||
help='Password for the VPSA',
|
||||
secret=True),
|
||||
|
||||
cfg.StrOpt('zadara_vpsa_poolname',
|
||||
default=None,
|
||||
help='Name of VPSA storage pool for volumes'),
|
||||
|
||||
cfg.StrOpt('zadara_default_cache_policy',
|
||||
default='write-through',
|
||||
help='Default cache policy for volumes'),
|
||||
cfg.StrOpt('zadara_default_encryption',
|
||||
default='NO',
|
||||
help='Default encryption policy for volumes'),
|
||||
cfg.StrOpt('zadara_default_striping_mode',
|
||||
default='simple',
|
||||
help='Default striping mode for volumes'),
|
||||
cfg.StrOpt('zadara_default_stripesize',
|
||||
default='64',
|
||||
help='Default stripe size for volumes'),
|
||||
cfg.StrOpt('zadara_vol_name_template',
|
||||
default='OS_%s',
|
||||
help='Default template for VPSA volume names'),
|
||||
cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete',
|
||||
default=True,
|
||||
help="Automatically detach from servers on volume delete"),
|
||||
cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete',
|
||||
default=True,
|
||||
help="Don't halt on deletion of non-existing volumes"), ]
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
FLAGS.register_opts(zadara_opts)
|
||||
|
||||
|
||||
class ZadaraVPSAConnection(object):
|
||||
"""Executes volume driver commands on VPSA."""
|
||||
|
||||
def __init__(self, host, port, ssl, user, password):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.use_ssl = ssl
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.access_key = None
|
||||
|
||||
self.ensure_connection()
|
||||
|
||||
def _generate_vpsa_cmd(self, cmd, **kwargs):
|
||||
"""Generate command to be sent to VPSA."""
|
||||
|
||||
def _joined_params(params):
|
||||
param_str = []
|
||||
for k, v in params.items():
|
||||
param_str.append("%s=%s" % (k, v))
|
||||
return '&'.join(param_str)
|
||||
|
||||
# Dictionary of applicable VPSA commands in the following format:
|
||||
# 'command': (method, API_URL, {optional parameters})
|
||||
vpsa_commands = {
|
||||
'login': ('POST',
|
||||
'/api/users/login.xml',
|
||||
{'user': self.user,
|
||||
'password': self.password}),
|
||||
|
||||
# Volume operations
|
||||
'create_volume': ('POST',
|
||||
'/api/volumes.xml',
|
||||
{'display_name': kwargs.get('name'),
|
||||
'virtual_capacity': kwargs.get('size'),
|
||||
'raid_group_name[]': FLAGS.zadara_vpsa_poolname,
|
||||
'quantity': 1,
|
||||
'cache': FLAGS.zadara_default_cache_policy,
|
||||
'crypt': FLAGS.zadara_default_encryption,
|
||||
'mode': FLAGS.zadara_default_striping_mode,
|
||||
'stripesize': FLAGS.zadara_default_stripesize,
|
||||
'force': 'NO'}),
|
||||
'delete_volume': ('DELETE',
|
||||
'/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
|
||||
{}),
|
||||
|
||||
# Server operations
|
||||
'create_server': ('POST',
|
||||
'/api/servers.xml',
|
||||
{'display_name': kwargs.get('initiator'),
|
||||
'iqn': kwargs.get('initiator')}),
|
||||
|
||||
# Attach/Detach operations
|
||||
'attach_volume': ('POST',
|
||||
'/api/servers/%s/volumes.xml'
|
||||
% kwargs.get('vpsa_srv'),
|
||||
{'volume_name[]': kwargs.get('vpsa_vol'),
|
||||
'force': 'NO'}),
|
||||
'detach_volume': ('POST',
|
||||
'/api/volumes/%s/detach.xml'
|
||||
% kwargs.get('vpsa_vol'),
|
||||
{'server_name[]': kwargs.get('vpsa_srv'),
|
||||
'force': 'NO'}),
|
||||
|
||||
# Get operations
|
||||
'list_volumes': ('GET',
|
||||
'/api/volumes.xml',
|
||||
{}),
|
||||
'list_controllers': ('GET',
|
||||
'/api/vcontrollers.xml',
|
||||
{}),
|
||||
'list_servers': ('GET',
|
||||
'/api/servers.xml',
|
||||
{}),
|
||||
'list_vol_attachments': ('GET',
|
||||
'/api/volumes/%s/servers.xml'
|
||||
% kwargs.get('vpsa_vol'),
|
||||
{}), }
|
||||
|
||||
if cmd not in vpsa_commands.keys():
|
||||
raise exception.UnknownCmd(cmd=cmd)
|
||||
else:
|
||||
(method, url, params) = vpsa_commands[cmd]
|
||||
|
||||
if method == 'GET':
|
||||
# For GET commands add parameters to the URL
|
||||
params.update(dict(access_key=self.access_key,
|
||||
page=1, start=0, limit=0))
|
||||
url += '?' + _joined_params(params)
|
||||
body = ''
|
||||
|
||||
elif method == 'DELETE':
|
||||
# For DELETE commands add parameters to the URL
|
||||
params.update(dict(access_key=self.access_key))
|
||||
url += '?' + _joined_params(params)
|
||||
body = ''
|
||||
|
||||
elif method == 'POST':
|
||||
if self.access_key:
|
||||
params.update(dict(access_key=self.access_key))
|
||||
body = _joined_params(params)
|
||||
|
||||
else:
|
||||
raise exception.UnknownCmd(cmd=method)
|
||||
|
||||
return (method, url, body)
|
||||
|
||||
def ensure_connection(self, cmd=None):
|
||||
"""Retrieve access key for VPSA connection."""
|
||||
|
||||
if self.access_key or cmd == 'login':
|
||||
return
|
||||
|
||||
cmd = 'login'
|
||||
xml_tree = self.send_cmd(cmd)
|
||||
user = xml_tree.find('user')
|
||||
if user is None:
|
||||
raise exception.MalformedResponse(cmd=cmd,
|
||||
reason='no "user" field')
|
||||
|
||||
access_key = user.findtext('access-key')
|
||||
if access_key is None:
|
||||
raise exception.MalformedResponse(cmd=cmd,
|
||||
reason='no "access-key" field')
|
||||
|
||||
self.access_key = access_key
|
||||
|
||||
def send_cmd(self, cmd, **kwargs):
|
||||
"""Send command to VPSA Controller."""
|
||||
|
||||
self.ensure_connection(cmd)
|
||||
|
||||
(method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
|
||||
LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"')
|
||||
% locals())
|
||||
|
||||
if self.use_ssl:
|
||||
connection = httplib.HTTPSConnection(self.host, self.port)
|
||||
else:
|
||||
connection = httplib.HTTPConnection(self.host, self.port)
|
||||
connection.request(method, url, body)
|
||||
response = connection.getresponse()
|
||||
|
||||
if response.status != 200:
|
||||
connection.close()
|
||||
raise exception.BadHTTPResponseStatus(status=response.status)
|
||||
data = response.read()
|
||||
connection.close()
|
||||
|
||||
xml_tree = etree.fromstring(data)
|
||||
status = xml_tree.findtext('status')
|
||||
if status != '0':
|
||||
raise exception.FailedCmdWithDump(status=status, data=data)
|
||||
|
||||
if method in ['POST', 'DELETE']:
|
||||
LOG.debug(_('Operation completed. %(data)s') % locals())
|
||||
return xml_tree
|
||||
|
||||
|
||||
class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
|
||||
"""Zadara VPSA iSCSI volume driver."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def do_setup(self, context):
|
||||
"""
|
||||
Any initialization the volume driver does while starting.
|
||||
Establishes initial connection with VPSA and retrieves access_key.
|
||||
"""
|
||||
self.vpsa = ZadaraVPSAConnection(FLAGS.zadara_vpsa_ip,
|
||||
FLAGS.zadara_vpsa_port,
|
||||
FLAGS.zadara_vpsa_use_ssl,
|
||||
FLAGS.zadara_user,
|
||||
FLAGS.zadara_password)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error (exception) if prerequisites aren't met."""
|
||||
self.vpsa.ensure_connection()
|
||||
|
||||
def local_path(self, volume):
|
||||
"""Return local path to existing local volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _xml_parse_helper(self, xml_tree, first_level, search_tuple,
|
||||
first=True):
|
||||
"""
|
||||
Helper for parsing VPSA's XML output.
|
||||
|
||||
Returns single item if first==True or list for multiple selection.
|
||||
If second argument in search_tuple is None - returns all items with
|
||||
appropriate key.
|
||||
"""
|
||||
|
||||
objects = xml_tree.find(first_level)
|
||||
if objects is None:
|
||||
return None
|
||||
|
||||
result_list = []
|
||||
(key, value) = search_tuple
|
||||
for object in objects.getchildren():
|
||||
found_value = object.findtext(key)
|
||||
if found_value and (found_value == value or value is None):
|
||||
if first:
|
||||
return object
|
||||
else:
|
||||
result_list.append(object)
|
||||
return result_list if result_list else None
|
||||
|
||||
def _get_vpsa_volume_name(self, name):
|
||||
"""Return VPSA's name for the volume."""
|
||||
xml_tree = self.vpsa.send_cmd('list_volumes')
|
||||
volume = self._xml_parse_helper(xml_tree, 'volumes',
|
||||
('display-name', name))
|
||||
if volume is not None:
|
||||
return volume.findtext('name')
|
||||
|
||||
return None
|
||||
|
||||
def _get_active_controller_details(self):
|
||||
"""Return details of VPSA's active controller."""
|
||||
xml_tree = self.vpsa.send_cmd('list_controllers')
|
||||
ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
|
||||
('state', 'active'))
|
||||
if ctrl is not None:
|
||||
return dict(target=ctrl.findtext('target'),
|
||||
ip=ctrl.findtext('iscsi-ip'),
|
||||
chap_user=ctrl.findtext('chap-username'),
|
||||
chap_passwd=ctrl.findtext('chap-target-secret'))
|
||||
return None
|
||||
|
||||
def _get_server_name(self, initiator):
|
||||
"""Return VPSA's name for server object with given IQN."""
|
||||
xml_tree = self.vpsa.send_cmd('list_servers')
|
||||
server = self._xml_parse_helper(xml_tree, 'servers',
|
||||
('iqn', initiator))
|
||||
if server is not None:
|
||||
return server.findtext('name')
|
||||
return None
|
||||
|
||||
def _create_vpsa_server(self, initiator):
|
||||
"""Create server object within VPSA (if doesn't exist)."""
|
||||
vpsa_srv = self._get_server_name(initiator)
|
||||
if not vpsa_srv:
|
||||
xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator)
|
||||
vpsa_srv = xml_tree.findtext('server-name')
|
||||
return vpsa_srv
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create volume."""
|
||||
self.vpsa.send_cmd(
|
||||
'create_volume',
|
||||
name=FLAGS.zadara_vol_name_template % volume['name'],
|
||||
size=volume['size'])
|
||||
|
||||
def delete_volume(self, volume):
|
||||
"""
|
||||
Delete volume.
|
||||
|
||||
Return ok if doesn't exist. Auto detach from all servers.
|
||||
"""
|
||||
# Get volume name
|
||||
name = FLAGS.zadara_vol_name_template % volume['name']
|
||||
vpsa_vol = self._get_vpsa_volume_name(name)
|
||||
if not vpsa_vol:
|
||||
msg = _('Volume %(name)s could not be found. '
|
||||
'It might be already deleted') % locals()
|
||||
LOG.warning(msg)
|
||||
if FLAGS.zadara_vpsa_allow_nonexistent_delete:
|
||||
return
|
||||
else:
|
||||
raise exception.VolumeNotFound(volume_id=name)
|
||||
|
||||
# Check attachment info and detach from all
|
||||
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
|
||||
vpsa_vol=vpsa_vol)
|
||||
servers = self._xml_parse_helper(xml_tree, 'servers',
|
||||
('iqn', None), first=False)
|
||||
if servers:
|
||||
if not FLAGS.zadara_vpsa_auto_detach_on_delete:
|
||||
raise exception.VolumeAttached(volume_id=name)
|
||||
|
||||
for server in servers:
|
||||
vpsa_srv = server.findtext('name')
|
||||
if vpsa_srv:
|
||||
self.vpsa.send_cmd('detach_volume',
|
||||
vpsa_srv=vpsa_srv,
|
||||
vpsa_vol=vpsa_vol)
|
||||
|
||||
# Delete volume
|
||||
self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
|
||||
|
||||
def create_export(self, context, volume):
|
||||
"""Irrelevant for VPSA volumes. Export created during attachment."""
|
||||
pass
|
||||
|
||||
def ensure_export(self, context, volume):
|
||||
"""Irrelevant for VPSA volumes. Export created during attachment."""
|
||||
pass
|
||||
|
||||
def remove_export(self, context, volume):
|
||||
"""Irrelevant for VPSA volumes. Export removed during detach."""
|
||||
pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
|
||||
"""
|
||||
Attach volume to initiator/host.
|
||||
|
||||
During this call VPSA exposes volume to particular Initiator. It also
|
||||
creates a 'server' entity for Initiator (if it was not created before)
|
||||
|
||||
All necessary connection information is returned, including auth data.
|
||||
Connection data (target, LUN) is not stored in the DB.
|
||||
"""
|
||||
|
||||
# Get/Create server name for IQN
|
||||
initiator_name = connector['initiator']
|
||||
vpsa_srv = self._create_vpsa_server(initiator_name)
|
||||
if not vpsa_srv:
|
||||
raise exception.ZadaraServerCreateFailure(name=initiator_name)
|
||||
|
||||
# Get volume name
|
||||
name = FLAGS.zadara_vol_name_template % volume['name']
|
||||
vpsa_vol = self._get_vpsa_volume_name(name)
|
||||
if not vpsa_vol:
|
||||
raise exception.VolumeNotFound(volume_id=name)
|
||||
|
||||
# Get Active controller details
|
||||
ctrl = self._get_active_controller_details()
|
||||
if not ctrl:
|
||||
raise exception.ZadaraVPSANoActiveController()
|
||||
|
||||
# Attach volume to server
|
||||
self.vpsa.send_cmd('attach_volume',
|
||||
vpsa_srv=vpsa_srv,
|
||||
vpsa_vol=vpsa_vol)
|
||||
|
||||
# Get connection info
|
||||
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
|
||||
vpsa_vol=vpsa_vol)
|
||||
server = self._xml_parse_helper(xml_tree, 'servers',
|
||||
('iqn', initiator_name))
|
||||
if server is None:
|
||||
raise exception.ZadaraAttachmentsNotFound(name=name)
|
||||
target = server.findtext('target')
|
||||
lun = server.findtext('lun')
|
||||
if target is None or lun is None:
|
||||
raise exception.ZadaraInvalidAttachmentInfo(
|
||||
name=name,
|
||||
reason='target=%s, lun=%s' % (target, lun))
|
||||
|
||||
properties = {}
|
||||
properties['target_discovered'] = False
|
||||
properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260')
|
||||
properties['target_iqn'] = target
|
||||
properties['target_lun'] = lun
|
||||
properties['volume_id'] = volume['id']
|
||||
|
||||
properties['auth_method'] = 'CHAP'
|
||||
properties['auth_username'] = ctrl['chap_user']
|
||||
properties['auth_password'] = ctrl['chap_passwd']
|
||||
|
||||
LOG.debug(_('Attach properties: %(properties)s') % locals())
|
||||
return {'driver_volume_type': 'iscsi',
|
||||
'data': properties}
|
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs):
|
||||
"""
|
||||
Detach volume from the initiator.
|
||||
"""
|
||||
# Get server name for IQN
|
||||
initiator_name = connector['initiator']
|
||||
vpsa_srv = self._get_server_name(initiator_name)
|
||||
if not vpsa_srv:
|
||||
raise exception.ZadaraServerNotFound(name=initiator_name)
|
||||
|
||||
# Get volume name
|
||||
name = FLAGS.zadara_vol_name_template % volume['name']
|
||||
vpsa_vol = self._get_vpsa_volume_name(name)
|
||||
if not vpsa_vol:
|
||||
raise exception.VolumeNotFound(volume_id=name)
|
||||
|
||||
# Detach volume from server
|
||||
self.vpsa.send_cmd('detach_volume',
|
||||
vpsa_srv=vpsa_srv,
|
||||
vpsa_vol=vpsa_vol)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
raise NotImplementedError()
|
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
"""Fetch the image from image_service and write it to the volume."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
|
||||
"""Copy the volume to the specified image."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
|
||||
"""Creates a clone of the specified volume."""
|
||||
raise NotImplementedError()
|
@ -1,725 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Volume manager manages creating, attaching, detaching, and persistent storage.
|
||||
|
||||
Persistent storage volumes keep their state independent of instances. You can
|
||||
attach to an instance, terminate the instance, spawn a new instance (even
|
||||
one from a different image) and re-attach the volume with the same data
|
||||
intact.
|
||||
|
||||
**Related Flags**
|
||||
|
||||
:volume_topic: What :mod:`rpc` topic to listen to (default: `manila-volume`).
|
||||
:volume_manager: The module name of a class derived from
|
||||
:class:`manager.Manager` (default:
|
||||
:class:`manila.volume.manager.Manager`).
|
||||
:volume_driver: Used by :class:`Manager`. Defaults to
|
||||
:class:`manila.volume.drivers.lvm.LVMISCSIDriver`.
|
||||
:volume_group: Name of the group that will contain exported volumes (default:
|
||||
`manila-volumes`)
|
||||
:num_shell_tries: Number of times to attempt to run commands (default: 3)
|
||||
|
||||
"""
|
||||
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from manila import context
|
||||
from manila import exception
|
||||
from manila.image import glance
|
||||
from manila import manager
|
||||
from manila.openstack.common import excutils
|
||||
from manila.openstack.common import importutils
|
||||
from manila.openstack.common import log as logging
|
||||
from manila.openstack.common import timeutils
|
||||
from manila.openstack.common import uuidutils
|
||||
from manila import quota
|
||||
from manila import utils
|
||||
from manila.volume.configuration import Configuration
|
||||
from manila.volume import utils as volume_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
QUOTAS = quota.QUOTAS
|
||||
|
||||
volume_manager_opts = [
|
||||
cfg.StrOpt('volume_driver',
|
||||
default='manila.volume.drivers.lvm.LVMISCSIDriver',
|
||||
help='Driver to use for volume creation'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(volume_manager_opts)
|
||||
|
||||
MAPPING = {
|
||||
'manila.volume.driver.RBDDriver': 'manila.volume.drivers.rbd.RBDDriver',
|
||||
'manila.volume.driver.SheepdogDriver':
|
||||
'manila.volume.drivers.sheepdog.SheepdogDriver',
|
||||
'manila.volume.nexenta.volume.NexentaDriver':
|
||||
'manila.volume.drivers.nexenta.volume.NexentaDriver',
|
||||
'manila.volume.san.SanISCSIDriver':
|
||||
'manila.volume.drivers.san.san.SanISCSIDriver',
|
||||
'manila.volume.san.SolarisISCSIDriver':
|
||||
'manila.volume.drivers.san.solaris.SolarisISCSIDriver',
|
||||
'manila.volume.san.HpSanISCSIDriver':
|
||||
'manila.volume.drivers.san.hp_lefthand.HpSanISCSIDriver',
|
||||
'manila.volume.netapp.NetAppISCSIDriver':
|
||||
'manila.volume.drivers.netapp.iscsi.NetAppISCSIDriver',
|
||||
'manila.volume.netapp.NetAppCmodeISCSIDriver':
|
||||
'manila.volume.drivers.netapp.iscsi.NetAppCmodeISCSIDriver',
|
||||
'manila.volume.netapp_nfs.NetAppNFSDriver':
|
||||
'manila.volume.drivers.netapp.nfs.NetAppNFSDriver',
|
||||
'manila.volume.nfs.NfsDriver':
|
||||
'manila.volume.drivers.nfs.NfsDriver',
|
||||
'manila.volume.solidfire.SolidFire':
|
||||
'manila.volume.drivers.solidfire.SolidFire',
|
||||
'manila.volume.storwize_svc.StorwizeSVCDriver':
|
||||
'manila.volume.drivers.storwize_svc.StorwizeSVCDriver',
|
||||
'manila.volume.windows.WindowsDriver':
|
||||
'manila.volume.drivers.windows.WindowsDriver',
|
||||
'manila.volume.xiv.XIVDriver':
|
||||
'manila.volume.drivers.xiv.XIVDriver',
|
||||
'manila.volume.zadara.ZadaraVPSAISCSIDriver':
|
||||
'manila.volume.drivers.zadara.ZadaraVPSAISCSIDriver',
|
||||
'manila.volume.driver.ISCSIDriver':
|
||||
'manila.volume.drivers.lvm.LVMISCSIDriver'}
|
||||
|
||||
|
||||
class VolumeManager(manager.SchedulerDependentManager):
|
||||
"""Manages attachable block storage devices."""
|
||||
|
||||
RPC_API_VERSION = '1.4'
|
||||
|
||||
def __init__(self, volume_driver=None, service_name=None,
|
||||
*args, **kwargs):
|
||||
"""Load the driver from the one specified in args, or from flags."""
|
||||
self.configuration = Configuration(volume_manager_opts,
|
||||
config_group=service_name)
|
||||
if not volume_driver:
|
||||
# Get from configuration, which will get the default
|
||||
# if its not using the multi backend
|
||||
volume_driver = self.configuration.volume_driver
|
||||
if volume_driver in MAPPING:
|
||||
LOG.warn(_("Driver path %s is deprecated, update your "
|
||||
"configuration to the new path."), volume_driver)
|
||||
volume_driver = MAPPING[volume_driver]
|
||||
self.driver = importutils.import_object(
|
||||
volume_driver,
|
||||
configuration=self.configuration)
|
||||
# update_service_capabilities needs service_name to be volume
|
||||
super(VolumeManager, self).__init__(service_name='volume',
|
||||
*args, **kwargs)
|
||||
# NOTE(vish): Implementation specific db handling is done
|
||||
# by the driver.
|
||||
self.driver.db = self.db
|
||||
|
||||
def init_host(self):
|
||||
"""Do any initialization that needs to be run if this is a
|
||||
standalone service."""
|
||||
|
||||
ctxt = context.get_admin_context()
|
||||
self.driver.do_setup(ctxt)
|
||||
self.driver.check_for_setup_error()
|
||||
|
||||
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
|
||||
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
|
||||
for volume in volumes:
|
||||
if volume['status'] in ['available', 'in-use']:
|
||||
self.driver.ensure_export(ctxt, volume)
|
||||
elif volume['status'] == 'downloading':
|
||||
LOG.info(_("volume %s stuck in a downloading state"),
|
||||
volume['id'])
|
||||
self.driver.clear_download(ctxt, volume)
|
||||
self.db.volume_update(ctxt, volume['id'], {'status': 'error'})
|
||||
else:
|
||||
LOG.info(_("volume %s: skipping export"), volume['name'])
|
||||
|
||||
LOG.debug(_('Resuming any in progress delete operations'))
|
||||
for volume in volumes:
|
||||
if volume['status'] == 'deleting':
|
||||
LOG.info(_('Resuming delete on volume: %s') % volume['id'])
|
||||
self.delete_volume(ctxt, volume['id'])
|
||||
|
||||
# collect and publish service capabilities
|
||||
self.publish_service_capabilities(ctxt)
|
||||
|
||||
def _create_volume(self, context, volume_ref, snapshot_ref,
|
||||
srcvol_ref, image_service, image_id, image_location):
|
||||
cloned = None
|
||||
model_update = False
|
||||
|
||||
if all(x is None for x in(snapshot_ref, image_id, srcvol_ref)):
|
||||
model_update = self.driver.create_volume(volume_ref)
|
||||
elif snapshot_ref is not None:
|
||||
model_update = self.driver.create_volume_from_snapshot(
|
||||
volume_ref,
|
||||
snapshot_ref)
|
||||
elif srcvol_ref is not None:
|
||||
model_update = self.driver.create_cloned_volume(volume_ref,
|
||||
srcvol_ref)
|
||||
else:
|
||||
# create the volume from an image
|
||||
cloned = self.driver.clone_image(volume_ref, image_location)
|
||||
if not cloned:
|
||||
model_update = self.driver.create_volume(volume_ref)
|
||||
|
||||
updates = dict(model_update or dict(), status='downloading')
|
||||
volume_ref = self.db.volume_update(context,
|
||||
volume_ref['id'],
|
||||
updates)
|
||||
|
||||
self._copy_image_to_volume(context,
|
||||
volume_ref,
|
||||
image_service,
|
||||
image_id)
|
||||
|
||||
return model_update, cloned
|
||||
|
||||
def create_volume(self, context, volume_id, request_spec=None,
|
||||
filter_properties=None, allow_reschedule=True,
|
||||
snapshot_id=None, image_id=None, source_volid=None):
|
||||
"""Creates and exports the volume."""
|
||||
context = context.elevated()
|
||||
if filter_properties is None:
|
||||
filter_properties = {}
|
||||
volume_ref = self.db.volume_get(context, volume_id)
|
||||
self._notify_about_volume_usage(context, volume_ref, "create.start")
|
||||
|
||||
# NOTE(vish): so we don't have to get volume from db again
|
||||
# before passing it to the driver.
|
||||
volume_ref['host'] = self.host
|
||||
|
||||
status = 'available'
|
||||
model_update = False
|
||||
image_meta = None
|
||||
cloned = False
|
||||
|
||||
try:
|
||||
vol_name = volume_ref['name']
|
||||
vol_size = volume_ref['size']
|
||||
LOG.debug(_("volume %(vol_name)s: creating lv of"
|
||||
" size %(vol_size)sG") % locals())
|
||||
snapshot_ref = None
|
||||
sourcevol_ref = None
|
||||
image_service = None
|
||||
image_location = None
|
||||
image_meta = None
|
||||
|
||||
if snapshot_id is not None:
|
||||
LOG.info(_("volume %s: creating from snapshot"),
|
||||
volume_ref['name'])
|
||||
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
|
||||
elif source_volid is not None:
|
||||
LOG.info(_("volume %s: creating from existing volume"),
|
||||
volume_ref['name'])
|
||||
sourcevol_ref = self.db.volume_get(context, source_volid)
|
||||
elif image_id is not None:
|
||||
LOG.info(_("volume %s: creating from image"),
|
||||
volume_ref['name'])
|
||||
# create the volume from an image
|
||||
image_service, image_id = \
|
||||
glance.get_remote_image_service(context,
|
||||
image_id)
|
||||
image_location = image_service.get_location(context, image_id)
|
||||
image_meta = image_service.show(context, image_id)
|
||||
else:
|
||||
LOG.info(_("volume %s: creating"), volume_ref['name'])
|
||||
|
||||
try:
|
||||
model_update, cloned = self._create_volume(context,
|
||||
volume_ref,
|
||||
snapshot_ref,
|
||||
sourcevol_ref,
|
||||
image_service,
|
||||
image_id,
|
||||
image_location)
|
||||
except Exception:
|
||||
# restore source volume status before reschedule
|
||||
if sourcevol_ref is not None:
|
||||
self.db.volume_update(context, sourcevol_ref['id'],
|
||||
{'status': sourcevol_ref['status']})
|
||||
exc_info = sys.exc_info()
|
||||
# try to re-schedule volume:
|
||||
self._reschedule_or_reraise(context, volume_id, exc_info,
|
||||
snapshot_id, image_id,
|
||||
request_spec, filter_properties,
|
||||
allow_reschedule)
|
||||
return
|
||||
|
||||
if model_update:
|
||||
volume_ref = self.db.volume_update(
|
||||
context, volume_ref['id'], model_update)
|
||||
if sourcevol_ref is not None:
|
||||
self.db.volume_glance_metadata_copy_from_volume_to_volume(
|
||||
context,
|
||||
source_volid,
|
||||
volume_id)
|
||||
|
||||
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
|
||||
model_update = self.driver.create_export(context, volume_ref)
|
||||
if model_update:
|
||||
self.db.volume_update(context, volume_ref['id'], model_update)
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.volume_update(context,
|
||||
volume_ref['id'], {'status': 'error'})
|
||||
LOG.error(_("volume %s: create failed"), volume_ref['name'])
|
||||
|
||||
if snapshot_id:
|
||||
# Copy any Glance metadata from the original volume
|
||||
self.db.volume_glance_metadata_copy_to_volume(context,
|
||||
volume_ref['id'],
|
||||
snapshot_id)
|
||||
|
||||
if image_id and not cloned:
|
||||
if image_meta:
|
||||
# Copy all of the Glance image properties to the
|
||||
# volume_glance_metadata table for future reference.
|
||||
self.db.volume_glance_metadata_create(context,
|
||||
volume_ref['id'],
|
||||
'image_id', image_id)
|
||||
name = image_meta.get('name', None)
|
||||
if name:
|
||||
self.db.volume_glance_metadata_create(context,
|
||||
volume_ref['id'],
|
||||
'image_name', name)
|
||||
image_properties = image_meta.get('properties', {})
|
||||
for key, value in image_properties.items():
|
||||
self.db.volume_glance_metadata_create(context,
|
||||
volume_ref['id'],
|
||||
key, value)
|
||||
|
||||
now = timeutils.utcnow()
|
||||
self.db.volume_update(context,
|
||||
volume_ref['id'], {'status': status,
|
||||
'launched_at': now})
|
||||
LOG.info(_("volume %s: created successfully"), volume_ref['name'])
|
||||
self._reset_stats()
|
||||
|
||||
self._notify_about_volume_usage(context, volume_ref, "create.end")
|
||||
return volume_ref['id']
|
||||
|
||||
def _log_original_error(self, exc_info):
|
||||
type_, value, tb = exc_info
|
||||
LOG.error(_('Error: %s') %
|
||||
traceback.format_exception(type_, value, tb))
|
||||
|
||||
def _reschedule_or_reraise(self, context, volume_id, exc_info,
|
||||
snapshot_id, image_id, request_spec,
|
||||
filter_properties, allow_reschedule):
|
||||
"""Try to re-schedule the create or re-raise the original error to
|
||||
error out the volume.
|
||||
"""
|
||||
if not allow_reschedule:
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
|
||||
rescheduled = False
|
||||
|
||||
try:
|
||||
method_args = (CONF.volume_topic, volume_id, snapshot_id,
|
||||
image_id, request_spec, filter_properties)
|
||||
|
||||
rescheduled = self._reschedule(context, request_spec,
|
||||
filter_properties, volume_id,
|
||||
self.scheduler_rpcapi.create_volume,
|
||||
method_args,
|
||||
exc_info)
|
||||
|
||||
except Exception:
|
||||
rescheduled = False
|
||||
LOG.exception(_("volume %s: Error trying to reschedule create"),
|
||||
volume_id)
|
||||
|
||||
if rescheduled:
|
||||
# log the original build error
|
||||
self._log_original_error(exc_info)
|
||||
else:
|
||||
# not re-scheduling
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
|
||||
def _reschedule(self, context, request_spec, filter_properties,
|
||||
volume_id, scheduler_method, method_args,
|
||||
exc_info=None):
|
||||
"""Attempt to re-schedule a volume operation."""
|
||||
|
||||
retry = filter_properties.get('retry', None)
|
||||
if not retry:
|
||||
# no retry information, do not reschedule.
|
||||
LOG.debug(_("Retry info not present, will not reschedule"))
|
||||
return
|
||||
|
||||
if not request_spec:
|
||||
LOG.debug(_("No request spec, will not reschedule"))
|
||||
return
|
||||
|
||||
request_spec['volume_id'] = volume_id
|
||||
|
||||
LOG.debug(_("volume %(volume_id)s: re-scheduling %(method)s "
|
||||
"attempt %(num)d") %
|
||||
{'volume_id': volume_id,
|
||||
'method': scheduler_method.func_name,
|
||||
'num': retry['num_attempts']})
|
||||
|
||||
# reset the volume state:
|
||||
now = timeutils.utcnow()
|
||||
self.db.volume_update(context, volume_id,
|
||||
{'status': 'creating',
|
||||
'scheduled_at': now})
|
||||
|
||||
if exc_info:
|
||||
# stringify to avoid circular ref problem in json serialization:
|
||||
retry['exc'] = traceback.format_exception(*exc_info)
|
||||
|
||||
scheduler_method(context, *method_args)
|
||||
return True
|
||||
|
||||
def delete_volume(self, context, volume_id):
|
||||
"""Deletes and unexports volume."""
|
||||
context = context.elevated()
|
||||
volume_ref = self.db.volume_get(context, volume_id)
|
||||
|
||||
if context.project_id != volume_ref['project_id']:
|
||||
project_id = volume_ref['project_id']
|
||||
else:
|
||||
project_id = context.project_id
|
||||
|
||||
LOG.info(_("volume %s: deleting"), volume_ref['name'])
|
||||
if volume_ref['attach_status'] == "attached":
|
||||
# Volume is still attached, need to detach first
|
||||
raise exception.VolumeAttached(volume_id=volume_id)
|
||||
if volume_ref['host'] != self.host:
|
||||
raise exception.InvalidVolume(
|
||||
reason=_("volume is not local to this node"))
|
||||
|
||||
self._notify_about_volume_usage(context, volume_ref, "delete.start")
|
||||
self._reset_stats()
|
||||
try:
|
||||
LOG.debug(_("volume %s: removing export"), volume_ref['name'])
|
||||
self.driver.remove_export(context, volume_ref)
|
||||
LOG.debug(_("volume %s: deleting"), volume_ref['name'])
|
||||
self.driver.delete_volume(volume_ref)
|
||||
except exception.VolumeIsBusy:
|
||||
LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
|
||||
self.driver.ensure_export(context, volume_ref)
|
||||
self.db.volume_update(context, volume_ref['id'],
|
||||
{'status': 'available'})
|
||||
return True
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.volume_update(context,
|
||||
volume_ref['id'],
|
||||
{'status': 'error_deleting'})
|
||||
|
||||
# Get reservations
|
||||
try:
|
||||
reservations = QUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
volumes=-1,
|
||||
gigabytes=-volume_ref['size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update usages deleting volume"))
|
||||
|
||||
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
|
||||
self.db.volume_destroy(context, volume_id)
|
||||
LOG.info(_("volume %s: deleted successfully"), volume_ref['name'])
|
||||
self._notify_about_volume_usage(context, volume_ref, "delete.end")
|
||||
|
||||
# Commit the reservations
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
|
||||
self.publish_service_capabilities(context)
|
||||
|
||||
return True
|
||||
|
||||
def create_snapshot(self, context, volume_id, snapshot_id):
|
||||
"""Creates and exports the snapshot."""
|
||||
context = context.elevated()
|
||||
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
|
||||
LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])
|
||||
self._notify_about_snapshot_usage(
|
||||
context, snapshot_ref, "create.start")
|
||||
|
||||
try:
|
||||
snap_name = snapshot_ref['name']
|
||||
LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
|
||||
model_update = self.driver.create_snapshot(snapshot_ref)
|
||||
if model_update:
|
||||
self.db.snapshot_update(context, snapshot_ref['id'],
|
||||
model_update)
|
||||
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.snapshot_update(context,
|
||||
snapshot_ref['id'],
|
||||
{'status': 'error'})
|
||||
|
||||
self.db.snapshot_update(context,
|
||||
snapshot_ref['id'], {'status': 'available',
|
||||
'progress': '100%'})
|
||||
self.db.volume_glance_metadata_copy_to_snapshot(context,
|
||||
snapshot_ref['id'],
|
||||
volume_id)
|
||||
LOG.info(_("snapshot %s: created successfully"), snapshot_ref['name'])
|
||||
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
|
||||
return snapshot_id
|
||||
|
||||
def delete_snapshot(self, context, snapshot_id):
|
||||
"""Deletes and unexports snapshot."""
|
||||
context = context.elevated()
|
||||
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
|
||||
LOG.info(_("snapshot %s: deleting"), snapshot_ref['name'])
|
||||
self._notify_about_snapshot_usage(
|
||||
context, snapshot_ref, "delete.start")
|
||||
|
||||
if context.project_id != snapshot_ref['project_id']:
|
||||
project_id = snapshot_ref['project_id']
|
||||
else:
|
||||
project_id = context.project_id
|
||||
|
||||
try:
|
||||
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
|
||||
self.driver.delete_snapshot(snapshot_ref)
|
||||
except exception.SnapshotIsBusy:
|
||||
LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name'])
|
||||
self.db.snapshot_update(context,
|
||||
snapshot_ref['id'],
|
||||
{'status': 'available'})
|
||||
return True
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.snapshot_update(context,
|
||||
snapshot_ref['id'],
|
||||
{'status': 'error_deleting'})
|
||||
|
||||
# Get reservations
|
||||
try:
|
||||
if CONF.no_snapshot_gb_quota:
|
||||
reservations = QUOTAS.reserve(context,
|
||||
project_id=project_id,
|
||||
snapshots=-1)
|
||||
else:
|
||||
reservations = QUOTAS.reserve(
|
||||
context,
|
||||
project_id=project_id,
|
||||
snapshots=-1,
|
||||
gigabytes=-snapshot_ref['volume_size'])
|
||||
except Exception:
|
||||
reservations = None
|
||||
LOG.exception(_("Failed to update usages deleting snapshot"))
|
||||
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
|
||||
self.db.snapshot_destroy(context, snapshot_id)
|
||||
LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
|
||||
self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
|
||||
|
||||
# Commit the reservations
|
||||
if reservations:
|
||||
QUOTAS.commit(context, reservations, project_id=project_id)
|
||||
return True
|
||||
|
||||
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
|
||||
"""Updates db to show volume is attached"""
|
||||
|
||||
@utils.synchronized(volume_id, external=True)
|
||||
def do_attach():
|
||||
# check the volume status before attaching
|
||||
volume = self.db.volume_get(context, volume_id)
|
||||
if volume['status'] == 'attaching':
|
||||
if (volume['instance_uuid'] and volume['instance_uuid'] !=
|
||||
instance_uuid):
|
||||
msg = _("being attached by another instance")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
elif volume['status'] != "available":
|
||||
msg = _("status must be available")
|
||||
raise exception.InvalidVolume(reason=msg)
|
||||
self.db.volume_update(context, volume_id,
|
||||
{"instance_uuid": instance_uuid,
|
||||
"status": "attaching"})
|
||||
|
||||
# TODO(vish): refactor this into a more general "reserve"
|
||||
# TODO(sleepsonthefloor): Is this 'elevated' appropriate?
|
||||
if not uuidutils.is_uuid_like(instance_uuid):
|
||||
raise exception.InvalidUUID(uuid=instance_uuid)
|
||||
|
||||
try:
|
||||
self.driver.attach_volume(context,
|
||||
volume_id,
|
||||
instance_uuid,
|
||||
mountpoint)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.volume_update(context,
|
||||
volume_id,
|
||||
{'status': 'error_attaching'})
|
||||
|
||||
self.db.volume_attached(context.elevated(),
|
||||
volume_id,
|
||||
instance_uuid,
|
||||
mountpoint)
|
||||
return do_attach()
|
||||
|
||||
def detach_volume(self, context, volume_id):
|
||||
"""Updates db to show volume is detached"""
|
||||
# TODO(vish): refactor this into a more general "unreserve"
|
||||
# TODO(sleepsonthefloor): Is this 'elevated' appropriate?
|
||||
try:
|
||||
self.driver.detach_volume(context, volume_id)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.db.volume_update(context,
|
||||
volume_id,
|
||||
{'status': 'error_detaching'})
|
||||
|
||||
self.db.volume_detached(context.elevated(), volume_id)
|
||||
|
||||
# Check for https://bugs.launchpad.net/manila/+bug/1065702
|
||||
volume_ref = self.db.volume_get(context, volume_id)
|
||||
if (volume_ref['provider_location'] and
|
||||
volume_ref['name'] not in volume_ref['provider_location']):
|
||||
self.driver.ensure_export(context, volume_ref)
|
||||
|
||||
def _copy_image_to_volume(self, context, volume, image_service, image_id):
|
||||
"""Downloads Glance image to the specified volume. """
|
||||
volume_id = volume['id']
|
||||
self.driver.copy_image_to_volume(context, volume,
|
||||
image_service,
|
||||
image_id)
|
||||
LOG.debug(_("Downloaded image %(image_id)s to %(volume_id)s "
|
||||
"successfully") % locals())
|
||||
|
||||
def copy_volume_to_image(self, context, volume_id, image_meta):
|
||||
"""Uploads the specified volume to Glance.
|
||||
|
||||
image_meta is a dictionary containing the following keys:
|
||||
'id', 'container_format', 'disk_format'
|
||||
|
||||
"""
|
||||
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
|
||||
try:
|
||||
volume = self.db.volume_get(context, volume_id)
|
||||
self.driver.ensure_export(context.elevated(), volume)
|
||||
image_service, image_id = \
|
||||
glance.get_remote_image_service(context, image_meta['id'])
|
||||
self.driver.copy_volume_to_image(context, volume, image_service,
|
||||
image_meta)
|
||||
LOG.debug(_("Uploaded volume %(volume_id)s to "
|
||||
"image (%(image_id)s) successfully") % locals())
|
||||
except Exception, error:
|
||||
with excutils.save_and_reraise_exception():
|
||||
payload['message'] = unicode(error)
|
||||
finally:
|
||||
if volume['instance_uuid'] is None:
|
||||
self.db.volume_update(context, volume_id,
|
||||
{'status': 'available'})
|
||||
else:
|
||||
self.db.volume_update(context, volume_id,
|
||||
{'status': 'in-use'})
|
||||
|
||||
def initialize_connection(self, context, volume_id, connector):
|
||||
"""Prepare volume for connection from host represented by connector.
|
||||
|
||||
This method calls the driver initialize_connection and returns
|
||||
it to the caller. The connector parameter is a dictionary with
|
||||
information about the host that will connect to the volume in the
|
||||
following format::
|
||||
|
||||
{
|
||||
'ip': ip,
|
||||
'initiator': initiator,
|
||||
}
|
||||
|
||||
ip: the ip address of the connecting machine
|
||||
|
||||
initiator: the iscsi initiator name of the connecting machine.
|
||||
This can be None if the connecting machine does not support iscsi
|
||||
connections.
|
||||
|
||||
driver is responsible for doing any necessary security setup and
|
||||
returning a connection_info dictionary in the following format::
|
||||
|
||||
{
|
||||
'driver_volume_type': driver_volume_type,
|
||||
'data': data,
|
||||
}
|
||||
|
||||
driver_volume_type: a string to identify the type of volume. This
|
||||
can be used by the calling code to determine the
|
||||
strategy for connecting to the volume. This could
|
||||
be 'iscsi', 'rbd', 'sheepdog', etc.
|
||||
|
||||
data: this is the data that the calling code will use to connect
|
||||
to the volume. Keep in mind that this will be serialized to
|
||||
json in various places, so it should not contain any non-json
|
||||
data types.
|
||||
"""
|
||||
volume_ref = self.db.volume_get(context, volume_id)
|
||||
return self.driver.initialize_connection(volume_ref, connector)
|
||||
|
||||
def terminate_connection(self, context, volume_id, connector, force=False):
|
||||
"""Cleanup connection from host represented by connector.
|
||||
|
||||
The format of connector is the same as for initialize_connection.
|
||||
"""
|
||||
volume_ref = self.db.volume_get(context, volume_id)
|
||||
self.driver.terminate_connection(volume_ref, connector, force=force)
|
||||
|
||||
@manager.periodic_task
|
||||
def _report_driver_status(self, context):
|
||||
LOG.info(_("Updating volume status"))
|
||||
volume_stats = self.driver.get_volume_stats(refresh=True)
|
||||
if volume_stats:
|
||||
# This will grab info about the host and queue it
|
||||
# to be sent to the Schedulers.
|
||||
self.update_service_capabilities(volume_stats)
|
||||
|
||||
def publish_service_capabilities(self, context):
|
||||
""" Collect driver status and then publish """
|
||||
self._report_driver_status(context)
|
||||
self._publish_service_capabilities(context)
|
||||
|
||||
def _reset_stats(self):
|
||||
LOG.info(_("Clear capabilities"))
|
||||
self._last_volume_stats = []
|
||||
|
||||
def notification(self, context, event):
|
||||
LOG.info(_("Notification {%s} received"), event)
|
||||
self._reset_stats()
|
||||
|
||||
def _notify_about_volume_usage(self,
|
||||
context,
|
||||
volume,
|
||||
event_suffix,
|
||||
extra_usage_info=None):
|
||||
volume_utils.notify_about_volume_usage(
|
||||
context, volume, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
||||
|
||||
def _notify_about_snapshot_usage(self,
|
||||
context,
|
||||
snapshot,
|
||||
event_suffix,
|
||||
extra_usage_info=None):
|
||||
volume_utils.notify_about_snapshot_usage(
|
||||
context, snapshot, event_suffix,
|
||||
extra_usage_info=extra_usage_info, host=self.host)
|
@ -1,130 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012, Intel, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Client side of the volume RPC API.
|
||||
"""
|
||||
|
||||
from manila import exception
|
||||
from manila import flags
|
||||
from manila.openstack.common import rpc
|
||||
import manila.openstack.common.rpc.proxy
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class VolumeAPI(manila.openstack.common.rpc.proxy.RpcProxy):
|
||||
'''Client side of the volume rpc API.
|
||||
|
||||
API version history:
|
||||
|
||||
1.0 - Initial version.
|
||||
1.1 - Adds clone volume option to create_volume.
|
||||
1.2 - Add publish_service_capabilities() method.
|
||||
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image
|
||||
1.4 - Add request_spec, filter_properties and
|
||||
allow_reschedule arguments to create_volume().
|
||||
'''
|
||||
|
||||
BASE_RPC_API_VERSION = '1.0'
|
||||
|
||||
def __init__(self, topic=None):
|
||||
super(VolumeAPI, self).__init__(
|
||||
topic=topic or FLAGS.volume_topic,
|
||||
default_version=self.BASE_RPC_API_VERSION)
|
||||
|
||||
def create_volume(self, ctxt, volume, host,
|
||||
request_spec, filter_properties,
|
||||
allow_reschedule=True,
|
||||
snapshot_id=None, image_id=None,
|
||||
source_volid=None):
|
||||
self.cast(ctxt,
|
||||
self.make_msg('create_volume',
|
||||
volume_id=volume['id'],
|
||||
request_spec=request_spec,
|
||||
filter_properties=filter_properties,
|
||||
allow_reschedule=allow_reschedule,
|
||||
snapshot_id=snapshot_id,
|
||||
image_id=image_id,
|
||||
source_volid=source_volid),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
host),
|
||||
version='1.4')
|
||||
|
||||
def delete_volume(self, ctxt, volume):
|
||||
self.cast(ctxt,
|
||||
self.make_msg('delete_volume',
|
||||
volume_id=volume['id']),
|
||||
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
|
||||
|
||||
def create_snapshot(self, ctxt, volume, snapshot):
|
||||
self.cast(ctxt, self.make_msg('create_snapshot',
|
||||
volume_id=volume['id'],
|
||||
snapshot_id=snapshot['id']),
|
||||
topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
|
||||
|
||||
def delete_snapshot(self, ctxt, snapshot, host):
|
||||
self.cast(ctxt, self.make_msg('delete_snapshot',
|
||||
snapshot_id=snapshot['id']),
|
||||
topic=rpc.queue_get_for(ctxt, self.topic, host))
|
||||
|
||||
def attach_volume(self, ctxt, volume, instance_uuid, mountpoint):
|
||||
return self.call(ctxt, self.make_msg('attach_volume',
|
||||
volume_id=volume['id'],
|
||||
instance_uuid=instance_uuid,
|
||||
mountpoint=mountpoint),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
volume['host']))
|
||||
|
||||
def detach_volume(self, ctxt, volume):
|
||||
return self.call(ctxt, self.make_msg('detach_volume',
|
||||
volume_id=volume['id']),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
volume['host']))
|
||||
|
||||
def copy_volume_to_image(self, ctxt, volume, image_meta):
|
||||
self.cast(ctxt, self.make_msg('copy_volume_to_image',
|
||||
volume_id=volume['id'],
|
||||
image_meta=image_meta),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
volume['host']),
|
||||
version='1.3')
|
||||
|
||||
def initialize_connection(self, ctxt, volume, connector):
|
||||
return self.call(ctxt, self.make_msg('initialize_connection',
|
||||
volume_id=volume['id'],
|
||||
connector=connector),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
volume['host']))
|
||||
|
||||
def terminate_connection(self, ctxt, volume, connector, force=False):
|
||||
return self.call(ctxt, self.make_msg('terminate_connection',
|
||||
volume_id=volume['id'],
|
||||
connector=connector,
|
||||
force=force),
|
||||
topic=rpc.queue_get_for(ctxt,
|
||||
self.topic,
|
||||
volume['host']))
|
||||
|
||||
def publish_service_capabilities(self, ctxt):
|
||||
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'),
|
||||
version='1.2')
|
@ -1,131 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume-related Utilities and helpers."""

import os
import stat

from manila import flags
from manila.openstack.common import log as logging
from manila.openstack.common.notifier import api as notifier_api
from manila.openstack.common import timeutils
from manila import utils


FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)


def get_host_from_queue(queuename):
    # This assumes the queue is named something like manila-volume
    # and does not have dot separators in the queue name
    return queuename.split('@', 1)[0].split('.', 1)[1]
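
The split above undoes the service's queue naming. A hypothetical round trip (the queue name here is an assumed example, not one taken from a running deployment):

# 'manila-volume.node1' -> host 'node1'; an '@pool' suffix is dropped first.
queuename = 'manila-volume.node1@pool'
assert queuename.split('@', 1)[0].split('.', 1)[1] == 'node1'
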
def notify_usage_exists(context, volume_ref, current_period=False):
    """Generates 'exists' notification for a volume for usage auditing
    purposes.

    Generates usage for last completed period, unless 'current_period'
    is True.
    """
    begin, end = utils.last_completed_audit_period()
    if current_period:
        audit_start = end
        audit_end = timeutils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    extra_usage_info = dict(audit_period_beginning=str(audit_start),
                            audit_period_ending=str(audit_end))

    notify_about_volume_usage(context, volume_ref,
                              'exists', extra_usage_info=extra_usage_info)


def null_safe_str(s):
    return str(s) if s else ''

def _usage_from_volume(context, volume_ref, **kw):
    usage_info = dict(tenant_id=volume_ref['project_id'],
                      user_id=volume_ref['user_id'],
                      availability_zone=volume_ref['availability_zone'],
                      volume_id=volume_ref['id'],
                      volume_type=volume_ref['volume_type_id'],
                      display_name=volume_ref['display_name'],
                      launched_at=null_safe_str(volume_ref['launched_at']),
                      created_at=null_safe_str(volume_ref['created_at']),
                      status=volume_ref['status'],
                      snapshot_id=volume_ref['snapshot_id'],
                      size=volume_ref['size'])

    usage_info.update(kw)
    return usage_info


def notify_about_volume_usage(context, volume, event_suffix,
                              extra_usage_info=None, host=None):
    if not host:
        host = FLAGS.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_volume(context, volume, **extra_usage_info)

    notifier_api.notify(context, 'volume.%s' % host,
                        'volume.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
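
For orientation, this is the shape of the identifiers the notify call above emits; the host name is an assumed example, not anything defined in this module:

host = 'node1'                            # assumed example host
event_suffix = 'exists'
publisher_id = 'volume.%s' % host         # -> 'volume.node1'
event_type = 'volume.%s' % event_suffix   # -> 'volume.exists'
assert (publisher_id, event_type) == ('volume.node1', 'volume.exists')
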
def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
    usage_info = {
        'tenant_id': snapshot_ref['project_id'],
        'user_id': snapshot_ref['user_id'],
        'availability_zone': snapshot_ref.volume['availability_zone'],
        'volume_id': snapshot_ref['volume_id'],
        'volume_size': snapshot_ref['volume_size'],
        'snapshot_id': snapshot_ref['id'],
        'display_name': snapshot_ref['display_name'],
        'created_at': str(snapshot_ref['created_at']),
        'status': snapshot_ref['status'],
        'deleted': null_safe_str(snapshot_ref['deleted'])
    }

    usage_info.update(extra_usage_info)
    return usage_info


def notify_about_snapshot_usage(context, snapshot, event_suffix,
                                extra_usage_info=None, host=None):
    if not host:
        host = FLAGS.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info)

    notifier_api.notify(context, 'snapshot.%s' % host,
                        'snapshot.%s' % event_suffix,
                        notifier_api.INFO, usage_info)

def is_block(path):
    mode = os.stat(path).st_mode
    return stat.S_ISBLK(mode)
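
A quick, hedged usage check for the helper above; the device paths are typical Linux examples and are not guaranteed to exist on every system:

import os
import stat
# /dev/null is a character device, so S_ISBLK is False for it; a real
# disk node such as /dev/sda would report True where it exists.
print(stat.S_ISBLK(os.stat('/dev/null').st_mode))   # False
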
@ -1,158 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Built-in volume type properties."""

from manila import context
from manila import db
from manila import exception
from manila import flags
from manila.openstack.common import log as logging

FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)

def create(context, name, extra_specs={}):
    """Creates volume types."""
    try:
        type_ref = db.volume_type_create(context,
                                         dict(name=name,
                                              extra_specs=extra_specs))
    except exception.DBError, e:
        LOG.exception(_('DB error: %s') % e)
        raise exception.VolumeTypeCreateFailed(name=name,
                                               extra_specs=extra_specs)
    return type_ref


def destroy(context, id):
    """Marks volume types as deleted."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)
    else:
        db.volume_type_destroy(context, id)

def get_all_types(context, inactive=0, search_opts={}):
    """Get all non-deleted volume_types.

    Pass true as argument if you want deleted volume types returned also.

    """
    vol_types = db.volume_type_get_all(context, inactive)

    if search_opts:
        LOG.debug(_("Searching by: %s") % str(search_opts))

        def _check_extra_specs_match(vol_type, searchdict):
            for k, v in searchdict.iteritems():
                if (k not in vol_type['extra_specs'].keys()
                        or vol_type['extra_specs'][k] != v):
                    return False
            return True

        # search_option to filter_name mapping.
        filter_mapping = {'extra_specs': _check_extra_specs_match}

        result = {}
        for type_name, type_args in vol_types.iteritems():
            # go over all filters in the list
            for opt, values in search_opts.iteritems():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:
                    # no such filter - ignore it, go to next filter
                    continue
                else:
                    if filter_func(type_args, values):
                        result[type_name] = type_args
                        break
        vol_types = result
    return vol_types
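
A self-contained rerun of the extra_specs matching above with assumed volume types (Python 2 iteritems, as this module uses elsewhere):

vol_types = {'gold': {'extra_specs': {'tier': 'ssd'}},
             'bronze': {'extra_specs': {'tier': 'hdd'}}}
search = {'tier': 'ssd'}
result = {}
for name, args in vol_types.iteritems():
    # keep a type only if every searched key/value pair matches
    if all(args['extra_specs'].get(k) == v for k, v in search.iteritems()):
        result[name] = args
assert result.keys() == ['gold']
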
def get_volume_type(ctxt, id):
    """Retrieves single volume type by id."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)

    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.volume_type_get(ctxt, id)


def get_volume_type_by_name(context, name):
    """Retrieves single volume type by name."""
    if name is None:
        msg = _("name cannot be None")
        raise exception.InvalidVolumeType(reason=msg)

    return db.volume_type_get_by_name(context, name)

def get_default_volume_type():
    """Get the default volume type."""
    name = FLAGS.default_volume_type
    vol_type = {}

    if name is not None:
        ctxt = context.get_admin_context()
        try:
            vol_type = get_volume_type_by_name(ctxt, name)
        except exception.VolumeTypeNotFoundByName, e:
            # Couldn't find volume type with the name in default_volume_type
            # flag, record this issue and move on
            # TODO(zhiteng) consider add notification to warn admin
            LOG.exception(_('Default volume type is not found, '
                            'please check default_volume_type config: %s'), e)

    return vol_type

def is_key_value_present(volume_type_id, key, value, volume_type=None):
    if volume_type_id is None:
        return False

    if volume_type is None:
        volume_type = get_volume_type(context.get_admin_context(),
                                      volume_type_id)
    if (volume_type.get('extra_specs') is None or
            volume_type['extra_specs'].get(key) != value):
        return False
    else:
        return True
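
The predicate above reduces to a short membership check; a standalone restatement with assumed inputs, skipping the db lookup:

def _present(volume_type, key, value):
    # True only when extra_specs exists and maps key to exactly value.
    specs = volume_type.get('extra_specs')
    return specs is not None and specs.get(key) == value

assert _present({'extra_specs': {'replication': 'enabled'}},
                'replication', 'enabled')
assert not _present({'extra_specs': {}}, 'replication', 'enabled')
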
def get_volume_type_extra_specs(volume_type_id, key=False):
    volume_type = get_volume_type(context.get_admin_context(),
                                  volume_type_id)
    extra_specs = volume_type['extra_specs']
    if key:
        if extra_specs.get(key):
            return extra_specs.get(key)
        else:
            return False
    else:
        return extra_specs