Remove Oracle ZFSSA drivers
The Oracle ZFSSA iSCSI and NFS drivers were marked unsupported in the Train release. Oracle has indicated that it does not plan to continue supporting these drivers, so this patch proposes their removal. Change-Id: I78f8ecbbbf2b46a3be5d802fb58aaa998aed144a
This commit is contained in:
parent
025c0ef4f3
commit
e4d9a43a28
@ -163,10 +163,6 @@ from cinder.volume.drivers.windows import iscsi as \
|
||||
from cinder.volume.drivers.windows import smbfs as \
|
||||
cinder_volume_drivers_windows_smbfs
|
||||
from cinder.volume.drivers import zadara as cinder_volume_drivers_zadara
|
||||
from cinder.volume.drivers.zfssa import zfssaiscsi as \
|
||||
cinder_volume_drivers_zfssa_zfssaiscsi
|
||||
from cinder.volume.drivers.zfssa import zfssanfs as \
|
||||
cinder_volume_drivers_zfssa_zfssanfs
|
||||
from cinder.volume import manager as cinder_volume_manager
|
||||
from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf
|
||||
from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver
|
||||
@ -369,8 +365,6 @@ def list_opts():
|
||||
cinder_volume_drivers_windows_iscsi.windows_opts,
|
||||
cinder_volume_drivers_windows_smbfs.volume_opts,
|
||||
cinder_volume_drivers_zadara.zadara_opts,
|
||||
cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS,
|
||||
cinder_volume_drivers_zfssa_zfssanfs.ZFSSA_OPTS,
|
||||
cinder_volume_manager.volume_backend_opts,
|
||||
cinder_volume_targets_spdknvmf.spdk_opts,
|
||||
)),
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,379 +0,0 @@
|
||||
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance REST API Client Programmatic Interface
|
||||
"""
|
||||
|
||||
import json
|
||||
import ssl
|
||||
import time
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_utils import strutils
|
||||
import six
|
||||
from six.moves import http_client
|
||||
from six.moves import urllib
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class Status(object):
    """Symbolic names for the HTTP status codes returned by REST calls."""

    def __init__(self):
        pass

    OK = http_client.OK                            #: request returned OK
    CREATED = http_client.CREATED                  #: new resource created
    ACCEPTED = http_client.ACCEPTED                #: command accepted
    NO_CONTENT = http_client.NO_CONTENT            #: OK, but no data returned
    BAD_REQUEST = http_client.BAD_REQUEST          #: bad request
    UNAUTHORIZED = http_client.UNAUTHORIZED        #: user is not authorized
    FORBIDDEN = http_client.FORBIDDEN              #: the request is not allowed
    NOT_FOUND = http_client.NOT_FOUND              #: resource was not found
    NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED   #: method is not allowed
    TIMEOUT = http_client.REQUEST_TIMEOUT          #: request timed out
    CONFLICT = http_client.CONFLICT                #: invalid request
    BUSY = http_client.SERVICE_UNAVAILABLE         #: service unavailable
|
||||
|
||||
|
||||
class RestResult(object):
    """Container for the outcome of a single REST API call."""

    def __init__(self, response=None, err=None):
        """Capture status and payload from a response or an HTTP error.

        :param response: HTTP response
        :param err: HTTPError raised by the call, if any
        """
        self.response = response
        self.error = err
        self.data = ""
        self.status = 0

        if self.response:
            self.status = self.response.getcode()
            # Drain the response in chunks; read() returns b'' at EOF.
            pieces = []
            chunk = self.response.read()
            while chunk:
                pieces.append(chunk.decode("utf-8"))
                chunk = self.response.read()
            self.data += "".join(pieces)

        if self.error:
            # An error supersedes any response data: report the HTTP code
            # and its standard reason phrase.
            self.status = self.error.code
            self.data = http_client.responses[self.status]

        LOG.debug('Response code: %s', self.status)
        LOG.debug('Response data: %s', self.data)

    def get_header(self, name):
        """Get an HTTP header with the given name from the results.

        :param name: HTTP header name
        :return: The header value or None if no value is found
        """
        if self.response is None:
            return None
        return self.response.info().get(name)
|
||||
|
||||
|
||||
class RestClientError(Exception):
    """Exception for ZFS REST API client errors."""

    def __init__(self, status, name="ERR_INTERNAL", message=None):
        """Create a REST Response exception.

        :param status: HTTP response status
        :param name: The name of the REST API error type
        :param message: Descriptive error message returned from REST call
        """
        super(RestClientError, self).__init__(message)
        self.code = status
        self.name = name
        # Keep the caller-supplied message when one is given.  Previously
        # it was unconditionally overwritten by the generic HTTP reason
        # phrase, discarding the detailed error text passed by callers.
        self.msg = message
        if self.msg is None and status in http_client.responses:
            self.msg = http_client.responses[status]

    def __str__(self):
        return "%d %s %s" % (self.code, self.name, self.msg)
|
||||
|
||||
|
||||
class RestClientURL(object):
    """ZFSSA urllib client."""

    def __init__(self, url, **kwargs):
        """Initialize a REST client.

        :param url: The ZFSSA REST API URL
        :key session: HTTP Cookie value of x-auth-session obtained from a
                      normal BUI login.
        :key timeout: Time in seconds to wait for command to complete.
                      (Default is 60 seconds)
        """
        self.url = url
        self.local = kwargs.get("local", False)
        self.base_path = kwargs.get("base_path", "/api")
        self.timeout = kwargs.get("timeout", 60)
        # Build the header dict BEFORE storing the optional session cookie.
        # The previous code assigned the cookie into ``self.headers`` while
        # it was still None (a TypeError), and the dict created afterwards
        # would have discarded the cookie anyway.
        self.headers = {"content-type": "application/json"}
        if kwargs.get('session'):
            self.headers['x-auth-session'] = kwargs.get('session')
        self.do_logout = False
        self.auth_str = None

    def _path(self, path, base_path=None):
        """Build the full REST URL for *path*.

        Absolute http(s) URLs pass through unchanged.  Otherwise the base
        path is prepended, and for local access the '/api' prefix is
        stripped again.
        """
        if path.startswith("http://") or path.startswith("https://"):
            return path
        if base_path is None:
            base_path = self.base_path
        if not path.startswith(base_path) and not (
                self.local and ("/api" + path).startswith(base_path)):
            path = "%s%s" % (base_path, path)
        if self.local and path.startswith("/api"):
            path = path[4:]
        return self.url + path

    def _authorize(self):
        """Performs authorization setting x-auth-session."""
        self.headers['authorization'] = 'Basic %s' % self.auth_str
        if 'x-auth-session' in self.headers:
            del self.headers['x-auth-session']

        try:
            result = self.post("/access/v1")
            del self.headers['authorization']
            if result.status == http_client.CREATED:
                self.headers['x-auth-session'] = \
                    result.get_header('x-auth-session')
                self.do_logout = True
                LOG.info('ZFSSA version: %s',
                         result.get_header('x-zfssa-version'))

            elif result.status == http_client.NOT_FOUND:
                # The previous message used a backslash line continuation
                # inside the string literal, embedding a long run of spaces
                # in the text.
                raise RestClientError(result.status, name="ERR_RESTError",
                                      message="REST Not Available: "
                                              "Please Upgrade")

        except RestClientError:
            # pop() with a default: the header has already been removed
            # when the failure came from the NOT_FOUND branch above, and a
            # plain del would raise KeyError and mask the real error.
            self.headers.pop('authorization', None)
            raise

    def login(self, auth_str):
        """Login to an appliance using a user name and password.

        Start a session like what is done logging into the BUI. This is not
        a requirement to run REST commands, since the protocol is stateless.
        What it does is set up a cookie session so that some server side
        caching can be done. If login is used remember to call logout when
        finished.

        :param auth_str: Authorization string (base64)
        """
        self.auth_str = auth_str
        self._authorize()

    def logout(self):
        """Logout of an appliance."""
        result = None
        try:
            result = self.delete("/access/v1", base_path="/api")
        except RestClientError:
            # Best effort: a failed logout still clears local session state.
            pass

        self.headers.clear()
        self.do_logout = False
        return result

    def islogin(self):
        """Return True while a login session is active."""
        return self.do_logout

    @staticmethod
    def mkpath(*args, **kwargs):
        """Make a path?query string for making a REST request.

        :cmd_params args: The path part
        :cmd_params kwargs: The query part
        """
        # Plain string building; the previous implementation pulled in
        # six.StringIO for what is a simple join.
        path = "".join("/" + arg for arg in args)
        if not kwargs:
            return path
        query = "&".join("%s=%s" % (k, kwargs[k]) for k in kwargs)
        return "%s?%s" % (path, query)

    def request(self, path, request, body=None, **kwargs):
        """Make an HTTP request and return the results.

        :param path: Path used with the initialized URL to make a request
        :param request: HTTP request type (GET, POST, PUT, DELETE)
        :param body: HTTP body of request
        :key accept: Set HTTP 'Accept' header with this value
        :key base_path: Override the base_path for this request
        :key content: Set HTTP 'Content-Type' header with this value
        """
        out_hdrs = dict.copy(self.headers)
        if kwargs.get("accept"):
            out_hdrs['accept'] = kwargs.get("accept")

        if body:
            if isinstance(body, dict):
                body = json.dumps(body)
            body = body.encode("utf-8")
            out_hdrs['content-length'] = len(body)
        else:
            body = None

        zfssaurl = self._path(path, kwargs.get("base_path"))
        req = urllib.request.Request(zfssaurl, body, out_hdrs)
        req.get_method = lambda: request
        maxreqretries = kwargs.get("maxreqretries", 10)
        retry = 0
        response = None

        LOG.debug('Request: %(request)s %(url)s',
                  {'request': request, 'url': zfssaurl})
        LOG.debug('Out headers: %s', out_hdrs)
        if body and body != '':
            # body may contain chap secret so must be masked
            LOG.debug('Body: %s', strutils.mask_password(body))

        # Certificate verification is intentionally skipped when the
        # private-API shim exists; otherwise fall back to default handling.
        context = None
        if hasattr(ssl, '_create_unverified_context'):
            context = ssl._create_unverified_context()

        while retry < maxreqretries:
            try:
                if context:
                    # only schemes that can be used will be http or https if
                    # it is given in the path variable, or the path will
                    # begin with the REST API location meaning invalid or
                    # unwanted schemes cannot be used
                    response = urllib.request.urlopen(req,  # nosec
                                                      timeout=self.timeout,
                                                      context=context)
                else:
                    response = urllib.request.urlopen(
                        req,  # nosec : see above
                        timeout=self.timeout)
            except urllib.error.HTTPError as err:
                if err.code == http_client.NOT_FOUND:
                    LOG.debug('REST Not Found: %s', err.code)
                else:
                    LOG.error('REST Not Available: %s', err.code)

                if err.code == http_client.SERVICE_UNAVAILABLE and \
                        retry < maxreqretries:
                    retry += 1
                    time.sleep(1)
                    LOG.error('Server Busy retry request: %s', retry)
                    continue
                if (err.code == http_client.UNAUTHORIZED or
                    err.code == http_client.INTERNAL_SERVER_ERROR) and \
                        '/access/v1' not in zfssaurl:
                    # Session may have expired: re-authorize once per
                    # retry, except on the login URL itself.
                    try:
                        LOG.error('Authorizing request: %(zfssaurl)s '
                                  'retry: %(retry)d.',
                                  {'zfssaurl': zfssaurl, 'retry': retry})
                        self._authorize()
                        req.add_header('x-auth-session',
                                       self.headers['x-auth-session'])
                    except RestClientError:
                        pass
                    retry += 1
                    time.sleep(1)
                    continue

                return RestResult(err=err)

            except urllib.error.URLError as err:
                LOG.error('URLError: %s', err.reason)
                raise RestClientError(-1, name="ERR_URLError",
                                      message=err.reason)

            break

        if (response and
            (response.getcode() == http_client.SERVICE_UNAVAILABLE and
             retry >= maxreqretries)):
            raise RestClientError(response.getcode(), name="ERR_HTTPError",
                                  message="REST Not Available: Disabled")

        return RestResult(response=response)

    def get(self, path, **kwargs):
        """Make an HTTP GET request.

        :param path: Path to resource.
        """
        return self.request(path, "GET", **kwargs)

    def post(self, path, body="", **kwargs):
        """Make an HTTP POST request.

        :param path: Path to resource.
        :param body: Post data content
        """
        return self.request(path, "POST", body, **kwargs)

    def put(self, path, body="", **kwargs):
        """Make an HTTP PUT request.

        :param path: Path to resource.
        :param body: Put data content
        """
        return self.request(path, "PUT", body, **kwargs)

    def delete(self, path, **kwargs):
        """Make an HTTP DELETE request.

        :param path: Path to resource that will be deleted.
        """
        return self.request(path, "DELETE", **kwargs)

    def head(self, path, **kwargs):
        """Make an HTTP HEAD request.

        :param path: Path to resource.
        """
        return self.request(path, "HEAD", **kwargs)
|
@ -1,160 +0,0 @@
|
||||
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance WebDAV Client
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from oslo_log import log
|
||||
from six.moves import http_client
|
||||
from six.moves import urllib
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Guidance appended to WebDAV failures that typically indicate the
# appliance's HTTP service is down or listening on a different port.
bad_gateway_err = _('Check the state of the http service. Also ensure that '
                    'the https port number is the same as the one specified '
                    'in cinder.conf.')

# Maps HTTP status codes from WebDAV calls to operator-facing hints.
WebDAVHTTPErrors = {
    http_client.UNAUTHORIZED: _('User not authorized to perform WebDAV '
                                'operations.'),
    http_client.BAD_GATEWAY: bad_gateway_err,
    http_client.FORBIDDEN: _('Check access permissions for the ZFS share '
                             'assigned to this driver.'),
    http_client.NOT_FOUND: _('The source volume for this WebDAV operation not '
                             'found.'),
    http_client.INSUFFICIENT_STORAGE: _('Not enough storage space in the ZFS '
                                        'share to perform this operation.')
}

# Maps symbolic (non-HTTP) error names to operator-facing hints.
WebDAVErrors = {
    'BadStatusLine': _('http service may have been abruptly disabled or put '
                       'to maintenance state in the middle of this '
                       'operation.'),
    'Bad_Gateway': bad_gateway_err
}

# PROPPATCH request template; the 'prop_name'/'prop_val' placeholders are
# substituted via build_data() before the request is sent.
propertyupdate_data = """<?xml version="1.0"?>
<D:propertyupdate xmlns:D="DAV:">
<D:set>
<D:prop>
<D:prop_name>prop_val</D:prop_name>
</D:prop>
</D:set>
</D:propertyupdate>"""
|
||||
|
||||
|
||||
class WebDAVClientError(exception.VolumeDriverException):
    """Raised when a WebDAV request against the appliance fails."""

    message = _("The WebDAV request failed. Reason: %(msg)s, "
                "Return code/reason: %(code)s, Source Volume: %(src)s, "
                "Destination Volume: %(dst)s, Method: %(method)s.")
|
||||
|
||||
|
||||
class ZFSSAWebDAVClient(object):
    """WebDAV client used for file operations (COPY/MOVE/PROPPATCH)."""

    def __init__(self, url, auth_str, **kwargs):
        """Initialize WebDAV Client.

        :param url: HTTPS path to the WebDAV share
        :param auth_str: base64-encoded basic-auth credentials
        """
        self.https_path = url
        self.auth_str = auth_str

    def _lookup_error(self, error):
        """Map an HTTP code or symbolic error name to a descriptive message.

        :param error: HTTP status code or a key of WebDAVErrors
        :return: message string ('' when nothing matches)
        """
        msg = ''
        if error in http_client.responses:
            msg = http_client.responses[error]

        # Driver-specific hints take precedence over the generic phrase.
        if error in WebDAVHTTPErrors:
            msg = WebDAVHTTPErrors[error]
        elif error in WebDAVErrors:
            msg = WebDAVErrors[error]

        return msg

    def build_data(self, data, propname, value):
        """Substitute the placeholder property name/value in *data*."""
        res = data.replace('prop_name', propname)
        res = res.replace('prop_val', value)
        return res

    def set_file_prop(self, filename, propname, propval):
        """Set a WebDAV property on *filename* via a PROPPATCH request."""
        data = self.build_data(propertyupdate_data, propname, propval)
        return self.request(src_file=filename, data=data, method='PROPPATCH')

    def request(self, src_file="", dst_file="", method="", maxretries=10,
                data=""):
        """Issue a WebDAV request, retrying transient server errors.

        :param src_file: source file name (appended to the share path)
        :param dst_file: destination file name, for COPY/MOVE
        :param method: WebDAV verb (e.g. COPY, MOVE, PROPPATCH)
        :param maxretries: retries for HTTP 500 responses
        :param data: request body
        :return: the urllib response object
        :raises WebDAVClientError: on any non-retryable failure
        """
        retry = 0
        src_url = self.https_path + "/" + src_file
        dst_url = self.https_path + "/" + dst_file
        request = urllib.request.Request(url=src_url, data=data)

        if dst_file != "":
            request.add_header('Destination', dst_url)
        if method == "PROPPATCH":
            request.add_header('Translate', 'F')

        request.add_header("Authorization", "Basic %s" % self.auth_str)

        request.get_method = lambda: method

        LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
                  {'method': method, 'src': src_url, 'des': dst_url})

        while retry < maxretries:
            try:
                # URLs are prepended with self.https_path which is safe
                # meaning that the URL will either be safe or nonexistant
                response = urllib.request.urlopen(  # nosec
                    request, timeout=None)
            except urllib.error.HTTPError as err:
                LOG.error('WebDAV returned with %(code)s error during '
                          '%(method)s call.',
                          {'code': err.code, 'method': method})

                if err.code == http_client.INTERNAL_SERVER_ERROR:
                    LOG.error('WebDAV operation failed with error code: '
                              '%(code)s reason: %(reason)s Retry attempt '
                              '%(retry)s in progress.',
                              {'code': err.code,
                               'reason': err.reason,
                               'retry': retry})
                    if retry < maxretries:
                        retry += 1
                        time.sleep(1)
                        continue

                msg = self._lookup_error(err.code)
                raise WebDAVClientError(msg=msg, code=err.code,
                                        src=src_file, dst=dst_file,
                                        method=method)

            except http_client.BadStatusLine as err:
                msg = self._lookup_error('BadStatusLine')
                code = 'http_client.BadStatusLine'
                raise WebDAVClientError(msg=msg, code=code,
                                        src=src_file, dst=dst_file,
                                        method=method)

            except urllib.error.URLError as err:
                reason = ''
                # getattr with a default: a URLError lacking a ``reason``
                # attribute would otherwise raise AttributeError here and
                # mask the original connection failure.
                if getattr(err, 'reason', None):
                    reason = err.reason

                msg = self._lookup_error('Bad_Gateway')
                raise WebDAVClientError(msg=msg, code=reason,
                                        src=src_file,
                                        dst=dst_file, method=method)

            break
        return response
|
File diff suppressed because it is too large
Load Diff
@ -1,792 +0,0 @@
|
||||
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance NFS Cinder Volume Driver
|
||||
"""
|
||||
import datetime as dt
|
||||
import errno
|
||||
import math
|
||||
import os
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_serialization import base64
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import units
|
||||
import six
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.image import image_utils
|
||||
from cinder import interface
|
||||
from cinder.objects.volume import Volume
|
||||
from cinder import utils
|
||||
from cinder.volume import configuration
|
||||
from cinder.volume.drivers import nfs
|
||||
from cinder.volume.drivers.san import san
|
||||
from cinder.volume.drivers.zfssa import zfssarest
|
||||
from cinder.volume import volume_utils
|
||||
|
||||
|
||||
# Driver-specific configuration options.  Registered below in the shared
# backend group so they can be set per-backend in cinder.conf.
ZFSSA_OPTS = [
    cfg.StrOpt('zfssa_data_ip',
               help='Data path IP address'),
    cfg.StrOpt('zfssa_https_port', default='443',
               help='HTTPS port number'),
    cfg.StrOpt('zfssa_nfs_mount_options', default='',
               help='Options to be passed while mounting share over nfs'),
    cfg.StrOpt('zfssa_nfs_pool', default='',
               help='Storage pool name.'),
    cfg.StrOpt('zfssa_nfs_project', default='NFSProject',
               help='Project name.'),
    cfg.StrOpt('zfssa_nfs_share', default='nfs_share',
               help='Share name.'),
    cfg.StrOpt('zfssa_nfs_share_compression', default='off',
               choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
               help='Data compression.'),
    cfg.StrOpt('zfssa_nfs_share_logbias', default='latency',
               choices=['latency', 'throughput'],
               help='Synchronous write bias-latency, throughput.'),
    cfg.IntOpt('zfssa_rest_timeout',
               help='REST connection timeout. (seconds)'),
    cfg.BoolOpt('zfssa_enable_local_cache', default=True,
                help='Flag to enable local caching: True, False.'),
    cfg.StrOpt('zfssa_cache_directory', default='os-cinder-cache',
               help='Name of directory inside zfssa_nfs_share where cache '
                    'volumes are stored.'),
    cfg.StrOpt('zfssa_manage_policy', default='loose',
               choices=['loose', 'strict'],
               help='Driver policy for volume manage.')
]

LOG = log.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(ZFSSA_OPTS, group=configuration.SHARED_CONF_GROUP)


def factory_zfssa():
    """Return a new ZFSSA NFS REST API client instance."""
    return zfssarest.ZFSSANfsApi()
|
||||
|
||||
|
||||
@interface.volumedriver
class ZFSSANFSDriver(nfs.NfsDriver):
    """ZFSSA Cinder NFS volume driver.

    Version history:

    .. code-block:: none

        1.0.1:
            Backend enabled volume migration.
            Local cache feature.
        1.0.2:
            Volume manage/unmanage support.
    """
    VERSION = '1.0.2'
    volume_backend_name = 'ZFSSA_NFS'
    protocol = driver_prefix = driver_volume_type = 'nfs'

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Oracle_ZFSSA_CI"

    # TODO(jsbryant) Remove driver in the 'U' release as Oracle
    # is dropping support.
    SUPPORTED = False

    def __init__(self, *args, **kwargs):
        """Register ZFSSA and SAN config options on top of the NFS base."""
        super(ZFSSANFSDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(ZFSSA_OPTS)
        self.configuration.append_config_values(san.san_opts)
        # REST client; created later by do_setup().
        self.zfssa = None
        self._stats = None
|
||||
|
||||
def do_setup(self, context):
    """One-time driver initialization.

    Validates the over-subscription ratio and required credentials,
    checks an NFS mount helper is installed, logs in to the appliance,
    creates/configures the project and share, sets up the WebDAV client
    and, optionally, the local cache directory.
    """
    self.configuration.max_over_subscription_ratio = (
        volume_utils.get_max_over_subscription_ratio(
            self.configuration.max_over_subscription_ratio,
            supports_auto=False))

    if not self.configuration.max_over_subscription_ratio > 0:
        msg = _("Config 'max_over_subscription_ratio' invalid. Must be > "
                "0: %s") % self.configuration.max_over_subscription_ratio
        LOG.error(msg)
        raise exception.NfsException(msg)

    # At least one of these mount helpers must be present on the host.
    packages = ('mount.nfs', '/usr/sbin/mount')
    for package in packages:
        try:
            self._execute(package, check_exit_code=False, run_as_root=True)
            break
        except OSError as exc:
            # ENOENT just means this candidate is absent; try the next.
            if exc.errno != errno.ENOENT:
                raise
            LOG.error('%s is not installed.', package)
    else:
        # for/else: no candidate was found at all.
        msg = utils.build_or_str(packages, '%s needs to be installed.')
        raise exception.NfsException(msg)

    lcfg = self.configuration
    LOG.info('Connecting to host: %s.', lcfg.san_ip)

    host = lcfg.san_ip
    user = lcfg.san_login
    password = lcfg.san_password
    https_port = lcfg.zfssa_https_port

    # All of these must be set in cinder.conf for the driver to work.
    credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']

    for cred in credentials:
        if not getattr(lcfg, cred, None):
            exception_msg = _('%s not set in cinder.conf') % cred
            LOG.error(exception_msg)
            raise exception.CinderException(exception_msg)

    self.zfssa = factory_zfssa()
    self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)

    auth_str = base64.encode_as_text('%s:%s' % (user, password))
    self.zfssa.login(auth_str)

    self.zfssa.create_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                              compression=lcfg.zfssa_nfs_share_compression,
                              logbias=lcfg.zfssa_nfs_share_logbias)

    share_args = {
        'sharedav': 'rw',
        'sharenfs': 'rw',
        'root_permissions': '777',
        'compression': lcfg.zfssa_nfs_share_compression,
        'logbias': lcfg.zfssa_nfs_share_logbias
    }

    self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                            lcfg.zfssa_nfs_share, share_args)

    share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                         lcfg.zfssa_nfs_project,
                                         lcfg.zfssa_nfs_share)

    mountpoint = share_details['mountpoint']

    self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
    https_path = 'https://' + lcfg.zfssa_data_ip + ':' + https_port + \
        '/shares' + mountpoint

    LOG.debug('NFS mount path: %s', self.mount_path)
    LOG.debug('WebDAV path to the share: %s', https_path)

    self.shares = {}
    mnt_opts = self.configuration.zfssa_nfs_mount_options
    self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None

    # Initialize the WebDAV client
    self.zfssa.set_webdav(https_path, auth_str)

    # Edit http service so that WebDAV requests are always authenticated
    args = {'https_port': https_port,
            'require_login': True}

    self.zfssa.modify_service('http', args)
    self.zfssa.enable_service('http')

    if lcfg.zfssa_enable_local_cache:
        LOG.debug('Creating local cache directory %s.',
                  lcfg.zfssa_cache_directory)
        self.zfssa.create_directory(lcfg.zfssa_cache_directory)
|
||||
|
||||
def _ensure_shares_mounted(self):
    """Mount the single configured share, logging (not raising) failures."""
    try:
        self._ensure_share_mounted(self.mount_path)
    except Exception as exc:
        # Best effort: the failure is logged and the share is still
        # reported as available below.
        LOG.error('Exception during mounting %s.', exc)

    self._mounted_shares = [self.mount_path]
    LOG.debug('Available shares %s', self._mounted_shares)
|
||||
|
||||
def check_for_setup_error(self):
    """Check that driver can login.

    Check also for properly configured pool, project and share.
    Check that the http and nfs services are enabled.
    """
    lcfg = self.configuration

    self.zfssa.verify_pool(lcfg.zfssa_nfs_pool)
    self.zfssa.verify_project(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project)
    self.zfssa.verify_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                            lcfg.zfssa_nfs_share)
    self.zfssa.verify_service('http')
    self.zfssa.verify_service('nfs')
|
||||
|
||||
def create_volume(self, volume):
    """Create a volume on the share and tag its file as cinder-managed."""
    ret = super(ZFSSANFSDriver, self).create_volume(volume)
    self.zfssa.set_file_props(volume.name, {'cinder_managed': 'True'})
    return ret
|
||||
|
||||
def create_snapshot(self, snapshot):
    """Creates a snapshot of a volume.

    Takes a temporary share-level ZFS snapshot, copies the volume's file
    out of it, and always removes the share snapshot afterwards.
    """
    LOG.info('Creating snapshot: %s', snapshot['name'])
    lcfg = self.configuration
    snap_name = self._create_snapshot_name()
    self.zfssa.create_snapshot(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                               lcfg.zfssa_nfs_share, snap_name)

    src_file = snap_name + '/' + snapshot['volume_name']

    try:
        self.zfssa.create_snapshot_of_volume_file(src_file=src_file,
                                                  dst_file=
                                                  snapshot['name'])
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.debug('Error thrown during snapshot: %s creation',
                      snapshot['name'])
    finally:
        # The share snapshot was only needed as a copy source; delete it
        # whether the copy succeeded or not.
        self.zfssa.delete_snapshot(lcfg.zfssa_nfs_pool,
                                   lcfg.zfssa_nfs_project,
                                   lcfg.zfssa_nfs_share, snap_name)
|
||||
|
||||
def delete_snapshot(self, snapshot):
    """Deletes a snapshot (removes its file from the share)."""
    LOG.info('Deleting snapshot: %s', snapshot['name'])
    self.zfssa.delete_snapshot_of_volume_file(src_file=snapshot['name'])
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot, method='COPY'):
    """Create a volume from a snapshot via a server-side file transfer.

    :param volume: new volume; 'name' and 'size' are used
    :param snapshot: source snapshot; 'name', 'volume_name' and
                     'volume_size' are used
    :param method: transfer verb passed to the appliance - 'COPY'
                   preserves the snapshot file, 'MOVE' is used by
                   create_cloned_volume
    :returns: model update carrying the share's provider_location
    """
    # Log message typo fixed: 'Creatng' -> 'Creating'.
    LOG.info('Creating volume from snapshot. volume: %s',
             volume['name'])
    LOG.info('Source Snapshot: %s', snapshot['name'])

    self._ensure_shares_mounted()
    self.zfssa.create_volume_from_snapshot_file(src_file=snapshot['name'],
                                                dst_file=volume['name'],
                                                method=method)

    volume['provider_location'] = self.mount_path

    if volume['size'] != snapshot['volume_size']:
        try:
            self.extend_volume(volume, volume['size'])
        except Exception:
            vol_path = self.local_path(volume)
            with excutils.save_and_reraise_exception():
                LOG.error('Error in extending volume size: Volume: '
                          '%(volume)s Vol_Size: %(vol_size)d with '
                          'Snapshot: %(snapshot)s Snap_Size: '
                          '%(snap_size)d',
                          {'volume': volume['name'],
                           'vol_size': volume['size'],
                           'snapshot': snapshot['name'],
                           'snap_size': snapshot['volume_size']})
                # Remove the half-created volume file before re-raising.
                self._execute('rm', '-f', vol_path, run_as_root=True)

    volume_origin = {'origin': snapshot['volume_name'],
                     'cinder_managed': 'True'}
    self.zfssa.set_file_props(volume['name'], volume_origin)

    return {'provider_location': volume['provider_location']}
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
    """Creates a snapshot and then clones the snapshot into a volume."""
    LOG.info('new cloned volume: %s', volume['name'])
    LOG.info('source volume for cloning: %s', src_vref['name'])

    # Transient snapshot record built from the source volume; passed with
    # method='MOVE' below so the snapshot file itself is consumed.
    snapshot = {'volume_name': src_vref['name'],
                'volume_id': src_vref['id'],
                'volume_size': src_vref['size'],
                'name': self._create_snapshot_name()}

    self.create_snapshot(snapshot)
    return self.create_volume_from_snapshot(volume, snapshot,
                                            method='MOVE')
|
||||
|
||||
def delete_volume(self, volume):
    """Delete a volume; if it was cloned from a cache volume, re-check
    whether that cache volume is still needed.
    """
    LOG.debug('Deleting volume %s.', volume.name)
    lcfg = self.configuration
    try:
        # Fetch the origin before the file is removed; it is needed for
        # the cache-volume bookkeeping below.
        vol_props = self.zfssa.get_volume(volume.name)
    except exception.VolumeNotFound:
        # Already gone: treat deletion as idempotent.
        return
    super(ZFSSANFSDriver, self).delete_volume(volume)

    if vol_props['origin'].startswith(lcfg.zfssa_cache_directory):
        LOG.info('Checking origin %(origin)s of volume %(volume)s.',
                 {'origin': vol_props['origin'],
                  'volume': volume.name})
        self._check_origin(vol_props['origin'])
|
||||
|
||||
@utils.synchronized('zfssanfs', external=True)
def clone_image(self, context, volume,
                image_location, image_meta,
                image_service):
    """Create a volume efficiently from an existing image.

    Verify the image ID being used:

    (1) If there is no existing cache volume, create one and transfer
    image data to it. Take a snapshot.

    (2) If a cache volume already exists, verify if it is either alternated
    or updated. If so try to remove it, raise exception if removal fails.
    Create a new cache volume as in (1).

    Clone a volume from the cache volume and returns it to Cinder.

    A file lock is placed on this method to prevent:

    (a) a race condition when a cache volume has been verified, but then
    gets deleted before it is cloned.

    (b) failure of subsequent clone_image requests if the first request is
    still pending.

    :returns: tuple (model_update, cloned); ``cloned`` is False when
              local caching is disabled or cloning failed.
    """
    LOG.debug('Cloning image %(image)s to volume %(volume)s',
              {'image': image_meta['id'], 'volume': volume['name']})
    lcfg = self.configuration
    cachevol_size = 0
    if not lcfg.zfssa_enable_local_cache:
        return None, False

    with image_utils.TemporaryImages.fetch(
            image_service, context, image_meta['id']) as tmp_image:
        info = image_utils.qemu_img_info(tmp_image)
        cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi))

    if cachevol_size > volume['size']:
        # Bug fix: previously a (msg, args) tuple was built and logged
        # verbatim; pass the format string and arguments separately so
        # the log line is actually interpolated.
        LOG.error('Image size %(img_size)dGB is larger '
                  'than volume size %(vol_size)dGB.',
                  {'img_size': cachevol_size,
                   'vol_size': volume['size']})
        return None, False

    updated_at = six.text_type(image_meta['updated_at'].isoformat())
    cachevol_props = {
        'id': image_meta['id'],
        'size': cachevol_size,
        'updated_at': updated_at,
        'image_id': image_meta['id'],
    }

    try:
        cachevol_name = self._verify_cache_volume(context,
                                                  image_meta,
                                                  image_service,
                                                  cachevol_props)
        # A cache volume should be ready by now.
        # Create a clone from the cache volume.
        cache_vol = {
            'name': cachevol_name,
            'size': cachevol_size,
            'id': image_meta['id'],
        }
        clone_vol = self.create_cloned_volume(volume, cache_vol)
        self._update_origin(volume['name'], cachevol_name)
    except exception.VolumeBackendAPIException as exc:
        # Bug fix: same (msg, args) tuple issue as above.
        LOG.error('Cannot clone image %(image)s to '
                  'volume %(volume)s. Error: %(error)s.',
                  {'volume': volume['name'],
                   'image': image_meta['id'],
                   'error': exc.msg})
        return None, False

    return clone_vol, True
|
||||
|
||||
def _verify_cache_volume(self, context, img_meta,
                         img_service, cachevol_props):
    """Verify if we have a cache volume that we want.

    If we don't, create one.
    If we do, check if it's been updated:

    * If so, delete it and recreate a new volume
    * If not, we are good.

    If it's out of date, delete it and create a new one.

    After the function returns, there should be a cache volume available,
    ready for cloning.

    :returns: name of the usable cache volume.
    :raises VolumeBackendAPIException: when a stale cache volume still
        has clones and therefore cannot be replaced.
    """
    lcfg = self.configuration
    cache_dir = '%s/' % lcfg.zfssa_cache_directory
    cache_vol_obj = Volume()
    cache_vol_obj.provider_location = self.mount_path + '/' + cache_dir
    cache_vol_obj._name_id = cachevol_props['id']
    cachevol_name = cache_dir + cache_vol_obj.name

    LOG.debug('Verifying cache volume %s:', cachevol_name)

    try:
        cache_vol = self.zfssa.get_volume(cachevol_name)
    except exception.VolumeNotFound:
        # There is no existing cache volume, create one:
        LOG.debug('Cache volume not found. Creating one...')
        return self._create_cache_volume(context,
                                         img_meta,
                                         img_service,
                                         cachevol_props)

    # A cache volume does exist, check if it's updated:
    if ((cache_vol['updated_at'] != cachevol_props['updated_at']) or
            (cache_vol['image_id'] != cachevol_props['image_id'])):
        if cache_vol['numclones'] > 0:
            # The cache volume is stale but still has clones, so it
            # cannot be deleted and recreated.
            # Bug fix: format the message before logging/raising
            # instead of passing a (msg, args) tuple.
            exception_msg = (_('Cannot delete '
                               'cache volume: %(cachevol_name)s. '
                               'It was updated at %(updated_at)s '
                               'and currently has %(numclones)d '
                               'volume instances.') %
                             {'cachevol_name': cachevol_name,
                              'updated_at': cachevol_props['updated_at'],
                              'numclones': cache_vol['numclones']})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        # The cache volume is updated, but has no clone, so we delete it
        # and re-create a new one:
        super(ZFSSANFSDriver, self).delete_volume(cache_vol_obj)
        return self._create_cache_volume(context,
                                         img_meta,
                                         img_service,
                                         cachevol_props)

    return cachevol_name
|
||||
|
||||
def _create_cache_volume(self, context, img_meta,
                         img_service, cachevol_props):
    """Create a cache volume from an image.

    Returns name of the cache volume.

    :raises VolumeBackendAPIException: if creating or populating the
        cache volume fails; the partially created file is removed first.
    """
    lcfg = self.configuration
    cache_dir = '%s/' % lcfg.zfssa_cache_directory
    cache_vol = Volume()
    cache_vol.provider_location = self.mount_path
    cache_vol._name_id = cachevol_props['id']
    cache_vol.size = cachevol_props['size']
    cache_vol_name = cache_dir + cache_vol.name

    LOG.debug('Creating cache volume %s', cache_vol_name)
    try:
        self.create_volume(cache_vol)
        LOG.debug('Copying image data:')
        super(ZFSSANFSDriver, self).copy_image_to_volume(context,
                                                         cache_vol,
                                                         img_service,
                                                         img_meta['id'])
        # Move the populated file under the cache directory.
        self.zfssa.webdavclient.request(src_file=cache_vol.name,
                                        dst_file=cache_vol_name,
                                        method='MOVE')

    except Exception as exc:
        # Bug fix: build the message with % substitution instead of
        # passing a (msg, args) tuple to LOG.error and the exception.
        exc_msg = (_('Fail to create cache volume %(volume)s. '
                     'Error: %(err)s') %
                   {'volume': cache_vol_name,
                    'err': six.text_type(exc)})
        LOG.error(exc_msg)
        self.zfssa.delete_file(cache_vol_name)
        raise exception.VolumeBackendAPIException(data=exc_msg)

    cachevol_meta = {
        'updated_at': cachevol_props['updated_at'],
        'image_id': cachevol_props['image_id'],
    }
    # A fresh cache volume starts with no clones.
    cachevol_meta.update({'numclones': '0'})
    self.zfssa.set_file_props(cache_vol_name, cachevol_meta)
    return cache_vol_name
|
||||
|
||||
def _create_snapshot_name(self):
|
||||
"""Creates a snapshot name from the date and time."""
|
||||
|
||||
return ('cinder-zfssa-nfs-snapshot-%s' %
|
||||
dt.datetime.utcnow().isoformat())
|
||||
|
||||
def _get_share_capacity_info(self):
|
||||
"""Get available and used capacity info for the NFS share."""
|
||||
lcfg = self.configuration
|
||||
share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
|
||||
lcfg.zfssa_nfs_project,
|
||||
lcfg.zfssa_nfs_share)
|
||||
|
||||
free = share_details['space_available']
|
||||
used = share_details['space_total']
|
||||
return free, used
|
||||
|
||||
@utils.synchronized('zfssanfs', external=True)
def _check_origin(self, origin):
    """Verify the cache volume of a bootable volume.

    If the cache no longer has clone, it will be deleted.
    """
    try:
        props = self.zfssa.get_volume(origin)
    except exception.VolumeNotFound:
        LOG.debug('Origin %s does not exist', origin)
        return

    clone_count = props['numclones']
    LOG.debug('Number of clones: %d', clone_count)
    if clone_count <= 1:
        # The last clone is going away; drop the cache file itself.
        self.zfssa.delete_file(origin)
    else:
        self.zfssa.set_file_props(
            origin, {'numclones': six.text_type(clone_count - 1)})
|
||||
|
||||
def _update_origin(self, vol_name, cachevol_name):
    """Update WebDAV property of a volume.

    WebDAV properties are used to keep track of:
    (1) The number of clones of a cache volume.
    (2) The cache volume name (origin) of a bootable volume.

    To avoid race conditions when multiple volumes are created and needed
    to be updated, a file lock is used to ensure that the properties are
    updated properly.
    """
    self.zfssa.set_file_props(vol_name, {'origin': cachevol_name})

    cache_props = self.zfssa.get_volume(cachevol_name)
    bumped = six.text_type(cache_props['numclones'] + 1)
    cache_props.update({'numclones': bumped})
    self.zfssa.set_file_props(cachevol_name, cache_props)
|
||||
|
||||
def _update_volume_stats(self):
    """Get volume stats from zfssa"""
    self._ensure_shares_mounted()
    lcfg = self.configuration
    backend = self.configuration.safe_get('volume_backend_name')

    stats = {
        'volume_backend_name': backend or self.__class__.__name__,
        'vendor_name': 'Oracle',
        'driver_version': self.VERSION,
        'storage_protocol': self.protocol,
    }

    asn = self.zfssa.get_asn()
    stats['location_info'] = '%s:%s' % (asn, lcfg.zfssa_nfs_share)

    free, used = self._get_share_capacity_info()
    capacity = float(free) + float(used)
    ratio_used = used / capacity

    stats['QoS_support'] = False
    stats['reserved_percentage'] = 0

    # Report the share as fully reserved once usage crosses the
    # configured reserved percentage or the over-subscription ratio.
    used_ratio_limit = (
        (100 - self.configuration.reserved_percentage) / 100.0)
    if (ratio_used > used_ratio_limit or
            ratio_used >= self.configuration.max_over_subscription_ratio):
        stats['reserved_percentage'] = 100

    stats['total_capacity_gb'] = float(capacity) / units.Gi
    stats['free_capacity_gb'] = float(free) / units.Gi

    share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                         lcfg.zfssa_nfs_project,
                                         lcfg.zfssa_nfs_share)
    pool_details = self.zfssa.get_pool_details(lcfg.zfssa_nfs_pool)

    stats['zfssa_compression'] = share_details['compression']
    stats['zfssa_encryption'] = share_details['encryption']
    stats['zfssa_logbias'] = share_details['logbias']
    stats['zfssa_poolprofile'] = pool_details['profile']
    stats['zfssa_sparse'] = six.text_type(lcfg.nfs_sparsed_volumes)

    self._stats = stats
|
||||
|
||||
def migrate_volume(self, ctxt, volume, host):
    """Attempt ZFSSA backend-assisted migration of *volume* to *host*.

    Falls back to generic migration by returning ``(False, None)`` when
    any precondition is not met.

    :returns: tuple ``(migrated, model_update)``.
    """
    LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, '
              'host: %(host)s, status=%(status)s',
              {'id': volume['id'],
               'host': host,
               'status': volume['status']})

    lcfg = self.configuration
    default_ret = (False, None)

    if volume['status'] != "available":
        LOG.debug('Only available volumes can be migrated using backend '
                  'assisted migration. Defaulting to generic migration.')
        return default_ret

    if (host['capabilities']['vendor_name'] != 'Oracle' or
            host['capabilities']['storage_protocol'] != self.protocol):
        # Bug fix: this is the NFS driver, so the message no longer
        # claims both ends must be "Oracle iSCSI" (copy-paste from the
        # iSCSI driver); they must be Oracle backends with the same
        # storage protocol.
        LOG.debug('Source and destination drivers need to be Oracle '
                  'backends with the same storage protocol to use '
                  'backend assisted migration. Defaulting to generic '
                  'migration.')
        return default_ret

    if 'location_info' not in host['capabilities']:
        LOG.debug('Could not find location_info in capabilities reported '
                  'by the destination driver. Defaulting to generic '
                  'migration.')
        return default_ret

    loc_info = host['capabilities']['location_info']

    try:
        (tgt_asn, tgt_share) = loc_info.split(':')
    except ValueError:
        LOG.error("Location info needed for backend enabled volume "
                  "migration not in correct format: %s. Continuing "
                  "with generic volume migration.", loc_info)
        return default_ret

    src_asn = self.zfssa.get_asn()

    if tgt_asn == src_asn and lcfg.zfssa_nfs_share == tgt_share:
        # Same appliance and same share: the file is already in place.
        LOG.info('Source and destination ZFSSA shares are the same. '
                 'Do nothing. volume: %s', volume['name'])
        return (True, None)

    return (False, None)
|
||||
|
||||
def update_migrated_volume(self, ctxt, volume, new_volume,
                           original_volume_status):
    """Return model update for migrated volume.

    :param volume: The original volume that was migrated to this backend
    :param new_volume: The migration volume object that was created on
                       this backend as part of the migration process
    :param original_volume_status: The status of the original volume
    :returns: model_update to update DB with any needed changes
    """
    original_name = CONF.volume_name_template % volume['id']
    current_name = CONF.volume_name_template % new_volume['id']

    LOG.debug('Renaming migrated volume: %(cur)s to %(org)s.',
              {'cur': current_name,
               'org': original_name})
    # Rename on the share so the migrated file takes the original name.
    self.zfssa.create_volume_from_snapshot_file(src_file=current_name,
                                                dst_file=original_name,
                                                method='MOVE')
    return {'_name_id': None,
            'provider_location': new_volume['provider_location']}
|
||||
|
||||
def manage_existing(self, volume, existing_ref):
    """Manage an existing volume in the ZFSSA backend.

    :param volume: Reference to the new volume.
    :param existing_ref: Reference to the existing volume to be managed.
    """
    existing_vol_name = self._get_existing_vol_name(existing_ref)
    try:
        vol_props = self.zfssa.get_volume(existing_vol_name)
    except exception.VolumeNotFound:
        err_msg = (_("Volume %s doesn't exist on the ZFSSA backend.") %
                   existing_vol_name)
        LOG.error(err_msg)
        raise exception.InvalidInput(reason=err_msg)

    self._verify_volume_to_manage(existing_vol_name, vol_props)

    new_name = volume['name']
    try:
        self.zfssa.rename_volume(existing_vol_name, new_name)
    except Exception:
        LOG.error("Failed to rename volume %(existing)s to %(new)s. "
                  "Volume manage failed.",
                  {'existing': existing_vol_name,
                   'new': new_name})
        raise

    try:
        self.zfssa.set_file_props(new_name,
                                  {'cinder_managed': 'True'})
    except Exception:
        # Roll the rename back so the volume stays unmanaged.
        self.zfssa.rename_volume(new_name, existing_vol_name)
        LOG.error("Failed to set properties for volume %(existing)s. "
                  "Volume manage failed.",
                  {'existing': new_name})
        raise

    return {'provider_location': self.mount_path}
|
||||
|
||||
def manage_existing_get_size(self, volume, existing_ref):
    """Return size of the volume to be managed by manage_existing.

    :param volume: Reference to the new volume.
    :param existing_ref: Reference containing ``source-name`` of the
        existing volume.
    :returns: size of the existing backing file in GiB, rounded up.
    :raises VolumeBackendAPIException: if the backing file is missing
        or its size cannot be determined.
    """
    existing_vol_name = self._get_existing_vol_name(existing_ref)

    # The ZFSSA NFS driver only has one mounted share.
    local_share_mount = self._get_mount_point_for_share(
        self._mounted_shares[0])
    local_vol_path = os.path.join(local_share_mount, existing_vol_name)

    try:
        # Bug fix: previously a missing file left ``size`` unbound and
        # the method later died with UnboundLocalError; raise OSError
        # here so the existing handler reports the failure properly.
        if not os.path.isfile(local_vol_path):
            raise OSError('%s is not a file.' % local_vol_path)
        size = int(math.ceil(float(
            utils.get_file_size(local_vol_path)) / units.Gi))
    except (OSError, ValueError):
        # Bug fix: format the message instead of passing a (msg, args)
        # tuple to LOG.error and the raised exception.
        err_msg = (_("Failed to get size of existing volume: %(vol)s. "
                     "Volume Manage failed.") %
                   {'vol': existing_vol_name})
        LOG.error(err_msg)
        raise exception.VolumeBackendAPIException(data=err_msg)

    LOG.debug("Size volume: %(vol)s to be migrated is: %(size)s.",
              {'vol': existing_vol_name, 'size': size})

    return size
|
||||
|
||||
def _verify_volume_to_manage(self, name, vol_props):
|
||||
lcfg = self.configuration
|
||||
|
||||
if lcfg.zfssa_manage_policy != 'strict':
|
||||
return
|
||||
|
||||
if vol_props['cinder_managed'] == "":
|
||||
err_msg = (_("Unknown if the volume: %s to be managed is "
|
||||
"already being managed by Cinder. Aborting manage "
|
||||
"volume. Please add 'cinder_managed' custom schema "
|
||||
"property to the volume and set its value to False. "
|
||||
"Alternatively, Set the value of cinder config "
|
||||
"policy 'zfssa_manage_policy' to 'loose' to "
|
||||
"remove this restriction.") % name)
|
||||
LOG.error(err_msg)
|
||||
raise exception.InvalidInput(reason=err_msg)
|
||||
|
||||
if vol_props['cinder_managed'] == 'True':
|
||||
msg = (_("Volume: %s is already being managed by Cinder.") % name)
|
||||
LOG.error(msg)
|
||||
raise exception.ManageExistingAlreadyManaged(volume_ref=name)
|
||||
|
||||
def unmanage(self, volume):
    """Remove an existing volume from cinder management.

    :param volume: Reference to the volume to be unmanaged.
    """
    old_name = volume['name']
    new_name = 'unmanaged-' + old_name
    try:
        self.zfssa.rename_volume(old_name, new_name)
    except Exception:
        LOG.error("Failed to rename volume %(existing)s to %(new)s. "
                  "Volume unmanage failed.",
                  {'existing': old_name,
                   'new': new_name})
        raise

    try:
        self.zfssa.set_file_props(new_name, {'cinder_managed': 'False'})
    except Exception:
        # Undo the rename so the volume remains managed.
        self.zfssa.rename_volume(new_name, old_name)
        LOG.error("Failed to set properties for volume %(existing)s. "
                  "Volume unmanage failed.",
                  {'existing': old_name})
        raise
|
||||
|
||||
def _get_existing_vol_name(self, existing_ref):
|
||||
if 'source-name' not in existing_ref:
|
||||
msg = _("Reference to volume to be managed must contain "
|
||||
"source-name.")
|
||||
raise exception.ManageExistingInvalidReference(
|
||||
existing_ref=existing_ref, reason=msg)
|
||||
return existing_ref['source-name']
|
File diff suppressed because it is too large
Load Diff
@ -1,269 +0,0 @@
|
||||
=========================================
|
||||
Oracle ZFS Storage Appliance iSCSI driver
|
||||
=========================================
|
||||
|
||||
Oracle ZFS Storage Appliances (ZFSSAs) provide advanced software to
|
||||
protect data, speed tuning and troubleshooting, and deliver high
|
||||
performance and high availability. Through the Oracle ZFSSA iSCSI
|
||||
Driver, OpenStack Block Storage can use an Oracle ZFSSA as a block
|
||||
storage resource. The driver enables you to create iSCSI volumes that an
|
||||
OpenStack Block Storage server can allocate to any virtual machine
|
||||
running on a compute host.
|
||||
|
||||
Requirements
|
||||
~~~~~~~~~~~~
|
||||
|
||||
The Oracle ZFSSA iSCSI Driver, version ``1.0.0`` and later, supports
|
||||
ZFSSA software release ``2013.1.2.0`` and later.
|
||||
|
||||
Supported operations
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Create, delete, attach, detach, manage, and unmanage volumes.
|
||||
- Create and delete snapshots.
|
||||
- Create volume from snapshot.
|
||||
- Extend a volume.
|
||||
- Attach and detach volumes.
|
||||
- Get volume stats.
|
||||
- Clone volumes.
|
||||
- Migrate a volume.
|
||||
- Local cache of a bootable volume.
|
||||
|
||||
Configuration
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
#. Enable RESTful service on the ZFSSA Storage Appliance.
|
||||
|
||||
#. Create a new user on the appliance with the following authorizations:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
scope=stmf - allow_configure=true
|
||||
scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rename=true, allow_rollback=true, allow_takeSnap=true
|
||||
scope=schema - allow_modify=true
|
||||
|
||||
You can create a role with authorizations as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration roles
|
||||
zfssa:configuration roles> role OpenStackRole
|
||||
zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack Cinder Driver"
|
||||
zfssa:configuration roles OpenStackRole (uncommitted)> commit
|
||||
zfssa:configuration roles> select OpenStackRole
|
||||
zfssa:configuration roles OpenStackRole> authorizations create
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=stmf
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> commit
|
||||
zfssa:configuration roles OpenStackRole> authorizations create
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=nas
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_clone=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createProject=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createShare=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeSpaceProps=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeGeneralProps=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_destroy=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rename=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rollback=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_takeSnap=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> commit
|
||||
|
||||
You can create a user with a specific role as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration users
|
||||
zfssa:configuration users> user cinder
|
||||
zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver"
|
||||
zfssa:configuration users cinder (uncommitted)> set initial_password=12345
|
||||
zfssa:configuration users cinder (uncommitted)> commit
|
||||
zfssa:configuration users> select cinder set roles=OpenStackRole
|
||||
|
||||
.. note::
|
||||
|
||||
You can also run this `workflow
|
||||
<https://openstackci.oracle.com/openstack_docs/zfssa_cinder_workflow.akwf>`__
|
||||
to automate the above tasks.
|
||||
Refer to `Oracle documentation
|
||||
<https://docs.oracle.com/cd/E37831_01/html/E52872/godgw.html>`__
|
||||
on how to download, view, and execute a workflow.
|
||||
|
||||
#. Ensure that the ZFSSA iSCSI service is online. If the ZFSSA iSCSI service is
|
||||
not online, enable the service by using the BUI, CLI or REST API in the
|
||||
appliance.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration services iscsi
|
||||
zfssa:configuration services iscsi> enable
|
||||
zfssa:configuration services iscsi> show
|
||||
Properties:
|
||||
<status>= online
|
||||
...
|
||||
|
||||
Define the following required properties in the ``cinder.conf`` file:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
|
||||
san_ip = myhost
|
||||
san_login = username
|
||||
san_password = password
|
||||
zfssa_pool = mypool
|
||||
zfssa_project = myproject
|
||||
zfssa_initiator_group = default
|
||||
zfssa_target_portal = w.x.y.z:3260
|
||||
zfssa_target_interfaces = e1000g0
|
||||
|
||||
Optionally, you can define additional properties.
|
||||
|
||||
Target interfaces can be seen as follows in the CLI:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration net interfaces
|
||||
zfssa:configuration net interfaces> show
|
||||
Interfaces:
|
||||
INTERFACE STATE CLASS LINKS ADDRS LABEL
|
||||
e1000g0 up ip e1000g0 1.10.20.30/24 Untitled Interface
|
||||
...
|
||||
|
||||
.. note::
|
||||
|
||||
Do not use management interfaces for ``zfssa_target_interfaces``.
|
||||
|
||||
#. Configure the cluster:
|
||||
|
||||
If a cluster is used as the cinder storage resource, the following
|
||||
verifications are required on your Oracle ZFS Storage Appliance:
|
||||
|
||||
- Verify that both the pool and the network interface are of type
|
||||
singleton and are not locked to the current controller. This
|
||||
approach ensures that the pool and the interface used for data
|
||||
always belong to the active controller, regardless of the current
|
||||
state of the cluster.
|
||||
|
||||
- Verify that the management IP, data IP and storage pool belong to
|
||||
the same head.
|
||||
|
||||
.. note::
|
||||
|
||||
Most configuration settings, including service properties, users, roles,
|
||||
and iSCSI initiator definitions are replicated on both heads
|
||||
automatically. If the driver modifies any of these settings, they will be
|
||||
modified automatically on both heads.
|
||||
|
||||
.. note::
|
||||
|
||||
A short service interruption occurs during failback or takeover,
|
||||
but once the process is complete, the ``cinder-volume`` service should be able
|
||||
to access the pool through the data IP.
|
||||
|
||||
ZFSSA assisted volume migration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ZFSSA iSCSI driver supports storage assisted volume migration
|
||||
starting in the Liberty release. This feature uses remote replication
|
||||
feature on the ZFSSA. Volumes can be migrated between two backends
|
||||
configured not only to the same ZFSSA but also between two separate
|
||||
ZFSSAs altogether.
|
||||
|
||||
The following conditions must be met in order to use ZFSSA assisted
|
||||
volume migration:
|
||||
|
||||
- Both the source and target backends are configured to ZFSSAs.
|
||||
|
||||
- Remote replication service on the source and target appliance is enabled.
|
||||
|
||||
- The ZFSSA to which the target backend is configured should be configured as a
|
||||
target in the remote replication service of the ZFSSA configured to the
|
||||
source backend. The remote replication target needs to be configured even
|
||||
when the source and the destination for volume migration are the same ZFSSA.
|
||||
Define ``zfssa_replication_ip`` in the ``cinder.conf`` file of the source
|
||||
backend as the IP address used to register the target ZFSSA in the remote
|
||||
replication service of the source ZFSSA.
|
||||
|
||||
- The name of the iSCSI target group(``zfssa_target_group``) on the source and
|
||||
the destination ZFSSA is the same.
|
||||
|
||||
- The volume is not attached and is in available state.
|
||||
|
||||
If any of the above conditions are not met, the driver will proceed with
|
||||
generic volume migration.
|
||||
|
||||
The ZFSSA user on the source and target appliances will need to have
|
||||
additional role authorizations for assisted volume migration to work. In
|
||||
scope nas, set ``allow_rrtarget`` and ``allow_rrsource`` to ``true``.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=nas
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrtarget=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rrsource=true
|
||||
|
||||
ZFSSA local cache
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The local cache feature enables ZFSSA drivers to serve the usage of bootable
|
||||
volumes significantly better. With the feature, the first bootable volume
|
||||
created from an image is cached, so that subsequent volumes can be created
|
||||
directly from the cache, instead of having image data transferred over the
|
||||
network multiple times.
|
||||
|
||||
The following conditions must be met in order to use ZFSSA local cache feature:
|
||||
|
||||
- A storage pool needs to be configured.
|
||||
|
||||
- REST and iSCSI services need to be turned on.
|
||||
|
||||
- On an OpenStack controller, ``cinder.conf`` needs to contain necessary
|
||||
properties used to configure and set up the ZFSSA iSCSI driver, including the
|
||||
following new properties:
|
||||
|
||||
- ``zfssa_enable_local_cache``: (True/False) To enable/disable the feature.
|
||||
|
||||
- ``zfssa_cache_project``: The ZFSSA project name where cache volumes are
|
||||
stored.
|
||||
|
||||
Every cache volume has two additional properties stored as ZFSSA custom
|
||||
schema. It is important that the schema are not altered outside of Block
|
||||
Storage when the driver is in use:
|
||||
|
||||
- ``image_id``: stores the image id as in Image service.
|
||||
|
||||
- ``updated_at``: stores the most current timestamp when the image is updated
|
||||
in Image service.
|
||||
|
||||
Supported extra specs
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Extra specs provide the OpenStack storage admin the flexibility to create
|
||||
volumes with different characteristics from the ones specified in the
|
||||
``cinder.conf`` file. The admin will specify the volume properties as keys
|
||||
at volume type creation. When a user requests a volume of this volume type,
|
||||
the volume will be created with the properties specified as extra specs.
|
||||
|
||||
The following extra specs scoped keys are supported by the driver:
|
||||
|
||||
- ``zfssa:volblocksize``
|
||||
|
||||
- ``zfssa:sparse``
|
||||
|
||||
- ``zfssa:compression``
|
||||
|
||||
- ``zfssa:logbias``
|
||||
|
||||
Volume types can be created using the :command:`openstack volume type create`
|
||||
command.
|
||||
Extra spec keys can be added using :command:`openstack volume type set`
|
||||
command.
|
||||
|
||||
Driver options
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The Oracle ZFSSA iSCSI Driver supports these options:
|
||||
|
||||
.. config-table::
|
||||
:config-target: ZFS Storage Appliance iSCSI
|
||||
|
||||
cinder.volume.drivers.zfssa.zfssaiscsi
|
@ -1,300 +0,0 @@
|
||||
=======================================
|
||||
Oracle ZFS Storage Appliance NFS driver
|
||||
=======================================
|
||||
|
||||
The Oracle ZFS Storage Appliance (ZFSSA) NFS driver enables the ZFSSA to
|
||||
be used seamlessly as a block storage resource. The driver enables you
|
||||
to create volumes on a ZFS share that is NFS mounted.
|
||||
|
||||
Requirements
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Oracle ZFS Storage Appliance Software version ``2013.1.2.0`` or later.
|
||||
|
||||
Supported operations
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Create, delete, attach, detach, manage, and unmanage volumes.
|
||||
|
||||
- Create and delete snapshots.
|
||||
|
||||
- Create a volume from a snapshot.
|
||||
|
||||
- Extend a volume.
|
||||
|
||||
- Copy an image to a volume.
|
||||
|
||||
- Copy a volume to an image.
|
||||
|
||||
- Clone a volume.
|
||||
|
||||
- Volume migration.
|
||||
|
||||
- Local cache of a bootable volume
|
||||
|
||||
Appliance configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Appliance configuration using the command-line interface (CLI) is
|
||||
described below. To access the CLI, ensure SSH remote access is enabled,
|
||||
which is the default. You can also perform configuration using the
|
||||
browser user interface (BUI) or the RESTful API. Please refer to the
|
||||
`Oracle ZFS Storage Appliance
|
||||
documentation <http://www.oracle.com/technetwork/documentation/oracle-unified-ss-193371.html>`__
|
||||
for details on how to configure the Oracle ZFS Storage Appliance using
|
||||
the BUI, CLI, and RESTful API.
|
||||
|
||||
#. Log in to the Oracle ZFS Storage Appliance CLI and enable the REST
|
||||
service. REST service needs to stay online for this driver to function.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:>configuration services rest enable
|
||||
|
||||
#. Create a new storage pool on the appliance if you do not want to use an
|
||||
existing one. This storage pool is named ``'mypool'`` for the sake of this
|
||||
documentation.
|
||||
|
||||
#. Create a new project and share in the storage pool (``mypool``) if you do
|
||||
not want to use existing ones. This driver will create a project and share
|
||||
by the names specified in the ``cinder.conf`` file, if a project and share
|
||||
by that name does not already exist in the storage pool (``mypool``).
|
||||
The project and share are named ``NFSProject`` and ``nfs_share``' in the
|
||||
sample ``cinder.conf`` file as entries below.
|
||||
|
||||
#. To perform driver operations, create a role with the following
|
||||
authorizations:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
scope=svc - allow_administer=true, allow_restart=true, allow_configure=true
|
||||
scope=nas - pool=pool_name, project=project_name, share=share_name, allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true, allow_changeAccessProps=true, allow_changeProtocolProps=true
|
||||
|
||||
The following examples show how to create a role with authorizations.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration roles
|
||||
zfssa:configuration roles> role OpenStackRole
|
||||
zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack NFS Cinder Driver"
|
||||
zfssa:configuration roles OpenStackRole (uncommitted)> commit
|
||||
zfssa:configuration roles> select OpenStackRole
|
||||
zfssa:configuration roles OpenStackRole> authorizations create
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=svc
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_administer=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_restart=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> commit
|
||||
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration roles OpenStackRole authorizations> set scope=nas
|
||||
|
||||
The following properties need to be set when the scope of this role needs to
|
||||
be limited to a pool (``mypool``), a project (``NFSProject``) and a share
|
||||
(``nfs_share``) created in the steps above. This will prevent the user
|
||||
assigned to this role from being used to modify other pools, projects and
|
||||
shares.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set pool=mypool
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set project=NFSProject
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set share=nfs_share
|
||||
|
||||
#. The following properties only need to be set when a share and project has
|
||||
not been created following the steps above and wish to allow the driver to
|
||||
create them for you.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createProject=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_createShare=true
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_clone=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeSpaceProps=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_destroy=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_rollback=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_takeSnap=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeAccessProps=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_changeProtocolProps=true
|
||||
zfssa:configuration roles OpenStackRole auth (uncommitted)> commit
|
||||
|
||||
#. Create a new user or modify an existing one and assign the new role to
|
||||
the user.
|
||||
|
||||
The following example shows how to create a new user and assign the new
|
||||
role to the user.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration users
|
||||
zfssa:configuration users> user cinder
|
||||
zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Cinder Driver"
|
||||
zfssa:configuration users cinder (uncommitted)> set initial_password=12345
|
||||
zfssa:configuration users cinder (uncommitted)> commit
|
||||
zfssa:configuration users> select cinder set roles=OpenStackRole
|
||||
|
||||
#. Ensure that NFS and HTTP services on the appliance are online. Note the
|
||||
HTTPS port number for later entry in the cinder service configuration file
|
||||
(``cinder.conf``). This driver uses WebDAV over HTTPS to create snapshots
|
||||
and clones of volumes, and therefore needs to have the HTTP service online.
|
||||
|
||||
The following example illustrates enabling the services and showing their
|
||||
properties.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration services nfs
|
||||
zfssa:configuration services nfs> enable
|
||||
zfssa:configuration services nfs> show
|
||||
Properties:
|
||||
<status>= online
|
||||
...
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:configuration services http> enable
|
||||
zfssa:configuration services http> show
|
||||
Properties:
|
||||
<status>= online
|
||||
require_login = true
|
||||
protocols = http/https
|
||||
listen_port = 80
|
||||
https_port = 443
|
||||
|
||||
.. note::
|
||||
|
||||
You can also run this `workflow
|
||||
<https://openstackci.oracle.com/openstack_docs/zfssa_cinder_workflow.akwf>`__
|
||||
to automate the above tasks.
|
||||
Refer to `Oracle documentation
|
||||
<https://docs.oracle.com/cd/E37831_01/html/E52872/godgw.html>`__
|
||||
on how to download, view, and execute a workflow.
|
||||
|
||||
#. Create a network interface to be used exclusively for data. An existing
|
||||
network interface may also be used. The following example illustrates how to
|
||||
make a network interface for data traffic flow only.
|
||||
|
||||
.. note::
|
||||
|
||||
For better performance and reliability, it is recommended to configure a
|
||||
separate subnet exclusively for data traffic in your cloud environment.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration net interfaces
|
||||
zfssa:configuration net interfaces> select igbx
|
||||
zfssa:configuration net interfaces igbx> set admin=false
|
||||
zfssa:configuration net interfaces igbx> commit
|
||||
|
||||
#. For clustered controller systems, the following verification is required in
|
||||
addition to the above steps. Skip this step if a standalone system is used.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
zfssa:> configuration cluster resources list
|
||||
|
||||
Verify that both the newly created pool and the network interface are of
|
||||
type ``singleton`` and are not locked to the current controller. This
|
||||
approach ensures that the pool and the interface used for data always belong
|
||||
to the active controller, regardless of the current state of the cluster.
|
||||
Verify that both the network interface used for management and data, and the
|
||||
storage pool belong to the same head.
|
||||
|
||||
.. note::
|
||||
|
||||
There will be a short service interruption during failback/takeover, but
|
||||
once the process is complete, the driver should be able to access the
|
||||
ZFSSA for data as well as for management.
|
||||
|
||||
Cinder service configuration
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
#. Define the following required properties in the ``cinder.conf``
|
||||
configuration file:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
volume_driver = cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver
|
||||
san_ip = myhost
|
||||
san_login = username
|
||||
san_password = password
|
||||
zfssa_data_ip = mydata
|
||||
zfssa_nfs_pool = mypool
|
||||
|
||||
.. note::
|
||||
|
||||
Management interface ``san_ip`` can be used instead of ``zfssa_data_ip``,
|
||||
but it is not recommended.
|
||||
|
||||
#. You can also define the following additional properties in the
|
||||
``cinder.conf`` configuration file:
|
||||
|
||||
.. code:: ini
|
||||
|
||||
zfssa_nfs_project = NFSProject
|
||||
zfssa_nfs_share = nfs_share
|
||||
zfssa_nfs_mount_options =
|
||||
zfssa_nfs_share_compression = off
|
||||
zfssa_nfs_share_logbias = latency
|
||||
zfssa_https_port = 443
|
||||
|
||||
.. note::
|
||||
|
||||
The driver does not use the file specified in the ``nfs_shares_config``
|
||||
option.
|
||||
|
||||
ZFSSA local cache
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The local cache feature enables ZFSSA drivers to serve the usage of
|
||||
bootable volumes significantly better. With the feature, the first
|
||||
bootable volume created from an image is cached, so that subsequent
|
||||
volumes can be created directly from the cache, instead of having image
|
||||
data transferred over the network multiple times.
|
||||
|
||||
The following conditions must be met in order to use ZFSSA local cache
|
||||
feature:
|
||||
|
||||
- A storage pool needs to be configured.
|
||||
|
||||
- REST and NFS services need to be turned on.
|
||||
|
||||
- On an OpenStack controller, ``cinder.conf`` needs to contain
|
||||
necessary properties used to configure and set up the ZFSSA NFS
|
||||
driver, including the following new properties:
|
||||
|
||||
zfssa_enable_local_cache
|
||||
(True/False) To enable/disable the feature.
|
||||
|
||||
zfssa_cache_directory
|
||||
The directory name inside zfssa_nfs_share where cache volumes
|
||||
are stored.
|
||||
|
||||
Every cache volume has two additional properties stored as WebDAV
|
||||
properties. It is important that they are not altered outside of Block
|
||||
Storage when the driver is in use:
|
||||
|
||||
image_id
|
||||
stores the image id as in Image service.
|
||||
|
||||
updated_at
|
||||
stores the most current timestamp when the image is
|
||||
updated in Image service.
|
||||
|
||||
Driver options
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The Oracle ZFS Storage Appliance NFS driver supports these options:
|
||||
|
||||
.. config-table::
|
||||
:config-target: ZFS Storage Appliance NFS
|
||||
|
||||
cinder.volume.drivers.zfssa.zfssanfs
|
||||
|
||||
This driver shares additional NFS configuration options with the generic
|
||||
NFS driver. For a description of these, see :ref:`cinder-storage_nfs`.
|
@ -147,9 +147,6 @@ title=Nimble Storage Driver (iSCSI)
|
||||
[driver.prophetstor]
|
||||
title=ProphetStor Flexvisor Driver (iSCSI, NFS)
|
||||
|
||||
[driver.oracle_zfssa]
|
||||
title=Oracle ZFSSA Driver (iSCSI, NFS)
|
||||
|
||||
[driver.pure]
|
||||
title=Pure Storage Driver (iSCSI, FC)
|
||||
|
||||
@ -248,7 +245,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=complete
|
||||
driver.nfs=complete
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=complete
|
||||
driver.qnap=complete
|
||||
@ -314,7 +310,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=complete
|
||||
driver.nfs=complete
|
||||
driver.nimble=complete
|
||||
driver.oracle_zfssa=complete
|
||||
driver.prophetstor=complete
|
||||
driver.pure=complete
|
||||
driver.qnap=complete
|
||||
@ -380,7 +375,6 @@ driver.netapp_solidfire=missing
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=missing
|
||||
driver.qnap=missing
|
||||
@ -449,7 +443,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=missing
|
||||
driver.qnap=missing
|
||||
@ -517,7 +510,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=complete
|
||||
driver.qnap=missing
|
||||
@ -586,7 +578,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=complete
|
||||
driver.pure=complete
|
||||
driver.qnap=missing
|
||||
@ -654,7 +645,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=complete
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=complete
|
||||
driver.prophetstor=missing
|
||||
driver.pure=complete
|
||||
driver.qnap=missing
|
||||
@ -723,7 +713,6 @@ driver.netapp_solidfire=missing
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=complete
|
||||
driver.prophetstor=missing
|
||||
driver.pure=missing
|
||||
driver.qnap=missing
|
||||
@ -792,7 +781,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=complete
|
||||
driver.prophetstor=missing
|
||||
driver.pure=complete
|
||||
driver.qnap=missing
|
||||
@ -858,7 +846,6 @@ driver.netapp_solidfire=complete
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=missing
|
||||
driver.qnap=missing
|
||||
@ -928,7 +915,6 @@ driver.netapp_solidfire=missing
|
||||
driver.nexenta=missing
|
||||
driver.nfs=missing
|
||||
driver.nimble=missing
|
||||
driver.oracle_zfssa=missing
|
||||
driver.prophetstor=missing
|
||||
driver.pure=missing
|
||||
driver.qnap=missing
|
||||
|
Loading…
Reference in New Issue
Block a user