Add ZFSonLinux share driver

Add support for ZFS (on Linux) filesystem.

Following features are supported:
- Create/delete share
- Create/delete snapshot
- Create share from snapshot
- Extend/shrink share
- Update NFS IP-based access rules using new interface.
(SMB support planned for future).
- create/delete/update/promote share replica

ZFS-related notes:
- Any amount of ZFS zpools can be used by share driver.
- Allowed to configure default options for ZFS datasets that are used
for share creation.
- Any amount of nested datasets is allowed to be used.
- All share replicas are read-only.
- All share replicas are synced periodically, not continuously.
So, status 'in_sync' means latest sync was successful.
Time range between syncs equals to value of
config global opt 'replica_state_update_interval'.

Driver-related notes:
- Able to use remote ZFSonLinux storage as well as local.

Other made changes:
- updated driver private data DB methods removing filtering by host
  as redundant operation. Replication requires some common metadata
  storage and filtering by host breaks it. It is safe to do so, because
  if driver gets some ID of entity then it is allowed to read its info
  too.

Implements bp zfsonlinux-driver
DocImpact
Change-Id: I3ddd3767184e4843037de0ac75ff18dce709b6dc
This commit is contained in:
Valeriy Ponomaryov 2016-02-03 18:24:41 +02:00 committed by vponomaryov
parent 83d7bc60f4
commit 312cd3321a
21 changed files with 3511 additions and 5 deletions

@ -141,7 +141,7 @@ elif [[ "$DRIVER" == "generic" ]]; then
fi
fi
if [[ "$DRIVER" == "lvm" ]]; then
if [[ "$DRIVER" == "lvm" ]]; then
MANILA_TEMPEST_CONCURRENCY=8
RUN_MANILA_CG_TESTS=False
RUN_MANILA_MANAGE_TESTS=False
@ -159,6 +159,26 @@ if [[ "$DRIVER" == "lvm" ]]; then
samba_daemon_name=smb
fi
sudo service $samba_daemon_name restart
elif [[ "$DRIVER" == "zfsonlinux" ]]; then
MANILA_TEMPEST_CONCURRENCY=8
RUN_MANILA_CG_TESTS=False
RUN_MANILA_MANAGE_TESTS=False
iniset $TEMPEST_CONFIG share run_migration_tests False
iniset $TEMPEST_CONFIG share run_quota_tests True
iniset $TEMPEST_CONFIG share run_replication_tests True
iniset $TEMPEST_CONFIG share run_shrink_tests True
iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs'
iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols ''
iniset $TEMPEST_CONFIG share enable_cert_rules_for_protocols ''
iniset $TEMPEST_CONFIG share enable_ro_access_level_for_protocols 'nfs'
iniset $TEMPEST_CONFIG share build_timeout 180
iniset $TEMPEST_CONFIG share share_creation_retry_number 0
iniset $TEMPEST_CONFIG share capability_storage_protocol 'NFS'
iniset $TEMPEST_CONFIG share enable_protocols 'nfs'
iniset $TEMPEST_CONFIG share suppress_errors_in_cleanup False
iniset $TEMPEST_CONFIG share multitenancy_enabled False
iniset $TEMPEST_CONFIG share multi_backend True
iniset $TEMPEST_CONFIG share backend_replication_type 'readable'
fi
# Enable consistency group tests

@ -64,6 +64,9 @@ fi
if [[ "$DRIVER" == "lvm" ]]; then
echo "SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver" >> $localrc_path
echo "SHARE_BACKING_FILE_SIZE=32000M" >> $localrc_path
elif [[ "$DRIVER" == "zfsonlinux" ]]; then
echo "SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" >> $localrc_path
echo "RUN_MANILA_REPLICATION_TESTS=True" >> $localrc_path
fi
# Enabling isolated metadata in Neutron is required because

@ -38,12 +38,25 @@ function _clean_manila_lvm_backing_file {
fi
}
# Destroy leftover ZFS zpools and their backing files from previous runs.
# Reads: MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR (dir with backing files).
function _clean_zfsonlinux_data {
    local file pool_name
    for file in "$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/*; do
        # The glob expands to absolute paths, while zpool names are the
        # basenames of those files — compare/destroy by basename.
        pool_name=$(basename "$file")
        if sudo zpool list | grep -q "$pool_name"; then
            echo "Destroying zpool named $pool_name"
            sudo zpool destroy -f "$pool_name"
            echo "Destroying file named $file"
            rm -f -- "$file"
        fi
    done
}
# cleanup_manila - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
# cleanup_manila - Remove residual data files left over from previous runs.
# Quoted expansions protect against values containing spaces or globs.
function cleanup_manila {
    # All stuff, that are created by share drivers will be cleaned up by other services.
    _clean_share_group "$SHARE_GROUP" "$SHARE_NAME_PREFIX"
    _clean_manila_lvm_backing_file "$SHARE_GROUP"
    _clean_zfsonlinux_data
}
# configure_default_backends - configures default Manila backends with generic driver.
@ -426,6 +439,45 @@ function init_manila {
mkdir -p $MANILA_STATE_PATH/shares
fi
elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
if is_service_enabled m-shr; then
mkdir -p $MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR
file_counter=0
for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
if [[ $file_counter == 0 ]]; then
# NOTE(vponomaryov): create two pools for first ZFS backend
# to cover different use cases that are supported by driver:
# - Support of more than one zpool for share backend.
# - Support of nested datasets.
local first_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/alpha
local second_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/betta
truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $first_file
truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $second_file
sudo zpool create alpha $first_file
sudo zpool create betta $second_file
# Create subdir (nested dataset) for second pool
sudo zfs create betta/subdir
iniset $MANILA_CONF $BE zfs_zpool_list alpha,betta/subdir
elif [[ $file_counter == 1 ]]; then
local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/gamma
truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
sudo zpool create gamma $file
iniset $MANILA_CONF $BE zfs_zpool_list gamma
else
local filename=file"$file_counter"
local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/"$filename"
truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
sudo zpool create $filename $file
iniset $MANILA_CONF $BE zfs_zpool_list $filename
fi
iniset $MANILA_CONF $BE zfs_share_export_ip $MANILA_ZFSONLINUX_SHARE_EXPORT_IP
iniset $MANILA_CONF $BE zfs_service_ip $MANILA_ZFSONLINUX_SERVICE_IP
iniset $MANILA_CONF $BE zfs_dataset_creation_options $MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS
iniset $MANILA_CONF $BE zfs_ssh_username $MANILA_ZFSONLINUX_SSH_USERNAME
iniset $MANILA_CONF $BE replication_domain $MANILA_ZFSONLINUX_REPLICATION_DOMAIN
let "file_counter=file_counter+1"
done
fi
fi
# Create cache dir
@ -446,6 +498,32 @@ function install_manila {
sudo yum install -y nfs-utils nfs-utils-lib samba
fi
fi
elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
if is_service_enabled m-shr; then
if is_ubuntu; then
sudo apt-get install -y nfs-kernel-server nfs-common samba
# NOTE(vponomaryov): following installation is valid for Ubuntu 'trusty'.
sudo apt-get install -y software-properties-common
sudo apt-add-repository --yes ppa:zfs-native/stable
sudo apt-get -y -q update && sudo apt-get -y -q upgrade
sudo apt-get install -y linux-headers-generic
sudo apt-get install -y build-essential
sudo apt-get install -y ubuntu-zfs
sudo modprobe zfs
# TODO(vponomaryov): remove following line when we have this
# in 'requirements.txt' file.
# Package 'nsenter' is expected to be installed on host with
# ZFS, if it is remote for manila-share service host.
sudo pip install nsenter
else
echo "Manila Devstack plugin does not support installation "\
"of ZFS packages for non-'Ubuntu-trusty' distros. "\
"Please, install it first by other means or add its support "\
"for your distro."
exit 1
fi
fi
fi
# install manila-ui if horizon is enabled
@ -457,6 +535,8 @@ function install_manila {
#configure_samba - Configure node as Samba server
function configure_samba {
if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then
# TODO(vponomaryov): add here condition for ZFSonLinux driver too
# when it starts to support SAMBA
samba_daemon_name=smbd
if is_service_enabled m-shr; then
if is_fedora; then

@ -141,6 +141,19 @@ SMB_CONF=${SMB_CONF:-/etc/samba/smb.conf}
SMB_PRIVATE_DIR=${SMB_PRIVATE_DIR:-/var/lib/samba/private}
CONFIGURE_BACKING_FILE=${CONFIGURE_BACKING_FILE:-"True"}
# Options for configuration of ZFSonLinux driver
# 'MANILA_ZFSONLINUX_ZPOOL_SIZE' defines size of each zpool. That value
# will be used for creation of sparse files.
MANILA_ZFSONLINUX_ZPOOL_SIZE=${MANILA_ZFSONLINUX_ZPOOL_SIZE:-"30G"}
MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR=${MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR:-"/opt/stack/data/manila/zfsonlinux"}
MANILA_ZFSONLINUX_SHARE_EXPORT_IP=${MANILA_ZFSONLINUX_SHARE_EXPORT_IP:-"127.0.0.1"}
MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-"127.0.0.1"}
MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS=${MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS:-"compression=gzip"}
MANILA_ZFSONLINUX_SSH_USERNAME=${MANILA_ZFSONLINUX_SSH_USERNAME:-"stack"}
# If MANILA_ZFSONLINUX_REPLICATION_DOMAIN is set to empty value then
# Manila will consider replication feature as disabled for ZFSonLinux share driver.
MANILA_ZFSONLINUX_REPLICATION_DOMAIN=${MANILA_ZFSONLINUX_REPLICATION_DOMAIN:-"ZFSonLinux"}
# Enable manila services
# ----------------------
# We have to add Manila to enabled services for screen_it to work

@ -98,6 +98,7 @@ Share backends
.. toctree::
:maxdepth: 3
zfs_on_linux_driver
netapp_cluster_mode_driver
emc_isilon_driver
emc_vnx_driver

@ -33,6 +33,8 @@ Mapping of share drivers and share features support
+----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
| Driver name | create/delete share | manage/unmanage share | extend share | shrink share | create/delete snapshot | create share from snapshot | manage/unmanage snapshot |
+========================================+=============================+=======================+==============+==============+========================+============================+==========================+
| ZFSonLinux | DHSS = False (M) | \- | M | M | M | M | \- |
+----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
| Generic (Cinder as back-end) | DHSS = True (J) & False (K) | K | L | L | J | J | DHSS = False (M) |
+----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
| NetApp Clustered Data ONTAP | DHSS = True (J) & False (K) | L | L | L | J | J | \- |
@ -76,6 +78,8 @@ Mapping of share drivers and share access rules support
+ Driver name +--------------+----------------+------------+--------------+----------------+------------+
| | IP | USER | Cert | IP | USER | Cert |
+========================================+==============+================+============+==============+================+============+
| ZFSonLinux | NFS (M) | \- | \- | NFS (M) | \- | \- |
+----------------------------------------+--------------+----------------+------------+--------------+----------------+------------+
| Generic (Cinder as back-end) | NFS,CIFS (J) | \- | \- | NFS (K) | \- | \- |
+----------------------------------------+--------------+----------------+------------+--------------+----------------+------------+
| NetApp Clustered Data ONTAP | NFS (J) | CIFS (J) | \- | NFS (K) | CIFS (M) | \- |
@ -113,6 +117,8 @@ Mapping of share drivers and security services support
+----------------------------------------+------------------+-----------------+------------------+
| Driver name | Active Directory | LDAP | Kerberos |
+========================================+==================+=================+==================+
| ZFSonLinux | \- | \- | \- |
+----------------------------------------+------------------+-----------------+------------------+
| Generic (Cinder as back-end) | \- | \- | \- |
+----------------------------------------+------------------+-----------------+------------------+
| NetApp Clustered Data ONTAP | J | J | J |

@ -0,0 +1,153 @@
..
Copyright (c) 2016 Mirantis Inc.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
ZFS (on Linux) Driver
=====================
Manila ZFSonLinux share driver uses ZFS filesystem for exporting NFS shares.
Written and tested using Linux version of ZFS.
Requirements
------------
* 'NFS' daemon that can be handled via "exportfs" app.
* 'ZFS' filesystem packages, either Kernel or FUSE versions.
* ZFS zpools that are going to be used by Manila should exist and be
configured as desired. Manila will not change zpool configuration.
* SSH should be installed on ZFS hosts that are remote to the
  manila-share service host.
* For ZFS hosts that support replication:
* SSH access for each other should be passwordless.
* IP used for share exports should be available by ZFS hosts for each other.
* Username should be the same for accessing each of ZFS hosts.
Supported Operations
--------------------
The following operations are supported:
* Create NFS Share
* Delete NFS Share
* Allow NFS Share access
* Only IP access type is supported for NFS
* Both access levels are supported - 'RW' and 'RO'
* Deny NFS Share access
* Create snapshot
* Delete snapshot
* Create share from snapshot
* Extend share
* Shrink share
* Replication (experimental):
* Create/update/delete/promote replica operations are supported
Possibilities
-------------
* Any amount of ZFS zpools can be used by share driver.
* Allowed to configure default options for ZFS datasets that are used
for share creation.
* Any amount of nested datasets is allowed to be used.
* All share replicas are read-only, only active one is RW.
* All share replicas are synchronized periodically, not continuously.
So, status 'in_sync' means latest sync was successful.
Time range between syncs equals to value of
config global opt 'replica_state_update_interval'.
Restrictions
------------
The ZFSonLinux share driver has the following restrictions:
* Only IP access type is supported for NFS.
* Only FLAT network is supported.
* 'Promote share replica' operation will switch roles of
current 'secondary' replica and 'active'. It does not make more than
one active replica available.
* 'Manage share' operation is not yet implemented.
* 'SaMBa' based sharing is not yet implemented.
Known problems
--------------
* Better to avoid usage of Neutron on the same node where ZFS is installed.
It leads to bug - https://bugs.launchpad.net/neutron/+bug/1546723
The ZFSonLinux share driver has workaround for it and requires 'nsenter' be
installed on the system where ZFS is installed.
* 'Promote share replica' operation will make ZFS filesystem that became
secondary as RO only on NFS level. On ZFS level system will
stay mounted as was - RW.
Backend Configuration
---------------------
The following parameters need to be configured in the manila configuration file
for the ZFSonLinux driver:
* share_driver = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver
* driver_handles_share_servers = False
* replication_domain = custom_str_value_as_domain_name
* if empty, then replication will be disabled
* if set then will be able to be used as replication peer for other
backend with same value.
* zfs_share_export_ip = <user_facing IP address of ZFS host>
* zfs_service_ip = <IP address of service network interface of ZFS host>
* zfs_zpool_list = zpoolname1,zpoolname2/nested_dataset_for_zpool2
* can be one or more zpools
* can contain nested datasets
* zfs_dataset_creation_options = <list of ZFS dataset options>
* readonly,quota,sharenfs and sharesmb options will be ignored
* zfs_dataset_name_prefix = <prefix>
* Prefix to be used in each dataset name.
* zfs_dataset_snapshot_name_prefix = <prefix>
* Prefix to be used in each dataset snapshot name.
* zfs_use_ssh = <boolean_value>
* set 'False' if ZFS located on the same host as 'manila-share' service
* set 'True' if 'manila-share' service should use SSH for ZFS configuration
* zfs_ssh_username = <ssh_username>
* required for replication operations
* required for SSH'ing to ZFS host if 'zfs_use_ssh' is set to 'True'
* zfs_ssh_user_password = <ssh_user_password>
* password for 'zfs_ssh_username' of ZFS host.
* used only if 'zfs_use_ssh' is set to 'True'
* zfs_ssh_private_key_path = <path_to_private_ssh_key>
* used only if 'zfs_use_ssh' is set to 'True'
* zfs_share_helpers = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper
* Approach for setting up helpers is similar to various other share drivers
* At least one helper should be used.
* zfs_replica_snapshot_prefix = <prefix>
* Prefix to be used in dataset snapshot names that are created
by 'update replica' operation.
Restart of :term:`manila-share` service is needed for the configuration
changes to take effect.
The :mod:`manila.share.drivers.zfsonlinux.driver` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: manila.share.drivers.zfsonlinux.driver
:noindex:
:members:
:undoc-members:
:show-inheritance:
The :mod:`manila.share.drivers.zfsonlinux.utils` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: manila.share.drivers.zfsonlinux.utils
:noindex:
:members:
:undoc-members:
:show-inheritance:

@ -136,3 +136,15 @@ dbus-removeexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --sy
# manila/share/drivers/ganesha/manager.py:
rmconf: RegExpFilter, sh, root, sh, -c, rm -f /.*/\*\.conf$
# ZFS commands
# manila/share/drivers/zfsonlinux/driver.py
# manila/share/drivers/zfsonlinux/utils.py
zpool: CommandFilter, zpool, root
# manila/share/drivers/zfsonlinux/driver.py
# manila/share/drivers/zfsonlinux/utils.py
zfs: CommandFilter, zfs, root
# manila/share/drivers/zfsonlinux/driver.py
nsenter: CommandFilter, /usr/local/bin/nsenter, root

@ -2812,10 +2812,12 @@ def share_server_backend_details_delete(context, share_server_id,
def _driver_private_data_query(session, context, host, entity_id, key=None,
read_deleted=False):
query = model_query(context, models.DriverPrivateData,
session=session, read_deleted=read_deleted)\
.filter_by(host=host)\
.filter_by(entity_uuid=entity_id)
query = model_query(
context, models.DriverPrivateData, session=session,
read_deleted=read_deleted,
).filter_by(
entity_uuid=entity_id,
)
if isinstance(key, list):
return query.filter(models.DriverPrivateData.key.in_(key))

@ -118,6 +118,10 @@ class NetworkBadConfigurationException(NetworkException):
message = _("Bad network configuration: %(reason)s.")
class BadConfigurationException(ManilaException):
message = _("Bad configuration: %(reason)s.")
class NotAuthorized(ManilaException):
message = _("Not authorized.")
code = 403
@ -659,6 +663,10 @@ class HDFSException(ManilaException):
message = _("HDFS exception occurred!")
class ZFSonLinuxException(ManilaException):
message = _("ZFSonLinux exception occurred: %(msg)s")
class QBException(ManilaException):
message = _("Quobyte exception occurred: %(msg)s")

@ -68,6 +68,7 @@ import manila.share.drivers.quobyte.quobyte
import manila.share.drivers.service_instance
import manila.share.drivers.windows.service_instance
import manila.share.drivers.windows.winrm_helper
import manila.share.drivers.zfsonlinux.driver
import manila.share.drivers.zfssa.zfssashare
import manila.share.drivers_private_data
import manila.share.hook
@ -136,6 +137,7 @@ _global_opt_lists = [
manila.share.drivers.service_instance.share_servers_handling_mode_opts,
manila.share.drivers.windows.service_instance.windows_share_server_opts,
manila.share.drivers.windows.winrm_helper.winrm_opts,
manila.share.drivers.zfsonlinux.driver.zfsonlinux_opts,
manila.share.drivers.zfssa.zfssashare.ZFSSA_OPTS,
manila.share.hook.hook_options,
manila.share.manager.share_manager_opts,

@ -237,6 +237,9 @@ class ShareDriver(object):
unhandled share-servers that are not tracked by Manila.
Share drivers are allowed to work only in one of two possible
driver modes, that is why only one should be chosen.
:param config_opts: tuple, list or set of config option lists
that should be registered in driver's configuration right after
this attribute is created. Useful for usage with mixin classes.
"""
super(ShareDriver, self).__init__()
self.configuration = kwargs.get('configuration', None)
@ -267,6 +270,9 @@ class ShareDriver(object):
config_group_name=admin_network_config_group,
label='admin')
for config_opt_set in kwargs.get('config_opts', []):
self.configuration.append_config_values(config_opt_set)
if hasattr(self, 'init_execute_mixin'):
# Instance with 'ExecuteMixin'
self.init_execute_mixin(*args, **kwargs) # pylint: disable=E1101

@ -0,0 +1,901 @@
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module with ZFSonLinux share driver that utilizes ZFS filesystem resources
and exports them as shares.
"""
import time
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
from manila.common import constants
from manila import exception
from manila.i18n import _, _LW
from manila.share import driver
from manila.share.drivers.zfsonlinux import utils as zfs_utils
from manila.share import utils as share_utils
from manila import utils
# Configuration options for the ZFSonLinux share driver.
zfsonlinux_opts = [
    cfg.StrOpt(
        "zfs_share_export_ip",
        required=True,
        help="IP to be added to user-facing export location. Required."),
    cfg.StrOpt(
        "zfs_service_ip",
        required=True,
        help="IP to be added to admin-facing export location. Required."),
    cfg.ListOpt(
        "zfs_zpool_list",
        required=True,
        help="Specify list of zpools that are allowed to be used by backend. "
             "Can contain nested datasets. Examples: "
             "Without nested dataset: 'zpool_name'. "
             "With nested dataset: 'zpool_name/nested_dataset_name'. "
             "Required."),
    cfg.ListOpt(
        "zfs_dataset_creation_options",
        help="Define here list of options that should be applied "
             "for each dataset creation if needed. Example: "
             "compression=gzip,dedup=off. "
             "Note that, for secondary replicas option 'readonly' will be set "
             "to 'on' and for active replicas to 'off' in any way. "
             "Also, 'quota' will be equal to share size. Optional."),
    cfg.StrOpt(
        "zfs_dataset_name_prefix",
        default='manila_share_',
        help="Prefix to be used in each dataset name. Optional."),
    cfg.StrOpt(
        "zfs_dataset_snapshot_name_prefix",
        default='manila_share_snapshot_',
        help="Prefix to be used in each dataset snapshot name. Optional."),
    cfg.BoolOpt(
        "zfs_use_ssh",
        default=False,
        # NOTE(review): previous help text described a hostname, which does
        # not match this boolean option.
        help="Set to True if 'manila-share' service should use SSH for "
             "running ZFS commands on a remote ZFS storage host. "
             "Optional."),
    cfg.StrOpt(
        "zfs_ssh_username",
        help="SSH user that will be used in 2 cases: "
             "1) By manila-share service in case it is located on different "
             "host than its ZFS storage. "
             "2) By manila-share services with other ZFS backends that "
             "perform replication. "
             "It is expected that SSH'ing will be key-based, passwordless. "
             "This user should be passwordless sudoer. Optional."),
    cfg.StrOpt(
        "zfs_ssh_user_password",
        secret=True,
        help="Password for user that is used for SSH'ing ZFS storage host. "
             "Not used for replication operations. They require "
             "passwordless SSH access. Optional."),
    cfg.StrOpt(
        "zfs_ssh_private_key_path",
        help="Path to SSH private key that should be used for SSH'ing ZFS "
             "storage host. Not used for replication operations. Optional."),
    cfg.ListOpt(
        "zfs_share_helpers",
        required=True,
        default=[
            "NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper",
        ],
        help="Specify list of share export helpers for ZFS storage. "
             "It should look like following: "
             "'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. "
             "Required."),
    cfg.StrOpt(
        "zfs_replica_snapshot_prefix",
        required=True,
        default="tmp_snapshot_for_replication_",
        help="Set snapshot prefix for usage in ZFS replication. Required."),
]
CONF = cfg.CONF
CONF.register_opts(zfsonlinux_opts)
LOG = log.getLogger(__name__)
def ensure_share_server_not_provided(f):
    """Decorator rejecting driver calls that carry a share server.

    This driver works with driver_handles_share_servers=False, so any
    truthy 'share_server' kwarg indicates misconfiguration and results
    in InvalidInput being raised instead of calling the wrapped method.
    """
    def wrap(self, context, *args, **kwargs):
        server = kwargs.get('share_server')
        if not server:
            return f(self, context, *args, **kwargs)
        raise exception.InvalidInput(
            reason=_("Share server handling is not available. "
                     "But 'share_server' was provided. '%s'. "
                     "Share network should not be used.") % server.get(
                         "id", server))
    return wrap
class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
def __init__(self, *args, **kwargs):
    """Initialize ZFSonLinux share driver.

    Registers driver opts via 'config_opts', caches frequently used
    config values and the private storage handle.
    """
    # NOTE: use the explicit class name in super() — the original
    # super(self.__class__, self) form recurses infinitely if this
    # driver is ever subclassed.
    super(ZFSonLinuxShareDriver, self).__init__(
        [False], *args, config_opts=[zfsonlinux_opts], **kwargs)
    self.replica_snapshot_prefix = (
        self.configuration.zfs_replica_snapshot_prefix)
    self.backend_name = self.configuration.safe_get(
        'share_backend_name') or 'ZFSonLinux'
    self.zpool_list = self._get_zpool_list()
    self.dataset_creation_options = (
        self.configuration.zfs_dataset_creation_options)
    self.share_export_ip = self.configuration.zfs_share_export_ip
    self.service_ip = self.configuration.zfs_service_ip
    # Common metadata storage shared between backends (used by replication).
    self.private_storage = kwargs.get('private_storage')
    # Protocol name -> helper instance; filled by _setup_helpers().
    self._helpers = {}
def _get_zpool_list(self):
    """Return unique zpool names parsed from 'zfs_zpool_list' option.

    Each configured entry may contain a nested dataset
    ('zpool/nested'); only the zpool part is collected. Raises
    BadConfigurationException when the same zpool appears twice.
    """
    seen = []
    for configured_zpool in self.configuration.zfs_zpool_list:
        name = configured_zpool.split('/')[0]
        if name in seen:
            raise exception.BadConfigurationException(
                reason=_("Using the same zpool twice is prohibited. "
                         "Duplicate is '%(zpool)s'. List of zpools: "
                         "%(zpool_list)s.") % {
                    'zpool': configured_zpool,
                    'zpool_list': ', '.join(
                        self.configuration.zfs_zpool_list)})
        seen.append(name)
    return seen
@zfs_utils.zfs_dataset_synchronized
def _delete_dataset_or_snapshot_with_retry(self, name):
    """Attempts to destroy some dataset or snapshot with retries.

    :param name: ZFS dataset name or snapshot name ('pool/ds@snap');
        names containing '@' are treated as snapshots and skip the
        open-file check.
    :raises ZFSonLinuxException: when the dataset still has open files
        after ~60 seconds of waiting.
    """
    # NOTE(vponomaryov): it is possible to see 'dataset is busy' error
    # under the load. So, we are ok to perform retry in this case.
    mountpoint = self.get_zfs_option(name, 'mountpoint')
    if '@' not in name:
        # NOTE(vponomaryov): check that dataset has no open files.
        start_point = time.time()
        while time.time() - start_point < 60:
            try:
                out, err = self.execute('lsof', '-w', mountpoint)
            except exception.ProcessExecutionError:
                # NOTE(vponomaryov): lsof returns code 1 if search
                # didn't give results.
                break
            LOG.debug("Cannot destroy dataset '%(name)s', it has "
                      "opened files. Will wait 2 more seconds. "
                      "Out: \n%(out)s", {
                          'name': name, 'out': out})
            time.sleep(2)
        else:
            # while/else: reached only when the 60s window expired
            # without the 'break' above, i.e. files stayed open.
            raise exception.ZFSonLinuxException(
                msg=_("Could not destroy '%s' dataset, "
                      "because it had opened files.") % name)
    try:
        # First attempt: plain forced destroy.
        self.zfs('destroy', '-f', name)
        return
    except exception.ProcessExecutionError as e:
        LOG.debug("Failed to run command, got error: %s\n"
                  "Assuming other namespace-based services hold "
                  "ZFS mounts.", e)
    # NOTE(vponomaryov): perform workaround for Neutron bug #1546723
    # We should release ZFS mount from all namespaces. It should not be
    # there at all.
    # Collect PIDs of processes whose mount namespace still references
    # the dataset's mountpoint (scans /proc/*/mounts).
    get_pids_cmd = (
        "(echo $(grep -s %s /proc/*/mounts) ) 2>&1 " % mountpoint)
    try:
        raw_pids, err = self.execute('bash', '-c', get_pids_cmd)
    except exception.ProcessExecutionError as e:
        LOG.warning(
            _LW("Failed to get list of PIDs that hold ZFS dataset "
                "mountpoint. Got following error: %s"), e)
    else:
        # Output looks like '/proc/<pid>/mounts:...' chunks; extract pids.
        pids = [s.split('/')[0] for s in raw_pids.split('/proc/') if s]
        LOG.debug(
            "List of pids that hold ZFS mount '%(mnt)s': %(list)s", {
                'mnt': mountpoint, 'list': ' '.join(pids)})
        for pid in pids:
            try:
                # Unmount inside each offending process's mount namespace.
                self.execute(
                    'sudo', 'nsenter', '--mnt', '--target=%s' % pid,
                    '/bin/umount', mountpoint)
            except exception.ProcessExecutionError as e:
                # Best-effort: log and continue with remaining pids.
                LOG.warning(
                    _LW("Failed to run command with release of "
                        "ZFS dataset mount, got error: %s"), e)
    # NOTE(vponomaryov): sleep some time after unmount operations.
    time.sleep(1)
    # NOTE(vponomaryov): Now, when no file usages and mounts of dataset
    # exist, destroy dataset.
    self.zfs_with_retry('destroy', '-f', name)
def _setup_helpers(self):
    """Instantiate protocol helpers listed in 'zfs_share_helpers'.

    Populates self._helpers mapping upper-cased protocol names to
    helper instances; raises BadConfigurationException if the option
    is empty.
    """
    self._helpers = {}
    helpers = self.configuration.zfs_share_helpers
    if not helpers:
        raise exception.BadConfigurationException(
            reason=_(
                "No share helpers selected for ZFSonLinux Driver. "
                "Please specify using config option 'zfs_share_helpers'."))
    for helper_str in helpers:
        # Entries look like 'NFS=path.to.HelperClass'.
        share_proto, __, import_str = helper_str.partition('=')
        helper_class = importutils.import_class(import_str)
        self._helpers[share_proto.upper()] = helper_class(
            self.configuration)
def _get_share_helper(self, share_proto):
    """Return the helper registered for the given share protocol.

    :raises InvalidShare: when no helper is configured for the protocol.
    """
    helper = self._helpers.get(share_proto)
    if not helper:
        raise exception.InvalidShare(
            reason=_("Wrong, unsupported or disabled protocol - "
                     "'%s'.") % share_proto)
    return helper
def do_setup(self, context):
    """Perform basic setup and checks.

    Validates configured IPs and zpool list, sets up protocol helpers
    and verifies SSH connectivity when 'zfs_use_ssh' is enabled.
    """
    # Explicit class in super() avoids recursion under subclassing.
    super(ZFSonLinuxShareDriver, self).do_setup(context)
    self._setup_helpers()
    for ip in (self.share_export_ip, self.service_ip):
        if not utils.is_valid_ip_address(ip, 4):
            # BUGFIX: report the IP that actually failed validation,
            # not unconditionally self.share_export_ip.
            raise exception.BadConfigurationException(
                reason=_("Wrong IP address provided: "
                         "%s") % ip)
    if not self.zpool_list:
        raise exception.BadConfigurationException(
            reason=_("No zpools specified for usage: "
                     "%s") % self.zpool_list)
    if self.configuration.zfs_use_ssh:
        # Check workability of SSH executor
        self.ssh_executor('whoami')
def _get_pools_info(self):
    """Build per-zpool capacity info used for share stats reporting."""
    pools_info = []
    for zpool in self.zpool_list:
        free_gb = utils.translate_string_size_to_float(
            self.get_zpool_option(zpool, 'free'))
        total_gb = utils.translate_string_size_to_float(
            self.get_zpool_option(zpool, 'size'))
        pool_info = {
            'pool_name': zpool,
            'total_capacity_gb': float(total_gb),
            'free_capacity_gb': float(free_gb),
            'reserved_percentage':
                self.configuration.reserved_share_percentage,
        }
        # Advertise replication only when a replication domain is set.
        if self.configuration.replication_domain:
            pool_info['replication_type'] = 'readable'
        pools_info.append(pool_info)
    return pools_info
def _update_share_stats(self):
    """Retrieves share stats info and reports it to the base driver."""
    data = {
        'share_backend_name': self.backend_name,
        'storage_protocol': 'NFS',
        'reserved_percentage':
            self.configuration.reserved_share_percentage,
        'consistency_group_support': None,
        'snapshot_support': True,
        'driver_name': 'ZFS',
        'pools': self._get_pools_info(),
    }
    if self.configuration.replication_domain:
        data['replication_type'] = 'readable'
    # NOTE: explicit class in super() — super(self.__class__, self)
    # would recurse infinitely if this driver is subclassed.
    super(ZFSonLinuxShareDriver, self)._update_share_stats(data)
def _get_share_name(self, share_id):
    """Returns name of dataset used for given share."""
    return '%s%s' % (
        self.configuration.zfs_dataset_name_prefix or '',
        share_id.replace('-', '_'))
def _get_snapshot_name(self, snapshot_id):
    """Returns name of dataset snapshot used for given share snapshot."""
    return '%s%s' % (
        self.configuration.zfs_dataset_snapshot_name_prefix or '',
        snapshot_id.replace('-', '_'))
def _get_dataset_creation_options(self, share, is_readonly=False):
    """Returns list of options to be used for dataset creation.

    User-provided 'readonly', 'sharenfs', 'sharesmb' and 'quota'
    options are dropped because the driver manages them itself
    (matching the documented behavior of
    'zfs_dataset_creation_options'). 'readonly' is derived from the
    replica role and 'quota' from the share size.

    :param share: share model; only 'size' is read here.
    :param is_readonly: True for secondary replicas.
    """
    options = []
    # BUGFIX: the previous early 'return []' for an unset option list
    # skipped the readonly/quota options entirely, so share size was
    # never enforced in that case.
    for option in self.dataset_creation_options or []:
        if any(v in option
               for v in ('readonly', 'sharenfs', 'sharesmb', 'quota')):
            continue
        options.append(option)
    options.append('readonly=on' if is_readonly else 'readonly=off')
    options.append('quota=%sG' % share['size'])
    return options
def _get_dataset_name(self, share):
    """Build the full dataset name ('pool[/nested]/share') for a share."""
    pool_name = share_utils.extract_host(share['host'], level='pool')
    # If a configured zpool entry is a nested dataset path whose root
    # matches this share's pool, use that full nested path instead.
    for configured_pool in self.configuration.zfs_zpool_list:
        parts = configured_pool.split('/')
        if pool_name == parts[0] and len(parts) > 1:
            pool_name = configured_pool
            # Strip a single trailing slash, if present.
            if pool_name[-1] == '/':
                pool_name = pool_name[:-1]
            break
    return '%(pool)s/%(dataset)s' % {
        'pool': pool_name,
        'dataset': self._get_share_name(share['id']),
    }
@ensure_share_server_not_provided
def create_share(self, context, share, share_server=None):
    """Is called to create a share.

    Creates a ZFS dataset for the share, records replication-related
    metadata in the driver's private storage and returns the export
    locations provided by the protocol helper.
    """
    options = self._get_dataset_creation_options(share, is_readonly=False)
    cmd = ['create']
    for option in options:
        cmd.extend(['-o', option])
    dataset_name = self._get_dataset_name(share)
    cmd.append(dataset_name)
    # SSH endpoint of this backend; replicas hosted elsewhere use it to
    # run 'zfs send/receive' against this host.
    ssh_cmd = '%(username)s@%(host)s' % {
        'username': self.configuration.zfs_ssh_username,
        'host': self.service_ip,
    }
    pool_name = share_utils.extract_host(share['host'], level='pool')
    # NOTE(review): metadata is persisted before 'zfs create' runs; if
    # creation fails, the record remains until the share is deleted.
    self.private_storage.update(
        share['id'], {
            'entity_type': 'share',
            'dataset_name': dataset_name,
            'ssh_cmd': ssh_cmd,  # used in replication
            'pool_name': pool_name,  # used in replication
            'provided_options': ' '.join(self.dataset_creation_options),
            'used_options': ' '.join(options),
        }
    )
    self.zfs(*cmd)
    return self._get_share_helper(
        share['share_proto']).create_exports(dataset_name)
@ensure_share_server_not_provided
def delete_share(self, context, share, share_server=None):
    """Is called to remove a share.

    Destroys the share's replication snapshots and then the dataset
    itself; finally drops the driver-private metadata. Logs a warning
    (instead of failing) when the dataset is already absent.
    """
    pool_name = self.private_storage.get(share['id'], 'pool_name')
    dataset_name = self.private_storage.get(share['id'], 'dataset_name')
    if not dataset_name:
        # Private metadata may be missing; derive the name instead.
        dataset_name = self._get_dataset_name(share)
    out, err = self.zfs('list', '-r', pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        if datum['NAME'] != dataset_name:
            continue
        # Delete dataset's snapshots first
        out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
        snapshots = self.parse_zfs_answer(out)
        full_snapshot_prefix = (
            dataset_name + '@' + self.replica_snapshot_prefix)
        for snap in snapshots:
            if full_snapshot_prefix in snap['NAME']:
                self._delete_dataset_or_snapshot_with_retry(snap['NAME'])
        self._get_share_helper(
            share['share_proto']).remove_exports(dataset_name)
        self._delete_dataset_or_snapshot_with_retry(dataset_name)
        break
    else:
        # for/else: the loop found no matching dataset.
        LOG.warning(
            _LW("Share with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted."),
            {'id': share['id'], 'name': dataset_name})
    self.private_storage.delete(share['id'])
@ensure_share_server_not_provided
def create_snapshot(self, context, snapshot, share_server=None):
    """Create a ZFS snapshot for the given share snapshot.

    The resulting snapshot name is stored in private storage so it can
    later be deleted or cloned.
    """
    parent_dataset = self.private_storage.get(
        snapshot['share_id'], 'dataset_name')
    full_snapshot_name = '%s@%s' % (
        parent_dataset, self._get_snapshot_name(snapshot['id']))
    self.private_storage.update(
        snapshot['id'],
        {'entity_type': 'snapshot', 'snapshot_name': full_snapshot_name},
    )
    self.zfs('snapshot', full_snapshot_name)
@ensure_share_server_not_provided
def delete_snapshot(self, context, snapshot, share_server=None):
    """Destroy the ZFS snapshot backing the given share snapshot.

    Warns instead of failing when the snapshot no longer exists on the
    backend; private metadata is dropped in either case.
    """
    snapshot_name = self.private_storage.get(
        snapshot['id'], 'snapshot_name')
    pool_name = snapshot_name.split('/')[0]
    out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
    exists = any(
        datum['NAME'] == snapshot_name
        for datum in self.parse_zfs_answer(out))
    if exists:
        self._delete_dataset_or_snapshot_with_retry(snapshot_name)
    else:
        LOG.warning(
            _LW("Snapshot with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted."),
            {'id': snapshot['id'], 'name': snapshot_name})
    self.private_storage.delete(snapshot['id'])
@ensure_share_server_not_provided
def create_share_from_snapshot(self, context, share, snapshot,
                               share_server=None):
    """Is called to create a share from snapshot.

    Clones the source snapshot into a new dataset, sets the requested
    share size as the clone's quota and returns its export locations.
    """
    dataset_name = self._get_dataset_name(share)
    # SSH endpoint of this backend, used for replication of the clone.
    ssh_cmd = '%(username)s@%(host)s' % {
        'username': self.configuration.zfs_ssh_username,
        'host': self.service_ip,
    }
    pool_name = share_utils.extract_host(share['host'], level='pool')
    self.private_storage.update(
        share['id'], {
            'entity_type': 'share',
            'dataset_name': dataset_name,
            'ssh_cmd': ssh_cmd,  # used in replication
            'pool_name': pool_name,  # used in replication
            # Clones inherit properties from the origin snapshot, so no
            # explicit creation options are recorded here.
            'provided_options': 'Cloned from source',
            'used_options': 'Cloned from source',
        }
    )
    snapshot_name = self.private_storage.get(
        snapshot['id'], 'snapshot_name')
    self.zfs(
        'clone', snapshot_name, dataset_name,
        '-o', 'quota=%sG' % share['size'],
    )
    return self._get_share_helper(
        share['share_proto']).create_exports(dataset_name)
def get_pool(self, share):
    """Return pool name where the share resides on.

    :param share: The share hosted by the driver.
    :returns: pool component of the share's host field.
    """
    return share_utils.extract_host(share['host'], level='pool')
@ensure_share_server_not_provided
def ensure_share(self, context, share, share_server=None):
    """Invoked to ensure that given share is exported.

    Verifies the backing dataset exists, re-shares it over NFS when the
    'sharenfs' property is enabled and returns the export locations.

    :raises ShareResourceNotFound: if the dataset is absent on backend.
    """
    dataset_name = self.private_storage.get(share['id'], 'dataset_name')
    if not dataset_name:
        # Private metadata may be missing; derive the name instead.
        dataset_name = self._get_dataset_name(share)
    pool_name = share_utils.extract_host(share['host'], level='pool')
    out, err = self.zfs('list', '-r', pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        if datum['NAME'] == dataset_name:
            sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
            if sharenfs != 'off':
                # Re-apply the NFS export in case it was dropped.
                self.zfs('share', dataset_name)
            export_locations = self._get_share_helper(
                share['share_proto']).get_exports(dataset_name)
            return export_locations
    else:
        # for/else: no dataset matched -> the share is gone.
        raise exception.ShareResourceNotFound(share_id=share['id'])
def get_network_allocations_number(self):
    """Networking is not managed by this driver, so no allocations."""
    return 0
@ensure_share_server_not_provided
def extend_share(self, share, new_size, share_server=None):
    """Grow a share by raising the quota on its dataset."""
    self.zfs(
        'set', 'quota=%sG' % new_size, self._get_dataset_name(share))
@ensure_share_server_not_provided
def shrink_share(self, share, new_size, share_server=None):
    """Shrink a share by lowering its dataset quota.

    :raises ShareShrinkingPossibleDataLoss: when the data already
        stored in the dataset would not fit into the new size.
    """
    dataset_name = self._get_dataset_name(share)
    used_gb = utils.translate_string_size_to_float(
        self.get_zfs_option(dataset_name, 'used'))
    if used_gb >= new_size:
        raise exception.ShareShrinkingPossibleDataLoss(
            share_id=share['id'])
    self.zfs('set', 'quota=%sG' % new_size, dataset_name)
@ensure_share_server_not_provided
def update_access(self, context, share, access_rules, add_rules=None,
                  delete_rules=None, share_server=None):
    """Delegate access-rule updates to the protocol-specific helper."""
    helper = self._get_share_helper(share['share_proto'])
    return helper.update_access(
        self._get_dataset_name(share), access_rules, add_rules,
        delete_rules)
def unmanage(self, share):
    """Removes the specified share from Manila management.

    Only driver-side bookkeeping is dropped; the backing ZFS dataset
    itself is left untouched.
    """
    self.private_storage.delete(share['id'])
def _get_replication_snapshot_prefix(self, replica):
"""Returns replica-based snapshot prefix."""
replication_snapshot_prefix = "%s_%s" % (
self.replica_snapshot_prefix, replica['id'].replace('-', '_'))
return replication_snapshot_prefix
def _get_replication_snapshot_tag(self, replica):
    """Build a unique, time-stamped replication snapshot tag."""
    return '%s_time_%s' % (
        self._get_replication_snapshot_prefix(replica),
        timeutils.utcnow().isoformat())
def _get_active_replica(self, replica_list):
    """Return the replica marked active.

    :raises ReplicationException: when no replica in the list has the
        active state.
    """
    active = next(
        (r for r in replica_list
         if r['replica_state'] == constants.REPLICA_STATE_ACTIVE),
        None)
    if active is None:
        raise exception.ReplicationException(
            reason=_("Active replica not found."))
    return active
@ensure_share_server_not_provided
def create_replica(self, context, replica_list, new_replica,
                   access_rules, share_server=None):
    """Replicates the active replica to a new replica on this backend.

    Takes a temporary snapshot of the active replica's dataset, pipes
    'zfs send' over SSH into 'zfs receive' on this backend, then marks
    the new dataset read-only and applies the share's access rules in
    read-only mode.
    """
    active_replica = self._get_active_replica(replica_list)
    src_dataset_name = self.private_storage.get(
        active_replica['id'], 'dataset_name')
    ssh_to_src_cmd = self.private_storage.get(
        active_replica['id'], 'ssh_cmd')
    dst_dataset_name = self._get_dataset_name(new_replica)
    # SSH endpoint of this (destination) backend.
    ssh_cmd = '%(username)s@%(host)s' % {
        'username': self.configuration.zfs_ssh_username,
        'host': self.service_ip,
    }
    snapshot_tag = self._get_replication_snapshot_tag(new_replica)
    src_snapshot_name = (
        '%(dataset_name)s@%(snapshot_tag)s' % {
            'snapshot_tag': snapshot_tag,
            'dataset_name': src_dataset_name,
        }
    )
    # Save valuable data to DB
    self.private_storage.update(active_replica['id'], {
        'repl_snapshot_tag': snapshot_tag,
    })
    self.private_storage.update(new_replica['id'], {
        'entity_type': 'replica',
        'replica_type': 'readable',
        'dataset_name': dst_dataset_name,
        'ssh_cmd': ssh_cmd,
        'pool_name': share_utils.extract_host(
            new_replica['host'], level='pool'),
        'repl_snapshot_tag': snapshot_tag,
    })
    # Create temporary snapshot. It will exist until following replica sync
    # After it - new one will appear and so in loop.
    self.execute(
        'ssh', ssh_to_src_cmd,
        'sudo', 'zfs', 'snapshot', src_snapshot_name,
    )
    # Send/receive temporary snapshot
    out, err = self.execute(
        'ssh', ssh_to_src_cmd,
        'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|',
        'ssh', ssh_cmd,
        'sudo', 'zfs', 'receive', '-v', dst_dataset_name,
    )
    msg = ("Info about replica '%(replica_id)s' creation is following: "
           "\n%(out)s")
    LOG.debug(msg, {'replica_id': new_replica['id'], 'out': out})
    # Make replica readonly
    self.zfs('set', 'readonly=on', dst_dataset_name)
    # Set original share size as quota to new replica
    self.zfs('set', 'quota=%sG' % active_replica['size'], dst_dataset_name)
    # Apply access rules from original share
    self._get_share_helper(new_replica['share_proto']).update_access(
        dst_dataset_name, access_rules, make_all_ro=True)
    return {
        'export_locations': self._get_share_helper(
            new_replica['share_proto']).create_exports(dst_dataset_name),
        'replica_state': constants.REPLICA_STATE_IN_SYNC,
        'access_rules_status': constants.STATUS_ACTIVE,
    }
@ensure_share_server_not_provided
def delete_replica(self, context, replica_list, replica,
                   share_server=None):
    """Deletes a replica. This is called on the destination backend.

    Destroys all snapshots related to the replica's dataset, then the
    dataset itself, and finally the private metadata.
    """
    pool_name = self.private_storage.get(replica['id'], 'pool_name')
    dataset_name = self.private_storage.get(replica['id'], 'dataset_name')
    if not dataset_name:
        dataset_name = self._get_dataset_name(replica)
    # Delete dataset's snapshots first
    out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        # NOTE(review): substring match — this also hits snapshots of
        # other datasets whose names contain this dataset's name.
        if dataset_name in datum['NAME']:
            self._delete_dataset_or_snapshot_with_retry(datum['NAME'])
    # Now we delete dataset itself
    out, err = self.zfs('list', '-r', pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        if datum['NAME'] == dataset_name:
            self._get_share_helper(
                replica['share_proto']).remove_exports(dataset_name)
            self._delete_dataset_or_snapshot_with_retry(dataset_name)
            break
    else:
        # for/else: no dataset matched — nothing left to destroy.
        LOG.warning(
            _LW("Share replica with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted."),
            {'id': replica['id'], 'name': dataset_name})
    self.private_storage.delete(replica['id'])
@ensure_share_server_not_provided
def update_replica_state(self, context, replica_list, replica,
                         access_rules, share_server=None):
    """Syncs replica and updates its 'replica_state'.

    Takes a fresh snapshot of the active replica, sends the increment
    since the previous sync into this (secondary) replica, prunes
    replication snapshots on both ends that no replica references any
    more, and re-applies access rules read-only. Returns
    REPLICA_STATE_IN_SYNC on success.
    """
    active_replica = self._get_active_replica(replica_list)
    src_dataset_name = self.private_storage.get(
        active_replica['id'], 'dataset_name')
    ssh_to_src_cmd = self.private_storage.get(
        active_replica['id'], 'ssh_cmd')
    ssh_to_dst_cmd = self.private_storage.get(
        replica['id'], 'ssh_cmd')
    dst_dataset_name = self.private_storage.get(
        replica['id'], 'dataset_name')
    # Create temporary snapshot
    previous_snapshot_tag = self.private_storage.get(
        replica['id'], 'repl_snapshot_tag')
    snapshot_tag = self._get_replication_snapshot_tag(replica)
    src_snapshot_name = src_dataset_name + '@' + snapshot_tag
    self.execute(
        'ssh', ssh_to_src_cmd,
        'sudo', 'zfs', 'snapshot', src_snapshot_name,
    )
    # Make sure it is readonly
    self.zfs('set', 'readonly=on', dst_dataset_name)
    # Send/receive diff between previous snapshot and last one
    # NOTE(review): the incremental source is passed as a bare tag
    # (no '@' / dataset prefix) — confirm 'zfs send -I' accepts this form.
    out, err = self.execute(
        'ssh', ssh_to_src_cmd,
        'sudo', 'zfs', 'send', '-vDRI',
        previous_snapshot_tag, src_snapshot_name, '|',
        'ssh', ssh_to_dst_cmd,
        'sudo', 'zfs', 'receive', '-vF', dst_dataset_name,
    )
    msg = ("Info about last replica '%(replica_id)s' sync is following: "
           "\n%(out)s")
    LOG.debug(msg, {'replica_id': replica['id'], 'out': out})
    # Update DB data that will be used on following replica sync
    self.private_storage.update(active_replica['id'], {
        'repl_snapshot_tag': snapshot_tag,
    })
    self.private_storage.update(
        replica['id'], {'repl_snapshot_tag': snapshot_tag})
    # Destroy all snapshots on dst filesystem except referenced ones.
    snap_references = set()
    for repl in replica_list:
        snap_references.add(
            self.private_storage.get(repl['id'], 'repl_snapshot_tag'))
    dst_pool_name = dst_dataset_name.split('/')[0]
    out, err = self.zfs('list', '-r', '-t', 'snapshot', dst_pool_name)
    data = self.parse_zfs_answer(out)
    for datum in data:
        if (dst_dataset_name in datum['NAME'] and
                datum['NAME'].split('@')[-1] not in snap_references):
            self._delete_dataset_or_snapshot_with_retry(datum['NAME'])
    # Destroy all snapshots on src filesystem except referenced ones.
    src_pool_name = src_snapshot_name.split('/')[0]
    out, err = self.execute(
        'ssh', ssh_to_src_cmd,
        'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', src_pool_name,
    )
    data = self.parse_zfs_answer(out)
    full_src_snapshot_prefix = (
        src_dataset_name + '@' +
        self._get_replication_snapshot_prefix(replica))
    for datum in data:
        if (full_src_snapshot_prefix in datum['NAME'] and
                datum['NAME'].split('@')[-1] not in snap_references):
            self.execute_with_retry(
                'ssh', ssh_to_src_cmd,
                'sudo', 'zfs', 'destroy', '-f', datum['NAME'],
            )
    # Apply access rules from original share
    # TODO(vponomaryov): we should remove somehow rules that were
    # deleted on active replica after creation of secondary replica.
    # For the moment there will be difference and it can be considered
    # as a bug.
    self._get_share_helper(replica['share_proto']).update_access(
        dst_dataset_name, access_rules, make_all_ro=True)
    # Return results
    return constants.REPLICA_STATE_IN_SYNC
@ensure_share_server_not_provided
def promote_replica(self, context, replica_list, replica, access_rules,
                    share_server=None):
    """Promotes secondary replica to active and active to secondary.

    Attempts a final sync from the currently active replica to every
    secondary; regardless of the outcome, the given secondary is then
    promoted: a fresh snapshot of it is distributed to the remaining
    secondaries, its access rules are applied and it is made writable.

    :returns: list of replica dicts with updated 'replica_state' and
        'access_rules_status' values.
    """
    active_replica = self._get_active_replica(replica_list)
    src_dataset_name = self.private_storage.get(
        active_replica['id'], 'dataset_name')
    ssh_to_src_cmd = self.private_storage.get(
        active_replica['id'], 'ssh_cmd')
    dst_dataset_name = self.private_storage.get(
        replica['id'], 'dataset_name')
    replica_dict = {
        r['id']: {
            'id': r['id'],
            # NOTE(vponomaryov): access rules will be updated in next
            # 'sync' operation.
            'access_rules_status': constants.STATUS_OUT_OF_SYNC,
        }
        for r in replica_list
    }
    try:
        # Mark currently active replica as readonly.
        # FIX: the original call omitted the 'sudo', 'zfs' prefix and
        # would have executed a bare 'set' command on the remote host.
        self.execute(
            'ssh', ssh_to_src_cmd,
            'sudo', 'zfs', 'set', 'readonly=on', src_dataset_name,
        )
        # Create temporary snapshot of currently active replica
        snapshot_tag = self._get_replication_snapshot_tag(active_replica)
        src_snapshot_name = src_dataset_name + '@' + snapshot_tag
        self.execute(
            'ssh', ssh_to_src_cmd,
            'sudo', 'zfs', 'snapshot', src_snapshot_name,
        )
        # Apply temporary snapshot to all replicas
        for repl in replica_list:
            if repl['replica_state'] == constants.REPLICA_STATE_ACTIVE:
                continue
            previous_snapshot_tag = self.private_storage.get(
                repl['id'], 'repl_snapshot_tag')
            dataset_name = self.private_storage.get(
                repl['id'], 'dataset_name')
            ssh_to_dst_cmd = self.private_storage.get(
                repl['id'], 'ssh_cmd')
            try:
                # Send/receive diff between previous snapshot and last one
                out, err = self.execute(
                    'ssh', ssh_to_src_cmd,
                    'sudo', 'zfs', 'send', '-vDRI',
                    previous_snapshot_tag, src_snapshot_name, '|',
                    'ssh', ssh_to_dst_cmd,
                    'sudo', 'zfs', 'receive', '-vF', dataset_name,
                )
            except exception.ProcessExecutionError as e:
                # A failed secondary does not abort the promotion; it is
                # just reported as out-of-sync.
                LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
                            {'id': repl['id'], 'e': e})
                replica_dict[repl['id']]['replica_state'] = (
                    constants.REPLICA_STATE_OUT_OF_SYNC)
                continue
            msg = ("Info about last replica '%(replica_id)s' "
                   "sync is following: \n%(out)s")
            LOG.debug(msg, {'replica_id': repl['id'], 'out': out})
            # Update latest replication snapshot for replica
            self.private_storage.update(
                repl['id'], {'repl_snapshot_tag': snapshot_tag})
        # Update latest replication snapshot for currently active replica
        self.private_storage.update(
            active_replica['id'], {'repl_snapshot_tag': snapshot_tag})
        replica_dict[active_replica['id']]['replica_state'] = (
            constants.REPLICA_STATE_IN_SYNC)
    except Exception as e:
        # Promotion proceeds even if the old active replica could not be
        # synced/frozen; it is reported as out-of-sync instead.
        LOG.warning(
            _LW("Failed to update currently active replica. \n%s"), e)
        replica_dict[active_replica['id']]['replica_state'] = (
            constants.REPLICA_STATE_OUT_OF_SYNC)
    # Create temporary snapshot of new replica and sync it with other
    # secondary replicas.
    snapshot_tag = self._get_replication_snapshot_tag(replica)
    src_snapshot_name = dst_dataset_name + '@' + snapshot_tag
    ssh_to_src_cmd = self.private_storage.get(replica['id'], 'ssh_cmd')
    self.zfs('snapshot', src_snapshot_name)
    for repl in replica_list:
        if (repl['replica_state'] == constants.REPLICA_STATE_ACTIVE or
                repl['id'] == replica['id']):
            continue
        previous_snapshot_tag = self.private_storage.get(
            repl['id'], 'repl_snapshot_tag')
        dataset_name = self.private_storage.get(
            repl['id'], 'dataset_name')
        ssh_to_dst_cmd = self.private_storage.get(
            repl['id'], 'ssh_cmd')
        try:
            # Send/receive diff between previous snapshot and last one
            out, err = self.execute(
                'ssh', ssh_to_src_cmd,
                'sudo', 'zfs', 'send', '-vDRI',
                previous_snapshot_tag, src_snapshot_name, '|',
                'ssh', ssh_to_dst_cmd,
                'sudo', 'zfs', 'receive', '-vF', dataset_name,
            )
        except exception.ProcessExecutionError as e:
            LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
                        {'id': repl['id'], 'e': e})
            replica_dict[repl['id']]['replica_state'] = (
                constants.REPLICA_STATE_OUT_OF_SYNC)
            continue
        msg = ("Info about last replica '%(replica_id)s' "
               "sync is following: \n%(out)s")
        LOG.debug(msg, {'replica_id': repl['id'], 'out': out})
        # Update latest replication snapshot for replica
        self.private_storage.update(
            repl['id'], {'repl_snapshot_tag': snapshot_tag})
    # Update latest replication snapshot for new active replica
    self.private_storage.update(
        replica['id'], {'repl_snapshot_tag': snapshot_tag})
    replica_dict[replica['id']]['replica_state'] = (
        constants.REPLICA_STATE_ACTIVE)
    # The promoted replica gets the full (writable) rule set applied.
    self._get_share_helper(replica['share_proto']).update_access(
        dst_dataset_name, access_rules)
    replica_dict[replica['id']]['access_rules_status'] = (
        constants.STATUS_ACTIVE)
    self.zfs('set', 'readonly=off', dst_dataset_name)
    return list(replica_dict.values())

@ -0,0 +1,283 @@
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module for storing ZFSonLinux driver utility stuff such as:
- Common ZFS code
- Share helpers
"""
# TODO(vponomaryov): add support of SaMBa
import abc
from oslo_log import log
import six
from manila.common import constants
from manila import exception
from manila.i18n import _, _LI, _LW
from manila.share import driver
from manila.share.drivers.ganesha import utils as ganesha_utils
from manila import utils
LOG = log.getLogger(__name__)
def zfs_dataset_synchronized(f):
    """Serialize calls that operate on the same ZFS dataset.

    The inter-process lock key is built from the first positional
    argument, which is expected to be the dataset name.
    """
    def wrapped_func(self, *args, **kwargs):
        lock_name = "zfs-dataset-%s" % args[0]

        @utils.synchronized(lock_name, external=True)
        def locked_call(self, *args, **kwargs):
            return f(self, *args, **kwargs)

        return locked_call(self, *args, **kwargs)
    return wrapped_func
class ExecuteMixin(driver.ExecuteMixin):
    """Mixin providing local or SSH-based shell execution for ZFS drivers."""

    def init_execute_mixin(self, *args, **kwargs):
        """Init method for mixin called in the end of driver's __init__()."""
        super(ExecuteMixin, self).init_execute_mixin(*args, **kwargs)
        if self.configuration.zfs_use_ssh:
            # Run all shell commands on the remote ZFS storage host.
            self.ssh_executor = ganesha_utils.SSHExecutor(
                ip=self.configuration.zfs_service_ip,
                port=22,
                conn_timeout=self.configuration.ssh_conn_timeout,
                login=self.configuration.zfs_ssh_username,
                password=self.configuration.zfs_ssh_user_password,
                privatekey=self.configuration.zfs_ssh_private_key_path,
                max_size=10,
            )
        else:
            self.ssh_executor = None

    def execute(self, *cmd, **kwargs):
        """Common interface for running shell commands.

        Executes locally or over SSH depending on configuration. A
        leading 'sudo' token is stripped and translated into
        run_as_root=True for the underlying executor.
        """
        executor = self._execute
        if self.ssh_executor:
            executor = self.ssh_executor
        if cmd[0] == 'sudo':
            kwargs['run_as_root'] = True
            cmd = cmd[1:]
        return executor(*cmd, **kwargs)

    @utils.retry(exception.ProcessExecutionError,
                 interval=5, retries=36, backoff_rate=1)
    def execute_with_retry(self, *cmd, **kwargs):
        """Retry wrapper over common shell interface."""
        try:
            return self.execute(*cmd, **kwargs)
        except exception.ProcessExecutionError as e:
            # Log and re-raise so the retry decorator can act on it.
            LOG.warning(_LW("Failed to run command, got error: %s"), e)
            raise

    def _get_option(self, resource_name, option_name, pool_level=False):
        """Returns value of requested zpool or zfs dataset option."""
        app = 'zpool' if pool_level else 'zfs'
        # '(zpool|zfs) get <option> <resource>' prints a header line and
        # one data row; the VALUE column of that row is the result.
        out, err = self.execute('sudo', app, 'get', option_name, resource_name)
        data = self.parse_zfs_answer(out)
        option = data[0]['VALUE']
        return option

    def parse_zfs_answer(self, string):
        """Returns list of dicts with data returned by ZFS shell commands.

        The first line is treated as a header of column names; each
        following non-empty line becomes a dict keyed by those names.
        """
        lines = string.split('\n')
        if len(lines) < 2:
            return []
        keys = list(filter(None, lines[0].split(' ')))
        data = []
        for line in lines[1:]:
            values = list(filter(None, line.split(' ')))
            if not values:
                continue
            data.append(dict(zip(keys, values)))
        return data

    def get_zpool_option(self, zpool_name, option_name):
        """Returns value of requested zpool option."""
        return self._get_option(zpool_name, option_name, True)

    def get_zfs_option(self, dataset_name, option_name):
        """Returns value of requested zfs dataset option."""
        return self._get_option(dataset_name, option_name, False)

    def zfs(self, *cmd, **kwargs):
        """ZFS shell commands executor."""
        return self.execute('sudo', 'zfs', *cmd, **kwargs)

    def zfs_with_retry(self, *cmd, **kwargs):
        """ZFS shell commands executor with retries."""
        return self.execute_with_retry('sudo', 'zfs', *cmd, **kwargs)
@six.add_metaclass(abc.ABCMeta)
class NASHelperBase(object):
    """Base class for share helpers of 'ZFS on Linux' driver."""

    def __init__(self, configuration):
        """Init share helper.

        :param configuration: share driver 'configuration' instance
        :return: share helper instance.
        """
        self.configuration = configuration
        self.init_execute_mixin()  # pylint: disable=E1101
        self.verify_setup()

    @abc.abstractmethod
    def verify_setup(self):
        """Performs checks for required stuff."""

    @abc.abstractmethod
    def create_exports(self, dataset_name):
        """Creates share exports."""

    # FIX: dropped the unused 'service' parameter so the abstract
    # signature matches the concrete implementation and its callers,
    # which pass only the dataset name.
    @abc.abstractmethod
    def get_exports(self, dataset_name):
        """Gets/reads share exports."""

    @abc.abstractmethod
    def remove_exports(self, dataset_name):
        """Removes share exports."""

    @abc.abstractmethod
    def update_access(self, dataset_name, access_rules, add_rules=None,
                      delete_rules=None):
        """Update access rules for specified ZFS dataset."""
class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
    """Helper class for handling ZFS datasets as NFS shares.

    Kernel and Fuse versions of ZFS have different syntax for setting up access
    rules, and this Helper designed to satisfy both making autodetection.
    """

    @property
    def is_kernel_version(self):
        """Says whether Kernel version of ZFS is used or not.

        The result is cached on first access; a successful 'modinfo zfs'
        means the kernel module is present.
        """
        if not hasattr(self, '_is_kernel_version'):
            try:
                self.execute('modinfo', 'zfs')
                self._is_kernel_version = True
            except exception.ProcessExecutionError as e:
                LOG.info(
                    _LI("Looks like ZFS kernel module is absent. "
                        "Assuming FUSE version is installed. Error: %s"), e)
                self._is_kernel_version = False
        return self._is_kernel_version

    def verify_setup(self):
        """Performs checks for required stuff.

        Ensures the 'exportfs' utility is installed and callable via
        sudo; raises on failure.
        """
        out, err = self.execute('which', 'exportfs')
        if not out:
            raise exception.ZFSonLinuxException(
                msg=_("Utility 'exportfs' is not installed."))
        try:
            self.execute('sudo', 'exportfs')
        except exception.ProcessExecutionError as e:
            msg = _("Call of 'exportfs' utility returned error: %s")
            LOG.exception(msg, e)
            raise

    def create_exports(self, dataset_name):
        """Creates NFS share exports for given ZFS dataset."""
        # Export locations are fully derived from the dataset's
        # mountpoint, so "creating" them is just a read.
        return self.get_exports(dataset_name)

    def get_exports(self, dataset_name):
        """Gets/reads NFS share export for given ZFS dataset.

        Returns two locations: the tenant-facing export IP and the
        admin-only service IP.
        """
        mountpoint = self.get_zfs_option(dataset_name, 'mountpoint')
        return [
            {
                "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint},
                "metadata": {
                },
                "is_admin_only": is_admin_only,
            } for ip, is_admin_only in (
                (self.configuration.zfs_share_export_ip, False),
                (self.configuration.zfs_service_ip, True))
        ]

    @zfs_dataset_synchronized
    def remove_exports(self, dataset_name):
        """Removes NFS share exports for given ZFS dataset."""
        sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
        if sharenfs == 'off':
            # Already unshared; nothing to do.
            return
        self.zfs("set", "sharenfs=off", dataset_name)

    @zfs_dataset_synchronized
    def update_access(self, dataset_name, access_rules, add_rules=None,
                      delete_rules=None, make_all_ro=False):
        """Update access rules for given ZFS dataset exported as NFS share.

        Only IP-type rules are supported. When 'make_all_ro' is True,
        RW rules are downgraded to RO (used for read-only replicas).
        """
        rw_rules = []
        ro_rules = []
        for rule in access_rules:
            if rule['access_type'].lower() != 'ip':
                msg = _("Only IP access type allowed for NFS protocol.")
                raise exception.InvalidShareAccess(reason=msg)
            if (rule['access_level'] == constants.ACCESS_LEVEL_RW and
                    not make_all_ro):
                rw_rules.append(rule['access_to'])
            elif (rule['access_level'] in (constants.ACCESS_LEVEL_RW,
                                           constants.ACCESS_LEVEL_RO)):
                ro_rules.append(rule['access_to'])
            else:
                msg = _("Unsupported access level provided - "
                        "%s.") % rule['access_level']
                raise exception.InvalidShareAccess(reason=msg)
        rules = []
        if self.is_kernel_version:
            # Kernel ZFS expects a single comma-separated option list.
            if rw_rules:
                rules.append(
                    "rw=%s,no_root_squash" % ":".join(rw_rules))
            if ro_rules:
                rules.append("ro=%s,no_root_squash" % ":".join(ro_rules))
            rules_str = "sharenfs=" + (','.join(rules) or 'off')
        else:
            # FUSE ZFS expects space-separated 'host:options' entries.
            for rule in rw_rules:
                rules.append("%s:rw,no_root_squash" % rule)
            for rule in ro_rules:
                rules.append("%s:ro,no_root_squash" % rule)
            rules_str = "sharenfs=" + (' '.join(rules) or 'off')
        out, err = self.zfs('list', '-r', dataset_name.split('/')[0])
        data = self.parse_zfs_answer(out)
        for datum in data:
            if datum['NAME'] == dataset_name:
                self.zfs("set", rules_str, dataset_name)
                break
        else:
            # for/else: dataset is gone — nothing to apply rules to.
            LOG.warning(
                _LW("Dataset with '%(name)s' NAME is absent on backend. "
                    "Access rules were not applied."), {'name': dataset_name})
        # NOTE(vponomaryov): Setting of ZFS share options does not remove rules
        # that were added and then removed. So, remove them explicitly.
        if delete_rules and access_rules:
            mountpoint = self.get_zfs_option(dataset_name, 'mountpoint')
            for rule in delete_rules:
                if rule['access_type'].lower() != 'ip':
                    continue
                export_location = rule['access_to'] + ':' + mountpoint
                self.execute('sudo', 'exportfs', '-u', export_location)

@ -43,6 +43,12 @@ def set_defaults(conf):
'manila.tests.fake_driver.FakeShareDriver')
_safe_set_of_opts(conf, 'auth_strategy', 'noauth')
_safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1')
_safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2')
_safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar'])
_safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper')
_safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_')
def _safe_set_of_opts(conf, *args, **kwargs):
try:

File diff suppressed because it is too large Load Diff

@ -0,0 +1,497 @@
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import ddt
import mock
from oslo_config import cfg
from manila import exception
from manila.share.drivers.ganesha import utils as ganesha_utils
from manila.share.drivers.zfsonlinux import utils as zfs_utils
from manila import test
CONF = cfg.CONF
def get_fake_configuration(*args, **kwargs):
    """Build a fake configuration class carrying ZFS driver options.

    Any default may be overridden via a keyword argument of the same
    name; the result is a new class whose attributes are the options.
    """
    defaults = {
        "zfs_use_ssh": False,
        "zfs_share_export_ip": "240.241.242.243",
        "zfs_service_ip": "240.241.242.244",
        "ssh_conn_timeout": 123,
        "zfs_ssh_username": "fake_username",
        "zfs_ssh_user_password": "fake_pass",
        "zfs_ssh_private_key_path": "/fake/path",
    }
    attrs = {key: kwargs.get(key, value) for key, value in defaults.items()}
    attrs["append_config_values"] = mock.Mock()
    return type("FakeConfig", (object, ), attrs)
class FakeShareDriver(zfs_utils.ExecuteMixin):
    # Minimal driver stand-in: provides only the fake configuration and
    # the ExecuteMixin initialization needed by the tests below.

    def __init__(self, *args, **kwargs):
        self.configuration = get_fake_configuration(*args, **kwargs)
        self.init_execute_mixin(*args, **kwargs)
@ddt.ddt
class ExecuteMixinTestCase(test.TestCase):
def setUp(self):
    # NOTE(review): super(self.__class__, ...) recurses infinitely if
    # this test class is ever subclassed; prefer an explicit class name.
    super(self.__class__, self).setUp()
    # Patch SSHExecutor so no real SSH connection is attempted.
    self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor')
    self.driver = FakeShareDriver()
def test_init(self):
    # Without zfs_use_ssh the driver must not create an SSH executor.
    self.assertIsNone(self.driver.ssh_executor)
    self.assertEqual(0, self.ssh_executor.call_count)
def test_init_ssh(self):
    # With zfs_use_ssh enabled an SSHExecutor is created using the
    # configured connection options.
    driver = FakeShareDriver(zfs_use_ssh=True)
    self.assertIsNotNone(driver.ssh_executor)
    self.ssh_executor.assert_called_once_with(
        ip=driver.configuration.zfs_service_ip,
        port=22,
        conn_timeout=driver.configuration.ssh_conn_timeout,
        login=driver.configuration.zfs_ssh_username,
        password=driver.configuration.zfs_ssh_user_password,
        privatekey=driver.configuration.zfs_ssh_private_key_path,
        max_size=10,
    )
def test_local_shell_execute(self):
    # Local mode: commands go through _execute, not the SSH executor.
    self.mock_object(self.driver, '_execute')
    self.driver.execute('fake', '--foo', '--bar')
    self.assertEqual(0, self.ssh_executor.call_count)
    self.driver._execute.assert_called_once_with(
        'fake', '--foo', '--bar')
def test_local_shell_execute_with_sudo(self):
    # Leading 'sudo' must be stripped and turned into run_as_root=True.
    self.mock_object(self.driver, '_execute')
    self.driver.execute('sudo', 'fake', '--foo', '--bar')
    self.assertEqual(0, self.ssh_executor.call_count)
    self.driver._execute.assert_called_once_with(
        'fake', '--foo', '--bar', run_as_root=True)
def test_ssh_execute(self):
    # SSH mode: commands go through the SSH executor, not _execute.
    driver = FakeShareDriver(zfs_use_ssh=True)
    self.mock_object(driver, '_execute')
    driver.execute('fake', '--foo', '--bar')
    self.assertEqual(0, driver._execute.call_count)
    self.ssh_executor.return_value.assert_called_once_with(
        'fake', '--foo', '--bar')
def test_ssh_execute_with_sudo(self):
    # SSH mode with 'sudo': prefix stripped, run_as_root=True forwarded.
    driver = FakeShareDriver(zfs_use_ssh=True)
    self.mock_object(driver, '_execute')
    driver.execute('sudo', 'fake', '--foo', '--bar')
    self.assertEqual(0, driver._execute.call_count)
    self.ssh_executor.return_value.assert_called_once_with(
        'fake', '--foo', '--bar', run_as_root=True)
def test_execute_with_retry(self):
    # First call fails, second succeeds: exactly two attempts expected.
    self.mock_object(time, 'sleep')
    self.mock_object(self.driver, 'execute', mock.Mock(
        side_effect=[exception.ProcessExecutionError('FAKE'), None]))
    self.driver.execute_with_retry('foo', 'bar')
    self.assertEqual(2, self.driver.execute.call_count)
    self.driver.execute.assert_has_calls(
        [mock.call('foo', 'bar'), mock.call('foo', 'bar')])
def test_execute_with_retry_exceeded(self):
    # Persistent failure: the retry decorator gives up after the
    # configured 36 attempts and re-raises the error.
    self.mock_object(time, 'sleep')
    self.mock_object(self.driver, 'execute', mock.Mock(
        side_effect=exception.ProcessExecutionError('FAKE')))
    self.assertRaises(
        exception.ProcessExecutionError,
        self.driver.execute_with_retry,
        'foo', 'bar',
    )
    self.assertEqual(36, self.driver.execute.call_count)
@ddt.data(True, False)
def test__get_option(self, pool_level):
    # _get_option must dispatch to 'zpool' or 'zfs' depending on
    # pool_level and return the VALUE column of the parsed output.
    out = """NAME PROPERTY VALUE SOURCE\n
foo_resource_name bar_option_name some_value local"""
    self.mock_object(
        self.driver, '_execute', mock.Mock(return_value=(out, '')))
    res_name = 'foo_resource_name'
    opt_name = 'bar_option_name'
    result = self.driver._get_option(
        res_name, opt_name, pool_level=pool_level)
    self.assertEqual('some_value', result)
    self.driver._execute.assert_called_once_with(
        'zpool' if pool_level else 'zfs', 'get', opt_name, res_name,
        run_as_root=True)
def test_parse_zfs_answer(self):
not_parsed_str = ''
not_parsed_str = """NAME PROPERTY VALUE SOURCE\n
foo_res opt_1 bar local
foo_res opt_2 foo default
foo_res opt_3 some_value local"""
expected = [
{'NAME': 'foo_res', 'PROPERTY': 'opt_1', 'VALUE': 'bar',
'SOURCE': 'local'},
{'NAME': 'foo_res', 'PROPERTY': 'opt_2', 'VALUE': 'foo',
'SOURCE': 'default'},
{'NAME': 'foo_res', 'PROPERTY': 'opt_3', 'VALUE': 'some_value',
'SOURCE': 'local'},
]
result = self.driver.parse_zfs_answer(not_parsed_str)
self.assertEqual(expected, result)
def test_parse_zfs_answer_empty(self):
result = self.driver.parse_zfs_answer('')
self.assertEqual([], result)
def test_get_zpool_option(self):
self.mock_object(self.driver, '_get_option')
zpool_name = 'foo_resource_name'
opt_name = 'bar_option_name'
result = self.driver.get_zpool_option(zpool_name, opt_name)
self.assertEqual(self.driver._get_option.return_value, result)
self.driver._get_option.assert_called_once_with(
zpool_name, opt_name, True)
def test_get_zfs_option(self):
self.mock_object(self.driver, '_get_option')
dataset_name = 'foo_resource_name'
opt_name = 'bar_option_name'
result = self.driver.get_zfs_option(dataset_name, opt_name)
self.assertEqual(self.driver._get_option.return_value, result)
self.driver._get_option.assert_called_once_with(
dataset_name, opt_name, False)
def test_zfs(self):
self.mock_object(self.driver, 'execute')
self.mock_object(self.driver, 'execute_with_retry')
self.driver.zfs('foo', 'bar')
self.assertEqual(0, self.driver.execute_with_retry.call_count)
self.driver.execute.asssert_called_once_with(
'sudo', 'zfs', 'foo', 'bar')
def test_zfs_with_retrying(self):
self.mock_object(self.driver, 'execute')
self.mock_object(self.driver, 'execute_with_retry')
self.driver.zfs_with_retry('foo', 'bar')
self.assertEqual(0, self.driver.execute.call_count)
self.driver.execute_with_retry.asssert_called_once_with(
'sudo', 'zfs', 'foo', 'bar')
@ddt.ddt
class NFSviaZFSHelperTestCase(test.TestCase):
    """Tests for the NFS helper that exports ZFS datasets.

    The helper manages NFS exports either through the 'sharenfs' ZFS
    option or through 'exportfs' directly, depending on whether the
    kernel-level ZFS module is available ('modinfo zfs' succeeds).
    """

    def setUp(self):
        # NOTE: use the explicit class name in super(); the previous
        # 'super(self.__class__, self)' form recurses infinitely as soon
        # as this test case is subclassed.
        super(NFSviaZFSHelperTestCase, self).setUp()
        configuration = get_fake_configuration()
        self.out = "fake_out"
        self.mock_object(
            zfs_utils.utils, "execute", mock.Mock(return_value=(self.out, "")))
        self.helper = zfs_utils.NFSviaZFSHelper(configuration)

    def test_init(self):
        """Instantiation verifies that 'exportfs' is installed and usable."""
        zfs_utils.utils.execute.assert_has_calls([
            mock.call("which", "exportfs"),
            mock.call("exportfs", run_as_root=True),
        ])

    def test_verify_setup_exportfs_not_installed(self):
        """Empty 'which exportfs' output makes setup verification fail."""
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = [('', '')]

        self.assertRaises(
            exception.ZFSonLinuxException, self.helper.verify_setup)

        zfs_utils.utils.execute.assert_called_once_with("which", "exportfs")

    def test_verify_setup_error_calling_exportfs(self):
        """Errors from running 'exportfs' itself are propagated."""
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = [
            ('fake_out', ''), exception.ProcessExecutionError('Fake')]

        self.assertRaises(
            exception.ProcessExecutionError, self.helper.verify_setup)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call("which", "exportfs"),
            mock.call("exportfs", run_as_root=True),
        ])

    def test_is_kernel_version_true(self):
        """'modinfo zfs' succeeding means the kernel ZFS module is used."""
        zfs_utils.utils.execute.reset_mock()

        self.assertTrue(self.helper.is_kernel_version)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call("modinfo", "zfs"),
        ])

    def test_is_kernel_version_false(self):
        """'modinfo zfs' failing means no kernel-level ZFS module."""
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = (
            exception.ProcessExecutionError('Fake'))

        self.assertFalse(self.helper.is_kernel_version)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call("modinfo", "zfs"),
        ])

    def test_is_kernel_version_second_call(self):
        """The result is cached -- 'modinfo' is only consulted once."""
        zfs_utils.utils.execute.reset_mock()

        self.assertTrue(self.helper.is_kernel_version)
        self.assertTrue(self.helper.is_kernel_version)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call("modinfo", "zfs"),
        ])

    def test_create_exports(self):
        """create_exports simply returns what get_exports provides."""
        self.mock_object(self.helper, 'get_exports')

        result = self.helper.create_exports('foo')

        self.assertEqual(
            self.helper.get_exports.return_value, result)

    def test_get_exports(self):
        """Export locations are built for the share IP and the service IP.

        The service-IP export is marked admin-only; paths are based on the
        dataset's 'mountpoint' ZFS option.
        """
        self.mock_object(
            self.helper, 'get_zfs_option', mock.Mock(return_value='fake_mp'))
        expected = [
            {
                "path": "%s:fake_mp" % ip,
                "metadata": {},
                "is_admin_only": is_admin_only,
            } for ip, is_admin_only in (
                (self.helper.configuration.zfs_share_export_ip, False),
                (self.helper.configuration.zfs_service_ip, True))
        ]

        result = self.helper.get_exports('foo')

        self.assertEqual(expected, result)
        self.helper.get_zfs_option.assert_called_once_with('foo', 'mountpoint')

    def test_remove_exports(self):
        """An active NFS export is switched off via 'sharenfs=off'."""
        zfs_utils.utils.execute.reset_mock()
        self.mock_object(
            self.helper, 'get_zfs_option', mock.Mock(return_value='bar'))

        self.helper.remove_exports('foo')

        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
        zfs_utils.utils.execute.assert_called_once_with(
            'zfs', 'set', 'sharenfs=off', 'foo', run_as_root=True)

    def test_remove_exports_that_absent(self):
        """Nothing is executed when the export is already off."""
        zfs_utils.utils.execute.reset_mock()
        self.mock_object(
            self.helper, 'get_zfs_option', mock.Mock(return_value='off'))

        self.helper.remove_exports('foo')

        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
        self.assertEqual(0, zfs_utils.utils.execute.call_count)

    @ddt.data(
        (('fake_modinfo_result', ''),
         ('sharenfs=rw=1.1.1.1:3.3.3.3,no_root_squash,'
          'ro=2.2.2.2,no_root_squash'), False),
        (('fake_modinfo_result', ''),
         ('sharenfs=ro=1.1.1.1:2.2.2.2:3.3.3.3,no_root_squash'), True),
        (exception.ProcessExecutionError('Fake'),
         ('sharenfs=1.1.1.1:rw,no_root_squash 3.3.3.3:rw,'
          'no_root_squash 2.2.2.2:ro,no_root_squash'), False),
        (exception.ProcessExecutionError('Fake'),
         ('sharenfs=1.1.1.1:ro,no_root_squash 2.2.2.2:ro,'
          'no_root_squash 3.3.3.3:ro,no_root_squash'), True),
    )
    @ddt.unpack
    def test_update_access_rw_and_ro(self, modinfo_response, access_str,
                                     make_all_ro):
        """Access rules translate into the proper 'sharenfs' string.

        The expected string depends on whether the kernel ZFS module is
        available (modinfo success vs failure) and on 'make_all_ro',
        which downgrades every rule to read-only. Deleted IP rules are
        additionally unexported with 'exportfs -u'.
        """
        zfs_utils.utils.execute.reset_mock()
        dataset_name = 'zpoolz/foo_dataset_name/fake'
        zfs_utils.utils.execute.side_effect = [
            modinfo_response,
            ("""NAME USED AVAIL REFER MOUNTPOINT\n
              %(dn)s 2.58M 14.8G 27.5K /%(dn)s\n
              %(dn)s_some_other 3.58M 15.8G 28.5K /%(dn)s\n
            """ % {'dn': dataset_name}, ''),
            ('fake_set_opt_result', ''),
            ("""NAME PROPERTY VALUE SOURCE\n
              %s mountpoint /%s default\n
            """ % (dataset_name, dataset_name), ''),
            ('fake_1_result', ''),
            ('fake_2_result', ''),
            ('fake_3_result', ''),
        ]
        access_rules = [
            {'access_type': 'ip', 'access_level': 'rw',
             'access_to': '1.1.1.1'},
            {'access_type': 'ip', 'access_level': 'ro',
             'access_to': '2.2.2.2'},
            {'access_type': 'ip', 'access_level': 'rw',
             'access_to': '3.3.3.3'},
        ]
        delete_rules = [
            {'access_type': 'ip', 'access_level': 'rw',
             'access_to': '4.4.4.4'},
            {'access_type': 'ip', 'access_level': 'ro',
             'access_to': '5.5.5.5'},
            {'access_type': 'user', 'access_level': 'rw',
             'access_to': '6.6.6.6'},
            {'access_type': 'user', 'access_level': 'ro',
             'access_to': '7.7.7.7'},
        ]

        self.helper.update_access(
            dataset_name, access_rules, [], delete_rules,
            make_all_ro=make_all_ro)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call('modinfo', 'zfs'),
            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
            mock.call(
                'zfs', 'set',
                access_str,
                dataset_name, run_as_root=True),
            mock.call(
                'zfs', 'get', 'mountpoint', dataset_name, run_as_root=True),
            mock.call(
                'exportfs', '-u', '4.4.4.4:/%s' % dataset_name,
                run_as_root=True),
            mock.call(
                'exportfs', '-u', '5.5.5.5:/%s' % dataset_name,
                run_as_root=True),
        ])

    def test_update_access_dataset_not_found(self):
        """A missing dataset triggers a warning, not an error."""
        self.mock_object(zfs_utils.LOG, 'warning')
        zfs_utils.utils.execute.reset_mock()
        dataset_name = 'zpoolz/foo_dataset_name/fake'
        zfs_utils.utils.execute.side_effect = [
            ('fake_modinfo_result', ''),
            ('fake_dataset_not_found_result', ''),
            ('fake_set_opt_result', ''),
        ]
        access_rules = [
            {'access_type': 'ip', 'access_level': 'rw',
             'access_to': '1.1.1.1'},
            {'access_type': 'ip', 'access_level': 'ro',
             'access_to': '1.1.1.2'},
        ]

        self.helper.update_access(dataset_name, access_rules, [], [])

        zfs_utils.utils.execute.assert_has_calls([
            mock.call('modinfo', 'zfs'),
            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
        ])
        zfs_utils.LOG.warning.assert_called_once_with(
            mock.ANY, {'name': dataset_name})

    @ddt.data(exception.ProcessExecutionError('Fake'), ('Ok', ''))
    def test_update_access_no_rules(self, first_execute_result):
        """With no rules at all, the export is simply switched off."""
        # NOTE(review): 'first_execute_result' is unused -- presumably it
        # was meant to serve as the modinfo response below; confirm.
        zfs_utils.utils.execute.reset_mock()
        dataset_name = 'zpoolz/foo_dataset_name/fake'
        zfs_utils.utils.execute.side_effect = [
            ('fake_modinfo_result', ''),
            ("""NAME USED AVAIL REFER MOUNTPOINT\n
              %s 2.58M 14.8G 27.5K /%s\n
            """ % (dataset_name, dataset_name), ''),
            ('fake_set_opt_result', ''),
        ]

        self.helper.update_access(dataset_name, [], [], [])

        zfs_utils.utils.execute.assert_has_calls([
            mock.call('modinfo', 'zfs'),
            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
            mock.call('zfs', 'set', 'sharenfs=off', dataset_name,
                      run_as_root=True),
        ])

    @ddt.data('user', 'cert', 'cephx', '', 'fake', 'i', 'p')
    def test_update_access_not_ip_access_type(self, access_type):
        """Only IP-based rules are supported; others are rejected early."""
        zfs_utils.utils.execute.reset_mock()
        dataset_name = 'zpoolz/foo_dataset_name/fake'
        access_rules = [
            {'access_type': access_type, 'access_level': 'rw',
             'access_to': '1.1.1.1'},
            {'access_type': 'ip', 'access_level': 'ro',
             'access_to': '1.1.1.2'},
        ]

        self.assertRaises(
            exception.InvalidShareAccess,
            self.helper.update_access,
            dataset_name, access_rules, access_rules, [],
        )

        # Validation fails before any command is run.
        self.assertEqual(0, zfs_utils.utils.execute.call_count)

    @ddt.data('', 'r', 'o', 'w', 'fake', 'su')
    def test_update_access_neither_rw_nor_ro_access_level(self, access_level):
        """Only 'rw' and 'ro' access levels are accepted."""
        zfs_utils.utils.execute.reset_mock()
        dataset_name = 'zpoolz/foo_dataset_name/fake'
        access_rules = [
            {'access_type': 'ip', 'access_level': access_level,
             'access_to': '1.1.1.1'},
            {'access_type': 'ip', 'access_level': 'ro',
             'access_to': '1.1.1.2'},
        ]

        self.assertRaises(
            exception.InvalidShareAccess,
            self.helper.update_access,
            dataset_name, access_rules, access_rules, [],
        )

        # Validation fails before any command is run.
        self.assertEqual(0, zfs_utils.utils.execute.call_count)

@ -353,6 +353,47 @@ class GenericUtilsTestCase(test.TestCase):
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection, cmd)
@ddt.data(
(("3G", "G"), 3.0),
(("4.1G", "G"), 4.1),
(("5.23G", "G"), 5.23),
(("9728M", "G"), 9.5),
(("8192K", "G"), 0.0078125),
(("2T", "G"), 2048.0),
(("2.1T", "G"), 2150.4),
(("3P", "G"), 3145728.0),
(("3.4P", "G"), 3565158.4),
(("9728M", "M"), 9728.0),
(("9728.2381T", "T"), 9728.2381),
(("0", "G"), 0.0),
(("512", "M"), 0.00048828125),
(("2097152.", "M"), 2.0),
((".1024", "K"), 0.0001),
(("2048G", "T"), 2.0),
(("65536G", "P"), 0.0625),
)
@ddt.unpack
def test_translate_string_size_to_float_positive(self, request, expected):
actual = utils.translate_string_size_to_float(*request)
self.assertEqual(expected, actual)
@ddt.data(
(None, "G"),
("fake", "G"),
("1fake", "G"),
("2GG", "G"),
("1KM", "G"),
("K1M", "G"),
("M1K", "G"),
("", "G"),
(23, "G"),
(23.0, "G"),
)
@ddt.unpack
def test_translate_string_size_to_float_negative(self, string, multiplier):
actual = utils.translate_string_size_to_float(string, multiplier)
self.assertIsNone(actual)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""

@ -613,3 +613,45 @@ def require_driver_initialized(func):
raise exception.DriverNotInitialized(driver=driver_name)
return func(self, *args, **kwargs)
return wrapper
def translate_string_size_to_float(string, multiplier='G'):
    """Translate a human-readable storage size to a float value.

    Accepts either a plain number, interpreted as an amount of bytes
    (e.g. '512'), or a number with a one-letter binary suffix
    (e.g. '9728M', '2.1T').

    Supported values for 'multiplier' (unit of the result):
    K - kilo | 1
    M - mega | 1024
    G - giga | 1024 * 1024
    T - tera | 1024 * 1024 * 1024
    P - peta | 1024 * 1024 * 1024 * 1024

    :returns: float if correct input data provided, None if incorrect.
    :raises: ManilaException if 'multiplier' is not a supported unit.
    """
    if not isinstance(string, six.string_types):
        return None
    multipliers = ('K', 'M', 'G', 'T', 'P')
    mapping = {
        k: 1024.0 ** v
        for k, v in zip(multipliers, range(len(multipliers)))
    }
    if multiplier not in multipliers:
        raise exception.ManilaException(
            "'multiplier' arg should be one of following: "
            "'%(multipliers)s'. But it is '%(multiplier)s'." % {
                'multiplier': multiplier,
                'multipliers': "', '".join(multipliers),
            }
        )
    try:
        # A bare number is an amount of bytes; scale it down to the
        # requested unit.
        value = float(string) / 1024.0
        value = value / mapping[multiplier]
        return value
    except (ValueError, TypeError):
        # NOTE: the suffix character class was previously built with
        # ','.join(), which made ',' itself match and could raise
        # KeyError for inputs like '3,'. The mantissa also allowed
        # multiple dots ('2..5G'), making float() below raise instead
        # of returning None as documented. Both are fixed here.
        matched = re.match(
            r"^(\d+\.?\d*)([%s])$" % ''.join(multipliers), string)
        if matched:
            value = float(matched.groups()[0])
            multiplier = mapping[matched.groups()[1]] / mapping[multiplier]
            return value * multiplier
        return None