diff --git a/contrib/ci/post_test_hook.sh b/contrib/ci/post_test_hook.sh
index 5934575365..87bdfea4dd 100755
--- a/contrib/ci/post_test_hook.sh
+++ b/contrib/ci/post_test_hook.sh
@@ -141,7 +141,7 @@ elif [[ "$DRIVER" == "generic" ]]; then
     fi
 fi
 
-if [[ "$DRIVER" == "lvm"  ]]; then
+if [[ "$DRIVER" == "lvm" ]]; then
     MANILA_TEMPEST_CONCURRENCY=8
     RUN_MANILA_CG_TESTS=False
     RUN_MANILA_MANAGE_TESTS=False
@@ -159,6 +159,26 @@ if [[ "$DRIVER" == "lvm"  ]]; then
         samba_daemon_name=smb
     fi
     sudo service $samba_daemon_name restart
+elif [[ "$DRIVER" == "zfsonlinux" ]]; then
+    MANILA_TEMPEST_CONCURRENCY=8
+    RUN_MANILA_CG_TESTS=False
+    RUN_MANILA_MANAGE_TESTS=False
+    iniset $TEMPEST_CONFIG share run_migration_tests False
+    iniset $TEMPEST_CONFIG share run_quota_tests True
+    iniset $TEMPEST_CONFIG share run_replication_tests True
+    iniset $TEMPEST_CONFIG share run_shrink_tests True
+    iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs'
+    iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols ''
+    iniset $TEMPEST_CONFIG share enable_cert_rules_for_protocols ''
+    iniset $TEMPEST_CONFIG share enable_ro_access_level_for_protocols 'nfs'
+    iniset $TEMPEST_CONFIG share build_timeout 180
+    iniset $TEMPEST_CONFIG share share_creation_retry_number 0
+    iniset $TEMPEST_CONFIG share capability_storage_protocol 'NFS'
+    iniset $TEMPEST_CONFIG share enable_protocols 'nfs'
+    iniset $TEMPEST_CONFIG share suppress_errors_in_cleanup False
+    iniset $TEMPEST_CONFIG share multitenancy_enabled False
+    iniset $TEMPEST_CONFIG share multi_backend True
+    iniset $TEMPEST_CONFIG share backend_replication_type 'readable'
 fi
 
 # Enable consistency group tests
diff --git a/contrib/ci/pre_test_hook.sh b/contrib/ci/pre_test_hook.sh
index 1473e459e3..9375516a69 100755
--- a/contrib/ci/pre_test_hook.sh
+++ b/contrib/ci/pre_test_hook.sh
@@ -64,6 +64,9 @@ fi
 if [[ "$DRIVER" == "lvm" ]]; then
     echo "SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver" >> $localrc_path
     echo "SHARE_BACKING_FILE_SIZE=32000M" >> $localrc_path
+elif [[ "$DRIVER" == "zfsonlinux" ]]; then
+    echo "SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" >> $localrc_path
+    echo "RUN_MANILA_REPLICATION_TESTS=True" >> $localrc_path
 fi
 
 # Enabling isolated metadata in Neutron is required because
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 8002c40f1f..d8323433dd 100755
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -38,12 +38,25 @@ function _clean_manila_lvm_backing_file {
     fi
 }
 
+function _clean_zfsonlinux_data {
+    for file in "$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/*; do
+        # The glob yields full paths; each zpool is named after the
+        # basename of its backing file.
+        local zpool_name=$(basename $file)
+        if [[ $(sudo zpool list | grep $zpool_name) ]]; then
+            echo "Destroying zpool named $zpool_name"
+            sudo zpool destroy -f $zpool_name
+            echo "Destroying file named $file"
+            rm -f $file
+        fi
+    done
+}
+
 # cleanup_manila - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_manila {
     # All stuff, that are created by share drivers will be cleaned up by other services.
     _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX
     _clean_manila_lvm_backing_file $SHARE_GROUP
+    _clean_zfsonlinux_data
 }
 
 # configure_default_backends - configures default Manila backends with generic driver.
@@ -426,6 +439,45 @@ function init_manila {
 
             mkdir -p $MANILA_STATE_PATH/shares
         fi
+    elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
+        if is_service_enabled m-shr; then
+            mkdir -p $MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR
+            file_counter=0
+            for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
+                if [[ $file_counter == 0 ]]; then
+                    # NOTE(vponomaryov): create two pools for the first ZFS
+                    # backend to cover different use cases supported by the
+                    # driver:
+                    # - Support of more than one zpool per share backend.
+                    # - Support of nested datasets.
+                    local first_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/alpha
+                    local second_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/betta
+                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $first_file
+                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $second_file
+                    sudo zpool create alpha $first_file
+                    sudo zpool create betta $second_file
+                    # Create subdir (nested dataset) for second pool
+                    sudo zfs create betta/subdir
+                    iniset $MANILA_CONF $BE zfs_zpool_list alpha,betta/subdir
+                elif [[ $file_counter == 1 ]]; then
+                    local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/gamma
+                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
+                    sudo zpool create gamma $file
+                    iniset $MANILA_CONF $BE zfs_zpool_list gamma
+                else
+                    local filename=file"$file_counter"
+                    local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/"$filename"
+                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
+                    sudo zpool create $filename $file
+                    iniset $MANILA_CONF $BE zfs_zpool_list $filename
+                fi
+                iniset $MANILA_CONF $BE zfs_share_export_ip $MANILA_ZFSONLINUX_SHARE_EXPORT_IP
+                iniset $MANILA_CONF $BE zfs_service_ip $MANILA_ZFSONLINUX_SERVICE_IP
+                iniset $MANILA_CONF $BE zfs_dataset_creation_options $MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS
+                iniset $MANILA_CONF $BE zfs_ssh_username $MANILA_ZFSONLINUX_SSH_USERNAME
+                iniset $MANILA_CONF $BE replication_domain $MANILA_ZFSONLINUX_REPLICATION_DOMAIN
+                let "file_counter=file_counter+1"
+            done
+        fi
     fi
 
     # Create cache dir
@@ -446,6 +498,32 @@ function install_manila {
                 sudo yum install -y nfs-utils nfs-utils-lib samba
             fi
         fi
+    elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
+        if is_service_enabled m-shr; then
+            if is_ubuntu; then
+                sudo apt-get install -y nfs-kernel-server nfs-common samba
+                # NOTE(vponomaryov): the following installation steps are
+                # valid for Ubuntu 'trusty'.
+                sudo apt-get install -y software-properties-common
+                sudo apt-add-repository --yes ppa:zfs-native/stable
+                sudo apt-get -y -q update && sudo apt-get -y -q upgrade
+                sudo apt-get install -y linux-headers-generic
+                sudo apt-get install -y build-essential
+                sudo apt-get install -y ubuntu-zfs
+                sudo modprobe zfs
+
+                # TODO(vponomaryov): remove the following line when this is
+                # in the 'requirements.txt' file.
+                # The 'nsenter' package is expected to be installed on the
+                # ZFS host if it is remote to the manila-share service host.
+                sudo pip install nsenter
+            else
+                echo "Manila Devstack plugin does not support installation "\
+                    "of ZFS packages for non-'Ubuntu-trusty' distros. "\
+                    "Please, install it first by other means or add its support "\
+                    "for your distro."
+                exit 1
+            fi
+        fi
     fi
 
     # install manila-ui if horizon is enabled
@@ -457,6 +535,8 @@ function install_manila {
 #configure_samba - Configure node as Samba server
 function configure_samba {
     if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then
+        # TODO(vponomaryov): add a condition for the ZFSonLinux driver here
+        # too when it starts to support Samba.
         samba_daemon_name=smbd
         if is_service_enabled m-shr; then
             if is_fedora; then
diff --git a/devstack/settings b/devstack/settings
index 7641bdaa2f..7a546fad65 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -141,6 +141,19 @@ SMB_CONF=${SMB_CONF:-/etc/samba/smb.conf}
 SMB_PRIVATE_DIR=${SMB_PRIVATE_DIR:-/var/lib/samba/private}
 CONFIGURE_BACKING_FILE=${CONFIGURE_BACKING_FILE:-"True"}
 
+# Options for configuration of ZFSonLinux driver
+# 'MANILA_ZFSONLINUX_ZPOOL_SIZE' defines the size of each zpool. This value
+# is used for the creation of the sparse backing files.
+MANILA_ZFSONLINUX_ZPOOL_SIZE=${MANILA_ZFSONLINUX_ZPOOL_SIZE:-"30G"}
+MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR=${MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR:-"/opt/stack/data/manila/zfsonlinux"}
+MANILA_ZFSONLINUX_SHARE_EXPORT_IP=${MANILA_ZFSONLINUX_SHARE_EXPORT_IP:-"127.0.0.1"}
+MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-"127.0.0.1"}
+MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS=${MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS:-"compression=gzip"}
+MANILA_ZFSONLINUX_SSH_USERNAME=${MANILA_ZFSONLINUX_SSH_USERNAME:-"stack"}
+# If MANILA_ZFSONLINUX_REPLICATION_DOMAIN is set to an empty value, then
+# Manila considers the replication feature disabled for the ZFSonLinux
+# share driver.
+MANILA_ZFSONLINUX_REPLICATION_DOMAIN=${MANILA_ZFSONLINUX_REPLICATION_DOMAIN:-"ZFSonLinux"}
+
 # Enable manila services
 # ----------------------
 # We have to add Manila to enabled services for screen_it to work
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 8f7972a956..0e7534b299 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -98,6 +98,7 @@ Share backends
 .. toctree::
    :maxdepth: 3
 
+   zfs_on_linux_driver
    netapp_cluster_mode_driver
    emc_isilon_driver
    emc_vnx_driver
diff --git a/doc/source/devref/share_back_ends_feature_support_mapping.rst b/doc/source/devref/share_back_ends_feature_support_mapping.rst
index 8f48761cd1..42578e0167 100644
--- a/doc/source/devref/share_back_ends_feature_support_mapping.rst
+++ b/doc/source/devref/share_back_ends_feature_support_mapping.rst
@@ -33,6 +33,8 @@ Mapping of share drivers and share features support
 +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
 |               Driver name              |     create/delete share     | manage/unmanage share | extend share | shrink share | create/delete snapshot | create share from snapshot | manage/unmanage snapshot |
 +========================================+=============================+=======================+==============+==============+========================+============================+==========================+
+|               ZFSonLinux               |       DHSS = False (M)      |          \-           |       M      |       M      |            M           |              M             |            \-            |
++----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
 |      Generic (Cinder as back-end)      | DHSS = True (J) & False (K) |           K           |       L      |       L      |            J           |              J             |     DHSS = False (M)     |
 +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+
 |       NetApp Clustered Data ONTAP      | DHSS = True (J) & False (K) |           L           |       L      |       L      |            J           |              J             |            \-            |
@@ -76,6 +78,8 @@ Mapping of share drivers and share access rules support
 +             Driver name                +--------------+----------------+------------+--------------+----------------+------------+
 |                                        |      IP      |      USER      |    Cert    |      IP      |      USER      |    Cert    |
 +========================================+==============+================+============+==============+================+============+
+|               ZFSonLinux               |    NFS (M)   |       \-       |     \-     |    NFS (M)   |       \-       |     \-     |
++----------------------------------------+--------------+----------------+------------+--------------+----------------+------------+
 |      Generic (Cinder as back-end)      | NFS,CIFS (J) |       \-       |     \-     |    NFS (K)   |       \-       |     \-     |
 +----------------------------------------+--------------+----------------+------------+--------------+----------------+------------+
 |       NetApp Clustered Data ONTAP      |    NFS (J)   |    CIFS (J)    |     \-     |    NFS (K)   |    CIFS (M)    |     \-     |
@@ -113,6 +117,8 @@ Mapping of share drivers and security services support
 +----------------------------------------+------------------+-----------------+------------------+
 |              Driver name               | Active Directory |       LDAP      |      Kerberos    |
 +========================================+==================+=================+==================+
+|               ZFSonLinux               |         \-       |         \-      |         \-       |
++----------------------------------------+------------------+-----------------+------------------+
 |      Generic (Cinder as back-end)      |         \-       |         \-      |         \-       |
 +----------------------------------------+------------------+-----------------+------------------+
 |       NetApp Clustered Data ONTAP      |         J        |         J       |         J        |
diff --git a/doc/source/devref/zfs_on_linux_driver.rst b/doc/source/devref/zfs_on_linux_driver.rst
new file mode 100644
index 0000000000..a66aad0060
--- /dev/null
+++ b/doc/source/devref/zfs_on_linux_driver.rst
@@ -0,0 +1,153 @@
+..
+      Copyright (c) 2016 Mirantis Inc.
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+ZFS (on Linux) Driver
+=====================
+
+The Manila ZFSonLinux share driver uses the ZFS filesystem for exporting NFS
+shares. It was written and tested using the Linux version of ZFS.
+
+Requirements
+------------
+
+* NFS daemon that can be handled via the "exportfs" utility.
+* ZFS filesystem packages, either kernel or FUSE versions.
+* ZFS zpools that are going to be used by Manila should exist and be
+  configured as desired. Manila will not change zpool configuration.
+* If the ZFS host is remote to the manila-share service host, SSH should be
+  installed.
+* For ZFS hosts that support replication:
+   * SSH access between the hosts should be passwordless.
+   * The IP addresses used for share exports should be reachable by the ZFS
+     hosts from each other.
+   * The same username should be used for accessing each of the ZFS hosts.
+
+Supported Operations
+--------------------
+
+The following operations are supported:
+
+* Create NFS Share
+* Delete NFS Share
+* Allow NFS Share access
+   * Only IP access type is supported for NFS
+   * Both access levels are supported - 'RW' and 'RO'
+* Deny NFS Share access
+* Create snapshot
+* Delete snapshot
+* Create share from snapshot
+* Extend share
+* Shrink share
+* Replication (experimental):
+   * Create/update/delete/promote replica operations are supported
+
+Possibilities
+-------------
+
+* Any number of ZFS zpools can be used by the share driver.
+* Default options for the ZFS datasets used for share creation can be
+  configured.
+* Any number of nested datasets can be used.
+* All share replicas are read-only; only the active one is RW.
+* All share replicas are synchronized periodically, not continuously.
+  So, the 'in_sync' status means the latest sync was successful.
+  The time range between syncs equals the value of the global config
+  option 'replica_state_update_interval'.
+
+Restrictions
+------------
+
+The ZFSonLinux share driver has the following restrictions:
+
+* Only IP access type is supported for NFS.
+* Only FLAT network is supported.
+* The 'promote share replica' operation switches the roles of the current
+  'secondary' replica and the 'active' one. It does not make more than
+  one active replica available.
+* The 'manage share' operation is not yet implemented.
+* Samba-based sharing is not yet implemented.
+
+Known problems
+--------------
+
+* It is better to avoid running Neutron on the same node where ZFS is
+  installed, as it triggers the following bug:
+  https://bugs.launchpad.net/neutron/+bug/1546723
+  The ZFSonLinux share driver has a workaround for it that requires
+  'nsenter' to be installed on the system where ZFS is installed.
+* The 'promote share replica' operation makes the ZFS filesystem that became
+  secondary read-only on the NFS level only. On the ZFS level the
+  filesystem stays mounted as it was - RW.
+
+Backend Configuration
+---------------------
+
+The following parameters need to be configured in the manila configuration file
+for the ZFSonLinux driver:
+
+* share_driver = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver
+* driver_handles_share_servers = False
+* replication_domain = custom_str_value_as_domain_name
+   * if empty, replication will be disabled
+   * if set, the backend can be used as a replication peer for any other
+     backend with the same value.
+* zfs_share_export_ip = <user_facing IP address of ZFS host>
+* zfs_service_ip = <IP address of service network interface of ZFS host>
+* zfs_zpool_list = zpoolname1,zpoolname2/nested_dataset_for_zpool2
+   * can be one or more zpools
+   * can contain nested datasets
+* zfs_dataset_creation_options = <list of ZFS dataset options>
+   * the 'readonly', 'quota', 'sharenfs' and 'sharesmb' options will be
+     ignored
+* zfs_dataset_name_prefix = <prefix>
+   * Prefix to be used in each dataset name.
+* zfs_dataset_snapshot_name_prefix = <prefix>
+   * Prefix to be used in each dataset snapshot name.
+* zfs_use_ssh = <boolean_value>
+   * set to 'False' if ZFS is located on the same host as the 'manila-share'
+     service
+   * set to 'True' if the 'manila-share' service should use SSH for ZFS
+     configuration
+* zfs_ssh_username = <ssh_username>
+   * required for replication operations
+   * required for SSH'ing to the ZFS host if 'zfs_use_ssh' is set to 'True'
+* zfs_ssh_user_password = <ssh_user_password>
+   * password for 'zfs_ssh_username' of ZFS host.
+   * used only if 'zfs_use_ssh' is set to 'True'
+* zfs_ssh_private_key_path = <path_to_private_ssh_key>
+   * used only if 'zfs_use_ssh' is set to 'True'
+* zfs_share_helpers = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper
+   * The approach to setting up helpers is similar to that of various other
+     share drivers.
+   * At least one helper should be used.
+* zfs_replica_snapshot_prefix = <prefix>
+   * Prefix to be used in dataset snapshot names that are created
+     by 'update replica' operation.
+
+A restart of the :term:`manila-share` service is needed for the configuration
+changes to take effect.
+
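+For reference, a minimal single-backend section of 'manila.conf' could look
+like the following sketch; the backend name, IP addresses and zpool names
+here are illustrative only::
+
+    [zfsonlinux1]
+    share_driver = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver
+    share_backend_name = zfsonlinux1
+    driver_handles_share_servers = False
+    replication_domain = ZFSonLinux
+    zfs_share_export_ip = 10.0.0.10
+    zfs_service_ip = 10.0.0.10
+    zfs_zpool_list = zpoolname1,zpoolname2/nested_dataset_for_zpool2
+    zfs_dataset_creation_options = compression=gzip
+    zfs_use_ssh = False
+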
+The :mod:`manila.share.drivers.zfsonlinux.driver` Module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: manila.share.drivers.zfsonlinux.driver
+    :noindex:
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+The :mod:`manila.share.drivers.zfsonlinux.utils` Module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. automodule:: manila.share.drivers.zfsonlinux.utils
+    :noindex:
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/etc/manila/rootwrap.d/share.filters b/etc/manila/rootwrap.d/share.filters
index 197cbcd710..415f6a653c 100644
--- a/etc/manila/rootwrap.d/share.filters
+++ b/etc/manila/rootwrap.d/share.filters
@@ -136,3 +136,15 @@ dbus-removeexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --sy
 
 # manila/share/drivers/ganesha/manager.py:
 rmconf: RegExpFilter, sh, root, sh, -c, rm -f /.*/\*\.conf$
+
+# ZFS commands
+# manila/share/drivers/zfsonlinux/driver.py
+# manila/share/drivers/zfsonlinux/utils.py
+zpool: CommandFilter, zpool, root
+
+# manila/share/drivers/zfsonlinux/driver.py
+# manila/share/drivers/zfsonlinux/utils.py
+zfs: CommandFilter, zfs, root
+
+# manila/share/drivers/zfsonlinux/driver.py
+nsenter: CommandFilter, /usr/local/bin/nsenter, root
diff --git a/manila/db/sqlalchemy/api.py b/manila/db/sqlalchemy/api.py
index 9ef1130f6f..664b40803f 100644
--- a/manila/db/sqlalchemy/api.py
+++ b/manila/db/sqlalchemy/api.py
@@ -2812,10 +2812,12 @@ def share_server_backend_details_delete(context, share_server_id,
 
 def _driver_private_data_query(session, context, host, entity_id, key=None,
                                read_deleted=False):
-    query = model_query(context, models.DriverPrivateData,
-                        session=session, read_deleted=read_deleted)\
-        .filter_by(host=host)\
-        .filter_by(entity_uuid=entity_id)
+    query = model_query(
+        context, models.DriverPrivateData, session=session,
+        read_deleted=read_deleted,
+    ).filter_by(
+        entity_uuid=entity_id,
+    )
 
     if isinstance(key, list):
         return query.filter(models.DriverPrivateData.key.in_(key))
diff --git a/manila/exception.py b/manila/exception.py
index ec02c3bf3f..bd3ea34bc6 100644
--- a/manila/exception.py
+++ b/manila/exception.py
@@ -118,6 +118,10 @@ class NetworkBadConfigurationException(NetworkException):
     message = _("Bad network configuration: %(reason)s.")
 
 
+class BadConfigurationException(ManilaException):
+    message = _("Bad configuration: %(reason)s.")
+
+
 class NotAuthorized(ManilaException):
     message = _("Not authorized.")
     code = 403
@@ -659,6 +663,10 @@ class HDFSException(ManilaException):
     message = _("HDFS exception occurred!")
 
 
+class ZFSonLinuxException(ManilaException):
+    message = _("ZFSonLinux exception occurred: %(msg)s")
+
+
 class QBException(ManilaException):
     message = _("Quobyte exception occurred: %(msg)s")
 
diff --git a/manila/opts.py b/manila/opts.py
index 17ef814a09..98bc2099be 100644
--- a/manila/opts.py
+++ b/manila/opts.py
@@ -68,6 +68,7 @@ import manila.share.drivers.quobyte.quobyte
 import manila.share.drivers.service_instance
 import manila.share.drivers.windows.service_instance
 import manila.share.drivers.windows.winrm_helper
+import manila.share.drivers.zfsonlinux.driver
 import manila.share.drivers.zfssa.zfssashare
 import manila.share.drivers_private_data
 import manila.share.hook
@@ -136,6 +137,7 @@ _global_opt_lists = [
     manila.share.drivers.service_instance.share_servers_handling_mode_opts,
     manila.share.drivers.windows.service_instance.windows_share_server_opts,
     manila.share.drivers.windows.winrm_helper.winrm_opts,
+    manila.share.drivers.zfsonlinux.driver.zfsonlinux_opts,
     manila.share.drivers.zfssa.zfssashare.ZFSSA_OPTS,
     manila.share.hook.hook_options,
     manila.share.manager.share_manager_opts,
diff --git a/manila/share/driver.py b/manila/share/driver.py
index ca36a45abf..83430ecddb 100644
--- a/manila/share/driver.py
+++ b/manila/share/driver.py
@@ -237,6 +237,9 @@ class ShareDriver(object):
             unhandled share-servers that are not tracked by Manila.
             Share drivers are allowed to work only in one of two possible
             driver modes, that is why only one should be chosen.
+        :param config_opts: tuple, list or set of config option lists
+            that should be registered in driver's configuration right after
+            this attribute is created. Useful for usage with mixin classes.
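+            For example, the ZFSonLinux driver in this change passes
+            'config_opts=[zfsonlinux_opts]'.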
         """
         super(ShareDriver, self).__init__()
         self.configuration = kwargs.get('configuration', None)
@@ -267,6 +270,9 @@ class ShareDriver(object):
                     config_group_name=admin_network_config_group,
                     label='admin')
 
+        for config_opt_set in kwargs.get('config_opts', []):
+            self.configuration.append_config_values(config_opt_set)
+
         if hasattr(self, 'init_execute_mixin'):
             # Instance with 'ExecuteMixin'
             self.init_execute_mixin(*args, **kwargs)  # pylint: disable=E1101
diff --git a/manila/share/drivers/zfsonlinux/__init__.py b/manila/share/drivers/zfsonlinux/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/manila/share/drivers/zfsonlinux/driver.py b/manila/share/drivers/zfsonlinux/driver.py
new file mode 100644
index 0000000000..c4adba4b8c
--- /dev/null
+++ b/manila/share/drivers/zfsonlinux/driver.py
@@ -0,0 +1,901 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Module with ZFSonLinux share driver that utilizes ZFS filesystem resources
+and exports them as shares.
+"""
+
+import time
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+from oslo_utils import timeutils
+
+from manila.common import constants
+from manila import exception
+from manila.i18n import _, _LW
+from manila.share import driver
+from manila.share.drivers.zfsonlinux import utils as zfs_utils
+from manila.share import utils as share_utils
+from manila import utils
+
+
+zfsonlinux_opts = [
+    cfg.StrOpt(
+        "zfs_share_export_ip",
+        required=True,
+        help="IP to be added to user-facing export location. Required."),
+    cfg.StrOpt(
+        "zfs_service_ip",
+        required=True,
+        help="IP to be added to admin-facing export location. Required."),
+    cfg.ListOpt(
+        "zfs_zpool_list",
+        required=True,
+        help="Specify list of zpools that are allowed to be used by backend. "
+             "Can contain nested datasets. Examples: "
+             "Without nested dataset: 'zpool_name'. "
+             "With nested dataset: 'zpool_name/nested_dataset_name'. "
+             "Required."),
+    cfg.ListOpt(
+        "zfs_dataset_creation_options",
+        help="Define here list of options that should be applied "
+             "for each dataset creation if needed. Example: "
+             "compression=gzip,dedup=off. "
+             "Note that, for secondary replicas option 'readonly' will be set "
+             "to 'on' and for active replicas to 'off' in any way. "
+             "Also, 'quota' will be equal to share size. Optional."),
+    cfg.StrOpt(
+        "zfs_dataset_name_prefix",
+        default='manila_share_',
+        help="Prefix to be used in each dataset name. Optional."),
+    cfg.StrOpt(
+        "zfs_dataset_snapshot_name_prefix",
+        default='manila_share_snapshot_',
+        help="Prefix to be used in each dataset snapshot name. Optional."),
+    cfg.BoolOpt(
+        "zfs_use_ssh",
+        default=False,
+        help="Remote ZFS storage hostname that should be used for SSH'ing. "
+             "Optional."),
+    cfg.StrOpt(
+        "zfs_ssh_username",
+        help="SSH user that will be used in 2 cases: "
+             "1) By manila-share service in case it is located on different "
+             "host than its ZFS storage. "
+             "2) By manila-share services with other ZFS backends that "
+             "perform replication. "
+             "It is expected that SSH'ing will be key-based, passwordless. "
+             "This user should be passwordless sudoer. Optional."),
+    cfg.StrOpt(
+        "zfs_ssh_user_password",
+        secret=True,
+        help="Password for user that is used for SSH'ing ZFS storage host. "
+             "Not used for replication operations. They require "
+             "passwordless SSH access. Optional."),
+    cfg.StrOpt(
+        "zfs_ssh_private_key_path",
+        help="Path to SSH private key that should be used for SSH'ing ZFS "
+             "storage host. Not used for replication operations. Optional."),
+    cfg.ListOpt(
+        "zfs_share_helpers",
+        required=True,
+        default=[
+            "NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper",
+        ],
+        help="Specify list of share export helpers for ZFS storage. "
+             "It should look like following: "
+             "'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. "
+             "Required."),
+    cfg.StrOpt(
+        "zfs_replica_snapshot_prefix",
+        required=True,
+        default="tmp_snapshot_for_replication_",
+        help="Set snapshot prefix for usage in ZFS replication. Required."),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(zfsonlinux_opts)
+LOG = log.getLogger(__name__)
+
+
+def ensure_share_server_not_provided(f):
+
+    def wrap(self, context, *args, **kwargs):
+        server = kwargs.get('share_server')
+        if server:
+            raise exception.InvalidInput(
+                reason=_("Share server handling is not available. "
+                         "But 'share_server' was provided. '%s'. "
+                         "Share network should not be used.") % server.get(
+                             "id", server))
+        return f(self, context, *args, **kwargs)
+
+    return wrap
+
+
+class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver):
+
+    def __init__(self, *args, **kwargs):
+        super(self.__class__, self).__init__(
+            [False], *args, config_opts=[zfsonlinux_opts], **kwargs)
+        self.replica_snapshot_prefix = (
+            self.configuration.zfs_replica_snapshot_prefix)
+        self.backend_name = self.configuration.safe_get(
+            'share_backend_name') or 'ZFSonLinux'
+        self.zpool_list = self._get_zpool_list()
+        self.dataset_creation_options = (
+            self.configuration.zfs_dataset_creation_options)
+        self.share_export_ip = self.configuration.zfs_share_export_ip
+        self.service_ip = self.configuration.zfs_service_ip
+        self.private_storage = kwargs.get('private_storage')
+        self._helpers = {}
+
+    def _get_zpool_list(self):
+        zpools = []
+        for zpool in self.configuration.zfs_zpool_list:
+            zpool_name = zpool.split('/')[0]
+            if zpool_name in zpools:
+                raise exception.BadConfigurationException(
+                    reason=_("Using the same zpool twice is prohibited. "
+                             "Duplicate is '%(zpool)s'. List of zpools: "
+                             "%(zpool_list)s.") % {
+                                 'zpool': zpool,
+                                 'zpool_list': ', '.join(
+                                     self.configuration.zfs_zpool_list)})
+            zpools.append(zpool_name)
+        return zpools
+
+    @zfs_utils.zfs_dataset_synchronized
+    def _delete_dataset_or_snapshot_with_retry(self, name):
+        """Attempts to destroy some dataset or snapshot with retries."""
+        # NOTE(vponomaryov): it is possible to see 'dataset is busy' error
+        # under load, so we are OK to retry in this case.
+        mountpoint = self.get_zfs_option(name, 'mountpoint')
+        if '@' not in name:
+            # NOTE(vponomaryov): check that dataset has no open files.
+            start_point = time.time()
+            while time.time() - start_point < 60:
+                try:
+                    out, err = self.execute('lsof', '-w', mountpoint)
+                except exception.ProcessExecutionError:
+                    # NOTE(vponomaryov): lsof returns code 1 if search
+                    # didn't give results.
+                    break
+                LOG.debug("Cannot destroy dataset '%(name)s', it has "
+                          "opened files. Will wait 2 more seconds. "
+                          "Out: \n%(out)s", {
+                              'name': name, 'out': out})
+                time.sleep(2)
+            else:
+                raise exception.ZFSonLinuxException(
+                    msg=_("Could not destroy '%s' dataset, "
+                          "because it had opened files.") % name)
+
+            try:
+                self.zfs('destroy', '-f', name)
+                return
+            except exception.ProcessExecutionError as e:
+                LOG.debug("Failed to run command, got error: %s\n"
+                          "Assuming other namespace-based services hold "
+                          "ZFS mounts.", e)
+
+            # NOTE(vponomaryov): perform workaround for Neutron bug #1546723
+            # We should release ZFS mount from all namespaces. It should not be
+            # there at all.
+            get_pids_cmd = (
+                "(echo $(grep -s %s /proc/*/mounts) ) 2>&1 " % mountpoint)
+            try:
+                raw_pids, err = self.execute('bash', '-c', get_pids_cmd)
+            except exception.ProcessExecutionError as e:
+                LOG.warning(
+                    _LW("Failed to get list of PIDs that hold ZFS dataset "
+                        "mountpoint. Got following error: %s"), e)
+            else:
+                pids = [s.split('/')[0] for s in raw_pids.split('/proc/') if s]
+                LOG.debug(
+                    "List of pids that hold ZFS mount '%(mnt)s': %(list)s", {
+                        'mnt': mountpoint, 'list': ' '.join(pids)})
+                for pid in pids:
+                    try:
+                        self.execute(
+                            'sudo', 'nsenter', '--mnt', '--target=%s' % pid,
+                            '/bin/umount', mountpoint)
+                    except exception.ProcessExecutionError as e:
+                        LOG.warning(
+                            _LW("Failed to run command with release of "
+                                "ZFS dataset mount, got error: %s"), e)
+
+            # NOTE(vponomaryov): sleep some time after unmount operations.
+            time.sleep(1)
+
+        # NOTE(vponomaryov): Now, when no file usages and mounts of dataset
+        # exist, destroy dataset.
+        self.zfs_with_retry('destroy', '-f', name)
+
+    def _setup_helpers(self):
+        """Setups share helper for ZFS backend."""
+        self._helpers = {}
+        helpers = self.configuration.zfs_share_helpers
+        if helpers:
+            for helper_str in helpers:
+                share_proto, __, import_str = helper_str.partition('=')
+                helper = importutils.import_class(import_str)
+                self._helpers[share_proto.upper()] = helper(
+                    self.configuration)
+        else:
+            raise exception.BadConfigurationException(
+                reason=_(
+                    "No share helpers selected for ZFSonLinux Driver. "
+                    "Please specify using config option 'zfs_share_helpers'."))
+
+    def _get_share_helper(self, share_proto):
+        """Returns share helper specific for used share protocol."""
+        helper = self._helpers.get(share_proto)
+        if helper:
+            return helper
+        else:
+            raise exception.InvalidShare(
+                reason=_("Wrong, unsupported or disabled protocol - "
+                         "'%s'.") % share_proto)
+
+    def do_setup(self, context):
+        """Perform basic setup and checks."""
+        super(self.__class__, self).do_setup(context)
+        self._setup_helpers()
+        for ip in (self.share_export_ip, self.service_ip):
+            if not utils.is_valid_ip_address(ip, 4):
+                raise exception.BadConfigurationException(
+                    reason=_("Wrong IP address provided: "
+                             "%s") % self.share_export_ip)
+
+        if not self.zpool_list:
+            raise exception.BadConfigurationException(
+                reason=_("No zpools specified for usage: "
+                         "%s") % self.zpool_list)
+
+        if self.configuration.zfs_use_ssh:
+            # Check workability of SSH executor
+            self.ssh_executor('whoami')
+
+    def _get_pools_info(self):
+        """Returns info about all pools used by backend."""
+        pools = []
+        for zpool in self.zpool_list:
+            free_size = self.get_zpool_option(zpool, 'free')
+            free_size = utils.translate_string_size_to_float(free_size)
+            total_size = self.get_zpool_option(zpool, 'size')
+            total_size = utils.translate_string_size_to_float(total_size)
+            pool = {
+                'pool_name': zpool,
+                'total_capacity_gb': float(total_size),
+                'free_capacity_gb': float(free_size),
+                'reserved_percentage':
+                    self.configuration.reserved_share_percentage,
+            }
+            if self.configuration.replication_domain:
+                pool['replication_type'] = 'readable'
+            pools.append(pool)
+        return pools
+
+    def _update_share_stats(self):
+        """Retrieves share stats info."""
+        data = {
+            'share_backend_name': self.backend_name,
+            'storage_protocol': 'NFS',
+            'reserved_percentage':
+                self.configuration.reserved_share_percentage,
+            'consistency_group_support': None,
+            'snapshot_support': True,
+            'driver_name': 'ZFS',
+            'pools': self._get_pools_info(),
+        }
+        if self.configuration.replication_domain:
+            data['replication_type'] = 'readable'
+        super(self.__class__, self)._update_share_stats(data)
+
+    def _get_share_name(self, share_id):
+        """Returns name of dataset used for given share."""
+        prefix = self.configuration.zfs_dataset_name_prefix or ''
+        return prefix + share_id.replace('-', '_')
+
+    def _get_snapshot_name(self, snapshot_id):
+        """Returns name of dataset snapshot used for given share snapshot."""
+        prefix = self.configuration.zfs_dataset_snapshot_name_prefix or ''
+        return prefix + snapshot_id.replace('-', '_')
+
+    def _get_dataset_creation_options(self, share, is_readonly=False):
+        """Returns list of options to be used for dataset creation."""
+        if not self.dataset_creation_options:
+            return []
+        options = []
+        for option in self.dataset_creation_options:
+            if any(v in option for v in ('readonly', 'sharenfs', 'sharesmb')):
+                continue
+            options.append(option)
+        if is_readonly:
+            options.append('readonly=on')
+        else:
+            options.append('readonly=off')
+        options.append('quota=%sG' % share['size'])
+        return options
+
+    def _get_dataset_name(self, share):
+        """Returns name of dataset used for given share."""
+        pool_name = share_utils.extract_host(share['host'], level='pool')
+
+        # Pick pool with nested dataset name if set up
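+        # For example, with zfs_zpool_list=['foo', 'bar/subdir'], a share
+        # scheduled to pool 'bar' gets its dataset created under 'bar/subdir'.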
+        for pool in self.configuration.zfs_zpool_list:
+            pool_data = pool.split('/')
+            if (pool_name == pool_data[0] and len(pool_data) > 1):
+                pool_name = pool
+                if pool_name[-1] == '/':
+                    pool_name = pool_name[0:-1]
+                break
+
+        dataset_name = self._get_share_name(share['id'])
+        full_dataset_name = '%(pool)s/%(dataset)s' % {
+            'pool': pool_name, 'dataset': dataset_name}
+
+        return full_dataset_name
+
+    @ensure_share_server_not_provided
+    def create_share(self, context, share, share_server=None):
+        """Is called to create a share."""
+        options = self._get_dataset_creation_options(share, is_readonly=False)
+        cmd = ['create']
+        for option in options:
+            cmd.extend(['-o', option])
+        dataset_name = self._get_dataset_name(share)
+        cmd.append(dataset_name)
+
+        ssh_cmd = '%(username)s@%(host)s' % {
+            'username': self.configuration.zfs_ssh_username,
+            'host': self.service_ip,
+        }
+        pool_name = share_utils.extract_host(share['host'], level='pool')
+        self.private_storage.update(
+            share['id'], {
+                'entity_type': 'share',
+                'dataset_name': dataset_name,
+                'ssh_cmd': ssh_cmd,  # used in replication
+                'pool_name': pool_name,  # used in replication
+                'provided_options': ' '.join(self.dataset_creation_options),
+                'used_options': ' '.join(options),
+            }
+        )
+
+        self.zfs(*cmd)
+
+        return self._get_share_helper(
+            share['share_proto']).create_exports(dataset_name)
+
+    @ensure_share_server_not_provided
+    def delete_share(self, context, share, share_server=None):
+        """Is called to remove a share."""
+        pool_name = self.private_storage.get(share['id'], 'pool_name')
+        dataset_name = self.private_storage.get(share['id'], 'dataset_name')
+        if not dataset_name:
+            dataset_name = self._get_dataset_name(share)
+
+        out, err = self.zfs('list', '-r', pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if datum['NAME'] != dataset_name:
+                continue
+
+            # Delete dataset's snapshots first
+            out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
+            snapshots = self.parse_zfs_answer(out)
+            full_snapshot_prefix = (
+                dataset_name + '@' + self.replica_snapshot_prefix)
+            for snap in snapshots:
+                if full_snapshot_prefix in snap['NAME']:
+                    self._delete_dataset_or_snapshot_with_retry(snap['NAME'])
+
+            self._get_share_helper(
+                share['share_proto']).remove_exports(dataset_name)
+            self._delete_dataset_or_snapshot_with_retry(dataset_name)
+            break
+        else:
+            LOG.warning(
+                _LW("Share with '%(id)s' ID and '%(name)s' NAME is "
+                    "absent on backend. Nothing has been deleted."),
+                {'id': share['id'], 'name': dataset_name})
+        self.private_storage.delete(share['id'])
+
+    @ensure_share_server_not_provided
+    def create_snapshot(self, context, snapshot, share_server=None):
+        """Is called to create a snapshot."""
+        dataset_name = self.private_storage.get(
+            snapshot['share_id'], 'dataset_name')
+        snapshot_name = self._get_snapshot_name(snapshot['id'])
+        snapshot_name = dataset_name + '@' + snapshot_name
+        self.private_storage.update(
+            snapshot['id'], {
+                'entity_type': 'snapshot',
+                'snapshot_name': snapshot_name,
+            }
+        )
+        self.zfs('snapshot', snapshot_name)
+
+    @ensure_share_server_not_provided
+    def delete_snapshot(self, context, snapshot, share_server=None):
+        """Is called to remove a snapshot."""
+        snapshot_name = self.private_storage.get(
+            snapshot['id'], 'snapshot_name')
+        pool_name = snapshot_name.split('/')[0]
+
+        out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if datum['NAME'] == snapshot_name:
+                self._delete_dataset_or_snapshot_with_retry(snapshot_name)
+                break
+        else:
+            LOG.warning(
+                _LW("Snapshot with '%(id)s' ID and '%(name)s' NAME is "
+                    "absent on backend. Nothing has been deleted."),
+                {'id': snapshot['id'], 'name': snapshot_name})
+        self.private_storage.delete(snapshot['id'])
+
+    @ensure_share_server_not_provided
+    def create_share_from_snapshot(self, context, share, snapshot,
+                                   share_server=None):
+        """Is called to create a share from snapshot."""
+        dataset_name = self._get_dataset_name(share)
+        ssh_cmd = '%(username)s@%(host)s' % {
+            'username': self.configuration.zfs_ssh_username,
+            'host': self.service_ip,
+        }
+        pool_name = share_utils.extract_host(share['host'], level='pool')
+        self.private_storage.update(
+            share['id'], {
+                'entity_type': 'share',
+                'dataset_name': dataset_name,
+                'ssh_cmd': ssh_cmd,  # used in replication
+                'pool_name': pool_name,  # used in replication
+                'provided_options': 'Cloned from source',
+                'used_options': 'Cloned from source',
+            }
+        )
+        snapshot_name = self.private_storage.get(
+            snapshot['id'], 'snapshot_name')
+
+        self.zfs(
+            'clone', snapshot_name, dataset_name,
+            '-o', 'quota=%sG' % share['size'],
+        )
+
+        return self._get_share_helper(
+            share['share_proto']).create_exports(dataset_name)
+
+    def get_pool(self, share):
+        """Return pool name where the share resides on.
+
+        :param share: The share hosted by the driver.
+        """
+        pool_name = share_utils.extract_host(share['host'], level='pool')
+        return pool_name
+
+    @ensure_share_server_not_provided
+    def ensure_share(self, context, share, share_server=None):
+        """Invoked to ensure that given share is exported."""
+        dataset_name = self.private_storage.get(share['id'], 'dataset_name')
+        if not dataset_name:
+            dataset_name = self._get_dataset_name(share)
+
+        pool_name = share_utils.extract_host(share['host'], level='pool')
+        out, err = self.zfs('list', '-r', pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if datum['NAME'] == dataset_name:
+                sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
+                if sharenfs != 'off':
+                    self.zfs('share', dataset_name)
+                export_locations = self._get_share_helper(
+                    share['share_proto']).get_exports(dataset_name)
+                return export_locations
+        else:
+            raise exception.ShareResourceNotFound(share_id=share['id'])
+
+    def get_network_allocations_number(self):
+        """ZFS does not handle networking. Return 0."""
+        return 0
+
+    @ensure_share_server_not_provided
+    def extend_share(self, share, new_size, share_server=None):
+        """Extends size of existing share."""
+        dataset_name = self._get_dataset_name(share)
+        self.zfs('set', 'quota=%sG' % new_size, dataset_name)
+
+    @ensure_share_server_not_provided
+    def shrink_share(self, share, new_size, share_server=None):
+        """Shrinks size of existing share."""
+        dataset_name = self._get_dataset_name(share)
+        consumed_space = self.get_zfs_option(dataset_name, 'used')
+        consumed_space = utils.translate_string_size_to_float(consumed_space)
+        if consumed_space >= new_size:
+            raise exception.ShareShrinkingPossibleDataLoss(
+                share_id=share['id'])
+        self.zfs('set', 'quota=%sG' % new_size, dataset_name)
+
+    @ensure_share_server_not_provided
+    def update_access(self, context, share, access_rules, add_rules=None,
+                      delete_rules=None, share_server=None):
+        """Updates access rules for given share."""
+        dataset_name = self._get_dataset_name(share)
+        return self._get_share_helper(share['share_proto']).update_access(
+            dataset_name, access_rules, add_rules, delete_rules)
+
+    def unmanage(self, share):
+        """Removes the specified share from Manila management."""
+        self.private_storage.delete(share['id'])
+
+    def _get_replication_snapshot_prefix(self, replica):
+        """Returns replica-based snapshot prefix."""
+        replication_snapshot_prefix = "%s_%s" % (
+            self.replica_snapshot_prefix, replica['id'].replace('-', '_'))
+        return replication_snapshot_prefix
+
+    def _get_replication_snapshot_tag(self, replica):
+        """Returns replica- and time-based snapshot tag."""
+        current_time = timeutils.utcnow().isoformat()
+        snapshot_tag = "%s_time_%s" % (
+            self._get_replication_snapshot_prefix(replica), current_time)
+        return snapshot_tag
+
+    def _get_active_replica(self, replica_list):
+        for replica in replica_list:
+            if replica['replica_state'] == constants.REPLICA_STATE_ACTIVE:
+                return replica
+        msg = _("Active replica not found.")
+        raise exception.ReplicationException(reason=msg)
+
+    @ensure_share_server_not_provided
+    def create_replica(self, context, replica_list, new_replica,
+                       access_rules, share_server=None):
+        """Replicates the active replica to a new replica on this backend."""
+        active_replica = self._get_active_replica(replica_list)
+        src_dataset_name = self.private_storage.get(
+            active_replica['id'], 'dataset_name')
+        ssh_to_src_cmd = self.private_storage.get(
+            active_replica['id'], 'ssh_cmd')
+        dst_dataset_name = self._get_dataset_name(new_replica)
+
+        ssh_cmd = '%(username)s@%(host)s' % {
+            'username': self.configuration.zfs_ssh_username,
+            'host': self.service_ip,
+        }
+
+        snapshot_tag = self._get_replication_snapshot_tag(new_replica)
+        src_snapshot_name = (
+            '%(dataset_name)s@%(snapshot_tag)s' % {
+                'snapshot_tag': snapshot_tag,
+                'dataset_name': src_dataset_name,
+            }
+        )
+        # Save valuable data to DB
+        self.private_storage.update(active_replica['id'], {
+            'repl_snapshot_tag': snapshot_tag,
+        })
+        self.private_storage.update(new_replica['id'], {
+            'entity_type': 'replica',
+            'replica_type': 'readable',
+            'dataset_name': dst_dataset_name,
+            'ssh_cmd': ssh_cmd,
+            'pool_name': share_utils.extract_host(
+                new_replica['host'], level='pool'),
+            'repl_snapshot_tag': snapshot_tag,
+        })
+
+        # Create temporary snapshot. It will exist until the following
+        # replica sync; after that a new one appears, and so on in a loop.
+        self.execute(
+            'ssh', ssh_to_src_cmd,
+            'sudo', 'zfs', 'snapshot', src_snapshot_name,
+        )
+
+        # Send/receive temporary snapshot
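+        # NOTE: the pipe below is an argument to the remote shell, so the
+        # 'zfs send | ssh ... zfs receive' pipeline runs on the source host,
+        # streaming the snapshot directly to this replica's host.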
+        out, err = self.execute(
+            'ssh', ssh_to_src_cmd,
+            'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|',
+            'ssh', ssh_cmd,
+            'sudo', 'zfs', 'receive', '-v', dst_dataset_name,
+        )
+        msg = ("Info about replica '%(replica_id)s' creation is following: "
+               "\n%(out)s")
+        LOG.debug(msg, {'replica_id': new_replica['id'], 'out': out})
+
+        # Make replica readonly
+        self.zfs('set', 'readonly=on', dst_dataset_name)
+
+        # Set original share size as quota to new replica
+        self.zfs('set', 'quota=%sG' % active_replica['size'], dst_dataset_name)
+
+        # Apply access rules from original share
+        self._get_share_helper(new_replica['share_proto']).update_access(
+            dst_dataset_name, access_rules, make_all_ro=True)
+
+        return {
+            'export_locations': self._get_share_helper(
+                new_replica['share_proto']).create_exports(dst_dataset_name),
+            'replica_state': constants.REPLICA_STATE_IN_SYNC,
+            'access_rules_status': constants.STATUS_ACTIVE,
+        }
+
+    @ensure_share_server_not_provided
+    def delete_replica(self, context, replica_list, replica,
+                       share_server=None):
+        """Deletes a replica. This is called on the destination backend."""
+        pool_name = self.private_storage.get(replica['id'], 'pool_name')
+        dataset_name = self.private_storage.get(replica['id'], 'dataset_name')
+        if not dataset_name:
+            dataset_name = self._get_dataset_name(replica)
+
+        # Delete dataset's snapshots first
+        out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if dataset_name in datum['NAME']:
+                self._delete_dataset_or_snapshot_with_retry(datum['NAME'])
+
+        # Now we delete dataset itself
+        out, err = self.zfs('list', '-r', pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if datum['NAME'] == dataset_name:
+                self._get_share_helper(
+                    replica['share_proto']).remove_exports(dataset_name)
+                self._delete_dataset_or_snapshot_with_retry(dataset_name)
+                break
+        else:
+            LOG.warning(
+                _LW("Share replica with '%(id)s' ID and '%(name)s' NAME is "
+                    "absent on backend. Nothing has been deleted."),
+                {'id': replica['id'], 'name': dataset_name})
+        self.private_storage.delete(replica['id'])
+
+    @ensure_share_server_not_provided
+    def update_replica_state(self, context, replica_list, replica,
+                             access_rules, share_server=None):
+        """Syncs replica and updates its 'replica_state'."""
+        active_replica = self._get_active_replica(replica_list)
+        src_dataset_name = self.private_storage.get(
+            active_replica['id'], 'dataset_name')
+        ssh_to_src_cmd = self.private_storage.get(
+            active_replica['id'], 'ssh_cmd')
+        ssh_to_dst_cmd = self.private_storage.get(
+            replica['id'], 'ssh_cmd')
+        dst_dataset_name = self.private_storage.get(
+            replica['id'], 'dataset_name')
+
+        # Create temporary snapshot
+        previous_snapshot_tag = self.private_storage.get(
+            replica['id'], 'repl_snapshot_tag')
+        snapshot_tag = self._get_replication_snapshot_tag(replica)
+        src_snapshot_name = src_dataset_name + '@' + snapshot_tag
+        self.execute(
+            'ssh', ssh_to_src_cmd,
+            'sudo', 'zfs', 'snapshot', src_snapshot_name,
+        )
+
+        # Make sure it is readonly
+        self.zfs('set', 'readonly=on', dst_dataset_name)
+
+        # Send/receive diff between previous snapshot and last one
+        out, err = self.execute(
+            'ssh', ssh_to_src_cmd,
+            'sudo', 'zfs', 'send', '-vDRI',
+            previous_snapshot_tag, src_snapshot_name, '|',
+            'ssh', ssh_to_dst_cmd,
+            'sudo', 'zfs', 'receive', '-vF', dst_dataset_name,
+        )
+        msg = ("Info about last replica '%(replica_id)s' sync is following: "
+               "\n%(out)s")
+        LOG.debug(msg, {'replica_id': replica['id'], 'out': out})
+
+        # Update DB data that will be used on following replica sync
+        self.private_storage.update(active_replica['id'], {
+            'repl_snapshot_tag': snapshot_tag,
+        })
+        self.private_storage.update(
+            replica['id'], {'repl_snapshot_tag': snapshot_tag})
+
+        # Destroy all snapshots on dst filesystem except referenced ones.
+        snap_references = set()
+        for repl in replica_list:
+            snap_references.add(
+                self.private_storage.get(repl['id'], 'repl_snapshot_tag'))
+
+        dst_pool_name = dst_dataset_name.split('/')[0]
+        out, err = self.zfs('list', '-r', '-t', 'snapshot', dst_pool_name)
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if (dst_dataset_name in datum['NAME'] and
+                    datum['NAME'].split('@')[-1] not in snap_references):
+                self._delete_dataset_or_snapshot_with_retry(datum['NAME'])
+
+        # Destroy all snapshots on src filesystem except referenced ones.
+        src_pool_name = src_snapshot_name.split('/')[0]
+        out, err = self.execute(
+            'ssh', ssh_to_src_cmd,
+            'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', src_pool_name,
+        )
+        data = self.parse_zfs_answer(out)
+        full_src_snapshot_prefix = (
+            src_dataset_name + '@' +
+            self._get_replication_snapshot_prefix(replica))
+        for datum in data:
+            if (full_src_snapshot_prefix in datum['NAME'] and
+                    datum['NAME'].split('@')[-1] not in snap_references):
+                self.execute_with_retry(
+                    'ssh', ssh_to_src_cmd,
+                    'sudo', 'zfs', 'destroy', '-f', datum['NAME'],
+                )
+
+        # Apply access rules from original share
+        # TODO(vponomaryov): we should remove somehow rules that were
+        # deleted on active replica after creation of secondary replica.
+        # For the moment there will be difference and it can be considered
+        # as a bug.
+        self._get_share_helper(replica['share_proto']).update_access(
+            dst_dataset_name, access_rules, make_all_ro=True)
+
+        # Return results
+        return constants.REPLICA_STATE_IN_SYNC
+
+    @ensure_share_server_not_provided
+    def promote_replica(self, context, replica_list, replica, access_rules,
+                        share_server=None):
+        """Promotes secondary replica to active and active to secondary."""
+        active_replica = self._get_active_replica(replica_list)
+        src_dataset_name = self.private_storage.get(
+            active_replica['id'], 'dataset_name')
+        ssh_to_src_cmd = self.private_storage.get(
+            active_replica['id'], 'ssh_cmd')
+        dst_dataset_name = self.private_storage.get(
+            replica['id'], 'dataset_name')
+        replica_dict = {
+            r['id']: {
+                'id': r['id'],
+                # NOTE(vponomaryov): access rules will be updated in next
+                # 'sync' operation.
+                'access_rules_status': constants.STATUS_OUT_OF_SYNC,
+            }
+            for r in replica_list
+        }
+        try:
+            # Mark currently active replica as readonly
+            self.execute(
+                'ssh', ssh_to_src_cmd,
+                'sudo', 'zfs', 'set', 'readonly=on', src_dataset_name,
+            )
+
+            # Create temporary snapshot of currently active replica
+            snapshot_tag = self._get_replication_snapshot_tag(active_replica)
+            src_snapshot_name = src_dataset_name + '@' + snapshot_tag
+            self.execute(
+                'ssh', ssh_to_src_cmd,
+                'sudo', 'zfs', 'snapshot', src_snapshot_name,
+            )
+
+            # Apply temporary snapshot to all replicas
+            for repl in replica_list:
+                if repl['replica_state'] == constants.REPLICA_STATE_ACTIVE:
+                    continue
+                previous_snapshot_tag = self.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag')
+                dataset_name = self.private_storage.get(
+                    repl['id'], 'dataset_name')
+                ssh_to_dst_cmd = self.private_storage.get(
+                    repl['id'], 'ssh_cmd')
+
+                try:
+                    # Send/receive diff between previous snapshot and last one
+                    out, err = self.execute(
+                        'ssh', ssh_to_src_cmd,
+                        'sudo', 'zfs', 'send', '-vDRI',
+                        previous_snapshot_tag, src_snapshot_name, '|',
+                        'ssh', ssh_to_dst_cmd,
+                        'sudo', 'zfs', 'receive', '-vF', dataset_name,
+                    )
+                except exception.ProcessExecutionError as e:
+                    LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
+                                {'id': repl['id'], 'e': e})
+                    replica_dict[repl['id']]['replica_state'] = (
+                        constants.REPLICA_STATE_OUT_OF_SYNC)
+                    continue
+
+                msg = ("Info about last replica '%(replica_id)s' "
+                       "sync is following: \n%(out)s")
+                LOG.debug(msg, {'replica_id': repl['id'], 'out': out})
+
+                # Update latest replication snapshot for replica
+                self.private_storage.update(
+                    repl['id'], {'repl_snapshot_tag': snapshot_tag})
+
+            # Update latest replication snapshot for currently active replica
+            self.private_storage.update(
+                active_replica['id'], {'repl_snapshot_tag': snapshot_tag})
+
+            replica_dict[active_replica['id']]['replica_state'] = (
+                constants.REPLICA_STATE_IN_SYNC)
+        except Exception as e:
+            LOG.warning(
+                _LW("Failed to update currently active replica. \n%s"), e)
+
+            replica_dict[active_replica['id']]['replica_state'] = (
+                constants.REPLICA_STATE_OUT_OF_SYNC)
+
+            # Create temporary snapshot of new replica and sync it with other
+            # secondary replicas.
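+            # NOTE: in this failure path the old active replica is
+            # assumed unreachable, so the replica being promoted becomes
+            # the source for syncing the remaining secondary replicas.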
+            snapshot_tag = self._get_replication_snapshot_tag(replica)
+            src_snapshot_name = dst_dataset_name + '@' + snapshot_tag
+            ssh_to_src_cmd = self.private_storage.get(replica['id'], 'ssh_cmd')
+            self.zfs('snapshot', src_snapshot_name)
+            for repl in replica_list:
+                if (repl['replica_state'] == constants.REPLICA_STATE_ACTIVE or
+                        repl['id'] == replica['id']):
+                    continue
+                previous_snapshot_tag = self.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag')
+                dataset_name = self.private_storage.get(
+                    repl['id'], 'dataset_name')
+                ssh_to_dst_cmd = self.private_storage.get(
+                    repl['id'], 'ssh_cmd')
+
+                try:
+                    # Send/receive diff between previous snapshot and last one
+                    out, err = self.execute(
+                        'ssh', ssh_to_src_cmd,
+                        'sudo', 'zfs', 'send', '-vDRI',
+                        previous_snapshot_tag, src_snapshot_name, '|',
+                        'ssh', ssh_to_dst_cmd,
+                        'sudo', 'zfs', 'receive', '-vF', dataset_name,
+                    )
+                except exception.ProcessExecutionError as e:
+                    LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"),
+                                {'id': repl['id'], 'e': e})
+                    replica_dict[repl['id']]['replica_state'] = (
+                        constants.REPLICA_STATE_OUT_OF_SYNC)
+                    continue
+
+                msg = ("Info about last replica '%(replica_id)s' "
+                       "sync is following: \n%(out)s")
+                LOG.debug(msg, {'replica_id': repl['id'], 'out': out})
+
+                # Update latest replication snapshot for replica
+                self.private_storage.update(
+                    repl['id'], {'repl_snapshot_tag': snapshot_tag})
+
+            # Update latest replication snapshot for new active replica
+            self.private_storage.update(
+                replica['id'], {'repl_snapshot_tag': snapshot_tag})
+
+        replica_dict[replica['id']]['replica_state'] = (
+            constants.REPLICA_STATE_ACTIVE)
+
+        self._get_share_helper(replica['share_proto']).update_access(
+            dst_dataset_name, access_rules)
+
+        replica_dict[replica['id']]['access_rules_status'] = (
+            constants.STATUS_ACTIVE)
+
+        self.zfs('set', 'readonly=off', dst_dataset_name)
+
+        return list(replica_dict.values())
diff --git a/manila/share/drivers/zfsonlinux/utils.py b/manila/share/drivers/zfsonlinux/utils.py
new file mode 100644
index 0000000000..d6803f2525
--- /dev/null
+++ b/manila/share/drivers/zfsonlinux/utils.py
@@ -0,0 +1,283 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Module for ZFSonLinux driver utilities such as:
+ - Common ZFS code
+ - Share helpers
+"""
+
+# TODO(vponomaryov): add support for Samba
+
+import abc
+
+from oslo_log import log
+import six
+
+from manila.common import constants
+from manila import exception
+from manila.i18n import _, _LI, _LW
+from manila.share import driver
+from manila.share.drivers.ganesha import utils as ganesha_utils
+from manila import utils
+
+LOG = log.getLogger(__name__)
+
+
+def zfs_dataset_synchronized(f):
+
+    def wrapped_func(self, *args, **kwargs):
+        key = "zfs-dataset-%s" % args[0]
+
+        @utils.synchronized(key, external=True)
+        def source_func(self, *args, **kwargs):
+            return f(self, *args, **kwargs)
+
+        return source_func(self, *args, **kwargs)
+
+    return wrapped_func
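+
+# Example usage (illustrative): decorating a method whose first
+# argument is a dataset name, as the share helpers below do, serializes
+# concurrent operations on the same dataset through an external
+# (inter-process) lock:
+#
+#     @zfs_dataset_synchronized
+#     def remove_exports(self, dataset_name):
+#         ...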
+
+
+class ExecuteMixin(driver.ExecuteMixin):
+
+    def init_execute_mixin(self, *args, **kwargs):
+        """Init method for mixin called in the end of driver's __init__()."""
+        super(ExecuteMixin, self).init_execute_mixin(*args, **kwargs)
+        if self.configuration.zfs_use_ssh:
+            self.ssh_executor = ganesha_utils.SSHExecutor(
+                ip=self.configuration.zfs_service_ip,
+                port=22,
+                conn_timeout=self.configuration.ssh_conn_timeout,
+                login=self.configuration.zfs_ssh_username,
+                password=self.configuration.zfs_ssh_user_password,
+                privatekey=self.configuration.zfs_ssh_private_key_path,
+                max_size=10,
+            )
+        else:
+            self.ssh_executor = None
+
+    def execute(self, *cmd, **kwargs):
+        """Common interface for running shell commands."""
+        executor = self._execute
+        if self.ssh_executor:
+            executor = self.ssh_executor
+        if cmd[0] == 'sudo':
+            kwargs['run_as_root'] = True
+            cmd = cmd[1:]
+        return executor(*cmd, **kwargs)
+
+    @utils.retry(exception.ProcessExecutionError,
+                 interval=5, retries=36, backoff_rate=1)
+    def execute_with_retry(self, *cmd, **kwargs):
+        """Retry wrapper over common shell interface."""
+        try:
+            return self.execute(*cmd, **kwargs)
+        except exception.ProcessExecutionError as e:
+            LOG.warning(_LW("Failed to run command, got error: %s"), e)
+            raise
+
+    def _get_option(self, resource_name, option_name, pool_level=False):
+        """Returns value of requested zpool or zfs dataset option."""
+        app = 'zpool' if pool_level else 'zfs'
+
+        out, err = self.execute('sudo', app, 'get', option_name, resource_name)
+
+        data = self.parse_zfs_answer(out)
+        option = data[0]['VALUE']
+        return option
+
+    def parse_zfs_answer(self, string):
+        """Returns list of dicts with data returned by ZFS shell commands."""
+        lines = string.split('\n')
+        if len(lines) < 2:
+            return []
+        keys = list(filter(None, lines[0].split(' ')))
+        data = []
+        for line in lines[1:]:
+            values = list(filter(None, line.split(' ')))
+            if not values:
+                continue
+            data.append(dict(zip(keys, values)))
+        return data
+
+    def get_zpool_option(self, zpool_name, option_name):
+        """Returns value of requested zpool option."""
+        return self._get_option(zpool_name, option_name, True)
+
+    def get_zfs_option(self, dataset_name, option_name):
+        """Returns value of requested zfs dataset option."""
+        return self._get_option(dataset_name, option_name, False)
+
+    def zfs(self, *cmd, **kwargs):
+        """ZFS shell commands executor."""
+        return self.execute('sudo', 'zfs', *cmd, **kwargs)
+
+    def zfs_with_retry(self, *cmd, **kwargs):
+        """ZFS shell commands executor with retries."""
+        return self.execute_with_retry('sudo', 'zfs', *cmd, **kwargs)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NASHelperBase(object):
+    """Base class for share helpers of 'ZFS on Linux' driver."""
+
+    def __init__(self, configuration):
+        """Init share helper.
+
+        :param configuration: share driver 'configuration' instance
+        """
+        self.configuration = configuration
+        self.init_execute_mixin()  # pylint: disable=E1101
+        self.verify_setup()
+
+    @abc.abstractmethod
+    def verify_setup(self):
+        """Performs checks for required stuff."""
+
+    @abc.abstractmethod
+    def create_exports(self, dataset_name):
+        """Creates share exports."""
+
+    @abc.abstractmethod
+    def get_exports(self, dataset_name):
+        """Gets/reads share exports."""
+
+    @abc.abstractmethod
+    def remove_exports(self, dataset_name):
+        """Removes share exports."""
+
+    @abc.abstractmethod
+    def update_access(self, dataset_name, access_rules, add_rules=None,
+                      delete_rules=None):
+        """Update access rules for specified ZFS dataset."""
+
+
+class NFSviaZFSHelper(ExecuteMixin, NASHelperBase):
+    """Helper class for handling ZFS datasets as NFS shares.
+
+    Kernel and FUSE versions of ZFS use different syntax for setting up
+    access rules; this helper handles both by auto-detecting which
+    version is installed.
+    """
+
+    @property
+    def is_kernel_version(self):
+        """Says whether Kernel version of ZFS is used or not."""
+        if not hasattr(self, '_is_kernel_version'):
+            try:
+                self.execute('modinfo', 'zfs')
+                self._is_kernel_version = True
+            except exception.ProcessExecutionError as e:
+                LOG.info(
+                    _LI("Looks like ZFS kernel module is absent. "
+                        "Assuming FUSE version is installed. Error: %s"), e)
+                self._is_kernel_version = False
+        return self._is_kernel_version
+
+    def verify_setup(self):
+        """Performs checks for required stuff."""
+        out, err = self.execute('which', 'exportfs')
+        if not out:
+            raise exception.ZFSonLinuxException(
+                msg=_("Utility 'exportfs' is not installed."))
+        try:
+            self.execute('sudo', 'exportfs')
+        except exception.ProcessExecutionError as e:
+            msg = _("Call of 'exportfs' utility returned error: %s")
+            LOG.exception(msg, e)
+            raise
+
+    def create_exports(self, dataset_name):
+        """Creates NFS share exports for given ZFS dataset."""
+        return self.get_exports(dataset_name)
+
+    def get_exports(self, dataset_name):
+        """Gets/reads NFS share export for given ZFS dataset."""
+        mountpoint = self.get_zfs_option(dataset_name, 'mountpoint')
+        return [
+            {
+                "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint},
+                "metadata": {
+                },
+                "is_admin_only": is_admin_only,
+            } for ip, is_admin_only in (
+                (self.configuration.zfs_share_export_ip, False),
+                (self.configuration.zfs_service_ip, True))
+        ]
+
+    @zfs_dataset_synchronized
+    def remove_exports(self, dataset_name):
+        """Removes NFS share exports for given ZFS dataset."""
+        sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
+        if sharenfs == 'off':
+            return
+        self.zfs("set", "sharenfs=off", dataset_name)
+
+    @zfs_dataset_synchronized
+    def update_access(self, dataset_name, access_rules, add_rules=None,
+                      delete_rules=None, make_all_ro=False):
+        """Update access rules for given ZFS dataset exported as NFS share."""
+        rw_rules = []
+        ro_rules = []
+        for rule in access_rules:
+            if rule['access_type'].lower() != 'ip':
+                msg = _("Only IP access type allowed for NFS protocol.")
+                raise exception.InvalidShareAccess(reason=msg)
+            if (rule['access_level'] == constants.ACCESS_LEVEL_RW and
+                    not make_all_ro):
+                rw_rules.append(rule['access_to'])
+            elif (rule['access_level'] in (constants.ACCESS_LEVEL_RW,
+                                           constants.ACCESS_LEVEL_RO)):
+                ro_rules.append(rule['access_to'])
+            else:
+                msg = _("Unsupported access level provided - "
+                        "%s.") % rule['access_level']
+                raise exception.InvalidShareAccess(reason=msg)
+
+        rules = []
+        if self.is_kernel_version:
+            if rw_rules:
+                rules.append(
+                    "rw=%s,no_root_squash" % ":".join(rw_rules))
+            if ro_rules:
+                rules.append("ro=%s,no_root_squash" % ":".join(ro_rules))
+            rules_str = "sharenfs=" + (','.join(rules) or 'off')
+        else:
+            for rule in rw_rules:
+                rules.append("%s:rw,no_root_squash" % rule)
+            for rule in ro_rules:
+                rules.append("%s:ro,no_root_squash" % rule)
+            rules_str = "sharenfs=" + (' '.join(rules) or 'off')
+
+        out, err = self.zfs('list', '-r', dataset_name.split('/')[0])
+        data = self.parse_zfs_answer(out)
+        for datum in data:
+            if datum['NAME'] == dataset_name:
+                self.zfs("set", rules_str, dataset_name)
+                break
+        else:
+            LOG.warning(
+                _LW("Dataset with '%(name)s' NAME is absent on backend. "
+                    "Access rules were not applied."), {'name': dataset_name})
+
+        # NOTE(vponomaryov): setting ZFS share options does not remove
+        # rules that were added earlier and have since been deleted, so
+        # remove them explicitly here.
+        if delete_rules and access_rules:
+            mountpoint = self.get_zfs_option(dataset_name, 'mountpoint')
+            for rule in delete_rules:
+                if rule['access_type'].lower() != 'ip':
+                    continue
+                export_location = rule['access_to'] + ':' + mountpoint
+                self.execute('sudo', 'exportfs', '-u', export_location)
diff --git a/manila/tests/conf_fixture.py b/manila/tests/conf_fixture.py
index d92622e2f1..e8495fef1b 100644
--- a/manila/tests/conf_fixture.py
+++ b/manila/tests/conf_fixture.py
@@ -43,6 +43,12 @@ def set_defaults(conf):
                       'manila.tests.fake_driver.FakeShareDriver')
     _safe_set_of_opts(conf, 'auth_strategy', 'noauth')
 
+    _safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1')
+    _safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2')
+    _safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar'])
+    _safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper')
+    _safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_')
+
 
 def _safe_set_of_opts(conf, *args, **kwargs):
     try:
diff --git a/manila/tests/share/drivers/zfsonlinux/__init__.py b/manila/tests/share/drivers/zfsonlinux/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/manila/tests/share/drivers/zfsonlinux/test_driver.py b/manila/tests/share/drivers/zfsonlinux/test_driver.py
new file mode 100644
index 0000000000..dfe340f91e
--- /dev/null
+++ b/manila/tests/share/drivers/zfsonlinux/test_driver.py
@@ -0,0 +1,1430 @@
+# Copyright (c) 2016 Mirantis, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ddt
+import mock
+from oslo_config import cfg
+
+from manila import context
+from manila import exception
+from manila.share.drivers.ganesha import utils as ganesha_utils
+from manila.share.drivers.zfsonlinux import driver as zfs_driver
+from manila import test
+
+CONF = cfg.CONF
+
+
+class FakeConfig(object):
+    def __init__(self, *args, **kwargs):
+        self.driver_handles_share_servers = False
+        self.share_backend_name = 'FAKE_BACKEND_NAME'
+        self.zfs_share_export_ip = kwargs.get(
+            "zfs_share_export_ip", "1.1.1.1")
+        self.zfs_service_ip = kwargs.get("zfs_service_ip", "2.2.2.2")
+        self.zfs_zpool_list = kwargs.get(
+            "zfs_zpool_list", ["foo", "bar/subbar", "quuz"])
+        self.zfs_use_ssh = kwargs.get("zfs_use_ssh", False)
+        self.ssh_conn_timeout = kwargs.get("ssh_conn_timeout", 123)
+        self.zfs_ssh_username = kwargs.get(
+            "zfs_ssh_username", 'fake_username')
+        self.zfs_ssh_user_password = kwargs.get(
+            "zfs_ssh_user_password", 'fake_pass')
+        self.zfs_ssh_private_key_path = kwargs.get(
+            "zfs_ssh_private_key_path", '/fake/path')
+        self.zfs_replica_snapshot_prefix = kwargs.get(
+            "zfs_replica_snapshot_prefix", "tmp_snapshot_for_replication_")
+        self.zfs_dataset_creation_options = kwargs.get(
+            "zfs_dataset_creation_options", ["fook=foov", "bark=barv"])
+        self.network_config_group = kwargs.get(
+            "network_config_group", "fake_network_config_group")
+        self.admin_network_config_group = kwargs.get(
+            "admin_network_config_group", "fake_admin_network_config_group")
+        self.config_group = kwargs.get("config_group", "fake_config_group")
+        self.reserved_share_percentage = kwargs.get(
+            "reserved_share_percentage", 0)
+
+    def safe_get(self, key):
+        return getattr(self, key)
+
+    def append_config_values(self, *args, **kwargs):
+        pass
+
+
+class FakeDriverPrivateStorage(object):
+
+    def __init__(self):
+        self.storage = {}
+
+    def update(self, entity_id, data):
+        if entity_id not in self.storage:
+            self.storage[entity_id] = {}
+        self.storage[entity_id].update(data)
+
+    def get(self, entity_id, key):
+        return self.storage.get(entity_id, {}).get(key)
+
+    def delete(self, entity_id):
+        self.storage.pop(entity_id, None)
+
+
+@ddt.ddt
+class ZFSonLinuxShareDriverTestCase(test.TestCase):
+
+    def setUp(self):
+        self.mock_object(zfs_driver.CONF, '_check_required_opts')
+        super(ZFSonLinuxShareDriverTestCase, self).setUp()
+        self._context = context.get_admin_context()
+        self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor')
+        self.configuration = FakeConfig()
+        self.private_storage = FakeDriverPrivateStorage()
+        self.driver = zfs_driver.ZFSonLinuxShareDriver(
+            configuration=self.configuration,
+            private_storage=self.private_storage)
+
+    def test_init(self):
+        self.assertTrue(hasattr(self.driver, 'replica_snapshot_prefix'))
+        self.assertEqual(
+            self.driver.replica_snapshot_prefix,
+            self.configuration.zfs_replica_snapshot_prefix)
+        self.assertEqual(
+            self.driver.backend_name,
+            self.configuration.share_backend_name)
+        self.assertEqual(
+            self.driver.zpool_list, ['foo', 'bar', 'quuz'])
+        self.assertEqual(
+            self.driver.dataset_creation_options,
+            self.configuration.zfs_dataset_creation_options)
+        self.assertEqual(
+            self.driver.share_export_ip,
+            self.configuration.zfs_share_export_ip)
+        self.assertEqual(
+            self.driver.service_ip,
+            self.configuration.zfs_service_ip)
+        self.assertEqual(
+            self.driver.private_storage,
+            self.private_storage)
+        self.assertTrue(hasattr(self.driver, '_helpers'))
+        self.assertEqual(self.driver._helpers, {})
+        for attr_name in ('execute', 'execute_with_retry', 'parse_zfs_answer',
+                          'get_zpool_option', 'get_zfs_option', 'zfs',
+                          'zfs_with_retry'):
+            self.assertTrue(hasattr(self.driver, attr_name))
+
+    def test_init_error_with_duplicated_zpools(self):
+        configuration = FakeConfig(
+            zfs_zpool_list=['foo', 'bar', 'foo/quuz'])
+        self.assertRaises(
+            exception.BadConfigurationException,
+            zfs_driver.ZFSonLinuxShareDriver,
+            configuration=configuration,
+            private_storage=self.private_storage
+        )
+
+    def test__setup_helpers(self):
+        mock_import_class = self.mock_object(
+            zfs_driver.importutils, 'import_class')
+        self.configuration.zfs_share_helpers = ['FOO=foo.module.WithHelper']
+
+        result = self.driver._setup_helpers()
+
+        self.assertIsNone(result)
+        mock_import_class.assert_called_once_with('foo.module.WithHelper')
+        mock_import_class.return_value.assert_called_once_with(
+            self.configuration)
+        self.assertEqual(
+            self.driver._helpers,
+            {'FOO': mock_import_class.return_value.return_value})
+
+    def test__setup_helpers_error(self):
+        self.configuration.zfs_share_helpers = []
+        self.assertRaises(
+            exception.BadConfigurationException, self.driver._setup_helpers)
+
+    def test__get_share_helper(self):
+        self.driver._helpers = {'FOO': 'BAR'}
+
+        result = self.driver._get_share_helper('FOO')
+
+        self.assertEqual('BAR', result)
+
+    @ddt.data({}, {'foo': 'bar'})
+    def test__get_share_helper_error(self, helpers):
+        self.driver._helpers = helpers
+
+        self.assertRaises(
+            exception.InvalidShare, self.driver._get_share_helper, 'NFS')
+
+    @ddt.data(True, False)
+    def test_do_setup(self, use_ssh):
+        self.mock_object(self.driver, '_setup_helpers')
+        self.mock_object(self.driver, 'ssh_executor')
+        self.configuration.zfs_use_ssh = use_ssh
+
+        self.driver.do_setup('fake_context')
+
+        self.driver._setup_helpers.assert_called_once_with()
+        if use_ssh:
+            self.driver.ssh_executor.assert_called_once_with('whoami')
+        else:
+            self.assertEqual(0, self.driver.ssh_executor.call_count)
+
+    @ddt.data(
+        ('foo', '127.0.0.1'),
+        ('127.0.0.1', 'foo'),
+        ('256.0.0.1', '127.0.0.1'),
+        ('::1/128', '127.0.0.1'),
+        ('127.0.0.1', '::1/128'),
+    )
+    @ddt.unpack
+    def test_do_setup_error_on_ip_addresses_configuration(
+            self, share_export_ip, service_ip):
+        self.mock_object(self.driver, '_setup_helpers')
+        self.driver.share_export_ip = share_export_ip
+        self.driver.service_ip = service_ip
+
+        self.assertRaises(
+            exception.BadConfigurationException,
+            self.driver.do_setup, 'fake_context')
+
+        self.driver._setup_helpers.assert_called_once_with()
+
+    @ddt.data([], '', None)
+    def test_do_setup_no_zpools_configured(self, zpool_list):
+        self.mock_object(self.driver, '_setup_helpers')
+        self.driver.zpool_list = zpool_list
+
+        self.assertRaises(
+            exception.BadConfigurationException,
+            self.driver.do_setup, 'fake_context')
+
+        self.driver._setup_helpers.assert_called_once_with()
+
+    @ddt.data(None, '', 'foo_replication_domain')
+    def test__get_pools_info(self, replication_domain):
+        self.mock_object(
+            self.driver, 'get_zpool_option',
+            mock.Mock(side_effect=['2G', '3G', '5G', '4G']))
+        self.configuration.replication_domain = replication_domain
+        self.driver.zpool_list = ['foo', 'bar']
+        expected = [
+            {'pool_name': 'foo', 'total_capacity_gb': 3.0,
+             'free_capacity_gb': 2.0, 'reserved_percentage': 0},
+            {'pool_name': 'bar', 'total_capacity_gb': 4.0,
+             'free_capacity_gb': 5.0, 'reserved_percentage': 0},
+        ]
+        if replication_domain:
+            for pool in expected:
+                pool['replication_type'] = 'readable'
+
+        result = self.driver._get_pools_info()
+
+        self.assertEqual(expected, result)
+        self.driver.get_zpool_option.assert_has_calls([
+            mock.call('foo', 'free'),
+            mock.call('foo', 'size'),
+            mock.call('bar', 'free'),
+            mock.call('bar', 'size'),
+        ])
+
+    @ddt.data(None, '', 'foo_replication_domain')
+    def test__update_share_stats(self, replication_domain):
+        self.configuration.replication_domain = replication_domain
+        self.mock_object(self.driver, '_get_pools_info')
+        self.assertEqual({}, self.driver._stats)
+        expected = {
+            'consistency_group_support': None,
+            'driver_handles_share_servers': False,
+            'driver_name': 'ZFS',
+            'driver_version': '1.0',
+            'free_capacity_gb': 'unknown',
+            'pools': self.driver._get_pools_info.return_value,
+            'qos': False,
+            'replication_domain': replication_domain,
+            'reserved_percentage': 0,
+            'share_backend_name': self.driver.backend_name,
+            'snapshot_support': True,
+            'storage_protocol': 'NFS',
+            'total_capacity_gb': 'unknown',
+            'vendor_name': 'Open Source',
+        }
+        if replication_domain:
+            expected['replication_type'] = 'readable'
+
+        self.driver._update_share_stats()
+
+        self.assertEqual(expected, self.driver._stats)
+        self.driver._get_pools_info.assert_called_once_with()
+
+    @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz')
+    def test__get_share_name(self, share_id):
+        prefix = 'fake_prefix_'
+        self.configuration.zfs_dataset_name_prefix = prefix
+        self.configuration.zfs_dataset_snapshot_name_prefix = 'quuz'
+        expected = prefix + share_id.replace('-', '_')
+
+        result = self.driver._get_share_name(share_id)
+
+        self.assertEqual(expected, result)
+
+    @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz')
+    def test__get_snapshot_name(self, snapshot_id):
+        prefix = 'fake_prefix_'
+        self.configuration.zfs_dataset_name_prefix = 'quuz'
+        self.configuration.zfs_dataset_snapshot_name_prefix = prefix
+        expected = prefix + snapshot_id.replace('-', '_')
+
+        result = self.driver._get_snapshot_name(snapshot_id)
+
+        self.assertEqual(expected, result)
+
+    def test__get_dataset_creation_options_not_set(self):
+        self.driver.dataset_creation_options = []
+
+        result = self.driver._get_dataset_creation_options(share={})
+
+        self.assertEqual([], result)
+
+    @ddt.data(True, False)
+    def test__get_dataset_creation_options(self, is_readonly):
+        self.driver.dataset_creation_options = [
+            'readonly=quuz', 'sharenfs=foo', 'sharesmb=bar', 'k=v', 'q=w',
+        ]
+        share = {'size': 5}
+        readonly = 'readonly=%s' % ('on' if is_readonly else 'off')
+        expected = [readonly, 'k=v', 'q=w', 'quota=5G']
+
+        result = self.driver._get_dataset_creation_options(
+            share=share, is_readonly=is_readonly)
+
+        self.assertEqual(sorted(expected), sorted(result))
+
+    @ddt.data('bar/quuz', 'bar/quuz/', 'bar')
+    def test__get_dataset_name(self, second_zpool):
+        self.configuration.zfs_zpool_list = ['foo', second_zpool]
+        prefix = 'fake_prefix_'
+        self.configuration.zfs_dataset_name_prefix = prefix
+        share = {'id': 'abc-def_ghi', 'host': 'hostname@backend_name#bar'}
+
+        result = self.driver._get_dataset_name(share)
+
+        if second_zpool[-1] == '/':
+            second_zpool = second_zpool[0:-1]
+        expected = '%s/%sabc_def_ghi' % (second_zpool, prefix)
+        self.assertEqual(expected, result)
+
+    def test_create_share(self):
+        mock_get_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(self.driver, 'zfs')
+        context = 'fake_context'
+        share = {
+            'id': 'fake_share_id',
+            'host': 'hostname@backend_name#bar',
+            'share_proto': 'NFS',
+            'size': 4,
+        }
+        self.configuration.zfs_dataset_name_prefix = 'some_prefix_'
+        self.configuration.zfs_ssh_username = 'someuser'
+        self.driver.share_export_ip = '1.1.1.1'
+        self.driver.service_ip = '2.2.2.2'
+        dataset_name = 'bar/subbar/some_prefix_fake_share_id'
+
+        result = self.driver.create_share(context, share, share_server=None)
+
+        self.assertEqual(
+            mock_get_helper.return_value.create_exports.return_value,
+            result,
+        )
+        self.assertEqual(
+            'share',
+            self.driver.private_storage.get(share['id'], 'entity_type'))
+        self.assertEqual(
+            dataset_name,
+            self.driver.private_storage.get(share['id'], 'dataset_name'))
+        self.assertEqual(
+            'someuser@2.2.2.2',
+            self.driver.private_storage.get(share['id'], 'ssh_cmd'))
+        self.assertEqual(
+            'bar',
+            self.driver.private_storage.get(share['id'], 'pool_name'))
+        self.driver.zfs.assert_called_once_with(
+            'create', '-o', 'fook=foov', '-o', 'bark=barv',
+            '-o', 'readonly=off', '-o', 'quota=4G',
+            'bar/subbar/some_prefix_fake_share_id')
+        mock_get_helper.assert_has_calls([
+            mock.call('NFS'), mock.call().create_exports(dataset_name)
+        ])
+
+    def test_create_share_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.create_share,
+            'fake_context', 'fake_share', share_server={'id': 'fake_server'},
+        )
+
+    def test_delete_share(self):
+        dataset_name = 'bar/subbar/some_prefix_fake_share_id'
+        mock_delete = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        snap_name = '%s@%s' % (
+            dataset_name, self.driver.replica_snapshot_prefix)
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(
+                side_effect=[
+                    [{'NAME': 'fake_dataset_name'}, {'NAME': dataset_name}],
+                    [{'NAME': 'snap_name'},
+                     {'NAME': '%s@foo' % dataset_name},
+                     {'NAME': snap_name}],
+                ]))
+        context = 'fake_context'
+        share = {
+            'id': 'fake_share_id',
+            'host': 'hostname@backend_name#bar',
+            'share_proto': 'NFS',
+            'size': 4,
+        }
+        self.configuration.zfs_dataset_name_prefix = 'some_prefix_'
+        self.configuration.zfs_ssh_username = 'someuser'
+        self.driver.share_export_ip = '1.1.1.1'
+        self.driver.service_ip = '2.2.2.2'
+        self.driver.private_storage.update(
+            share['id'],
+            {'pool_name': 'bar', 'dataset_name': dataset_name}
+        )
+
+        self.driver.delete_share(context, share, share_server=None)
+
+        self.driver.zfs.assert_has_calls([
+            mock.call('list', '-r', 'bar'),
+            mock.call('list', '-r', '-t', 'snapshot', 'bar'),
+        ])
+        self.driver._get_share_helper.assert_has_calls([
+            mock.call('NFS'), mock.call().remove_exports(dataset_name)])
+        self.driver.parse_zfs_answer.assert_has_calls([
+            mock.call('a'), mock.call('a')])
+        mock_delete.assert_has_calls([
+            mock.call(snap_name),
+            mock.call(dataset_name),
+        ])
+        self.assertEqual(0, zfs_driver.LOG.warning.call_count)
+
+    def test_delete_share_absent(self):
+        dataset_name = 'bar/subbar/some_prefix_fake_share_id'
+        mock_delete = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        snap_name = '%s@%s' % (
+            dataset_name, self.driver.replica_snapshot_prefix)
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[[], [{'NAME': snap_name}]]))
+        context = 'fake_context'
+        share = {
+            'id': 'fake_share_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+        }
+        self.configuration.zfs_dataset_name_prefix = 'some_prefix_'
+        self.configuration.zfs_ssh_username = 'someuser'
+        self.driver.share_export_ip = '1.1.1.1'
+        self.driver.service_ip = '2.2.2.2'
+        self.driver.private_storage.update(share['id'], {'pool_name': 'bar'})
+
+        self.driver.delete_share(context, share, share_server=None)
+
+        self.assertEqual(0, self.driver._get_share_helper.call_count)
+        self.assertEqual(0, mock_delete.call_count)
+        self.driver.zfs.assert_called_once_with('list', '-r', 'bar')
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+        zfs_driver.LOG.warning.assert_called_once_with(
+            mock.ANY, {'id': share['id'], 'name': dataset_name})
+
+    def test_delete_share_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.delete_share,
+            'fake_context', 'fake_share', share_server={'id': 'fake_server'},
+        )
+
+    def test_create_snapshot(self):
+        self.configuration.zfs_dataset_snapshot_name_prefix = 'prefx_'
+        self.mock_object(self.driver, 'zfs')
+        snapshot = {
+            'id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_id': 'fake_share_id'
+        }
+        snapshot_name = 'foo_data_set_name@prefx_fake_snapshot_id'
+        self.driver.private_storage.update(
+            snapshot['share_id'], {'dataset_name': 'foo_data_set_name'})
+
+        self.driver.create_snapshot('fake_context', snapshot)
+
+        self.driver.zfs.assert_called_once_with(
+            'snapshot', snapshot_name)
+        self.assertEqual(
+            snapshot_name,
+            self.driver.private_storage.get(
+                snapshot['id'], 'snapshot_name'))
+
+    def test_delete_snapshot(self):
+        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        mock_delete = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[
+                [{'NAME': 'some_other_dataset@snapshot_name'},
+                 {'NAME': snap_name}],
+                []]))
+        context = 'fake_context'
+        snapshot = {
+            'id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_id': 'fake_share_id',
+        }
+        self.driver.private_storage.update(
+            snapshot['id'], {'snapshot_name': snap_name})
+
+        self.driver.delete_snapshot(context, snapshot, share_server=None)
+
+        self.assertEqual(0, zfs_driver.LOG.warning.call_count)
+        self.driver.zfs.assert_called_once_with(
+            'list', '-r', '-t', 'snapshot', 'foo_zpool')
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+        mock_delete.assert_called_once_with(snap_name)
+
+    def test_delete_snapshot_absent(self):
+        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        mock_delete = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[[], [{'NAME': snap_name}]]))
+        context = 'fake_context'
+        snapshot = {
+            'id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_id': 'fake_share_id',
+        }
+        self.driver.private_storage.update(
+            snapshot['id'], {'snapshot_name': snap_name})
+
+        self.driver.delete_snapshot(context, snapshot, share_server=None)
+
+        self.assertEqual(0, mock_delete.call_count)
+        self.driver.zfs.assert_called_once_with(
+            'list', '-r', '-t', 'snapshot', 'foo_zpool')
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+        zfs_driver.LOG.warning.assert_called_once_with(
+            mock.ANY, {'id': snapshot['id'], 'name': snap_name})
+
+    def test_delete_snapshot_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.delete_snapshot,
+            'fake_context', 'fake_snapshot',
+            share_server={'id': 'fake_server'},
+        )
+
+    def test_create_share_from_snapshot(self):
+        mock_get_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(self.driver, 'zfs')
+        context = 'fake_context'
+        share = {
+            'id': 'fake_share_id',
+            'host': 'hostname@backend_name#bar',
+            'share_proto': 'NFS',
+            'size': 4,
+        }
+        snapshot = {
+            'id': 'fake_snapshot_id',
+            'host': 'hostname@backend_name#bar',
+            'size': 4,
+            'share_id': share['id'],
+        }
+        snap_name = 'foo_zpool/bar_dataset_name@prefix_fake_snapshot_id'
+        self.configuration.zfs_dataset_name_prefix = 'some_prefix_'
+        self.configuration.zfs_ssh_username = 'someuser'
+        self.driver.share_export_ip = '1.1.1.1'
+        self.driver.service_ip = '2.2.2.2'
+        dataset_name = 'bar/subbar/some_prefix_fake_share_id'
+        self.driver.private_storage.update(
+            snapshot['id'], {'snapshot_name': snap_name})
+
+        result = self.driver.create_share_from_snapshot(
+            context, share, snapshot, share_server=None)
+
+        self.assertEqual(
+            mock_get_helper.return_value.create_exports.return_value,
+            result,
+        )
+        self.assertEqual(
+            'share',
+            self.driver.private_storage.get(share['id'], 'entity_type'))
+        self.assertEqual(
+            dataset_name,
+            self.driver.private_storage.get(share['id'], 'dataset_name'))
+        self.assertEqual(
+            'someuser@2.2.2.2',
+            self.driver.private_storage.get(share['id'], 'ssh_cmd'))
+        self.assertEqual(
+            'bar',
+            self.driver.private_storage.get(share['id'], 'pool_name'))
+        self.driver.zfs.assert_called_once_with(
+            'clone', snap_name, 'bar/subbar/some_prefix_fake_share_id',
+            '-o', 'quota=4G')
+        mock_get_helper.assert_has_calls([
+            mock.call('NFS'), mock.call().create_exports(dataset_name)
+        ])
+
+    def test_create_share_from_snapshot_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.create_share_from_snapshot,
+            'fake_context', 'fake_share', 'fake_snapshot',
+            share_server={'id': 'fake_server'},
+        )
+
+    def test_get_pool(self):
+        share = {'host': 'hostname@backend_name#bar'}
+
+        result = self.driver.get_pool(share)
+
+        self.assertEqual('bar', result)
+
+    @ddt.data('on', 'off', 'rw=1.1.1.1')
+    def test_ensure_share(self, get_zfs_option_answer):
+        share = {
+            'id': 'fake_share_id',
+            'host': 'hostname@backend_name#bar',
+            'share_proto': 'NFS',
+        }
+        dataset_name = 'foo_zpool/foo_fs'
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(
+            self.driver, 'get_zfs_option',
+            mock.Mock(return_value=get_zfs_option_answer))
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[[{'NAME': 'fake1'},
+                                    {'NAME': dataset_name},
+                                    {'NAME': 'fake2'}]]))
+
+        result = self.driver.ensure_share('fake_context', share)
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'sharenfs')
+        mock_helper.assert_called_once_with(
+            share['share_proto'])
+        mock_helper.return_value.get_exports.assert_called_once_with(
+            dataset_name)
+        expected_calls = [mock.call('list', '-r', 'bar')]
+        if get_zfs_option_answer != 'off':
+            expected_calls.append(mock.call('share', dataset_name))
+        self.driver.zfs.assert_has_calls(expected_calls)
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+        self.driver._get_dataset_name.assert_called_once_with(share)
+        self.assertEqual(
+            mock_helper.return_value.get_exports.return_value,
+            result,
+        )
+
+    def test_ensure_share_absent(self):
+        share = {'id': 'fake_share_id', 'host': 'hostname@backend_name#bar'}
+        dataset_name = 'foo_zpool/foo_fs'
+        self.driver.private_storage.update(
+            share['id'], {'dataset_name': dataset_name})
+        self.mock_object(self.driver, 'get_zfs_option')
+        self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(
+            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[[], [{'NAME': dataset_name}]]))
+
+        self.assertRaises(
+            exception.ShareResourceNotFound,
+            self.driver.ensure_share,
+            'fake_context', share,
+        )
+
+        self.assertEqual(0, self.driver.get_zfs_option.call_count)
+        self.assertEqual(0, self.driver._get_share_helper.call_count)
+        self.driver.zfs.assert_called_once_with('list', '-r', 'bar')
+        self.driver.parse_zfs_answer.assert_called_once_with('a')
+
+    def test_ensure_share_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.ensure_share,
+            'fake_context', 'fake_share', share_server={'id': 'fake_server'},
+        )
+
+    def test_get_network_allocations_number(self):
+        self.assertEqual(0, self.driver.get_network_allocations_number())
+
+    def test_extend_share(self):
+        dataset_name = 'foo_zpool/foo_fs'
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(self.driver, 'zfs')
+
+        self.driver.extend_share('fake_share', 5)
+
+        self.driver._get_dataset_name.assert_called_once_with('fake_share')
+        self.driver.zfs.assert_called_once_with(
+            'set', 'quota=5G', dataset_name)
+
+    def test_extend_share_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.extend_share,
+            'fake_context', 'fake_share', 5,
+            share_server={'id': 'fake_server'},
+        )
+
+    def test_shrink_share(self):
+        dataset_name = 'foo_zpool/foo_fs'
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(self.driver, 'zfs')
+        self.mock_object(
+            self.driver, 'get_zfs_option', mock.Mock(return_value='4G'))
+        share = {'id': 'fake_share_id'}
+
+        self.driver.shrink_share(share, 5)
+
+        self.driver._get_dataset_name.assert_called_once_with(share)
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'used')
+        self.driver.zfs.assert_called_once_with(
+            'set', 'quota=5G', dataset_name)
+
+    def test_shrink_share_data_loss(self):
+        dataset_name = 'foo_zpool/foo_fs'
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(self.driver, 'zfs')
+        self.mock_object(
+            self.driver, 'get_zfs_option', mock.Mock(return_value='6G'))
+        share = {'id': 'fake_share_id'}
+
+        self.assertRaises(
+            exception.ShareShrinkingPossibleDataLoss,
+            self.driver.shrink_share, share, 5)
+
+        self.driver._get_dataset_name.assert_called_once_with(share)
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'used')
+        self.assertEqual(0, self.driver.zfs.call_count)
+
+    def test_shrink_share_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.shrink_share,
+            'fake_context', 'fake_share', 5,
+            share_server={'id': 'fake_server'},
+        )
+
+    def test__get_replication_snapshot_prefix(self):
+        replica = {'id': 'foo-_bar-_id'}
+        self.driver.replica_snapshot_prefix = 'PrEfIx'
+
+        result = self.driver._get_replication_snapshot_prefix(replica)
+
+        self.assertEqual('PrEfIx_foo__bar__id', result)
+
+    def test__get_replication_snapshot_tag(self):
+        replica = {'id': 'foo-_bar-_id'}
+        self.driver.replica_snapshot_prefix = 'PrEfIx'
+        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')
+
+        result = self.driver._get_replication_snapshot_tag(replica)
+
+        self.assertEqual(
+            ('PrEfIx_foo__bar__id_time_'
+             '%s' % mock_utcnow.return_value.isoformat.return_value),
+            result)
+        mock_utcnow.assert_called_once_with()
+        mock_utcnow.return_value.isoformat.assert_called_once_with()
+
+    def test__get_active_replica(self):
+        replica_list = [
+            {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+             'id': '1'},
+            {'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+             'id': '2'},
+            {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC,
+             'id': '3'},
+        ]
+
+        result = self.driver._get_active_replica(replica_list)
+
+        self.assertEqual(replica_list[1], result)
+
+    def test__get_active_replica_not_found(self):
+        replica_list = [
+            {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+             'id': '1'},
+            {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC,
+             'id': '3'},
+        ]
+
+        self.assertRaises(
+            exception.ReplicationException,
+            self.driver._get_active_replica,
+            replica_list,
+        )
+
+    def test_update_access(self):
+        self.mock_object(self.driver, '_get_dataset_name')
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        share = {'share_proto': 'NFS'}
+
+        result = self.driver.update_access(
+            'fake_context', share, [1], [2], [3])
+
+        self.driver._get_dataset_name.assert_called_once_with(share)
+        self.assertEqual(
+            mock_helper.return_value.update_access.return_value,
+            result,
+        )
+
+    def test_update_access_with_share_server(self):
+        self.assertRaises(
+            exception.InvalidInput,
+            self.driver.update_access,
+            'fake_context', 'fake_share', [], [], [],
+            share_server={'id': 'fake_server'},
+        )
+
+    def test_unmanage(self):
+        share = {'id': 'fake_share_id'}
+        self.mock_object(self.driver.private_storage, 'delete')
+
+        self.driver.unmanage(share)
+
+        self.driver.private_storage.delete.assert_called_once_with(share['id'])
+
+    def test__delete_dataset_or_snapshot_with_retry_snapshot(self):
+        self.mock_object(self.driver, 'get_zfs_option')
+        self.mock_object(self.driver, 'zfs_with_retry')
+
+        self.driver._delete_dataset_or_snapshot_with_retry('foo@bar')
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            'foo@bar', 'mountpoint')
+        self.driver.zfs_with_retry.assert_called_once_with(
+            'destroy', '-f', 'foo@bar')
+
+    def test__delete_dataset_or_snapshot_with_retry_of(self):
+        self.mock_object(self.driver, 'get_zfs_option')
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(return_value=('a', 'b')))
+        self.mock_object(zfs_driver.time, 'sleep')
+        self.mock_object(zfs_driver.LOG, 'debug')
+        self.mock_object(
+            zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2)))
+        dataset_name = 'fake/dataset/name'
+
+        self.assertRaises(
+            exception.ZFSonLinuxException,
+            self.driver._delete_dataset_or_snapshot_with_retry,
+            dataset_name,
+        )
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'mountpoint')
+        self.assertEqual(31, zfs_driver.time.time.call_count)
+        self.assertEqual(29, zfs_driver.time.sleep.call_count)
+        self.assertEqual(29, zfs_driver.LOG.debug.call_count)
+
+    def test__delete_dataset_or_snapshot_with_retry_temp_of(self):
+        self.mock_object(self.driver, 'get_zfs_option')
+        self.mock_object(self.driver, 'zfs')
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(side_effect=[
+                ('a', 'b'),
+                exception.ProcessExecutionError(
+                    'FAKE lsof returns not found')]))
+        self.mock_object(zfs_driver.time, 'sleep')
+        self.mock_object(zfs_driver.LOG, 'debug')
+        self.mock_object(
+            zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2)))
+        dataset_name = 'fake/dataset/name'
+
+        self.driver._delete_dataset_or_snapshot_with_retry(dataset_name)
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'mountpoint')
+        self.assertEqual(3, zfs_driver.time.time.call_count)
+        self.assertEqual(2, self.driver.execute.call_count)
+        self.assertEqual(1, zfs_driver.LOG.debug.call_count)
+        zfs_driver.LOG.debug.assert_called_once_with(
+            mock.ANY, {'name': dataset_name, 'out': 'a'})
+        zfs_driver.time.sleep.assert_called_once_with(2)
+        self.driver.zfs.assert_called_once_with('destroy', '-f', dataset_name)
+
+    def test__delete_dataset_or_snapshot_with_retry_mount_holders(self):
+        mnt = 'fake_mnt'
+        self.mock_object(
+            self.driver, 'get_zfs_option', mock.Mock(return_value=mnt))
+        self.mock_object(self.driver, 'zfs_with_retry')
+        self.mock_object(
+            self.driver, 'zfs',
+            mock.Mock(side_effect=exception.ProcessExecutionError('FAKE')))
+        pids_raw = '/proc/1234/foo /proc/5678/bar'
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(side_effect=[
+                ('a', 'b'),
+                exception.ProcessExecutionError('Fake lsof empty'),
+                (pids_raw, 'c'),
+                exception.ProcessExecutionError('Fake PID not found error'),
+                ('d', 'e'),
+            ]))
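+        # Mocked shell results, in order: a busy 'lsof', a failing 'lsof'
+        # (treated as released), the /proc/*/mounts grep returning two holder
+        # PIDs, a failing umount for the first PID (tolerated with a warning)
+        # and a successful umount for the second.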
+        self.mock_object(zfs_driver.time, 'sleep')
+        self.mock_object(zfs_driver.LOG, 'debug')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2)))
+        dataset_name = 'fake/dataset/name'
+
+        self.driver._delete_dataset_or_snapshot_with_retry(dataset_name)
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'mountpoint')
+        self.assertEqual(3, zfs_driver.time.time.call_count)
+        self.assertEqual(3, zfs_driver.LOG.debug.call_count)
+        self.assertEqual(5, self.driver.execute.call_count)
+        self.driver.execute.assert_has_calls([
+            mock.call('lsof', '-w', mnt),
+            mock.call('lsof', '-w', mnt),
+            mock.call('bash', '-c',
+                      '(echo $(grep -s %s /proc/*/mounts) ) 2>&1 ' % mnt),
+            mock.call('sudo', 'nsenter', '--mnt', '--target=1234',
+                      '/bin/umount', mnt),
+            mock.call('sudo', 'nsenter', '--mnt', '--target=5678',
+                      '/bin/umount', mnt),
+        ])
+        zfs_driver.time.sleep.assert_has_calls([mock.call(2), mock.call(1)])
+        self.driver.zfs.assert_called_once_with('destroy', '-f', dataset_name)
+        zfs_driver.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY)
+        self.driver.zfs_with_retry.assert_called_once_with(
+            'destroy', '-f', dataset_name)
+
+    def test__delete_dataset_or_snapshot_with_retry_mount_holders_error(self):
+        mnt = 'fake_mnt'
+        self.mock_object(
+            self.driver, 'get_zfs_option', mock.Mock(return_value=mnt))
+        self.mock_object(self.driver, 'zfs_with_retry')
+        self.mock_object(
+            self.driver, 'zfs',
+            mock.Mock(side_effect=exception.ProcessExecutionError('FAKE')))
+        self.mock_object(
+            self.driver, 'execute', mock.Mock(side_effect=[
+                ('a', 'b'),
+                exception.ProcessExecutionError('Fake lsof empty'),
+                exception.ProcessExecutionError('Failed to get list of PIDs'),
+            ]))
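+        # Here the /proc/*/mounts lookup itself fails; the driver is expected
+        # to log a warning and still retry the destroy via zfs_with_retry.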
+        self.mock_object(zfs_driver.time, 'sleep')
+        self.mock_object(zfs_driver.LOG, 'debug')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(
+            zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2)))
+        dataset_name = 'fake/dataset/name'
+
+        self.driver._delete_dataset_or_snapshot_with_retry(dataset_name)
+
+        self.driver.get_zfs_option.assert_called_once_with(
+            dataset_name, 'mountpoint')
+        self.assertEqual(3, zfs_driver.time.time.call_count)
+        self.assertEqual(2, zfs_driver.LOG.debug.call_count)
+        self.assertEqual(3, self.driver.execute.call_count)
+        self.driver.execute.assert_has_calls([
+            mock.call('lsof', '-w', mnt),
+            mock.call('lsof', '-w', mnt),
+            mock.call('bash', '-c',
+                      '(echo $(grep -s %s /proc/*/mounts) ) 2>&1 ' % mnt),
+        ])
+        zfs_driver.time.sleep.assert_has_calls([mock.call(2), mock.call(1)])
+        self.driver.zfs.assert_called_once_with('destroy', '-f', dataset_name)
+        zfs_driver.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY)
+        self.driver.zfs_with_retry.assert_called_once_with(
+            'destroy', '-f', dataset_name)
+
+    def test_create_replica(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'host': 'hostname1@backend_name1#foo',
+            'size': 5,
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica_list = [active_replica]
+        new_replica = {
+            'id': 'fake_new_replica_id',
+            'host': 'hostname2@backend_name2#bar',
+            'share_proto': 'NFS',
+            'replica_state': None,
+        }
+        dst_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % new_replica['id'])
+        access_rules = ['foo_rule', 'bar_rule']
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': 'fake/active/dataset/name',
+             'ssh_cmd': 'fake_ssh_cmd'}
+        )
+        self.mock_object(
+            self.driver, 'execute',
+            mock.Mock(side_effect=[('a', 'b'), ('c', 'd')]))
+        self.mock_object(self.driver, 'zfs')
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix'
+        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')
+        mock_utcnow.return_value.isoformat.return_value = 'some_time'
+
+        result = self.driver.create_replica(
+            'fake_context', replica_list, new_replica, access_rules)
+
+        expected = {
+            'export_locations': (
+                mock_helper.return_value.create_exports.return_value),
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+            'access_rules_status': zfs_driver.constants.STATUS_ACTIVE,
+        }
+        self.assertEqual(expected, result)
+        mock_helper.assert_has_calls([
+            mock.call('NFS'),
+            mock.call().update_access(
+                dst_dataset_name, access_rules, make_all_ro=True),
+            mock.call('NFS'),
+            mock.call().create_exports(dst_dataset_name),
+        ])
+        self.driver.zfs.assert_has_calls([
+            mock.call('set', 'readonly=on', dst_dataset_name),
+            mock.call('set', 'quota=%sG' % active_replica['size'],
+                      dst_dataset_name),
+        ])
+        src_snapshot_name = (
+            'fake/active/dataset/name@'
+            'tmp_snapshot_for_replication__fake_new_replica_id_time_some_time')
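+        # Bootstrap flow: snapshot the active replica over SSH, then stream it
+        # with 'zfs send | ssh ... zfs receive' into the new replica's dataset.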
+        self.driver.execute.assert_has_calls([
+            mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'snapshot',
+                      src_snapshot_name),
+            mock.call(
+                'ssh', 'fake_ssh_cmd',
+                'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|',
+                'ssh', 'fake_username@240.241.242.244',
+                'sudo', 'zfs', 'receive', '-v', dst_dataset_name
+            ),
+        ])
+        mock_utcnow.assert_called_once_with()
+        mock_utcnow.return_value.isoformat.assert_called_once_with()
+
+    def test_delete_replica_not_found(self):
+        dataset_name = 'foo/dataset/name'
+        pool_name = 'foo_pool'
+        replica = {'id': 'fake_replica_id'}
+        replica_list = [replica]
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(
+            self.driver, 'zfs',
+            mock.Mock(side_effect=[('a', 'b'), ('c', 'd')]))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], []]))
+        self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.mock_object(self.driver, '_get_share_helper')
+        self.driver.private_storage.update(
+            replica['id'], {'pool_name': pool_name})
+
+        self.driver.delete_replica('fake_context', replica_list, replica)
+
+        zfs_driver.LOG.warning.assert_called_once_with(
+            mock.ANY, {'id': replica['id'], 'name': dataset_name})
+        self.assertEqual(0, self.driver._get_share_helper.call_count)
+        self.assertEqual(
+            0, self.driver._delete_dataset_or_snapshot_with_retry.call_count)
+        self.driver._get_dataset_name.assert_called_once_with(replica)
+        self.driver.zfs.assert_has_calls([
+            mock.call('list', '-r', '-t', 'snapshot', pool_name),
+            mock.call('list', '-r', pool_name),
+        ])
+        self.driver.parse_zfs_answer.assert_has_calls([
+            mock.call('a'), mock.call('c'),
+        ])
+
+    def test_delete_replica(self):
+        dataset_name = 'foo/dataset/name'
+        pool_name = 'foo_pool'
+        replica = {'id': 'fake_replica_id', 'share_proto': 'NFS'}
+        replica_list = [replica]
+        self.mock_object(
+            self.driver, '_get_dataset_name',
+            mock.Mock(return_value=dataset_name))
+        self.mock_object(
+            self.driver, 'zfs',
+            mock.Mock(side_effect=[('a', 'b'), ('c', 'd')]))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[
+                [{'NAME': 'some_other_dataset@snapshot'},
+                 {'NAME': dataset_name + '@foo_snap'}],
+                [{'NAME': 'some_other_dataset'},
+                 {'NAME': dataset_name}],
+            ]))
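+        # The fake listings mix in unrelated datasets; only the entries that
+        # match the replica's dataset name should be destroyed.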
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry')
+        self.mock_object(zfs_driver.LOG, 'warning')
+        self.driver.private_storage.update(
+            replica['id'],
+            {'pool_name': pool_name, 'dataset_name': dataset_name})
+
+        self.driver.delete_replica('fake_context', replica_list, replica)
+
+        self.assertEqual(0, zfs_driver.LOG.warning.call_count)
+        self.assertEqual(0, self.driver._get_dataset_name.call_count)
+        self.driver._delete_dataset_or_snapshot_with_retry.assert_has_calls([
+            mock.call(dataset_name + '@foo_snap'),
+            mock.call(dataset_name),
+        ])
+        self.driver.zfs.assert_has_calls([
+            mock.call('list', '-r', '-t', 'snapshot', pool_name),
+            mock.call('list', '-r', pool_name),
+        ])
+        self.driver.parse_zfs_answer.assert_has_calls([
+            mock.call('a'), mock.call('c'),
+        ])
+        mock_helper.assert_called_once_with(replica['share_proto'])
+        mock_helper.return_value.remove_exports.assert_called_once_with(
+            dataset_name)
+
+    def test_update_replica_state(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'host': 'hostname1@backend_name1#foo',
+            'size': 5,
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica = {
+            'id': 'fake_new_replica_id',
+            'host': 'hostname2@backend_name2#bar',
+            'share_proto': 'NFS',
+            'replica_state': None,
+        }
+        replica_list = [replica, active_replica]
+        dst_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % replica['id'])
+        src_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id'])
+        access_rules = ['foo_rule', 'bar_rule']
+        old_repl_snapshot_tag = (
+            self.driver._get_replication_snapshot_prefix(
+                active_replica) + 'foo')
+        snap_tag_prefix = self.driver._get_replication_snapshot_prefix(
+            replica)
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': src_dataset_name,
+             'ssh_cmd': 'fake_src_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        self.driver.private_storage.update(
+            replica['id'],
+            {'dataset_name': dst_dataset_name,
+             'ssh_cmd': 'fake_dst_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        self.mock_object(
+            self.driver, 'execute',
+            mock.Mock(side_effect=[('a', 'b'), ('c', 'd'), ('e', 'f')]))
+        self.mock_object(self.driver, 'execute_with_retry',
+                         mock.Mock(side_effect=[('g', 'h')]))
+        self.mock_object(self.driver, 'zfs',
+                         mock.Mock(side_effect=[('j', 'k'), ('l', 'm')]))
+        self.mock_object(
+            self.driver, 'parse_zfs_answer',
+            mock.Mock(side_effect=[
+                ({'NAME': dst_dataset_name + '@' + old_repl_snapshot_tag},
+                 {'NAME': dst_dataset_name + '@%s_time_some_time' %
+                  snap_tag_prefix},
+                 {'NAME': 'other/dataset/name1@' + old_repl_snapshot_tag}),
+                ({'NAME': src_dataset_name + '@' + old_repl_snapshot_tag},
+                 {'NAME': src_dataset_name + '@' + snap_tag_prefix + 'quuz'},
+                 {'NAME': 'other/dataset/name2@' + old_repl_snapshot_tag}),
+            ])
+        )
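+        # First answer: snapshots on the destination pool; second: snapshots
+        # listed on the source over SSH. Stale replication snapshots on both
+        # sides are expected to be cleaned up.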
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix'
+        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')
+        mock_utcnow.return_value.isoformat.return_value = 'some_time'
+        mock_delete_snapshot = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+
+        result = self.driver.update_replica_state(
+            'fake_context', replica_list, replica, access_rules)
+
+        self.assertEqual(zfs_driver.constants.REPLICA_STATE_IN_SYNC, result)
+        mock_helper.assert_called_once_with('NFS')
+        mock_helper.return_value.update_access.assert_called_once_with(
+            dst_dataset_name, access_rules, make_all_ro=True)
+        self.driver.execute_with_retry.assert_called_once_with(
+            'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'destroy', '-f',
+            src_dataset_name + '@' + snap_tag_prefix + 'quuz')
+        self.driver.execute.assert_has_calls([
+            mock.call(
+                'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'snapshot',
+                src_dataset_name + '@' +
+                self.driver._get_replication_snapshot_tag(replica)),
+            mock.call(
+                'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'send',
+                '-vDRI', old_repl_snapshot_tag,
+                src_dataset_name + '@%s' % snap_tag_prefix + '_time_some_time',
+                '|', 'ssh', 'fake_dst_ssh_cmd',
+                'sudo', 'zfs', 'receive', '-vF', dst_dataset_name),
+            mock.call(
+                'ssh', 'fake_src_ssh_cmd',
+                'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', 'bar'),
+        ])
+        mock_delete_snapshot.assert_called_once_with(
+            dst_dataset_name + '@' + old_repl_snapshot_tag)
+        self.driver.parse_zfs_answer.assert_has_calls(
+            [mock.call('l'), mock.call('e')])
+
+    def test_promote_replica_active_available(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'host': 'hostname1@backend_name1#foo',
+            'size': 5,
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica = {
+            'id': 'fake_first_replica_id',
+            'host': 'hostname2@backend_name2#bar',
+            'share_proto': 'NFS',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        second_replica = {
+            'id': 'fake_second_replica_id',
+            'host': 'hostname3@backend_name3#quuz',
+            'share_proto': 'NFS',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        replica_list = [replica, active_replica, second_replica]
+        dst_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % replica['id'])
+        src_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id'])
+        access_rules = ['foo_rule', 'bar_rule']
+        old_repl_snapshot_tag = (
+            self.driver._get_replication_snapshot_prefix(
+                active_replica) + 'foo')
+        snap_tag_prefix = self.driver._get_replication_snapshot_prefix(
+            active_replica) + '_time_some_time'
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': src_dataset_name,
+             'ssh_cmd': 'fake_src_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        for repl in (replica, second_replica):
+            self.driver.private_storage.update(
+                repl['id'],
+                {'dataset_name': (
+                    'bar/subbar/fake_dataset_name_prefix%s' % repl['id']),
+                 'ssh_cmd': 'fake_dst_ssh_cmd',
+                 'repl_snapshot_tag': old_repl_snapshot_tag}
+            )
+        self.mock_object(
+            self.driver, 'execute',
+            mock.Mock(side_effect=[
+                ('a', 'b'),
+                ('c', 'd'),
+                ('e', 'f'),
+                exception.ProcessExecutionError('Second replica sync failure'),
+            ]))
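+        # The last SSH call, which syncs the extra replica, fails: that
+        # replica must be reported out-of-sync and keep its old snapshot tag.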
+        self.mock_object(self.driver, 'zfs',
+                         mock.Mock(side_effect=[('g', 'h')]))
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix'
+        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')
+        mock_utcnow.return_value.isoformat.return_value = 'some_time'
+        mock_delete_snapshot = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+
+        result = self.driver.promote_replica(
+            'fake_context', replica_list, replica, access_rules)
+
+        expected = [
+            {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC,
+             'id': 'fake_active_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC},
+            {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE,
+             'id': 'fake_first_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE},
+            {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC,
+             'id': 'fake_second_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC},
+        ]
+        for repl in expected:
+            self.assertIn(repl, result)
+        self.assertEqual(3, len(result))
+        mock_helper.assert_called_once_with('NFS')
+        mock_helper.return_value.update_access.assert_called_once_with(
+            dst_dataset_name, access_rules)
+        self.driver.zfs.assert_called_once_with(
+            'set', 'readonly=off', dst_dataset_name)
+        self.assertEqual(0, mock_delete_snapshot.call_count)
+        for repl in (active_replica, replica):
+            self.assertEqual(
+                snap_tag_prefix,
+                self.driver.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag'))
+        self.assertEqual(
+            old_repl_snapshot_tag,
+            self.driver.private_storage.get(
+                second_replica['id'], 'repl_snapshot_tag'))
+
+    def test_promote_replica_active_not_available(self):
+        active_replica = {
+            'id': 'fake_active_replica_id',
+            'host': 'hostname1@backend_name1#foo',
+            'size': 5,
+            'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE,
+        }
+        replica = {
+            'id': 'fake_first_replica_id',
+            'host': 'hostname2@backend_name2#bar',
+            'share_proto': 'NFS',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        second_replica = {
+            'id': 'fake_second_replica_id',
+            'host': 'hostname3@backend_name3#quuz',
+            'share_proto': 'NFS',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        third_replica = {
+            'id': 'fake_third_replica_id',
+            'host': 'hostname4@backend_name4#fff',
+            'share_proto': 'NFS',
+            'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
+        }
+        replica_list = [replica, active_replica, second_replica, third_replica]
+        dst_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % replica['id'])
+        src_dataset_name = (
+            'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id'])
+        access_rules = ['foo_rule', 'bar_rule']
+        old_repl_snapshot_tag = (
+            self.driver._get_replication_snapshot_prefix(
+                active_replica) + 'foo')
+        snap_tag_prefix = self.driver._get_replication_snapshot_prefix(
+            replica) + '_time_some_time'
+        self.driver.private_storage.update(
+            active_replica['id'],
+            {'dataset_name': src_dataset_name,
+             'ssh_cmd': 'fake_src_ssh_cmd',
+             'repl_snapshot_tag': old_repl_snapshot_tag}
+        )
+        for repl in (replica, second_replica, third_replica):
+            self.driver.private_storage.update(
+                repl['id'],
+                {'dataset_name': (
+                    'bar/subbar/fake_dataset_name_prefix%s' % repl['id']),
+                 'ssh_cmd': 'fake_dst_ssh_cmd',
+                 'repl_snapshot_tag': old_repl_snapshot_tag}
+            )
+        self.mock_object(
+            self.driver, 'execute',
+            mock.Mock(side_effect=[
+                exception.ProcessExecutionError('Active replica failure'),
+                ('a', 'b'),
+                exception.ProcessExecutionError('Second replica sync failure'),
+                ('c', 'd'),
+            ]))
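+        # Snapshotting through the (unreachable) active replica fails first,
+        # so the driver snapshots the promoted replica locally instead; one of
+        # the remaining replica syncs fails as well.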
+        self.mock_object(self.driver, 'zfs',
+                         mock.Mock(side_effect=[('g', 'h'), ('i', 'j')]))
+        mock_helper = self.mock_object(self.driver, '_get_share_helper')
+        self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix'
+        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')
+        mock_utcnow.return_value.isoformat.return_value = 'some_time'
+        mock_delete_snapshot = self.mock_object(
+            self.driver, '_delete_dataset_or_snapshot_with_retry')
+
+        result = self.driver.promote_replica(
+            'fake_context', replica_list, replica, access_rules)
+
+        expected = [
+            {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC,
+             'id': 'fake_active_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC},
+            {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE,
+             'id': 'fake_first_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE},
+            {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC,
+             'id': 'fake_second_replica_id'},
+            {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC,
+             'id': 'fake_third_replica_id',
+             'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC},
+        ]
+        for repl in expected:
+            self.assertIn(repl, result)
+        self.assertEqual(4, len(result))
+        mock_helper.assert_called_once_with('NFS')
+        mock_helper.return_value.update_access.assert_called_once_with(
+            dst_dataset_name, access_rules)
+        self.driver.zfs.assert_has_calls([
+            mock.call('snapshot', dst_dataset_name + '@' + snap_tag_prefix),
+            mock.call('set', 'readonly=off', dst_dataset_name),
+        ])
+        self.assertEqual(0, mock_delete_snapshot.call_count)
+        for repl in (second_replica, replica):
+            self.assertEqual(
+                snap_tag_prefix,
+                self.driver.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag'))
+        for repl in (active_replica, third_replica):
+            self.assertEqual(
+                old_repl_snapshot_tag,
+                self.driver.private_storage.get(
+                    repl['id'], 'repl_snapshot_tag'))
diff --git a/manila/tests/share/drivers/zfsonlinux/test_utils.py b/manila/tests/share/drivers/zfsonlinux/test_utils.py
new file mode 100644
index 0000000000..bf2e0a7167
--- /dev/null
+++ b/manila/tests/share/drivers/zfsonlinux/test_utils.py
@@ -0,0 +1,497 @@
+# Copyright (c) 2016 Mirantis, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+
+import ddt
+import mock
+from oslo_config import cfg
+
+from manila import exception
+from manila.share.drivers.ganesha import utils as ganesha_utils
+from manila.share.drivers.zfsonlinux import utils as zfs_utils
+from manila import test
+
+CONF = cfg.CONF
+
+
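+# Minimal stand-in for manila's configuration object: every option becomes a
+# class attribute, which is all the code under test reads. The class itself is
+# returned rather than an instance; attribute lookup works the same either way.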
+def get_fake_configuration(*args, **kwargs):
+    fake_config_options = {
+        "zfs_use_ssh": kwargs.get("zfs_use_ssh", False),
+        "zfs_share_export_ip": kwargs.get(
+            "zfs_share_export_ip", "240.241.242.243"),
+        "zfs_service_ip": kwargs.get("zfs_service_ip", "240.241.242.244"),
+        "ssh_conn_timeout": kwargs.get("ssh_conn_timeout", 123),
+        "zfs_ssh_username": kwargs.get(
+            "zfs_ssh_username", 'fake_username'),
+        "zfs_ssh_user_password": kwargs.get(
+            "zfs_ssh_user_password", 'fake_pass'),
+        "zfs_ssh_private_key_path": kwargs.get(
+            "zfs_ssh_private_key_path", '/fake/path'),
+        "append_config_values": mock.Mock(),
+    }
+    return type("FakeConfig", (object, ), fake_config_options)
+
+
+class FakeShareDriver(zfs_utils.ExecuteMixin):
+    def __init__(self, *args, **kwargs):
+        self.configuration = get_fake_configuration(*args, **kwargs)
+        self.init_execute_mixin(*args, **kwargs)
+
+
+@ddt.ddt
+class ExecuteMixinTestCase(test.TestCase):
+
+    def setUp(self):
+        super(ExecuteMixinTestCase, self).setUp()
+        self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor')
+        self.driver = FakeShareDriver()
+
+    def test_init(self):
+        self.assertIsNone(self.driver.ssh_executor)
+        self.assertEqual(0, self.ssh_executor.call_count)
+
+    def test_init_ssh(self):
+        driver = FakeShareDriver(zfs_use_ssh=True)
+
+        self.assertIsNotNone(driver.ssh_executor)
+        self.ssh_executor.assert_called_once_with(
+            ip=driver.configuration.zfs_service_ip,
+            port=22,
+            conn_timeout=driver.configuration.ssh_conn_timeout,
+            login=driver.configuration.zfs_ssh_username,
+            password=driver.configuration.zfs_ssh_user_password,
+            privatekey=driver.configuration.zfs_ssh_private_key_path,
+            max_size=10,
+        )
+
+    def test_local_shell_execute(self):
+        self.mock_object(self.driver, '_execute')
+
+        self.driver.execute('fake', '--foo', '--bar')
+
+        self.assertEqual(0, self.ssh_executor.call_count)
+        self.driver._execute.assert_called_once_with(
+            'fake', '--foo', '--bar')
+
+    def test_local_shell_execute_with_sudo(self):
+        self.mock_object(self.driver, '_execute')
+
+        self.driver.execute('sudo', 'fake', '--foo', '--bar')
+
+        self.assertEqual(0, self.ssh_executor.call_count)
+        self.driver._execute.assert_called_once_with(
+            'fake', '--foo', '--bar', run_as_root=True)
+
+    def test_ssh_execute(self):
+        driver = FakeShareDriver(zfs_use_ssh=True)
+
+        self.mock_object(driver, '_execute')
+
+        driver.execute('fake', '--foo', '--bar')
+
+        self.assertEqual(0, driver._execute.call_count)
+        self.ssh_executor.return_value.assert_called_once_with(
+            'fake', '--foo', '--bar')
+
+    def test_ssh_execute_with_sudo(self):
+        driver = FakeShareDriver(zfs_use_ssh=True)
+
+        self.mock_object(driver, '_execute')
+
+        driver.execute('sudo', 'fake', '--foo', '--bar')
+
+        self.assertEqual(0, driver._execute.call_count)
+        self.ssh_executor.return_value.assert_called_once_with(
+            'fake', '--foo', '--bar', run_as_root=True)
+
+    def test_execute_with_retry(self):
+        self.mock_object(time, 'sleep')
+        self.mock_object(self.driver, 'execute', mock.Mock(
+            side_effect=[exception.ProcessExecutionError('FAKE'), None]))
+        self.driver.execute_with_retry('foo', 'bar')
+
+        self.assertEqual(2, self.driver.execute.call_count)
+        self.driver.execute.assert_has_calls(
+            [mock.call('foo', 'bar'), mock.call('foo', 'bar')])
+
+    def test_execute_with_retry_exceeded(self):
+        self.mock_object(time, 'sleep')
+        self.mock_object(self.driver, 'execute', mock.Mock(
+            side_effect=exception.ProcessExecutionError('FAKE')))
+
+        self.assertRaises(
+            exception.ProcessExecutionError,
+            self.driver.execute_with_retry,
+            'foo', 'bar',
+        )
+
+        self.assertEqual(36, self.driver.execute.call_count)
+
+    @ddt.data(True, False)
+    def test__get_option(self, pool_level):
+        out = """NAME   PROPERTY              VALUE                  SOURCE\n
+foo_resource_name  bar_option_name           some_value              local"""
+        self.mock_object(
+            self.driver, '_execute', mock.Mock(return_value=(out, '')))
+        res_name = 'foo_resource_name'
+        opt_name = 'bar_option_name'
+
+        result = self.driver._get_option(
+            res_name, opt_name, pool_level=pool_level)
+
+        self.assertEqual('some_value', result)
+        self.driver._execute.assert_called_once_with(
+            'zpool' if pool_level else 'zfs', 'get', opt_name, res_name,
+            run_as_root=True)
+
+    def test_parse_zfs_answer(self):
+        not_parsed_str = """NAME   PROPERTY       VALUE              SOURCE\n
+foo_res  opt_1           bar              local
+foo_res  opt_2           foo              default
+foo_res  opt_3           some_value       local"""
+        expected = [
+            {'NAME': 'foo_res', 'PROPERTY': 'opt_1', 'VALUE': 'bar',
+             'SOURCE': 'local'},
+            {'NAME': 'foo_res', 'PROPERTY': 'opt_2', 'VALUE': 'foo',
+             'SOURCE': 'default'},
+            {'NAME': 'foo_res', 'PROPERTY': 'opt_3', 'VALUE': 'some_value',
+             'SOURCE': 'local'},
+        ]
+
+        result = self.driver.parse_zfs_answer(not_parsed_str)
+
+        self.assertEqual(expected, result)
+
+    def test_parse_zfs_answer_empty(self):
+        result = self.driver.parse_zfs_answer('')
+
+        self.assertEqual([], result)
+
+    def test_get_zpool_option(self):
+        self.mock_object(self.driver, '_get_option')
+        zpool_name = 'foo_resource_name'
+        opt_name = 'bar_option_name'
+
+        result = self.driver.get_zpool_option(zpool_name, opt_name)
+
+        self.assertEqual(self.driver._get_option.return_value, result)
+        self.driver._get_option.assert_called_once_with(
+            zpool_name, opt_name, True)
+
+    def test_get_zfs_option(self):
+        self.mock_object(self.driver, '_get_option')
+        dataset_name = 'foo_resource_name'
+        opt_name = 'bar_option_name'
+
+        result = self.driver.get_zfs_option(dataset_name, opt_name)
+
+        self.assertEqual(self.driver._get_option.return_value, result)
+        self.driver._get_option.assert_called_once_with(
+            dataset_name, opt_name, False)
+
+    def test_zfs(self):
+        self.mock_object(self.driver, 'execute')
+        self.mock_object(self.driver, 'execute_with_retry')
+
+        self.driver.zfs('foo', 'bar')
+
+        self.assertEqual(0, self.driver.execute_with_retry.call_count)
+        self.driver.execute.assert_called_once_with(
+            'sudo', 'zfs', 'foo', 'bar')
+
+    def test_zfs_with_retrying(self):
+        self.mock_object(self.driver, 'execute')
+        self.mock_object(self.driver, 'execute_with_retry')
+
+        self.driver.zfs_with_retry('foo', 'bar')
+
+        self.assertEqual(0, self.driver.execute.call_count)
+        self.driver.execute_with_retry.assert_called_once_with(
+            'sudo', 'zfs', 'foo', 'bar')
+
+
+@ddt.ddt
+class NFSviaZFSHelperTestCase(test.TestCase):
+
+    def setUp(self):
+        super(NFSviaZFSHelperTestCase, self).setUp()
+        configuration = get_fake_configuration()
+        self.out = "fake_out"
+        self.mock_object(
+            zfs_utils.utils, "execute", mock.Mock(return_value=(self.out, "")))
+        self.helper = zfs_utils.NFSviaZFSHelper(configuration)
+
+    def test_init(self):
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call("which", "exportfs"),
+            mock.call("exportfs", run_as_root=True),
+        ])
+
+    def test_verify_setup_exportfs_not_installed(self):
+        zfs_utils.utils.execute.reset_mock()
+        zfs_utils.utils.execute.side_effect = [('', '')]
+
+        self.assertRaises(
+            exception.ZFSonLinuxException, self.helper.verify_setup)
+
+        zfs_utils.utils.execute.assert_called_once_with("which", "exportfs")
+
+    def test_verify_setup_error_calling_exportfs(self):
+        zfs_utils.utils.execute.reset_mock()
+        zfs_utils.utils.execute.side_effect = [
+            ('fake_out', ''), exception.ProcessExecutionError('Fake')]
+
+        self.assertRaises(
+            exception.ProcessExecutionError, self.helper.verify_setup)
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call("which", "exportfs"),
+            mock.call("exportfs", run_as_root=True),
+        ])
+
+    def test_is_kernel_version_true(self):
+        zfs_utils.utils.execute.reset_mock()
+
+        self.assertTrue(self.helper.is_kernel_version)
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call("modinfo", "zfs"),
+        ])
+
+    def test_is_kernel_version_false(self):
+        zfs_utils.utils.execute.reset_mock()
+        zfs_utils.utils.execute.side_effect = (
+            exception.ProcessExecutionError('Fake'))
+
+        self.assertFalse(self.helper.is_kernel_version)
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call("modinfo", "zfs"),
+        ])
+
+    def test_is_kernel_version_second_call(self):
+        zfs_utils.utils.execute.reset_mock()
+
+        self.assertTrue(self.helper.is_kernel_version)
+        self.assertTrue(self.helper.is_kernel_version)
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call("modinfo", "zfs"),
+        ])
+
+    def test_create_exports(self):
+        self.mock_object(self.helper, 'get_exports')
+
+        result = self.helper.create_exports('foo')
+
+        self.assertEqual(
+            self.helper.get_exports.return_value, result)
+
+    def test_get_exports(self):
+        self.mock_object(
+            self.helper, 'get_zfs_option', mock.Mock(return_value='fake_mp'))
+        expected = [
+            {
+                "path": "%s:fake_mp" % ip,
+                "metadata": {},
+                "is_admin_only": is_admin_only,
+            } for ip, is_admin_only in (
+                (self.helper.configuration.zfs_share_export_ip, False),
+                (self.helper.configuration.zfs_service_ip, True))
+        ]
+
+        result = self.helper.get_exports('foo')
+
+        self.assertEqual(expected, result)
+        self.helper.get_zfs_option.assert_called_once_with('foo', 'mountpoint')
+
+    def test_remove_exports(self):
+        zfs_utils.utils.execute.reset_mock()
+        self.mock_object(
+            self.helper, 'get_zfs_option', mock.Mock(return_value='bar'))
+
+        self.helper.remove_exports('foo')
+
+        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
+        zfs_utils.utils.execute.assert_called_once_with(
+            'zfs', 'set', 'sharenfs=off', 'foo', run_as_root=True)
+
+    def test_remove_exports_that_absent(self):
+        zfs_utils.utils.execute.reset_mock()
+        self.mock_object(
+            self.helper, 'get_zfs_option', mock.Mock(return_value='off'))
+
+        self.helper.remove_exports('foo')
+
+        self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs')
+        self.assertEqual(0, zfs_utils.utils.execute.call_count)
+
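+    # Each case: (result of 'modinfo zfs', expected 'sharenfs' option value,
+    # make_all_ro flag). When the kernel module is present, the list syntax
+    # 'rw=...,ro=...' is expected; when 'modinfo' fails, per-host
+    # 'host:option' entries are expected instead.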
+    @ddt.data(
+        (('fake_modinfo_result', ''),
+         ('sharenfs=rw=1.1.1.1:3.3.3.3,no_root_squash,'
+          'ro=2.2.2.2,no_root_squash'), False),
+        (('fake_modinfo_result', ''),
+         ('sharenfs=ro=1.1.1.1:2.2.2.2:3.3.3.3,no_root_squash'), True),
+        (exception.ProcessExecutionError('Fake'),
+         ('sharenfs=1.1.1.1:rw,no_root_squash 3.3.3.3:rw,'
+          'no_root_squash 2.2.2.2:ro,no_root_squash'), False),
+        (exception.ProcessExecutionError('Fake'),
+         ('sharenfs=1.1.1.1:ro,no_root_squash 2.2.2.2:ro,'
+          'no_root_squash 3.3.3.3:ro,no_root_squash'), True),
+    )
+    @ddt.unpack
+    def test_update_access_rw_and_ro(self, modinfo_response, access_str,
+                                     make_all_ro):
+        zfs_utils.utils.execute.reset_mock()
+        dataset_name = 'zpoolz/foo_dataset_name/fake'
+        zfs_utils.utils.execute.side_effect = [
+            modinfo_response,
+            ("""NAME            USED  AVAIL  REFER  MOUNTPOINT\n
+%(dn)s                2.58M  14.8G  27.5K  /%(dn)s\n
+%(dn)s_some_other     3.58M  15.8G  28.5K  /%(dn)s\n
+             """ % {'dn': dataset_name}, ''),
+            ('fake_set_opt_result', ''),
+            ("""NAME                     PROPERTY    VALUE            SOURCE\n
+%s          mountpoint  /%s  default\n
+             """ % (dataset_name, dataset_name), ''),
+            ('fake_1_result', ''),
+            ('fake_2_result', ''),
+            ('fake_3_result', ''),
+        ]
+        access_rules = [
+            {'access_type': 'ip', 'access_level': 'rw',
+             'access_to': '1.1.1.1'},
+            {'access_type': 'ip', 'access_level': 'ro',
+             'access_to': '2.2.2.2'},
+            {'access_type': 'ip', 'access_level': 'rw',
+             'access_to': '3.3.3.3'},
+        ]
+        delete_rules = [
+            {'access_type': 'ip', 'access_level': 'rw',
+             'access_to': '4.4.4.4'},
+            {'access_type': 'ip', 'access_level': 'ro',
+             'access_to': '5.5.5.5'},
+            {'access_type': 'user', 'access_level': 'rw',
+             'access_to': '6.6.6.6'},
+            {'access_type': 'user', 'access_level': 'ro',
+             'access_to': '7.7.7.7'},
+        ]
+
+        self.helper.update_access(
+            dataset_name, access_rules, [], delete_rules,
+            make_all_ro=make_all_ro)
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call('modinfo', 'zfs'),
+            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
+            mock.call(
+                'zfs', 'set',
+                access_str,
+                dataset_name, run_as_root=True),
+            mock.call(
+                'zfs', 'get', 'mountpoint', dataset_name, run_as_root=True),
+            mock.call(
+                'exportfs', '-u', '4.4.4.4:/%s' % dataset_name,
+                run_as_root=True),
+            mock.call(
+                'exportfs', '-u', '5.5.5.5:/%s' % dataset_name,
+                run_as_root=True),
+        ])
+
+    def test_update_access_dataset_not_found(self):
+        self.mock_object(zfs_utils.LOG, 'warning')
+        zfs_utils.utils.execute.reset_mock()
+        dataset_name = 'zpoolz/foo_dataset_name/fake'
+        zfs_utils.utils.execute.side_effect = [
+            ('fake_modinfo_result', ''),
+            ('fake_dataset_not_found_result', ''),
+            ('fake_set_opt_result', ''),
+        ]
+        access_rules = [
+            {'access_type': 'ip', 'access_level': 'rw',
+             'access_to': '1.1.1.1'},
+            {'access_type': 'ip', 'access_level': 'ro',
+             'access_to': '1.1.1.2'},
+        ]
+
+        self.helper.update_access(dataset_name, access_rules, [], [])
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call('modinfo', 'zfs'),
+            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
+        ])
+        zfs_utils.LOG.warning.assert_called_once_with(
+            mock.ANY, {'name': dataset_name})
+
+    @ddt.data(exception.ProcessExecutionError('Fake'), ('Ok', ''))
+    def test_update_access_no_rules(self, first_execute_result):
+        zfs_utils.utils.execute.reset_mock()
+        dataset_name = 'zpoolz/foo_dataset_name/fake'
+        zfs_utils.utils.execute.side_effect = [
+            ('fake_modinfo_result', ''),
+            ("""NAME            USED  AVAIL  REFER  MOUNTPOINT\n
+%s          2.58M  14.8G  27.5K  /%s\n
+             """ % (dataset_name, dataset_name), ''),
+            ('fake_set_opt_result', ''),
+        ]
+
+        self.helper.update_access(dataset_name, [], [], [])
+
+        zfs_utils.utils.execute.assert_has_calls([
+            mock.call('modinfo', 'zfs'),
+            mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True),
+            mock.call('zfs', 'set', 'sharenfs=off', dataset_name,
+                      run_as_root=True),
+        ])
+
+    @ddt.data('user', 'cert', 'cephx', '', 'fake', 'i', 'p')
+    def test_update_access_not_ip_access_type(self, access_type):
+        zfs_utils.utils.execute.reset_mock()
+        dataset_name = 'zpoolz/foo_dataset_name/fake'
+        access_rules = [
+            {'access_type': access_type, 'access_level': 'rw',
+             'access_to': '1.1.1.1'},
+            {'access_type': 'ip', 'access_level': 'ro',
+             'access_to': '1.1.1.2'},
+        ]
+
+        self.assertRaises(
+            exception.InvalidShareAccess,
+            self.helper.update_access,
+            dataset_name, access_rules, access_rules, [],
+        )
+
+        self.assertEqual(0, zfs_utils.utils.execute.call_count)
+
+    @ddt.data('', 'r', 'o', 'w', 'fake', 'su')
+    def test_update_access_neither_rw_nor_ro_access_level(self, access_level):
+        zfs_utils.utils.execute.reset_mock()
+        dataset_name = 'zpoolz/foo_dataset_name/fake'
+        access_rules = [
+            {'access_type': 'ip', 'access_level': access_level,
+             'access_to': '1.1.1.1'},
+            {'access_type': 'ip', 'access_level': 'ro',
+             'access_to': '1.1.1.2'},
+        ]
+
+        self.assertRaises(
+            exception.InvalidShareAccess,
+            self.helper.update_access,
+            dataset_name, access_rules, access_rules, [],
+        )
+
+        self.assertEqual(0, zfs_utils.utils.execute.call_count)
diff --git a/manila/tests/test_utils.py b/manila/tests/test_utils.py
index e52ce43ad2..0b5643041c 100644
--- a/manila/tests/test_utils.py
+++ b/manila/tests/test_utils.py
@@ -353,6 +353,47 @@ class GenericUtilsTestCase(test.TestCase):
         self.assertRaises(exception.SSHInjectionThreat,
                           utils.check_ssh_injection, cmd)
 
+    @ddt.data(
+        (("3G", "G"), 3.0),
+        (("4.1G", "G"), 4.1),
+        (("5.23G", "G"), 5.23),
+        (("9728M", "G"), 9.5),
+        (("8192K", "G"), 0.0078125),
+        (("2T", "G"), 2048.0),
+        (("2.1T", "G"), 2150.4),
+        (("3P", "G"), 3145728.0),
+        (("3.4P", "G"), 3565158.4),
+        (("9728M", "M"), 9728.0),
+        (("9728.2381T", "T"), 9728.2381),
+        (("0", "G"), 0.0),
+        (("512", "M"), 0.00048828125),
+        (("2097152.", "M"), 2.0),
+        ((".1024", "K"), 0.0001),
+        (("2048G", "T"), 2.0),
+        (("65536G", "P"), 0.0625),
+    )
+    @ddt.unpack
+    def test_translate_string_size_to_float_positive(self, request, expected):
+        actual = utils.translate_string_size_to_float(*request)
+        self.assertEqual(expected, actual)
+
+    @ddt.data(
+        (None, "G"),
+        ("fake", "G"),
+        ("1fake", "G"),
+        ("2GG", "G"),
+        ("1KM", "G"),
+        ("K1M", "G"),
+        ("M1K", "G"),
+        ("", "G"),
+        (23, "G"),
+        (23.0, "G"),
+    )
+    @ddt.unpack
+    def test_translate_string_size_to_float_negative(self, string, multiplier):
+        actual = utils.translate_string_size_to_float(string, multiplier)
+        self.assertIsNone(actual)
+
 
 class MonkeyPatchTestCase(test.TestCase):
     """Unit test for utils.monkey_patch()."""
diff --git a/manila/utils.py b/manila/utils.py
index 50907effb9..fd0dbc419f 100644
--- a/manila/utils.py
+++ b/manila/utils.py
@@ -613,3 +613,45 @@ def require_driver_initialized(func):
             raise exception.DriverNotInitialized(driver=driver_name)
         return func(self, *args, **kwargs)
     return wrapper
+
+
+def translate_string_size_to_float(string, multiplier='G'):
+    """Translates human-readable storage size to float value.
+
+    Supported values for 'multiplier' are the following:
+        K - kilo | 1
+        M - mega | 1024
+        G - giga | 1024 * 1024
+        T - tera | 1024 * 1024 * 1024
+        P - peta | 1024 * 1024 * 1024 * 1024
+
+    Returns:
+        - a float if correct input data is provided
+        - None otherwise
+    """
+    if not isinstance(string, six.string_types):
+        return None
+    multipliers = ('K', 'M', 'G', 'T', 'P')
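+    # K -> 1024**0, M -> 1024**1, G -> 1024**2, T -> 1024**3, P -> 1024**4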
+    mapping = {
+        k: 1024.0 ** v
+        for k, v in zip(multipliers, range(len(multipliers)))
+    }
+    if multiplier not in multipliers:
+        raise exception.ManilaException(
+            "'multiplier' arg should be one of following: "
+            "'%(multipliers)s'. But it is '%(multiplier)s'." % {
+                'multiplier': multiplier,
+                'multipliers': "', '".join(multipliers),
+            }
+        )
+    try:
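+        # A bare numeric string is treated as a byte count: divide by 1024 to
+        # get K-units, then scale down to the requested multiplier.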
+        value = float(string) / 1024.0
+        value = value / mapping[multiplier]
+        return value
+    except (ValueError, TypeError):
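+        # Strings such as '3.4P' carry their own unit suffix: parse the number
+        # and suffix, then rescale from that unit to the requested one.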
+        matched = re.match(
+            r"^(\d+\.?\d*)([%s])$" % ''.join(multipliers), string)
+        if matched:
+            value = float(matched.groups()[0])
+            multiplier = mapping[matched.groups()[1]] / mapping[multiplier]
+            return value * multiplier