Add storage capacity balance strategy
This patch adds the Storage Capacity Balance Strategy, which balances storage capacity across cinder pools through volume migration.

Change-Id: I52ea7ce00deb609a2f668db330f1fbc1c9932613
Implements: blueprint storage-workload-balance
This commit is contained in:
parent
7297603f65
commit
f5bcf9d355
@ -0,0 +1,4 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Added storage capacity balance strategy.
|
@ -77,6 +77,7 @@ watcher_strategies =
|
||||
workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance
|
||||
uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow
|
||||
noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor
|
||||
storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance
|
||||
|
||||
watcher_actions =
|
||||
migrate = watcher.applier.actions.migration:Migrate
|
||||
|
@ -70,6 +70,10 @@ class CinderHelper(object):
|
||||
def get_volume_type_list(self):
    """Return the list of all cinder volume types."""
    volume_types = self.cinder.volume_types.list()
    return volume_types
|
||||
|
||||
def get_volume_snapshots_list(self):
    """Return every volume snapshot, across all tenants."""
    search = {'all_tenants': True}
    return self.cinder.volume_snapshots.list(search_opts=search)
|
||||
|
||||
def get_volume_type_by_backendname(self, backendname):
|
||||
"""Retrun a list of volume type"""
|
||||
volume_type_list = self.get_volume_type_list()
|
||||
|
@ -21,6 +21,8 @@ from watcher.decision_engine.strategy.strategies import dummy_with_scorer
|
||||
from watcher.decision_engine.strategy.strategies import noisy_neighbor
|
||||
from watcher.decision_engine.strategy.strategies import outlet_temp_control
|
||||
from watcher.decision_engine.strategy.strategies import saving_energy
|
||||
from watcher.decision_engine.strategy.strategies import \
|
||||
storage_capacity_balance
|
||||
from watcher.decision_engine.strategy.strategies import uniform_airflow
|
||||
from watcher.decision_engine.strategy.strategies import \
|
||||
vm_workload_consolidation
|
||||
@ -33,6 +35,7 @@ OutletTempControl = outlet_temp_control.OutletTempControl
|
||||
DummyStrategy = dummy_strategy.DummyStrategy
|
||||
DummyWithScorer = dummy_with_scorer.DummyWithScorer
|
||||
SavingEnergy = saving_energy.SavingEnergy
|
||||
StorageCapacityBalance = storage_capacity_balance.StorageCapacityBalance
|
||||
VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation
|
||||
WorkloadBalance = workload_balance.WorkloadBalance
|
||||
WorkloadStabilization = workload_stabilization.WorkloadStabilization
|
||||
@ -42,4 +45,4 @@ NoisyNeighbor = noisy_neighbor.NoisyNeighbor
|
||||
# Public strategy classes re-exported by this package.
__all__ = (
    "Actuator",
    "BasicConsolidation",
    "OutletTempControl",
    "DummyStrategy",
    "DummyWithScorer",
    "VMWorkloadConsolidation",
    "WorkloadBalance",
    "WorkloadStabilization",
    "UniformAirflow",
    "NoisyNeighbor",
    "SavingEnergy",
    "StorageCapacityBalance",
)
|
||||
|
@ -0,0 +1,409 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
*Workload balance using cinder volume migration*
|
||||
|
||||
*Description*
|
||||
|
||||
This strategy migrates volumes based on the workload of the
|
||||
cinder pools.
|
||||
It makes decision to migrate a volume whenever a pool's used
|
||||
utilization % is higher than the specified threshold. The volume
|
||||
to be moved should make the pool close to average workload of all
|
||||
cinder pools.
|
||||
|
||||
*Requirements*
|
||||
|
||||
* You must have at least 2 cinder volume pools to run
|
||||
this strategy.
|
||||
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from watcher._i18n import _
|
||||
from watcher.common import cinder_helper
|
||||
from watcher.decision_engine.strategy.strategies import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class StorageCapacityBalance(base.WorkloadStabilizationBaseStrategy):
    """Storage capacity balance using cinder volume migration.

    *Description*

    This strategy migrates volumes based on the workload of the
    cinder pools.
    It makes decision to migrate a volume whenever a pool's used
    utilization % is higher than the specified threshold. The volume
    to be moved should make the pool close to average workload of all
    cinder pools.

    *Requirements*

    * You must have at least 2 cinder volume pools to run
      this strategy.
    """

    def __init__(self, config, osc=None):
        """VolumeMigrate using cinder volume migration

        :param config: A mapping containing the configuration of this strategy
        :type config: :py:class:`~.Struct` instance
        :param osc: :py:class:`~.OpenStackClients` instance
        """
        super(StorageCapacityBalance, self).__init__(config, osc)
        self._cinder = None
        # Used-capacity percentage above which a pool is considered
        # overloaded; overridden from input parameters in pre_execute().
        self.volume_threshold = 80.0
        # Cache: backend name -> list of matching volume types, so we do
        # not re-query cinder for every candidate volume.
        self.pool_type_cache = dict()
        # Pools over / under the threshold, filled by do_execute().
        self.source_pools = []
        self.dest_pools = []

    @property
    def cinder(self):
        """Lazily constructed cinder helper client."""
        if not self._cinder:
            self._cinder = cinder_helper.CinderHelper(osc=self.osc)
        return self._cinder

    @classmethod
    def get_name(cls):
        return "storage_capacity_balance"

    @classmethod
    def get_display_name(cls):
        return _("Storage Capacity Balance Strategy")

    @classmethod
    def get_translatable_display_name(cls):
        return "Storage Capacity Balance Strategy"

    @classmethod
    def get_schema(cls):
        # Mandatory default setting for each element
        return {
            "properties": {
                "volume_threshold": {
                    "description": "volume threshold for capacity balance",
                    "type": "number",
                    "default": 80.0
                },
            },
        }

    @classmethod
    def get_config_opts(cls):
        return [
            cfg.ListOpt(
                "ex_pools",
                help="exclude pools",
                default=['local_vstorage']),
        ]

    def get_pools(self, cinder):
        """Get all volume pools except the configured ``ex_pools``.

        :param cinder: cinder client
        :return: volume pools eligible for balancing
        """
        ex_pools = self.config.ex_pools
        pools = cinder.get_storage_pool_list()
        filtered_pools = [p for p in pools
                          if p.pool_name not in ex_pools]
        return filtered_pools

    def get_volumes(self, cinder):
        """Get all volumes with status available or in-use and no snapshot.

        Volumes that have snapshots cannot be migrated, and volumes whose
        migration_status is neither None nor 'success' are presumably
        already being migrated, so both are filtered out.

        :param cinder: cinder client
        :return: volumes eligible for migration
        """
        all_volumes = cinder.get_volume_list()
        valid_status = ['in-use', 'available']

        volume_snapshots = cinder.get_volume_snapshots_list()
        snapshot_volume_ids = [snapshot.volume_id
                               for snapshot in volume_snapshots]

        nosnap_volumes = list(filter(lambda v: v.id not in snapshot_volume_ids,
                                     all_volumes))
        LOG.info("volumes in snap: %s", snapshot_volume_ids)
        status_volumes = list(
            filter(lambda v: v.status in valid_status, nosnap_volumes))
        valid_volumes = [v for v in status_volumes
                         if getattr(v, 'migration_status') == 'success'
                         or getattr(v, 'migration_status') is None]
        LOG.info("valid volumes: %s", valid_volumes)

        return valid_volumes

    def group_pools(self, pools, threshold):
        """Group volume pools by threshold.

        :param pools: all volume pools
        :param threshold: used-capacity threshold as a ratio in [0, 1]
        :return: tuple of (over threshold pools, under threshold pools)
        """
        under_pools = list(
            filter(lambda p: float(p.total_capacity_gb) -
                   float(p.free_capacity_gb) <
                   float(p.total_capacity_gb) * threshold, pools))

        over_pools = list(
            filter(lambda p: float(p.total_capacity_gb) -
                   float(p.free_capacity_gb) >=
                   float(p.total_capacity_gb) * threshold, pools))

        return over_pools, under_pools

    def get_volume_type_by_name(self, cinder, backendname):
        """Return the (cached) list of volume types for *backendname*.

        :param cinder: cinder client
        :param backendname: value of the 'volume_backend_name' extra spec
        :return: list of matching volume types (possibly empty)
        """
        if backendname in self.pool_type_cache:
            return self.pool_type_cache.get(backendname)

        volume_type_list = cinder.get_volume_type_list()
        volume_type = list(filter(
            lambda volume_type:
            volume_type.extra_specs.get(
                'volume_backend_name') == backendname, volume_type_list))
        if volume_type:
            self.pool_type_cache[backendname] = volume_type
            return self.pool_type_cache.get(backendname)
        else:
            return []

    def migrate_fit(self, volume, threshold):
        """Find a destination pool that can take *volume* by migration.

        Only volumes without a volume type can be plainly migrated;
        typed volumes go through retype_fit() instead.

        :param volume: candidate volume
        :param threshold: used-capacity threshold as a ratio in [0, 1]
        :return: destination pool name, or None when no pool fits
        """
        target_pool_name = None
        if volume.volume_type:
            LOG.info("volume %s type %s", volume.id, volume.volume_type)
            return target_pool_name
        # Try the emptiest pool first.
        self.dest_pools.sort(
            key=lambda p: float(p.free_capacity_gb) /
            float(p.total_capacity_gb))
        for pool in reversed(self.dest_pools):
            total_cap = float(pool.total_capacity_gb)
            allocated = float(pool.allocated_capacity_gb)
            ratio = pool.max_over_subscription_ratio
            if total_cap * ratio < allocated + float(volume.size):
                LOG.info("pool %s allocated over", pool.name)
                continue
            free_cap = float(pool.free_capacity_gb) - float(volume.size)
            if free_cap > (1 - threshold) * total_cap:
                target_pool_name = pool.name
                # Book the capacity so later candidates see the pool as
                # already holding this volume.
                index = self.dest_pools.index(pool)
                setattr(self.dest_pools[index], 'free_capacity_gb',
                        str(free_cap))
                LOG.info("volume: get pool %s for vol %s", target_pool_name,
                         volume.name)
                break
        return target_pool_name

    def check_pool_type(self, volume, dest_pool):
        """Find a volume type on *dest_pool* compatible with *volume*.

        :param volume: candidate volume (must have a volume type)
        :param dest_pool: candidate destination pool
        :return: name of a different, compatible type, or None
        """
        target_type = None
        # check type feature
        if not volume.volume_type:
            return target_type
        volume_type_list = self.cinder.get_volume_type_list()
        volume_type = list(filter(
            lambda volume_type:
            volume_type.name == volume.volume_type, volume_type_list))
        if not volume_type:
            # Bug fix: the source type is unknown; previously this fell
            # through with ``src_extra_specs`` unbound and raised NameError.
            return target_type
        # Copy before popping so we do not mutate the extra_specs of the
        # volume type object itself (it may be shared via pool_type_cache).
        src_extra_specs = dict(volume_type[0].extra_specs)
        src_extra_specs.pop('volume_backend_name', None)

        backendname = getattr(dest_pool, 'volume_backend_name')
        dst_pool_type = self.get_volume_type_by_name(self.cinder, backendname)

        # Keep only destination types whose extra specs match the source's.
        for src_key in src_extra_specs.keys():
            dst_pool_type = [pt for pt in dst_pool_type
                             if pt.extra_specs.get(src_key)
                             == src_extra_specs.get(src_key)]
        # volume.volume_type is known truthy here (early return above), so
        # only propose a retype when it would actually change the type.
        if dst_pool_type and dst_pool_type[0].name != volume.volume_type:
            target_type = dst_pool_type[0].name
        return target_type

    def retype_fit(self, volume, threshold):
        """Find a volume type change that moves *volume* off its pool.

        :param volume: candidate volume
        :param threshold: used-capacity threshold as a ratio in [0, 1]
        :return: target volume type name, or None when no pool fits
        """
        target_type = None
        # Try the emptiest pool first.
        self.dest_pools.sort(
            key=lambda p: float(p.free_capacity_gb) /
            float(p.total_capacity_gb))
        for pool in reversed(self.dest_pools):
            backendname = getattr(pool, 'volume_backend_name')
            pool_type = self.get_volume_type_by_name(self.cinder, backendname)
            LOG.info("volume: pool %s, type %s", pool.name, pool_type)
            # Bug fix: get_volume_type_by_name() returns a list (never
            # None), so the old ``is None`` guard was dead; skip pools
            # without any volume type at all.
            if not pool_type:
                continue
            total_cap = float(pool.total_capacity_gb)
            allocated = float(pool.allocated_capacity_gb)
            ratio = pool.max_over_subscription_ratio
            if total_cap * ratio < allocated + float(volume.size):
                LOG.info("pool %s allocated over", pool.name)
                continue
            free_cap = float(pool.free_capacity_gb) - float(volume.size)
            if free_cap > (1 - threshold) * total_cap:
                target_type = self.check_pool_type(volume, pool)
                if target_type is None:
                    continue
                # Book the capacity on the chosen pool.
                index = self.dest_pools.index(pool)
                setattr(self.dest_pools[index], 'free_capacity_gb',
                        str(free_cap))
                LOG.info("volume: get type %s for vol %s", target_type,
                         volume.name)
                break
        return target_type

    def _seek_actions(self, candidates, threshold, total_cap, used_cap,
                      retype_dicts, migrate_dicts):
        """Plan migrate/retype actions for one ordered list of volumes.

        Fills *retype_dicts*/*migrate_dicts* in place, reducing the
        pool's used capacity accordingly.

        :return: tuple (remaining used capacity, keep-seeking flag);
                 the flag is False once used capacity drops below
                 threshold * total_cap.
        """
        for vol in candidates:
            vol_flag = False
            migrate_pool = self.migrate_fit(vol, threshold)
            if migrate_pool:
                migrate_dicts[vol.id] = migrate_pool
                vol_flag = True
            else:
                target_type = self.retype_fit(vol, threshold)
                if target_type:
                    retype_dicts[vol.id] = target_type
                    vol_flag = True
            if vol_flag:
                used_cap -= float(vol.size)
                if used_cap < threshold * total_cap:
                    return used_cap, False
        return used_cap, True

    def get_actions(self, pool, volumes, threshold):
        """Get volume -> pool/type key-value actions for one source pool.

        Volumes are tried in order of increasing disruption: available
        volumes first, then in-use non-bootable, then in-use bootable,
        stopping as soon as the pool drops below the threshold.

        :param pool: overloaded source pool
        :param volumes: all eligible volumes
        :param threshold: used-capacity threshold as a ratio in [0, 1]
        :return: (retype dict, migrate dict) keyed by volume id
        """
        retype_dicts = dict()
        migrate_dicts = dict()
        total_cap = float(pool.total_capacity_gb)
        used_cap = float(pool.total_capacity_gb) - float(pool.free_capacity_gb)

        volumes_in_pool = list(
            filter(lambda v: getattr(v, 'os-vol-host-attr:host') == pool.name,
                   volumes))
        LOG.info("volumes in pool: %s", str(volumes_in_pool))
        if not volumes_in_pool:
            return retype_dicts, migrate_dicts

        ava_volumes = list(filter(lambda v: v.status == 'available',
                                  volumes_in_pool))
        ava_volumes.sort(key=lambda v: float(v.size))
        LOG.info("available volumes in pool: %s ", str(ava_volumes))
        used_cap, seek_flag = self._seek_actions(
            ava_volumes, threshold, total_cap, used_cap,
            retype_dicts, migrate_dicts)

        if seek_flag:
            noboot_volumes = list(
                filter(lambda v: v.bootable.lower() == 'false'
                       and v.status == 'in-use', volumes_in_pool))
            noboot_volumes.sort(key=lambda v: float(v.size))
            LOG.info("noboot volumes: %s ", str(noboot_volumes))
            used_cap, seek_flag = self._seek_actions(
                noboot_volumes, threshold, total_cap, used_cap,
                retype_dicts, migrate_dicts)

        if seek_flag:
            boot_volumes = list(
                filter(lambda v: v.bootable.lower() == 'true'
                       and v.status == 'in-use', volumes_in_pool))
            boot_volumes.sort(key=lambda v: float(v.size))
            LOG.info("boot volumes: %s ", str(boot_volumes))
            used_cap, seek_flag = self._seek_actions(
                boot_volumes, threshold, total_cap, used_cap,
                retype_dicts, migrate_dicts)

        return retype_dicts, migrate_dicts

    def pre_execute(self):
        """Pre-execution phase

        This can be used to fetch some pre-requisites or data.
        """
        LOG.info("Initializing Storage Capacity Balance Strategy")
        self.volume_threshold = self.input_parameters.volume_threshold

    def do_execute(self, audit=None):
        """Strategy execution phase

        This phase is where you should put the main logic of your strategy.
        """
        all_pools = self.get_pools(self.cinder)
        all_volumes = self.get_volumes(self.cinder)
        # Input is a percentage; internal computations use a 0..1 ratio.
        threshold = float(self.volume_threshold) / 100
        self.source_pools, self.dest_pools = self.group_pools(
            all_pools, threshold)
        LOG.info(" source pools: %s dest pools:%s",
                 self.source_pools, self.dest_pools)
        if not self.source_pools:
            LOG.info("No pools require optimization")
            return

        if not self.dest_pools:
            LOG.info("No enough pools for optimization")
            return
        for source_pool in self.source_pools:
            retype_actions, migrate_actions = self.get_actions(
                source_pool, all_volumes, threshold)
            for vol_id, pool_type in retype_actions.items():
                vol = [v for v in all_volumes if v.id == vol_id]
                parameters = {'migration_type': 'retype',
                              'destination_type': pool_type,
                              'resource_name': vol[0].name}
                self.solution.add_action(action_type='volume_migrate',
                                         resource_id=vol_id,
                                         input_parameters=parameters)
            for vol_id, pool_name in migrate_actions.items():
                vol = [v for v in all_volumes if v.id == vol_id]
                parameters = {'migration_type': 'migrate',
                              'destination_node': pool_name,
                              'resource_name': vol[0].name}
                self.solution.add_action(action_type='volume_migrate',
                                         resource_id=vol_id,
                                         input_parameters=parameters)

    def post_execute(self):
        """Post-execution phase

        Nothing to clean up for this strategy.
        """
        pass
|
@ -0,0 +1,245 @@
|
||||
# -*- encoding: utf-8 -*-
|
||||
# Copyright (c) 2017 ZTE
|
||||
#
|
||||
# Authors: Canwei Li <li.canwei2@zte.com.cn>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
|
||||
from watcher.common import cinder_helper
|
||||
from watcher.common import clients
|
||||
from watcher.common import utils
|
||||
from watcher.decision_engine.strategy import strategies
|
||||
from watcher.tests import base
|
||||
|
||||
|
||||
class TestStorageCapacityBalance(base.TestCase):
    """Unit tests for the storage_capacity_balance strategy."""

    def setUp(self):
        super(TestStorageCapacityBalance, self).setUp()

        def test_fake_pool(name, free, total, allocated):
            # Build a fake cinder pool. Capacities are strings, mirroring
            # what the real cinder API returns; the pool/backend name is
            # the segment after '#' in the fully qualified pool name.
            fake_pool = mock.MagicMock()
            fake_pool.name = name
            fake_pool.pool_name = name.split('#')[1]
            fake_pool.volume_backend_name = name.split('#')[1]
            fake_pool.free_capacity_gb = free
            fake_pool.total_capacity_gb = total
            fake_pool.allocated_capacity_gb = allocated
            fake_pool.max_over_subscription_ratio = 1.0

            return fake_pool

        # pool1: 40/100 used; pool2: 80/100 used (the overloaded pool).
        self.fake_pool1 = test_fake_pool('host1@IPSAN-1#pool1',
                                         '60', '100', '90')

        self.fake_pool2 = test_fake_pool('host1@IPSAN-1#pool2',
                                         '20', '100', '80')

        # Excluded by the strategy's default ex_pools setting.
        self.fake_pool3 = test_fake_pool('host1@IPSAN-1#local_vstorage',
                                         '20', '100', '80')
        self.fake_pools = [self.fake_pool1, self.fake_pool2,
                           self.fake_pool3]

        def test_fake_vol(id, name, size, status, bootable,
                          migration_status=None,
                          volume_type=None):
            # Build a fake cinder volume; every fake volume is hosted on
            # the overloaded pool2.
            fake_vol = mock.MagicMock()
            fake_vol.id = id
            fake_vol.name = name
            fake_vol.size = size
            fake_vol.status = status
            fake_vol.bootable = bootable
            fake_vol.migration_status = migration_status
            fake_vol.volume_type = volume_type
            setattr(fake_vol, 'os-vol-host-attr:host', 'host1@IPSAN-1#pool2')

            return fake_vol

        # vol1: available, typed, previous migration succeeded.
        self.fake_vol1 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd861',
                                       'test_volume1', 4,
                                       'available', 'true', 'success',
                                       volume_type='type2')
        # vol2: in-use, non-bootable, untyped.
        self.fake_vol2 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd862',
                                       'test_volume2', 10,
                                       'in-use', 'false')
        # vol3: in-use, bootable, typed.
        self.fake_vol3 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd863',
                                       'test_volume3', 4,
                                       'in-use', 'true', volume_type='type2')
        # vol4: error status -> filtered out by get_volumes().
        self.fake_vol4 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd864',
                                       'test_volume4', 10,
                                       'error', 'true')
        # vol5: has a snapshot (see fake_snap) -> filtered out.
        self.fake_vol5 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd865',
                                       'test_volume5', 15,
                                       'in-use', 'true')

        self.fake_volumes = [self.fake_vol1,
                             self.fake_vol2,
                             self.fake_vol3,
                             self.fake_vol4,
                             self.fake_vol5]

        def test_fake_snap(vol_id):
            # Build a fake snapshot referencing the given volume id.
            fake_snap = mock.MagicMock()
            fake_snap.volume_id = vol_id

            return fake_snap

        self.fake_snap = [test_fake_snap(
            '922d4762-0bc5-4b30-9cb9-48ab644dd865')]

        def test_fake_volume_type(type_name, extra_specs):
            # Build a fake volume type with the given extra specs.
            fake_type = mock.MagicMock()
            fake_type.name = type_name
            fake_type.extra_specs = extra_specs

            return fake_type

        # One volume type per backend/pool.
        self.fake_types = [test_fake_volume_type(
            'type1', {'volume_backend_name': 'pool1'}),
            test_fake_volume_type(
            'type2', {'volume_backend_name': 'pool2'})
            ]

        osc = clients.OpenStackClients()

        # Patch the cinder client so no real API calls are made.
        p_cinder = mock.patch.object(osc, 'cinder')
        p_cinder.start()
        self.addCleanup(p_cinder.stop)
        self.m_cinder = cinder_helper.CinderHelper(osc=osc)

        p_model = mock.patch.object(
            strategies.StorageCapacityBalance, "compute_model",
            new_callable=mock.PropertyMock)
        self.m_model = p_model.start()
        self.addCleanup(p_model.stop)

        p_audit_scope = mock.patch.object(
            strategies.StorageCapacityBalance, "audit_scope",
            new_callable=mock.PropertyMock
        )
        self.m_audit_scope = p_audit_scope.start()
        self.addCleanup(p_audit_scope.stop)

        # Wire the cinder helper's list calls to the fixtures above.
        self.m_audit_scope.return_value = mock.Mock()
        self.m_cinder.get_storage_pool_list = mock.Mock(
            return_value=self.fake_pools)
        self.m_cinder.get_volume_list = mock.Mock(
            return_value=self.fake_volumes)
        self.m_cinder.get_volume_snapshots_list = mock.Mock(
            return_value=self.fake_snap)
        self.m_cinder.get_volume_type_list = mock.Mock(
            return_value=self.fake_types)
        self.strategy = strategies.StorageCapacityBalance(
            config=mock.Mock(), osc=osc)
        self.strategy._cinder = self.m_cinder
        self.strategy.input_parameters = utils.Struct()
        self.strategy.input_parameters.update(
            {'volume_threshold': 80.0})
        self.strategy.volume_threshold = 80.0

    def test_get_pools(self):
        # local_vstorage is excluded, leaving pool1 and pool2.
        self.strategy.config.ex_pools = "local_vstorage"
        pools = self.strategy.get_pools(self.m_cinder)
        self.assertEqual(len(pools), 2)

    def test_get_volumes(self):
        # vol4 (error status) and vol5 (snapshot) are filtered out.
        volumes = self.strategy.get_volumes(self.m_cinder)
        self.assertEqual(len(volumes), 3)

    def test_group_pools(self):
        self.strategy.config.ex_pools = "local_vstorage"
        pools = self.strategy.get_pools(self.m_cinder)
        # At 50%: pool2 (80% used) is over, pool1 (40% used) is under.
        over_pools, under_pools = self.strategy.group_pools(pools, 0.50)
        self.assertEqual(len(under_pools), 1)
        self.assertEqual(len(over_pools), 1)

        # At 85%: both pools are under the threshold.
        over_pools, under_pools = self.strategy.group_pools(pools, 0.85)
        self.assertEqual(len(under_pools), 2)
        self.assertEqual(len(over_pools), 0)

        # At 30%: both pools are over the threshold.
        over_pools, under_pools = self.strategy.group_pools(pools, 0.30)
        self.assertEqual(len(under_pools), 0)
        self.assertEqual(len(over_pools), 2)

    def test_get_volume_type_by_name(self):
        # Known backend name -> one matching volume type.
        vol_type = self.strategy.get_volume_type_by_name(
            self.m_cinder, 'pool1')
        self.assertEqual(len(vol_type), 1)

        # Unknown backend name -> empty list.
        vol_type = self.strategy.get_volume_type_by_name(
            self.m_cinder, 'ks3200')
        self.assertEqual(len(vol_type), 0)

    def test_check_pool_type(self):
        # type2 vol on pool1 -> retype to type1 is possible.
        pool_type = self.strategy.check_pool_type(
            self.fake_vol3, self.fake_pool1)
        self.assertIsNotNone(pool_type)

        # type2 vol already matches pool2's type -> no retype.
        pool_type = self.strategy.check_pool_type(
            self.fake_vol3, self.fake_pool2)
        self.assertIsNone(pool_type)

    def test_migrate_fit(self):
        self.strategy.config.ex_pools = "local_vstorage"
        pools = self.strategy.get_pools(self.m_cinder)
        self.strategy.source_pools, self.strategy.dest_pools = (
            self.strategy.group_pools(pools, 0.60))
        # Untyped vol2 fits into pool1.
        target_pool = self.strategy.migrate_fit(self.fake_vol2, 0.60)
        self.assertIsNotNone(target_pool)

        # vol3 has a volume type -> plain migration refused.
        target_pool = self.strategy.migrate_fit(self.fake_vol3, 0.50)
        self.assertIsNone(target_pool)

        # vol5 (15G) exceeds pool1's over-subscription limit.
        target_pool = self.strategy.migrate_fit(self.fake_vol5, 0.60)
        self.assertIsNone(target_pool)

    def test_retype_fit(self):
        self.strategy.config.ex_pools = "local_vstorage"
        pools = self.strategy.get_pools(self.m_cinder)
        self.strategy.source_pools, self.strategy.dest_pools = (
            self.strategy.group_pools(pools, 0.50))
        # Typed volumes can be retyped onto pool1.
        target_pool = self.strategy.retype_fit(self.fake_vol1, 0.50)
        self.assertIsNotNone(target_pool)

        # Untyped vol2 cannot be retyped.
        target_pool = self.strategy.retype_fit(self.fake_vol2, 0.50)
        self.assertIsNone(target_pool)

        target_pool = self.strategy.retype_fit(self.fake_vol3, 0.50)
        self.assertIsNotNone(target_pool)

        # vol5 is too large to fit within the threshold.
        target_pool = self.strategy.retype_fit(self.fake_vol5, 0.60)
        self.assertIsNone(target_pool)

    def test_execute(self):
        # Progressively lower free capacity / raise the threshold to
        # exercise 1, 2 and 3 planned actions.
        self.strategy.input_parameters.update(
            {'volume_threshold': 45.0})
        self.strategy.config.ex_pools = "local_vstorage"
        solution = self.strategy.execute()
        self.assertEqual(len(solution.actions), 1)

        # Reset pool1's capacity (mutated by the previous run).
        setattr(self.fake_pool1, 'free_capacity_gb', '60')
        self.strategy.input_parameters.update(
            {'volume_threshold': 50.0})
        solution = self.strategy.execute()
        self.assertEqual(len(solution.actions), 2)

        setattr(self.fake_pool1, 'free_capacity_gb', '60')
        self.strategy.input_parameters.update(
            {'volume_threshold': 60.0})
        solution = self.strategy.execute()
        self.assertEqual(len(solution.actions), 3)
Loading…
Reference in New Issue
Block a user