Extend Falconstor driver to utilize multiple FSS storage pools

Change the pool option format from a single integer to key-value pairs in the
configuration file. Use key 'A' for a single storage pool, or keys 'P'
(primary) and 'O' (all other supporting devices) for multiple pools.
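
For example, a backend section in cinder.conf could set the new option as
follows (section name and pool ids here are illustrative only):

    # single storage pool
    fss_pools = A:1
    # two storage pools: P (primary) and O (other supporting devices)
    fss_pools = P:1,O:2

The old form fss_pool = <pool-id> still works for now but is deprecated in
favour of fss_pools (see the option definition below).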

DocImpact
Implements: blueprint falconstor-extend-cinder-driver
Change-Id: Ic5362c8284f2d69989d820173c306e5856df111b
Soffie Huang 2017-01-19 16:26:03 +08:00
parent 6ad77cc4ef
commit 213001f931
5 changed files with 202 additions and 65 deletions

View File

@@ -32,6 +32,8 @@ ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver"
 PRIMARY_IP = '10.0.0.1'
 SECONDARY_IP = '10.0.0.2'
 FAKE_ID = 123
+FAKE_SINGLE_POOLS = {'A': 1}
+FAKE_MULTIPLE_POOLS = {'P': 1, 'O': 2}
 FAKE = 'fake'
 FAKE_HOST = 'fakehost'
 API_RESPONSE = {'rc': 0}
@@ -214,7 +216,7 @@ class FSSDriverTestCase(test.TestCase):
         self.mock_config.san_ip = PRIMARY_IP
         self.mock_config.san_login = FAKE
         self.mock_config.san_password = FAKE
-        self.mock_config.fss_pool = FAKE_ID
+        self.mock_config.fss_pools = FAKE_SINGLE_POOLS
         self.mock_config.san_is_local = False
         self.mock_config.fss_debug = False
         self.mock_config.additional_retry_list = False
@@ -237,8 +239,8 @@ class TestFSSISCSIDriver(FSSDriverTestCase):
     def test_initialized_should_set_fss_info(self):
         self.assertEqual(self.driver.proxy.fss_host,
                          self.driver.configuration.san_ip)
-        self.assertEqual(self.driver.proxy.fss_defined_pool,
-                         self.driver.configuration.fss_pool)
+        self.assertEqual(self.driver.proxy.fss_defined_pools,
+                         self.driver.configuration.fss_pools)

     def test_check_for_setup_error(self):
         self.assertRaises(exception.VolumeBackendAPIException,
@@ -527,7 +529,7 @@ class TestRESTProxy(test.TestCase):
         configuration.san_ip = FAKE
         configuration.san_login = FAKE
         configuration.san_password = FAKE
-        configuration.fss_pool = FAKE_ID
+        configuration.fss_pools = FAKE_SINGLE_POOLS
         configuration.fss_debug = False
         configuration.additional_retry_list = None
@@ -545,8 +547,9 @@ class TestRESTProxy(test.TestCase):

     def test_create_volume(self):
         sizemb = self.proxy._convert_size_to_mb(VOLUME['size'])
         volume_name = self.proxy._get_fss_volume_name(VOLUME)
-        params = dict(storagepoolid=self.proxy.fss_defined_pool,
+        _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "P")
+        params = dict(storagepoolid=_pool_id,
                       sizemb=sizemb,
                       category="virtual",
                       name=volume_name)
@@ -582,11 +585,12 @@ class TestRESTProxy(test.TestCase):
     def test_clone_volume(self, mock__get_fss_vid_from_name):
         self.FSS_MOCK.create_mirror.return_value = API_RESPONSE
         self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE
+        _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
         mirror_params = dict(
             category='virtual',
             selectioncriteria='anydrive',
             mirrortarget="virtual",
-            storagepoolid=self.proxy.fss_defined_pool
+            storagepoolid=_pool_id
         )

         ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME)
@@ -613,9 +617,10 @@ class TestRESTProxy(test.TestCase):
             FAKE_ID)
         sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size'])
         mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb)
+        _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")
         self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
             FAKE_ID,
-            storagepoolid=self.proxy.fss_defined_pool)
+            storagepoolid=_pool_id)
         self.FSS_MOCK.create_timemark.assert_called_once_with(
             FAKE_ID,
             SNAPSHOT["display_name"])
@@ -669,6 +674,7 @@ class TestRESTProxy(test.TestCase):
         self.FSS_MOCK.get_timemark.return_value = tm_info
         mock__get_timestamp.return_value = RAWTIMESTAMP
         timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP)
+        _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")

         self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT)
         self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID)
@@ -676,7 +682,7 @@ class TestRESTProxy(test.TestCase):
             SNAPSHOT['display_name'])
         self.FSS_MOCK.copy_timemark.assert_called_once_with(
             timestamp,
-            storagepoolid=self.proxy.fss_defined_pool,
+            storagepoolid=_pool_id,
             name=new_vol_name)

     @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id')
@@ -778,13 +784,14 @@ class TestRESTProxy(test.TestCase):
             CG_SNAPSHOT['consistencygroup_id'])
         mock__get_fss_gid_from_name.assert_called_once_with(group_name)
         mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID)
+        _pool_id = self.proxy._selected_pool_id(FAKE_SINGLE_POOLS, "O")

         for vid in vid_list:
             self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid)
             mock_create_vdev_snapshot.assert_called_once_with(vid, 1024)
             self.FSS_MOCK.create_timemark_policy.assert_called_once_with(
                 vid,
-                storagepoolid=self.proxy.fss_defined_pool)
+                storagepoolid=_pool_id)

         mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name)

View File

@@ -36,7 +36,20 @@ LOG = logging.getLogger(__name__)
 FSS_OPTS = [
     cfg.IntOpt('fss_pool',
                default='',
-               help='FSS pool id in which FalconStor volumes are stored.'),
+               help='DEPRECATED: FSS pool id in which FalconStor volumes are '
+                    'stored.',
+               deprecated_since='Pike',
+               deprecated_reason='This option will be removed once Queens '
+                                 'development opens up. Please use fss_pools '
+                                 'instead.'),
+    cfg.DictOpt('fss_pools',
+                default={},
+                help='FSS pool id list in which FalconStor volumes are stored.'
+                     ' If you have only one pool, use A:<pool-id>. '
+                     'You can also have up to two storage pools, '
+                     'P for primary and O for all supporting devices. '
+                     'The usage is P:<primary-pool-id>,O:<others-pool-id>',
+                deprecated_name='fss_pool'),
     cfg.StrOpt('fss_san_secondary_ip',
                default='',
                help='Specifies FSS secondary management IP to be used '
@@ -54,12 +67,20 @@ CONF.register_opts(FSS_OPTS)
 class FalconstorBaseDriver(san.SanDriver):

     def __init__(self, *args, **kwargs):
         super(FalconstorBaseDriver, self).__init__(*args, **kwargs)

         if self.configuration:
             self.configuration.append_config_values(FSS_OPTS)
+            if self.configuration.fss_pool:
+                self.configuration.fss_pools = {'A': str(
+                    self.configuration.fss_pool)}
+                LOG.warning("'fss_pool=<pool-id>' is deprecated. Using the "
+                            "fss_pools=A:<pool-id> for single pool or "
+                            "fss_pools=P:<pool-id>,O:<other-pool-id> instead "
+                            "as old format will be removed once Queens "
+                            "development opens up.")

         self.proxy = rest_proxy.RESTProxy(self.configuration)
         self._backend_name = (
             self.configuration.safe_get('volume_backend_name') or 'FalconStor')
@@ -71,54 +92,93 @@ class FalconstorBaseDriver(san.SanDriver):
     def check_for_setup_error(self):
         if self.proxy.session_id is None:
-            msg = (_('FSS cinder volume driver not ready: Unable to determine '
-                     'session id.'))
+            msg = _('FSS cinder volume driver not ready: Unable to determine '
+                    'session id.')
             raise exception.VolumeBackendAPIException(data=msg)

-        if not self.configuration.fss_pool:
+        if self.configuration.fss_pool:
+            self.configuration.fss_pools = {'A': six.text_type(
+                self.configuration.fss_pool)}
+            # The fss_pool is deprecated.
+            LOG.warning("'fss_pool=<pool-id>' is deprecated. Using the "
+                        "fss_pools=A:<pool-id> for single pool or "
+                        "fss_pools=P:<pool-id>,O:<other-pool-id> instead "
+                        "as old format will be removed once Queens development"
+                        " opens up.")
+
+        if not self.configuration.fss_pools:
             msg = _('Pool is not available in the cinder configuration '
                     'fields.')
             raise exception.InvalidHost(reason=msg)
+        self._pool_checking(self.configuration.fss_pools)

-        self._pool_checking(self.configuration.fss_pool)
+        if self.configuration.san_thin_provision:
+            if not self.configuration.max_over_subscription_ratio:
+                msg = _('The max_over_subscription_ratio have to set '
+                        'when thin provisioning enabled.')
+                raise exception.InvalidConfigurationValue(reason=msg)

-    def _pool_checking(self, pool_id):
+    def _pool_checking(self, pool_info):
         pool_count = 0
         try:
-            output = self.proxy.list_pool_info(pool_id)
-            if "name" in output['data']:
-                pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
-                                            output['data']['name']))
-            if pool_count is 0:
-                msg = (_('The given pool info must include the storage pool '
-                         'and naming start with OpenStack-'))
-                raise exception.VolumeBackendAPIException(data=msg)
+            if len(pool_info) == 1:
+                _pool_state = self._is_single_pool(pool_info)
+                if not _pool_state:
+                    msg = _('The given pool info does not match.')
+                    raise exception.VolumeBackendAPIException(data=msg)
+            else:
+                _pool_state = self._is_multi_pool(pool_info)
+                if not _pool_state:
+                    msg = _('The given pool info does not match.')
+                    raise exception.VolumeBackendAPIException(data=msg)
+
+            for index, pool_id in pool_info.items():
+                output = self.proxy.list_pool_info(pool_id)
+                if "name" in output['data']:
+                    pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
+                                                output['data']['name']))
+                if pool_count is 0:
+                    msg = _('The given pool info must include the storage '
+                            'pool and naming start with OpenStack-')
+                    raise exception.VolumeBackendAPIException(data=msg)
         except Exception:
-            msg = (_('Unexpected exception during pool checking.'))
+            msg = _('Unexpected exception during pool checking.')
             LOG.exception(msg)
             raise exception.VolumeBackendAPIException(data=msg)

     def _check_multipath(self):
         if self.configuration.use_multipath_for_image_xfer:
             if not self.configuration.fss_san_secondary_ip:
-                msg = (_('The san_secondary_ip param is null.'))
+                msg = _('The san_secondary_ip param is null.')
                 raise exception.VolumeBackendAPIException(data=msg)
             output = self.proxy._check_iocluster_state()
             if not output:
-                msg = (_('FSS do not support multipathing.'))
+                msg = _('FSS do not support multipathing.')
                 raise exception.VolumeBackendAPIException(data=msg)
             return output
         else:
             return False

+    def _is_single_pool(self, pool_info):
+        if len(pool_info) == 1 and "A" in pool_info:
+            return True
+        else:
+            return False
+
+    def _is_multi_pool(self, pool_info):
+        if len(pool_info) == 2 and "P" in pool_info and "O" in pool_info:
+            return True
+        else:
+            return False
+
     def create_volume(self, volume):
         """Creates a volume.

         We use the metadata of the volume to create variety volume.

         Create a thin provisioned volume :
-        [Usage] create --volume-type FSS --metadata thinprovisioned=true
-                thinsize=<thin-volume-size>
+        [Usage] create --volume-type FSS-THIN
+                --metadata thinsize=<thin-volume-size> volume-size

         Create a LUN that is a Timeview of another LUN at a specified CDP tag:
         [Usage] create --volume-type FSS --metadata timeview=<vid>
@@ -128,20 +188,25 @@ class FalconstorBaseDriver(san.SanDriver):
         [Usage] create --volume-type FSS --metadata timeview=<vid>
                 rawtimestamp=<rawtimestamp> volume-size

+        Create a mirrored volume :
+        [Usage] create --volume-type FSS --metadata mirrored=true
+
         """
         volume_metadata = self._get_volume_metadata(volume)
         if not volume_metadata:
             volume_name, fss_metadata = self.proxy.create_vdev(volume)
         else:
-            if ("timeview" in volume_metadata and
+            if self.configuration.san_thin_provision:
+                volume_name, fss_metadata = self.proxy.create_thin_vdev(
+                    volume_metadata, volume)
+            elif ("timeview" in volume_metadata and
                     ("cdptag" in volume_metadata) or
                     ("rawtimestamp" in volume_metadata)):
                 volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag(
                     volume_metadata, volume)
-            elif ("thinprovisioned" in volume_metadata and
-                  "thinsize" in volume_metadata):
-                volume_name, fss_metadata = self.proxy.create_thin_vdev(
+            elif 'mirrored' in volume_metadata:
+                volume_name, fss_metadata = self.proxy.create_vdev_with_mirror(
                     volume_metadata, volume)
             else:
                 volume_name, fss_metadata = self.proxy.create_vdev(volume)
@@ -265,6 +330,8 @@ class FalconstorBaseDriver(san.SanDriver):
     def get_volume_stats(self, refresh=False):
         total_capacity = 0
         free_space = 0
+        # Thin provisioning
+        thin_enabled = self.configuration.san_thin_provision
         if refresh:
             try:
                 info = self.proxy._get_pools_info()
@@ -280,9 +347,15 @@ class FalconstorBaseDriver(san.SanDriver):
                         "total_capacity_gb": total_capacity,
                         "free_capacity_gb": free_space,
                         "reserved_percentage": 0,
-                        "consistencygroup_support": True
+                        "consistencygroup_support": True,
+                        "thin_provisioning_support": thin_enabled,
+                        "thick_provisioning_support": not thin_enabled
                         }
+                if thin_enabled:
+                    provisioned_capacity = int(info['used_gb'])
+                    data['provisioned_capacity_gb'] = provisioned_capacity
+                    data['max_over_subscription_ratio'] = (
+                        self.configuration.max_over_subscription_ratio)
                 self._stats = data
             except Exception as exc:
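
In terms of end-user commands, the updated create_volume() docstring above
corresponds to usage along these lines (volume type names and sizes are
illustrative; thin provisioning is driven by san_thin_provision on the
backend, mirroring by volume metadata):

    cinder create --volume-type FSS --metadata mirrored=true 10
    cinder create --volume-type FSS-THIN --metadata thinsize=2 10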

View File

@@ -41,15 +41,18 @@ class FSSISCSIDriver(fss_common.FalconstorBaseDriver,
         1.03 - merge source code
         1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume()
                metadata TypeError
-        2.0.0 - Mitaka driver
-            -- fixed consisgroup commands error.
+        2.0.0 - Newton driver
+            -- fixed consisgroup commands error
         2.0.1 -- fixed bugs
         2.0.2 -- support Multipath
-        3.0.0 - Newton driver
+        3.0.0 - Ocata driver
+            -- fixed bugs
+        4.0.0 - Pike driver
+            -- extend Cinder driver to utilize multiple FSS storage pools

     """

-    VERSION = '3.0.0'
+    VERSION = '4.0.0'

     # ThirdPartySystems wiki page
     CI_WIKI_NAME = "FalconStor_CI"

View File

@@ -16,6 +16,7 @@
 import base64
 import json
 import random
+import six
 import time
 import uuid
@@ -75,7 +76,7 @@ LOG = logging.getLogger(__name__)
 class RESTProxy(object):
     def __init__(self, config):
         self.fss_host = config.san_ip
-        self.fss_defined_pool = config.fss_pool
+        self.fss_defined_pools = config.fss_pools
         if config.additional_retry_list:
             RETRY_LIST.append(config.additional_retry_list)
@@ -117,15 +118,17 @@ class RESTProxy(object):
     def _get_pools_info(self):
         qpools = []
         poolinfo = {}
+        total_capacity_gb = 0
+        used_gb = 0
         try:
             output = self.list_pool_info()
             if output and "storagepools" in output['data']:
                 for item in output['data']['storagepools']:
                     if item['name'].startswith(GROUP_PREFIX) and (
-                            self.fss_defined_pool == item['id']):
+                            six.text_type(item['id']) in
+                            self.fss_defined_pools.values()):
                         poolid = int(item['id'])
                         qpools.append(poolid)
-                        break

             if not qpools:
                 msg = _('The storage pool information is empty or not correct')
@@ -134,18 +137,20 @@ class RESTProxy(object):
             # Query pool detail information
             for poolid in qpools:
                 output = self.list_pool_info(poolid)
-                poolinfo['pool_name'] = output['data']['name']
-                poolinfo['total_capacity_gb'] = (
+                total_capacity_gb += (
                     self._convert_size_to_gb(output['data']['size']))
-                poolinfo['used_gb'] = (
-                    self._convert_size_to_gb(output['data']['used']))
-                poolinfo['QoS_support'] = False
-                poolinfo['reserved_percentage'] = 0
+                used_gb += (self._convert_size_to_gb(output['data']['used']))
         except Exception:
             msg = (_('Unexpected exception during get pools info.'))
             LOG.exception(msg)
             raise exception.VolumeBackendAPIException(data=msg)
+
+        poolinfo['total_capacity_gb'] = total_capacity_gb
+        poolinfo['used_gb'] = used_gb
+        poolinfo['QoS_support'] = False
+        poolinfo['reserved_percentage'] = 0
         return poolinfo

     def list_pool_info(self, pool_id=None):
@@ -163,13 +168,26 @@ class RESTProxy(object):
             adapter_type = physicaladapters['type']
         return adapter_type

+    def _selected_pool_id(self, pool_info, pool_type=None):
+        _pool_id = 0
+        if len(pool_info) == 1 and "A" in pool_info:
+            _pool_id = pool_info['A']
+        elif len(pool_info) == 2 and "P" in pool_info and "O" in pool_info:
+            if pool_type:
+                if pool_type == "P":
+                    _pool_id = pool_info['P']
+                elif pool_type == "O":
+                    _pool_id = pool_info['O']
+        return _pool_id
+
     def create_vdev(self, volume):
         sizemb = self._convert_size_to_mb(volume["size"])
         volume_name = self._get_fss_volume_name(volume)
-        params = dict(storagepoolid=self.fss_defined_pool,
-                      category="virtual",
+        params = dict(category="virtual",
                       sizemb=sizemb,
                       name=volume_name)
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
+        params.update(storagepoolid=pool_id)
         return volume_name, self.FSS.create_vdev(params)

     def create_tv_from_cdp_tag(self, volume_metadata, volume):
@@ -186,13 +204,13 @@ class RESTProxy(object):
         volume_name = self._get_fss_volume_name(volume)
         sizemb = self._convert_size_to_mb(volume['size'])
         params = dict(name=volume_name,
-                      storage=dict(storagepoolid=self.fss_defined_pool,
-                                   sizemb=sizemb),
                       automaticexpansion=dict(enabled=False),
                       timeviewcopy=True)
         if cdp_tag:
             params.update(cdpjournaltag=cdp_tag)

+        pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
+        params.update(storage={'storagepoolid': pool_id, 'sizemb': sizemb})
         metadata = self.FSS.create_timeview(tv_vid, params)
         return volume_name, metadata
@@ -200,8 +218,7 @@ class RESTProxy(object):
         thin_size = 0
         size = volume["size"]
         sizemb = self._convert_size_to_mb(size)
-        params = dict(storagepoolid=self.fss_defined_pool,
-                      category="virtual")
+        params = {'category': 'virtual'}

         if 'thinprovisioned' in volume_metadata:
             if volume_metadata['thinprovisioned'] is False:
@@ -232,10 +249,40 @@ class RESTProxy(object):
         params.update(thinprovisioning=thin_disk)
         params.update(sizemb=thin_size)
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
+        params.update(storagepoolid=pool_id)
         volume_name = self._get_fss_volume_name(volume)
         params.update(name=volume_name)
         return volume_name, self.FSS.create_vdev(params)

+    def create_vdev_with_mirror(self, volume_metadata, volume):
+        if 'mirrored' in volume_metadata:
+            if volume_metadata['mirrored'] is False:
+                msg = _('If you want to create a mirrored volume, this param '
+                        'must be True.')
+                raise exception.VolumeBackendAPIException(data=msg)
+
+        sizemb = self._convert_size_to_mb(volume["size"])
+        volume_name = self._get_fss_volume_name(volume)
+        params = {'category': 'virtual', 'sizemb': sizemb, 'name': volume_name}
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
+        params.update(storagepoolid=pool_id)
+        metadata = self.FSS.create_vdev(params)
+
+        if metadata:
+            vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE)
+            mirror_params = {'category': 'virtual',
+                             'selectioncriteria': 'anydrive',
+                             'mirrortarget': "virtual"}
+            pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
+            mirror_params.update(storagepoolid=pool_id)
+            ret = self.FSS.create_mirror(vid, mirror_params)
+            if ret:
+                return volume_name, metadata
+
     def _get_fss_vid_from_name(self, volume_name, fss_type=None):
         vid = []
         output = self.FSS.list_fss_volume_info()
@@ -282,7 +329,6 @@ class RESTProxy(object):
         return vidlist

     def clone_volume(self, new_vol_name, source_volume_name):
-        params = dict(storagepoolid=self.fss_defined_pool)
         volume_metadata = {}
         new_vid = ''
         vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE)
@@ -291,7 +337,8 @@ class RESTProxy(object):
             selectioncriteria='anydrive',
             mirrortarget="virtual"
         )
-        mirror_params.update(params)
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
+        mirror_params.update(storagepoolid=pool_id)
         ret1 = self.FSS.create_mirror(vid, mirror_params)

         if ret1:
@@ -331,12 +378,11 @@ class RESTProxy(object):
         (snap, tm_policy, vdev_size) = (self.FSS.
                                         _check_if_snapshot_tm_exist(vid))

         if not snap:
             self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))
         if not tm_policy:
-            self.FSS.create_timemark_policy(
-                vid, storagepoolid=self.fss_defined_pool)
+            pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
+            self.FSS.create_timemark_policy(vid, storagepoolid=pool_id)
         if not snap_name:
             snap_name = "snap-%s" % time.strftime('%Y%m%d%H%M%S')
@@ -409,8 +455,9 @@ class RESTProxy(object):
             raise exception.VolumeBackendAPIException(data=msg)

         timestamp = '%s_%s' % (vid, rawtimestamp)
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "P")
         output = self.FSS.copy_timemark(
-            timestamp, storagepoolid=self.fss_defined_pool, name=new_vol_name)
+            timestamp, storagepoolid=pool_id, name=new_vol_name)
         if output['rc'] == 0:
             vid = output['id']
             self.FSS._random_sleep()
@@ -468,12 +515,13 @@ class RESTProxy(object):
         return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size))

     def create_vdev_snapshot(self, vid, size):
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "O")
         params = dict(
             idlist=[vid],
             selectioncriteria='anydrive',
-            policy='alwayswrite',
+            policy='preserveall',
             sizemb=size,
-            storagepoolid=self.fss_defined_pool
+            storagepoolid=pool_id
         )
         return self.FSS.create_vdev_snapshot(params)
@@ -518,6 +566,7 @@ class RESTProxy(object):
         gsnap_name = self._encode_name(cgsnapshot['id'])
         gid = self._get_fss_gid_from_name(group_name)
         vidlist = self._get_vdev_id_from_group_id(gid)
+        pool_id = self._selected_pool_id(self.fss_defined_pools, "O")

         for vid in vidlist:
             (snap, tm_policy, sizemb) = (self.FSS.
@@ -525,8 +574,7 @@ class RESTProxy(object):
             if not snap:
                 self.create_vdev_snapshot(vid, sizemb)
             if not tm_policy:
-                self.FSS.create_timemark_policy(
-                    vid, storagepoolid=self.fss_defined_pool)
+                self.FSS.create_timemark_policy(vid, storagepoolid=pool_id)

         group_tm_policy = self.FSS._check_if_group_tm_enabled(gid)
         if not group_tm_policy:
@@ -1146,7 +1194,8 @@ class FSSRestCommon(object):
         params = dict(
             idlist=[vid],
             automatic=dict(enabled=False),
-            maxtimemarkcount=MAXSNAPSHOTS
+            maxtimemarkcount=MAXSNAPSHOTS,
+            retentionpolicy=dict(mode='all'),
         )
         if kwargs.get('storagepoolid'):
             params.update(kwargs)
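
Pool selection in the proxy now funnels through _selected_pool_id(); restated
outside the class as a sketch (same rules as the hunk above, with illustrative
example values):

    def selected_pool_id(pool_info, pool_type=None):
        # pool_info is the parsed fss_pools dict from the configuration.
        pool_id = 0
        if len(pool_info) == 1 and "A" in pool_info:
            # Single-pool layout: every request goes to pool 'A'.
            pool_id = pool_info['A']
        elif len(pool_info) == 2 and "P" in pool_info and "O" in pool_info:
            # Two-pool layout: new volumes ask for 'P', while snapshots and
            # mirror targets ask for 'O'.
            if pool_type == "P":
                pool_id = pool_info['P']
            elif pool_type == "O":
                pool_id = pool_info['O']
        return pool_id

    selected_pool_id({'A': 1}, "P")          # -> 1 (single pool ignores type)
    selected_pool_id({'P': 1, 'O': 2}, "O")  # -> 2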

View File

@@ -0,0 +1,5 @@
+---
+features:
+  - Added ability to specify multiple storage pools in the FalconStor driver.
+deprecations:
+  - The fss_pool option is deprecated. Use fss_pools instead.