Hitachi: support data deduplication and compression
Support storage-assisted data deduplication and compression. The feature takes effect when the user enables deduplication and compression on the DP pool through the Configuration Manager REST API and sets the extra spec ``hbsd:capacity_saving`` to ``deduplication_compression``.

Implements: blueprint hitachi-vsp-support-dedup-comp
Change-Id: Ia6dcf8ee2205d32714849c0730c9a3562fa8f485
parent a92aa06e46
commit 5b38bd138c
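For reference, a minimal sketch (not part of this change) of how the new extra spec is translated into the LDEV-creation request: the mapping values are taken from the ``_CAPACITY_SAVING_DR_MODE`` table added in ``hbsd_rest.py``, while ``build_ldev_body`` is a hypothetical stand-in for the driver's ``_create_ldev_on_storage()``/``_set_dr_mode()`` pair::

    # Illustration only -- not driver code. The dict below mirrors
    # _CAPACITY_SAVING_DR_MODE introduced by this change; the assumed
    # operator-side setup would be a volume type whose extra spec
    # hbsd:capacity_saving=deduplication_compression is attached to it.
    _CAPACITY_SAVING_DR_MODE = {
        'disable': 'disabled',
        'deduplication_compression': 'compression_deduplication',
        '': 'disabled',
        None: 'disabled',
    }

    def build_ldev_body(size_gb, pool_id, capacity_saving=None):
        """Build a POST /ldevs request body the way the driver would."""
        body = {
            'byteFormatCapacity': '%sG' % size_gb,
            'poolId': pool_id,
            'isParallelExecutionEnabled': True,
        }
        if capacity_saving:
            dr_mode = _CAPACITY_SAVING_DR_MODE.get(capacity_saving)
            if not dr_mode:
                # The driver reports INVALID_EXTRA_SPEC_KEY here instead.
                raise ValueError('invalid capacity_saving value: %r'
                                 % capacity_saving)
            body['dataReductionMode'] = dr_mode
        return body

    # With deduplication_compression the body carries
    # dataReductionMode='compression_deduplication', which is exactly what
    # test_create_volume_deduplication_compression asserts below.
    print(build_ldev_body(10, 30, 'deduplication_compression'))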
@@ -22,6 +22,7 @@ import requests
 
 from cinder import context as cinder_context
 from cinder.db.sqlalchemy import api as sqlalchemy_api
+from cinder import exception
 from cinder.objects import group_snapshot as obj_group_snap
 from cinder.objects import snapshot as obj_snap
 from cinder.tests.unit import fake_group
@@ -267,6 +268,8 @@ GET_LDEV_RESULT = {
     "attributes": ["CVS", "HDP"],
     "status": "NML",
     "poolId": 30,
+    "dataReductionStatus": "DISABLED",
+    "dataReductionMode": "disabled",
 }
 
 GET_LDEV_RESULT_MAPPED = {
@@ -1497,3 +1500,62 @@ class HBSDMIRRORFCDriverTest(test.TestCase):
             [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
         )
         self.assertTupleEqual(actual, ret)
+
+    @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type')
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_rep_ldev_and_pair_deduplication_compression(
+            self, get_volume_type_extra_specs, get_volume_type, request):
+        get_volume_type_extra_specs.return_value = {
+            'hbsd:topology': 'active_active_mirror_volume',
+            'hbsd:capacity_saving': 'deduplication_compression'}
+        get_volume_type.return_value = {}
+        self.snapshot_count = 0
+
+        def _request_side_effect(
+                method, url, params, json, headers, auth, timeout, verify):
+            if self.configuration.hitachi_storage_id in url:
+                if method in ('POST', 'PUT'):
+                    return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+                elif method == 'GET':
+                    if ('/remote-mirror-copygroups' in url or
+                            '/journals' in url):
+                        return FakeResponse(200, NOTFOUND_RESULT)
+                    elif '/remote-mirror-copypairs/' in url:
+                        return FakeResponse(
+                            200, GET_REMOTE_MIRROR_COPYPAIR_RESULT)
+                    elif '/ldevs/' in url:
+                        return FakeResponse(200, GET_LDEV_RESULT_REP)
+                    elif '/snapshots' in url:
+                        if self.snapshot_count < 1:
+                            self.snapshot_count = self.snapshot_count + 1
+                            return FakeResponse(200, GET_SNAPSHOTS_RESULT)
+                        else:
+                            return FakeResponse(200, NOTFOUND_RESULT)
+            else:
+                if method in ('POST', 'PUT'):
+                    return FakeResponse(400, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+                elif method == 'GET':
+                    if '/remote-mirror-copygroups' in url:
+                        return FakeResponse(200, NOTFOUND_RESULT)
+                    elif '/ldevs/' in url:
+                        return FakeResponse(200, GET_LDEV_RESULT_REP)
+            if '/ldevs/' in url:
+                return FakeResponse(200, GET_LDEV_RESULT_REP)
+            else:
+                return FakeResponse(
+                    200, COMPLETED_SUCCEEDED_RESULT)
+        self.driver.common.rep_primary._stats = {}
+        self.driver.common.rep_primary._stats['pools'] = [
+            {'location_info': {'pool_id': 30}}]
+        self.driver.common.rep_secondary._stats = {}
+        self.driver.common.rep_secondary._stats['pools'] = [
+            {'location_info': {'pool_id': 40}}]
+        request.side_effect = _request_side_effect
+        self.assertRaises(exception.VolumeDriverException,
+                          self.driver.create_cloned_volume,
+                          TEST_VOLUME[4],
+                          TEST_VOLUME[5])
+        self.assertEqual(2, get_volume_type_extra_specs.call_count)
+        self.assertEqual(0, get_volume_type.call_count)
+        self.assertEqual(14, request.call_count)
@@ -216,6 +216,8 @@ GET_LDEV_RESULT = {
     "attributes": ["CVS", "HDP"],
     "status": "NML",
     "poolId": 30,
+    "dataReductionStatus": "DISABLED",
+    "dataReductionMode": "disabled",
 }
 
 GET_LDEV_RESULT_MAPPED = {
@@ -240,6 +242,16 @@ GET_LDEV_RESULT_PAIR = {
     "status": "NML",
 }
 
+GET_LDEV_RESULT_PAIR_STATUS_TEST = {
+    "emulationType": "OPEN-V-CVS",
+    "blockCapacity": 2097152,
+    "attributes": ["CVS", "HDP", "HTI"],
+    "status": "TEST",
+    "poolId": 30,
+    "dataReductionStatus": "REHYDRATING",
+    "dataReductionMode": "disabled"
+}
+
 GET_POOL_RESULT = {
     "availableVolumeCapacity": 480144,
     "totalPoolCapacity": 507780,
@@ -775,6 +787,25 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.assertEqual('1', ret['provider_location'])
         self.assertEqual(2, request.call_count)
 
+    @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_volume_deduplication_compression(
+            self, get_volume_type_extra_specs, request):
+        extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+        get_volume_type_extra_specs.return_value = extra_specs
+        request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+        self.driver.common._stats = {}
+        self.driver.common._stats['pools'] = [
+            {'location_info': {'pool_id': 30}}]
+        ret = self.driver.create_volume(TEST_VOLUME[3])
+        args, kwargs = request.call_args_list[0]
+        body = kwargs['json']
+        self.assertEqual(body.get('dataReductionMode'),
+                         'compression_deduplication')
+        self.assertEqual('1', ret['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
+        self.assertEqual(2, request.call_count)
+
     @reduce_retrying_time
     @mock.patch.object(requests.Session, "request")
     def test_create_volume_timeout(self, request):
@@ -852,17 +883,40 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.assertEqual(1, get_goodness_function.call_count)
 
     @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
-    def test_create_snapshot(self, volume_get, request):
+    def test_create_snapshot(
+            self, volume_get, get_volume_type_extra_specs, request):
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+        get_volume_type_extra_specs.return_value = {}
         self.driver.common._stats = {}
         self.driver.common._stats['pools'] = [
             {'location_info': {'pool_id': 30}}]
         ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
         self.assertEqual('1', ret['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
+        self.assertEqual(4, request.call_count)
+
+    @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+    def test_create_snapshot_dedup_false(
+            self, volume_get, get_volume_type_extra_specs, request):
+        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                               FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+        get_volume_type_extra_specs.return_value = {'hbsd:capacity_saving':
                                                     'disable'}
+        self.driver.common._stats = {}
+        self.driver.common._stats['pools'] = [
+            {'location_info': {'pool_id': 30}}]
+        ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
+        self.assertEqual('1', ret['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(4, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
@@ -895,32 +949,40 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.assertEqual(4, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_cloned_volume(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_cloned_volume(
+            self, get_volume_type_extra_specs, request):
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+        get_volume_type_extra_specs.return_value = {}
         self.driver.common._stats = {}
         self.driver.common._stats['pools'] = [
             {'location_info': {'pool_id': 30}}]
         vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
         self.assertEqual('1', vol['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_volume_from_snapshot(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_volume_from_snapshot(
+            self, get_volume_type_extra_specs, request):
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
         self.driver.common._stats = {}
+        get_volume_type_extra_specs.return_value = {}
         self.driver.common._stats['pools'] = [
             {'location_info': {'pool_id': 30}}]
         vol = self.driver.create_volume_from_snapshot(
             TEST_VOLUME[0], TEST_SNAPSHOT[0])
         self.assertEqual('1', vol['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
 
     @mock.patch.object(fczm_utils, "add_fc_zone")
@@ -1166,10 +1228,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     def test_retype(self, request):
-        request.return_value = FakeResponse(200, GET_LDEV_RESULT)
-        new_specs = {'hbsd:test': 'test'}
-        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
-        diff = {}
+        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+                               FakeResponse(200, GET_LDEV_RESULT),
+                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
         host = {
             'capabilities': {
                 'location_info': {
@@ -1177,9 +1238,17 @@ class HBSDRESTFCDriverTest(test.TestCase):
                 },
             },
         }
+        new_type = {'extra_specs': {
+            'hbsd:capacity_saving': 'deduplication_compression'}}
+        old_specs = {'hbsd:capacity_saving': 'disable'}
+        new_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+        old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
                                               new_type_ref['id'])[0]
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
-        self.assertEqual(1, request.call_count)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+        self.assertEqual(3, request.call_count)
         self.assertTrue(ret)
 
     @mock.patch.object(requests.Session, "request")
@@ -1199,7 +1268,10 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.assertTupleEqual(actual, ret)
 
     @mock.patch.object(requests.Session, "request")
-    def test_migrate_volume_diff_pool(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_migrate_volume_diff_pool(
+            self, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(200, GET_LDEV_RESULT),
@@ -1224,6 +1296,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
             },
         }
         ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(15, request.call_count)
         actual = (True, {'provider_location': '1'})
         self.assertTupleEqual(actual, ret)
@@ -1268,7 +1341,10 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.assertTupleEqual(actual, ret)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_group_from_src_volume(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_group_from_src_volume(
+            self, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1281,13 +1357,17 @@ class HBSDRESTFCDriverTest(test.TestCase):
             self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
             source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
         self.assertTupleEqual(actual, ret)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_group_from_src_snapshot(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_group_from_src_snapshot(
+            self, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1300,6 +1380,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
             self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
             group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
@@ -1330,10 +1411,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
     def test_create_group_snapshot_non_cg(
-            self, is_group_a_cg_snapshot_type, volume_get, request):
+            self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+            volume_get, request):
         is_group_a_cg_snapshot_type.return_value = False
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1344,6 +1428,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
         ret = self.driver.create_group_snapshot(
            self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(4, request.call_count)
         actual = (
             {'status': 'available'},
@@ -1355,10 +1440,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
     def test_create_group_snapshot_cg(
-            self, is_group_a_cg_snapshot_type, volume_get, request):
+            self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+            volume_get, request):
         is_group_a_cg_snapshot_type.return_value = True
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
@@ -1370,6 +1458,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
         ret = self.driver.create_group_snapshot(
             self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None,
@@ -1422,3 +1511,29 @@ class HBSDRESTFCDriverTest(test.TestCase):
             hbsd_replication.REST_MIRROR_API_OPTS +
             hbsd_replication.REST_MIRROR_SSL_OPTS)
         self.assertEqual(actual, ret)
+
+    @mock.patch.object(requests.Session, "request")
+    def test_is_modifiable_dr_value_new_dr_mode_disabled(self, request):
+        request.side_effect = [
+            FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST),
+            FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST),
+            FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+        ]
+        host = {
+            'capabilities': {
+                'location_info': {
+                    'pool_id': 30,
+                },
+            },
+        }
+        new_type = {'extra_specs': {'hbsd:capacity_saving': 'disable'}}
+        old_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+        new_specs = {'hbsd:capacity_saving': 'disable'}
+        old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
+                                              new_type_ref['id'])[0]
+        ret = self.driver.retype(
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+        self.assertEqual(3, request.call_count)
+        self.assertTrue(ret)
@@ -192,6 +192,8 @@ GET_LDEV_RESULT = {
     "attributes": ["CVS", "HDP"],
     "status": "NML",
     "poolId": 30,
+    "dataReductionStatus": "DISABLED",
+    "dataReductionMode": "disabled",
 }
 
 GET_LDEV_RESULT_MAPPED = {
@@ -621,8 +623,11 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         self.assertEqual(4, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
-    def test_create_snapshot(self, volume_get, request):
+    def test_create_snapshot(
+            self, volume_get, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -632,6 +637,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
             {'location_info': {'pool_id': 30}}]
         ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
         self.assertEqual('1', ret['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(4, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
@@ -644,32 +650,40 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         self.assertEqual(4, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_cloned_volume(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_cloned_volume(
+            self, get_volume_type_extra_specs, request):
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+        get_volume_type_extra_specs.return_value = {}
         self.driver.common._stats = {}
         self.driver.common._stats['pools'] = [
             {'location_info': {'pool_id': 30}}]
         vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
         self.assertEqual('1', vol['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_volume_from_snapshot(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_volume_from_snapshot(
+            self, get_volume_type_extra_specs, request):
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+        get_volume_type_extra_specs.return_value = {}
         self.driver.common._stats = {}
         self.driver.common._stats['pools'] = [
             {'location_info': {'pool_id': 30}}]
         vol = self.driver.create_volume_from_snapshot(
             TEST_VOLUME[0], TEST_SNAPSHOT[0])
         self.assertEqual('1', vol['provider_location'])
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
 
     @mock.patch.object(requests.Session, "request")
@@ -866,10 +880,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     def test_retype(self, request):
-        request.return_value = FakeResponse(200, GET_LDEV_RESULT)
-        new_specs = {'hbsd:test': 'test'}
-        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
-        diff = {}
+        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+                               FakeResponse(200, GET_LDEV_RESULT),
+                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
         host = {
             'capabilities': {
                 'location_info': {
@@ -877,9 +890,17 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                 },
             },
         }
+        new_type = {'extra_specs': {
+            'hbsd:capacity_saving': 'deduplication_compression'}}
+        old_specs = {'hbsd:capacity_saving': 'disable'}
+        new_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+        old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
                                               new_type_ref['id'])[0]
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
-        self.assertEqual(1, request.call_count)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+        self.assertEqual(3, request.call_count)
         self.assertTrue(ret)
 
     @mock.patch.object(requests.Session, "request")
@@ -932,7 +953,10 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         self.assertTupleEqual(actual, ret)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_group_from_src_volume(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_group_from_src_volume(
+            self, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -945,13 +969,17 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
             self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
             source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
         self.assertTupleEqual(actual, ret)
 
     @mock.patch.object(requests.Session, "request")
-    def test_create_group_from_src_snapshot(self, request):
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_group_from_src_snapshot(
+            self, get_volume_type_extra_specs, request):
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -964,6 +992,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
             self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
             group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
@@ -994,10 +1023,13 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
     def test_create_group_snapshot_non_cg(
-            self, is_group_a_cg_snapshot_type, volume_get, request):
+            self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+            volume_get, request):
         is_group_a_cg_snapshot_type.return_value = False
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1008,6 +1040,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         ret = self.driver.create_group_snapshot(
             self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(4, request.call_count)
         actual = (
             {'status': 'available'},
@@ -1019,10 +1052,13 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
 
     @mock.patch.object(requests.Session, "request")
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
     def test_create_group_snapshot_cg(
-            self, is_group_a_cg_snapshot_type, volume_get, request):
+            self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+            volume_get, request):
         is_group_a_cg_snapshot_type.return_value = True
+        get_volume_type_extra_specs.return_value = {}
         request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                                FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
@@ -1034,6 +1070,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         ret = self.driver.create_group_snapshot(
             self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
         )
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
         self.assertEqual(5, request.call_count)
         actual = (
             None,
@@ -963,8 +963,9 @@ class HPEXPRESTFCDriverTest(test.TestCase):
     @mock.patch.object(requests.Session, "request")
     def test_retype(self, request):
         request.return_value = FakeResponse(200, GET_LDEV_RESULT)
-        new_specs = {'hbsd:test': 'test'}
+        new_specs = {'hpe_xp:test': 'test'}
         new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
         diff = {}
         host = {
             'capabilities': {
@@ -974,7 +975,7 @@ class HPEXPRESTFCDriverTest(test.TestCase):
             },
         }
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
         self.assertEqual(1, request.call_count)
         self.assertTrue(ret)
 
@@ -776,8 +776,9 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
     @mock.patch.object(requests.Session, "request")
     def test_retype(self, request):
         request.return_value = FakeResponse(200, GET_LDEV_RESULT)
-        new_specs = {'hbsd:test': 'test'}
+        new_specs = {'hpe_xp:test': 'test'}
         new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
         diff = {}
         host = {
             'capabilities': {
@@ -787,7 +788,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
             },
         }
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
         self.assertEqual(1, request.call_count)
         self.assertTrue(ret)
 
@@ -959,6 +959,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
         request.return_value = FakeResponse(200, GET_LDEV_RESULT)
         new_specs = {'nec:test': 'test'}
         new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
         diff = {}
         host = {
             'capabilities': {
@@ -968,7 +969,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
             },
         }
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
         self.assertEqual(1, request.call_count)
         self.assertTrue(ret)
 
@@ -817,6 +817,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
         request.return_value = FakeResponse(200, GET_LDEV_RESULT)
         new_specs = {'nec:test': 'test'}
         new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+        new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
         diff = {}
         host = {
             'capabilities': {
@@ -826,7 +827,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
             },
         }
         ret = self.driver.retype(
-            self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
         self.assertEqual(1, request.call_count)
         self.assertTrue(ret)
 
@@ -250,7 +250,7 @@ class HBSDCommon():
                 return pool['location_info']['pool_id']
         return None
 
-    def create_ldev(self, size, pool_id, ldev_range):
+    def create_ldev(self, size, extra_specs, pool_id, ldev_range):
         """Create an LDEV and return its LDEV number."""
         raise NotImplementedError()
 
@@ -260,10 +260,12 @@ class HBSDCommon():
 
     def create_volume(self, volume):
         """Create a volume and return its properties."""
+        extra_specs = self.get_volume_extra_specs(volume)
         pool_id = self.get_pool_id_of_volume(volume)
         ldev_range = self.storage_info['ldev_range']
         try:
-            ldev = self.create_ldev(volume['size'], pool_id, ldev_range)
+            ldev = self.create_ldev(
+                volume['size'], extra_specs, pool_id, ldev_range)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.output_log(MSG.CREATE_LDEV_FAILED)
@@ -286,14 +288,14 @@ class HBSDCommon():
         raise NotImplementedError()
 
     def copy_on_storage(
-            self, pvol, size, pool_id, snap_pool_id, ldev_range,
+            self, pvol, size, extra_specs, pool_id, snap_pool_id, ldev_range,
             is_snapshot=False, sync=False, is_rep=False):
         """Create a copy of the specified LDEV on the storage."""
         ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
         if ldev_info['status'] != 'NML':
             msg = self.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
             self.raise_error(msg)
-        svol = self.create_ldev(size, pool_id, ldev_range)
+        svol = self.create_ldev(size, extra_specs, pool_id, ldev_range)
         try:
             self.create_pair_on_storage(
                 pvol, svol, snap_pool_id, is_snapshot=is_snapshot)
@@ -318,11 +320,13 @@ class HBSDCommon():
             self.raise_error(msg)
 
         size = volume['size']
+        extra_specs = self.get_volume_extra_specs(volume)
         pool_id = self.get_pool_id_of_volume(volume)
         snap_pool_id = self.storage_info['snap_pool_id']
         ldev_range = self.storage_info['ldev_range']
-        new_ldev = self.copy_on_storage(
-            ldev, size, pool_id, snap_pool_id, ldev_range, is_rep=is_rep)
+        new_ldev = self.copy_on_storage(ldev, size, extra_specs, pool_id,
+                                        snap_pool_id, ldev_range,
+                                        is_rep=is_rep)
         self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
         if is_rep:
             self.delete_pair(new_ldev)
@@ -412,11 +416,13 @@ class HBSDCommon():
                 type='volume', id=src_vref['id'])
             self.raise_error(msg)
         size = snapshot['volume_size']
+        extra_specs = self.get_volume_extra_specs(snapshot['volume'])
         pool_id = self.get_pool_id_of_volume(snapshot['volume'])
         snap_pool_id = self.storage_info['snap_pool_id']
         ldev_range = self.storage_info['ldev_range']
         new_ldev = self.copy_on_storage(
-            ldev, size, pool_id, snap_pool_id, ldev_range, is_snapshot=True)
+            ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range,
+            is_snapshot=True)
         return {
             'provider_location': str(new_ldev),
         }
@@ -75,9 +75,10 @@ class HBSDFCDriver(driver.FibreChannelDriver):
         2.2.2 - Add Target Port Assignment.
         2.2.3 - Add port scheduler.
         2.3.0 - Support multi pool.
-        2.3.1 - Add specifies format of the names HostGroups/iSCSI Targets.
-        2.3.2 - Add GAD volume support.
-        2.3.3 - Update retype and support storage assisted migration.
+        2.3.1 - Update retype and support storage assisted migration.
+        2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.3 - Add GAD volume support.
+        2.3.4 - Support data deduplication and compression.
 
     """
 
@@ -75,9 +75,10 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
         2.2.2 - Add Target Port Assignment.
         2.2.3 - Add port scheduler.
         2.3.0 - Support multi pool.
-        2.3.1 - Add specifies format of the names HostGroups/iSCSI Targets.
-        2.3.2 - Add GAD volume support.
-        2.3.3 - Update retype and support storage assisted migration.
+        2.3.1 - Update retype and support storage assisted migration.
+        2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.3 - Add GAD volume support.
+        2.3.4 - Support data deduplication and compression.
 
     """
 
@@ -315,10 +315,12 @@ class HBSDREPLICATION(rest.HBSDREST):
             self.raise_error(msg)
 
     def _is_mirror_spec(self, extra_specs):
+        topology = None
         if not extra_specs:
             return False
-        topology = extra_specs.get(
-            self.driver_info['driver_dir_name'] + ':topology')
+        if self.driver_info.get('driver_dir_name'):
+            topology = extra_specs.get(
+                self.driver_info['driver_dir_name'] + ':topology')
         if topology is None:
             return False
         elif topology == 'active_active_mirror_volume':
@@ -330,17 +332,19 @@ class HBSDREPLICATION(rest.HBSDREST):
                 value=topology)
             self.raise_error(msg)
 
-    def _create_rep_ldev(self, volume, rep_type, pvol=None):
+    def _create_rep_ldev(self, volume, extra_specs, rep_type, pvol=None):
         """Create a primary volume and a secondary volume."""
         pool_id = self.rep_secondary.storage_info['pool_id'][0]
         ldev_range = self.rep_secondary.storage_info['ldev_range']
         thread = greenthread.spawn(
-            self.rep_secondary.create_ldev, volume.size, pool_id, ldev_range)
+            self.rep_secondary.create_ldev, volume.size, extra_specs,
+            pool_id, ldev_range)
         if pvol is None:
             try:
                 pool_id = self.rep_primary.get_pool_id_of_volume(volume)
                 ldev_range = self.rep_primary.storage_info['ldev_range']
                 pvol = self.rep_primary.create_ldev(volume.size,
+                                                    extra_specs,
                                                     pool_id, ldev_range)
             except exception.VolumeDriverException:
                 self.rep_primary.output_log(MSG.CREATE_LDEV_FAILED)
@@ -491,10 +495,22 @@ class HBSDREPLICATION(rest.HBSDREST):
             copy_group_name, pvol, svol, rep_type, _WAIT_PAIR)
 
     def _create_rep_ldev_and_pair(
-            self, volume, rep_type, pvol=None):
+            self, volume, extra_specs, rep_type, pvol=None):
         """Create volume and Replication pair."""
+        capacity_saving = None
+        if self.driver_info.get('driver_dir_name'):
+            capacity_saving = extra_specs.get(
+                self.driver_info['driver_dir_name'] + ':capacity_saving')
+        if capacity_saving == 'deduplication_compression':
+            msg = self.output_log(
+                MSG.DEDUPLICATION_IS_ENABLED,
+                rep_type=rep_type, volume_id=volume.id,
+                volume_type=volume.volume_type.name, size=volume.size)
+            if pvol is not None:
+                self.rep_primary.delete_ldev(pvol)
+            self.raise_error(msg)
         svol = None
-        pvol, svol = self._create_rep_ldev(volume, rep_type, pvol)
+        pvol, svol = self._create_rep_ldev(volume, extra_specs, rep_type, pvol)
         try:
             thread = greenthread.spawn(
                 self.rep_secondary.initialize_pair_connection, svol)
@@ -530,7 +546,7 @@ class HBSDREPLICATION(rest.HBSDREST):
             self._require_rep_secondary()
             rep_type = self.driver_info['mirror_attr']
             pldev, sldev = self._create_rep_ldev_and_pair(
-                volume, rep_type)
+                volume, extra_specs, rep_type)
             provider_location = _pack_rep_provider_location(
                 pldev, sldev, rep_type)
             return {
@@ -621,7 +637,8 @@ class HBSDREPLICATION(rest.HBSDREST):
         else:
             self.rep_primary.delete_ldev(ldev)
 
-    def _create_rep_volume_from_src(self, volume, src, src_type, operation):
+    def _create_rep_volume_from_src(
+            self, volume, extra_specs, src, src_type, operation):
         """Create a replication volume from a volume or snapshot and return
 
         its properties.
@@ -631,7 +648,7 @@ class HBSDREPLICATION(rest.HBSDREST):
             volume, src, src_type, is_rep=True)
         new_ldev = self.rep_primary.get_ldev(data)
         sldev = self._create_rep_ldev_and_pair(
-            volume, rep_type, new_ldev)[1]
+            volume, extra_specs, rep_type, new_ldev)[1]
         provider_location = _pack_rep_provider_location(
             new_ldev, sldev, rep_type)
         return {
@@ -648,7 +665,7 @@ class HBSDREPLICATION(rest.HBSDREST):
         if self._is_mirror_spec(extra_specs):
             self._require_rep_secondary()
             return self._create_rep_volume_from_src(
-                volume, src, src_type, operation)
+                volume, extra_specs, src, src_type, operation)
         return self.rep_primary.create_volume_from_src(volume, src, src_type)
 
     def create_cloned_volume(self, volume, src_vref):
@ -93,6 +93,26 @@ _MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT = ('2E13', '9900')

 _PAIR_TARGET_NAME_BODY_DEFAULT = 'pair00'

+_DR_VOL_PATTERN = {
+    'disabled': ('REHYDRATING',),
+    'compression_deduplication': ('ENABLED',),
+    None: ('DELETING',),
+}
+_DISABLE_ABLE_DR_STATUS = {
+    'disabled': ('DISABLED', 'ENABLING', 'REHYDRATING'),
+    'compression_deduplication': ('ENABLED', 'ENABLING'),
+}
+_DEDUPCOMP_ABLE_DR_STATUS = {
+    'disabled': ('DISABLED', 'ENABLING'),
+    'compression_deduplication': ('ENABLED', 'ENABLING'),
+}
+_CAPACITY_SAVING_DR_MODE = {
+    'disable': 'disabled',
+    'deduplication_compression': 'compression_deduplication',
+    '': 'disabled',
+    None: 'disabled',
+}
+
 REST_VOLUME_OPTS = [
     cfg.BoolOpt(
         'hitachi_rest_disable_io_wait',
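Note (illustration only, not part of the commit): _CAPACITY_SAVING_DR_MODE is the table that turns the user-facing extra-spec value into the dataReductionMode attribute used by the Configuration Manager REST API. A minimal sketch of the lookup, with the extra-spec value assumed:

    # Illustrative sketch; the key/value come from the volume type, the rest is assumed.
    capacity_saving = 'deduplication_compression'      # value of hbsd:capacity_saving
    dr_mode = _CAPACITY_SAVING_DR_MODE.get(capacity_saving)
    # dr_mode == 'compression_deduplication' is sent as dataReductionMode;
    # unrecognized values map to None and are rejected by _set_dr_mode() below.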
@ -311,22 +331,39 @@ class HBSDREST(common.HBSDCommon):
         if self.client is not None:
             self.client.enter_keep_session()

-    def _create_ldev_on_storage(self, size, pool_id, ldev_range):
+    def _set_dr_mode(self, body, capacity_saving):
+        dr_mode = _CAPACITY_SAVING_DR_MODE.get(capacity_saving)
+        if not dr_mode:
+            msg = self.output_log(
+                MSG.INVALID_EXTRA_SPEC_KEY,
+                key=self.driver_info['driver_dir_name'] + ':capacity_saving',
+                value=capacity_saving)
+            self.raise_error(msg)
+        body['dataReductionMode'] = dr_mode
+
+    def _create_ldev_on_storage(self, size, extra_specs, pool_id, ldev_range):
         """Create an LDEV on the storage system."""
         body = {
             'byteFormatCapacity': '%sG' % size,
             'poolId': pool_id,
             'isParallelExecutionEnabled': True,
         }
+        capacity_saving = None
+        if self.driver_info.get('driver_dir_name'):
+            capacity_saving = extra_specs.get(
+                self.driver_info['driver_dir_name'] + ':capacity_saving')
+        if capacity_saving:
+            self._set_dr_mode(body, capacity_saving)
         if self.storage_info['ldev_range']:
             min_ldev, max_ldev = self.storage_info['ldev_range'][:2]
             body['startLdevId'] = min_ldev
             body['endLdevId'] = max_ldev
         return self.client.add_ldev(body, no_log=True)

-    def create_ldev(self, size, pool_id, ldev_range):
+    def create_ldev(self, size, extra_specs, pool_id, ldev_range):
         """Create an LDEV of the specified size and the specified type."""
-        ldev = self._create_ldev_on_storage(size, pool_id, ldev_range)
+        ldev = self._create_ldev_on_storage(
+            size, extra_specs, pool_id, ldev_range)
         LOG.debug('Created logical device. (LDEV: %s)', ldev)
         return ldev

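Rough illustration (size and pool assumed, keys taken from the hunk above): for a 10 GiB volume in pool 30 whose volume type sets hbsd:capacity_saving to deduplication_compression, the body handed to add_ldev would look like:

    # Illustrative only; values are assumed, keys match _create_ldev_on_storage above.
    body = {
        'byteFormatCapacity': '10G',
        'poolId': 30,
        'isParallelExecutionEnabled': True,
        'dataReductionMode': 'compression_deduplication',
    }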
@ -337,12 +374,23 @@ class HBSDREST(common.HBSDCommon):

     def delete_ldev_from_storage(self, ldev):
         """Delete the specified LDEV from the storage."""
-        result = self.client.get_ldev(ldev)
+        result = self.get_ldev_info(['emulationType',
+                                     'dataReductionMode',
+                                     'dataReductionStatus'], ldev)
+        if result['dataReductionStatus'] == 'FAILED':
+            msg = self.output_log(
+                MSG.CONSISTENCY_NOT_GUARANTEE, ldev=ldev)
+            self.raise_error(msg)
+        if result['dataReductionStatus'] in _DR_VOL_PATTERN.get(
+                result['dataReductionMode'], ()):
+            body = {'isDataReductionDeleteForceExecute': True}
+        else:
+            body = None
         if result['emulationType'] == 'NOT DEFINED':
             self.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
             return
         self.client.delete_ldev(
-            ldev,
+            ldev, body,
             timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev}))

     def _get_snap_pool_id(self, pvol):
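Orientation sketch (status values assumed): the _DR_VOL_PATTERN lookup above decides whether the delete request needs the force flag:

    # Illustrative only; mirrors the lookup in delete_ldev_from_storage.
    mode, status = 'compression_deduplication', 'ENABLED'
    force = status in _DR_VOL_PATTERN.get(mode, ())      # True
    body = {'isDataReductionDeleteForceExecute': True} if force else None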
@ -1227,7 +1275,9 @@ class HBSDREST(common.HBSDCommon):
             size = snapshot.volume_size
             pool_id = self.get_pool_id_of_volume(snapshot.volume)
             ldev_range = self.storage_info['ldev_range']
-            pair['svol'] = self.create_ldev(size, pool_id, ldev_range)
+            extra_specs = self.get_volume_extra_specs(snapshot.volume)
+            pair['svol'] = self.create_ldev(size, extra_specs,
+                                            pool_id, ldev_range)
         except Exception as exc:
             pair['msg'] = utils.get_exception_msg(exc)
             raise loopingcall.LoopingCallDone(pair)
@ -1415,13 +1465,14 @@ class HBSDREST(common.HBSDCommon):
         if (new_type or old_pool_id != new_pool_id or
                 (ldev_range and
                  (pvol < ldev_range[0] or ldev_range[1] < pvol))):
+            extra_specs = self.get_volume_extra_specs(volume)
             snap_pool_id = host['capabilities']['location_info'].get(
                 'snap_pool_id')
             ldev_range = host['capabilities']['location_info'].get(
                 'ldev_range')
             svol = self.copy_on_storage(
-                pvol, volume.size, new_pool_id, snap_pool_id, ldev_range,
+                pvol, volume.size, extra_specs, new_pool_id,
+                snap_pool_id, ldev_range,
                 is_snapshot=False, sync=True)
             self.modify_ldev_name(svol, volume['id'].replace("-", ""))

@ -1436,16 +1487,62 @@ class HBSDREST(common.HBSDCommon):

         return True, None

+    def _is_modifiable_dr_value(self, dr_mode, dr_status, new_dr_mode, volume):
+        if (dr_status == 'REHYDRATING' and
+                new_dr_mode == 'compression_deduplication'):
+            self.output_log(MSG.VOLUME_IS_BEING_REHYDRATED,
+                            volume_id=volume['id'],
+                            volume_type=volume['volume_type']['name'])
+            return False
+        elif dr_status == 'FAILED':
+            self.output_log(MSG.INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME,
+                            volume_id=volume['id'],
+                            volume_type=volume['volume_type']['name'])
+            return False
+        elif new_dr_mode == 'disabled':
+            return dr_status in _DISABLE_ABLE_DR_STATUS.get(dr_mode, ())
+        elif new_dr_mode == 'compression_deduplication':
+            return dr_status in _DEDUPCOMP_ABLE_DR_STATUS.get(dr_mode, ())
+        return False
+
+    def _modify_capacity_saving(self, ldev, capacity_saving):
+        body = {'dataReductionMode': capacity_saving}
+        self.client.modify_ldev(
+            ldev, body,
+            timeout_message=(
+                MSG.NOT_COMPLETED_CHANGE_VOLUME_TYPE, {'ldev': ldev}))
+
     def retype(self, ctxt, volume, new_type, diff, host):
         """Retype the specified volume."""
+        diff_items = []

-        def _check_specs_diff(diff):
+        def _check_specs_diff(diff, allowed_extra_specs):
             for specs_key, specs_val in diff.items():
                 for diff_key, diff_val in specs_val.items():
+                    if (specs_key == 'extra_specs' and
+                            diff_key in allowed_extra_specs):
+                        diff_items.append(diff_key)
+                        continue
                     if diff_val[0] != diff_val[1]:
                         return False
             return True

+        extra_specs_capacity_saving = None
+        new_capacity_saving = None
+        allowed_extra_specs = []
+        if self.driver_info.get('driver_dir_name'):
+            extra_specs_capacity_saving = (
+                self.driver_info['driver_dir_name'] + ':capacity_saving')
+            new_capacity_saving = (
+                new_type['extra_specs'].get(extra_specs_capacity_saving))
+            allowed_extra_specs.append(extra_specs_capacity_saving)
+        new_dr_mode = _CAPACITY_SAVING_DR_MODE.get(new_capacity_saving)
+        if not new_dr_mode:
+            msg = self.output_log(
+                MSG.FAILED_CHANGE_VOLUME_TYPE,
+                key=extra_specs_capacity_saving,
+                value=new_capacity_saving)
+            self.raise_error(msg)
         ldev = self.get_ldev(volume)
         if ldev is None:
             msg = self.output_log(
@ -1453,15 +1550,27 @@ class HBSDREST(common.HBSDCommon):
                 id=volume['id'])
             self.raise_error(msg)
         ldev_info = self.get_ldev_info(
-            ['poolId'], ldev)
+            ['dataReductionMode', 'dataReductionStatus', 'poolId'], ldev)
         old_pool_id = ldev_info['poolId']
         new_pool_id = host['capabilities']['location_info'].get('pool_id')
-        if not _check_specs_diff(diff) or new_pool_id != old_pool_id:
+        if (not _check_specs_diff(diff, allowed_extra_specs)
+                or new_pool_id != old_pool_id):
             snaps = SnapshotList.get_all_for_volume(ctxt, volume.id)
             if not snaps:
                 return self.migrate_volume(volume, host, new_type)
             return False

+        if (extra_specs_capacity_saving
+                and extra_specs_capacity_saving in diff_items):
+            ldev_info = self.get_ldev_info(
+                ['dataReductionMode', 'dataReductionStatus'], ldev)
+            if not self._is_modifiable_dr_value(
+                    ldev_info['dataReductionMode'],
+                    ldev_info['dataReductionStatus'], new_dr_mode, volume):
+                return False
+
+            self._modify_capacity_saving(ldev, new_dr_mode)
+
         return True

     def wait_copy_completion(self, pvol, svol):
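Orientation sketch (statuses assumed) for the retype path added above: switching capacity saving off on an LDEV that reports mode 'compression_deduplication' and status 'ENABLED' is handled in place; a 'FAILED' status, or 'REHYDRATING' when re-enabling deduplication, makes _is_modifiable_dr_value return False, so the retype is not done in place (the warning messages added below note that migration is used instead).

    # Illustrative only; the tables are the ones added earlier in this commit.
    dr_mode, dr_status, new_dr_mode = 'compression_deduplication', 'ENABLED', 'disabled'
    can_modify = dr_status in _DISABLE_ABLE_DR_STATUS.get(dr_mode, ())   # True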
@ -672,13 +672,13 @@ class RestApiClient():
         }
         self._delete_object(url, body=body, **kwargs)

-    def modify_ldev(self, ldev_id, body):
+    def modify_ldev(self, ldev_id, body, **kwargs):
         """Modify a ldev information."""
         url = '%(url)s/ldevs/%(id)s' % {
             'url': self.object_url,
             'id': ldev_id,
         }
-        self._invoke(url, body=body)
+        self._invoke(url, body=body, **kwargs)

     def extend_ldev(self, ldev_id, body):
         """Expand a ldev size."""
@ -25,7 +25,7 @@ from oslo_utils import units
 from cinder import exception
 from cinder import utils as cinder_utils

-VERSION = '2.3.3'
+VERSION = '2.3.4'
 CI_WIKI_NAME = 'Hitachi_VSP_CI'
 PARAM_PREFIX = 'hitachi'
 VENDOR_NAME = 'Hitachi'
@ -191,6 +191,25 @@ class HBSDMsg(enum.Enum):
         '%(volume_type)s)',
         'suffix': WARNING_SUFFIX,
     }
+    VOLUME_IS_BEING_REHYDRATED = {
+        'msg_id': 333,
+        'loglevel': base_logging.WARNING,
+        'msg': 'Retyping the volume will be performed using migration '
+               'because the specified volume is being rehydrated. '
+               'This process may take a long time depending on the data '
+               'size. (volume: %(volume_id)s, volume type: %(volume_type)s)',
+        'suffix': WARNING_SUFFIX,
+    }
+    INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME = {
+        'msg_id': 334,
+        'loglevel': base_logging.WARNING,
+        'msg': 'Retyping the volume will be performed using migration '
+               'because inconsistency was found in the deduplication '
+               'system data volume. This process may take a long time '
+               'depending on the data size. '
+               '(volume: %(volume_id)s, volume type: %(volume_type)s)',
+        'suffix': WARNING_SUFFIX,
+    }
     HOST_GROUP_NUMBER_IS_MAXIMUM = {
         'msg_id': 335,
         'loglevel': base_logging.WARNING,
@ -436,6 +455,30 @@ class HBSDMsg(enum.Enum):
         'msg': 'Failed to copy a volume. (P-VOL: %(pvol)s, S-VOL: %(svol)s)',
         'suffix': ERROR_SUFFIX
     }
+    CONSISTENCY_NOT_GUARANTEE = {
+        'msg_id': 726,
+        'loglevel': base_logging.ERROR,
+        'msg': 'A volume or snapshot cannot be deleted. '
+               'The consistency of logical device for '
+               'a volume or snapshot cannot be guaranteed. (LDEV: %(ldev)s)',
+        'suffix': ERROR_SUFFIX
+    }
+    FAILED_CHANGE_VOLUME_TYPE = {
+        'msg_id': 727,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to change a volume type. '
+               'An invalid value is specified for the extra spec key '
+               '"%(key)s" of the volume type after change. '
+               '(value: %(value)s)',
+        'suffix': ERROR_SUFFIX
+    }
+    NOT_COMPLETED_CHANGE_VOLUME_TYPE = {
+        'msg_id': 728,
+        'loglevel': base_logging.ERROR,
+        'msg': 'The volume type change could not be completed. '
+               '(LDEV: %(ldev)s)',
+        'suffix': ERROR_SUFFIX
+    }
     REST_SERVER_CONNECT_FAILED = {
         'msg_id': 731,
         'loglevel': base_logging.ERROR,
@ -535,6 +578,15 @@ class HBSDMsg(enum.Enum):
         'size: %(size)s)',
         'suffix': ERROR_SUFFIX,
     }
+    DEDUPLICATION_IS_ENABLED = {
+        'msg_id': 753,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to create a volume in a %(rep_type)s environment '
+               'because deduplication is enabled for the volume type. '
+               '(volume: %(volume_id)s, volume type: %(volume_type)s, '
+               'size: %(size)s)',
+        'suffix': ERROR_SUFFIX,
+    }
     CREATE_REPLICATION_PAIR_FAILED = {
         'msg_id': 754,
         'loglevel': base_logging.ERROR,
@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Hitachi driver: Support data deduplication and compression by storage
+    assist. The feature works when deduplication and compression are enabled
+    for the DP-pool via the Configuration Manager REST API and the extra
+    spec ``hbsd:capacity_saving`` is set to ``deduplication_compression``.
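Usage sketch (volume type name assumed): the feature is driven entirely by the volume-type extra spec, typically set with ``openstack volume type set --property hbsd:capacity_saving=deduplication_compression <volume-type>``. The driver then reads it from the type's extra specs as:

    # Assumed example; the driver only inspects the 'hbsd:capacity_saving' key.
    extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'}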