From 92b43f9c68fc124c502a4f765d4074c5febd86e1 Mon Sep 17 00:00:00 2001 From: Woojay Poynter Date: Wed, 3 Jul 2019 09:53:52 -0700 Subject: [PATCH] LINSTOR driver update for LINSTOR v0.9.12 with REST API LINSTOR driver for Cinder now supports the latest version of LINSTOR with REST API on the backend. Change-Id: Icf04b1c515c766edc037ba6f4bfba5b370faebbe --- .../unit/volume/drivers/test_linstordrv.py | 1068 +++++++++-------- cinder/volume/drivers/linstordrv.py | 625 +++++----- driver-requirements.txt | 3 - ...-linstor-rest-update-52fd52f6c09a4dd3.yaml | 11 + 4 files changed, 945 insertions(+), 762 deletions(-) create mode 100644 releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml diff --git a/cinder/tests/unit/volume/drivers/test_linstordrv.py b/cinder/tests/unit/volume/drivers/test_linstordrv.py index 563d0bd12da..ab236c3f5cf 100644 --- a/cinder/tests/unit/volume/drivers/test_linstordrv.py +++ b/cinder/tests/unit/volume/drivers/test_linstordrv.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 LINBIT HA Solutions GmbH +# Copyright (c) 2018-2019 LINBIT HA Solutions GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -26,442 +26,303 @@ from cinder.volume.drivers import linstordrv as drv CONF = cfg.CONF CINDER_UNKNOWN = 'unknown' -LVM = 'Lvm' -LVMTHIN = 'LvmThin' +DISKLESS = 'DISKLESS' +LVM = 'LVM' +LVM_THIN = 'LVM_THIN' +ZFS = 'ZFS' +ZFS_THIN = 'ZFS_THIN' DRIVER = 'cinder.volume.drivers.linstordrv.' 
RESOURCE = { - 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'volume': { - 'device_path': '/dev/drbd1000', - }, + 'device_path': '/dev/drbd1000' + } } -RESOURCE_LIST = { - 'resourceStates': [ +RESOURCE_LIST = [{ + 'layer_object': { + 'children': [{ + 'storage': { + 'storage_volumes': [{ + 'allocated_size_kib': 1048576, + 'device_path': + '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', + 'disk_state': '[]', + 'usable_size_kib': 1048576, + 'volume_number': 0}]}, + 'type': 'STORAGE'}], + 'drbd': { + 'al_size': 32, + 'al_stripes': 1, + 'drbd_resource_definition': { + 'al_stripe_size_kib': 32, + 'al_stripes': 1, + 'down': False, + 'peer_slots': 7, + 'port': 7005, + 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', + 'transport_type': 'IP'}, + 'drbd_volumes': [{ + 'allocated_size_kib': 1044740, + 'backing_device': + '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', + 'device_path': '/dev/drbd1005', + 'drbd_volume_definition': { + 'minor_number': 1005, + 'volume_number': 0}, + 'usable_size_kib': 1044480}], + 'node_id': 0, + 'peer_slots': 7}, + 'type': 'DRBD'}, + 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'node_name': 'node-2', + 'state': {'in_use': False}, + 'uuid': 'a4ab4670-c5fc-4590-a3a2-39c4685c8c32', + 'volumes': [{ + 'allocated_size_kib': 45403, + 'device_path': '/dev/drbd1005', + 'layer_data_list': [{ + 'data': { + 'allocated_size_kib': 1044740, + 'backing_device': + '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', + 'device_path': '/dev/drbd1005', + 'drbd_volume_definition': { + 'minor_number': 1005, + 'volume_number': 0}, + 'usable_size_kib': 1044480}, + 'type': 'DRBD'}, { + 'data': { + 'allocated_size_kib': 1048576, + 'device_path': + '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', + 'disk_state': '[]', + 'usable_size_kib': 1048576, + 'volume_number': 0}, + 'type': 'STORAGE'} + ], + 'props': { + 'RestoreFromResource': 'CV_123a2fdc-365f-472e-bb8e-484788712abc', + 
'RestoreFromSnapshot': 'SN_68edb708-48de-4da1-9953-b9de9da9f1b8' + }, + 'provider_kind': 'LVM_THIN', + 'state': {'disk_state': 'UpToDate'}, + 'storage_pool_name': 'DfltStorPool', + 'uuid': 'e270ba0c-b284-4f21-85cc-602f132a2251', + 'volume_number': 0}]}, { + 'flags': ['DISKLESS'], + 'layer_object': { + 'children': [{ + 'storage': { + 'storage_volumes': [{ + 'allocated_size_kib': 0, + 'usable_size_kib': 1044480, + 'volume_number': 0}]}, + 'type': 'STORAGE'}], + 'drbd': { + 'al_size': 32, + 'al_stripes': 1, + 'drbd_resource_definition': { + 'al_stripe_size_kib': 32, + 'al_stripes': 1, + 'down': False, + 'peer_slots': 7, + 'port': 7005, + 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', + 'transport_type': 'IP'}, + 'drbd_volumes': [{ + 'allocated_size_kib': 1044740, + 'device_path': '/dev/drbd1005', + 'drbd_volume_definition': { + 'minor_number': 1005, + 'volume_number': 0}, + 'usable_size_kib': 1044480}], + 'flags': ['DISKLESS'], + 'node_id': 1, + 'peer_slots': 7}, + 'type': 'DRBD'}, + 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'node_name': 'node-1', + 'state': {'in_use': False}, + 'uuid': '11e853df-6f66-4cd9-9fbc-f3f7cc98d5cf', + 'volumes': [{ + 'allocated_size_kib': 45403, + 'device_path': '/dev/drbd1005', + 'layer_data_list': [ + { + 'data': { + 'allocated_size_kib': 1044740, + 'device_path': '/dev/drbd1005', + 'drbd_volume_definition': { + 'minor_number': 1005, + 'volume_number': 0}, + 'usable_size_kib': 1044480}, + 'type': 'DRBD' + }, + { + 'data': { + 'allocated_size_kib': 0, + 'usable_size_kib': 1044480, + 'volume_number': 0 + }, + 'type': 'STORAGE' + } + ], + 'provider_kind': 'DISKLESS', + 'state': {'disk_state': 'Diskless'}, + 'storage_pool_name': 'DfltStorPool', + 'uuid': '27b4aeec-2b42-41c9-b186-86afc8778046', + 'volume_number': 0 + }]}] + +RESOURCE_LIST_RESP = ['node-1', 'node-2'] + +SNAPSHOT_LIST_RESP = ['node-1'] + +DISKLESS_LIST_RESP = ['node-1'] + +RESOURCE_DFN_LIST = [{ + 'layer_data': [ { - 'rscName': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 
'nodeName': 'node_one', - 'inUse': False, - 'vlmStates': [ - { - 'vlmNr': 0, - 'diskState': 'Diskless', - } - ], + 'data': { + 'al_stripe_size_kib': 32, + 'al_stripes': 1, + 'down': False, + 'peer_slots': 7, + 'port': 7005, + 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', + 'transport_type': 'IP' + }, + 'type': 'DRBD' }, { - 'rscName': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'nodeName': 'node_two', - 'inUse': False, - 'vlmStates': [ - { - 'vlmNr': 0, - 'diskState': 'UpToDate', - } - ], - }, - ], - 'resources': [ - { - 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'nodeId': 0, - 'nodeName': 'node_one', - 'nodeUuid': '67939f68-2b26-41b7-b32e-a20b77664aef', - 'props': [{'key': 'PeerSlots', 'value': '7'}], - 'rscDfnUuid': '03623665-35a3-4caa-aa92-0c8badbda84a', - 'uuid': '559a229e-2b97-4d20-8f6d-87778bbe2f9e', - 'vlms': [ - { - 'backingDisk': '/dev/vg-35/f1_00000', - 'devicePath': '/dev/drbd1000', - 'metaDisk': 'internal', - 'storPoolName': 'DfltStorPool', - 'storPoolUuid': 'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlmDfnUuid': '0eedabe4-3c20-4eff-af74-b2ec2304ab0c', - 'vlmMinorNr': 1000, - 'vlmNr': 0, - 'vlmUuid': '38e48fb8-e0af-4317-8aab-aabb46db4cf8' - } - ] - }, - { - 'name': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'nodeId': 1, - 'nodeName': 'node_two', - 'nodeUuid': '82c4c5a5-8290-481e-9e35-1c71094b0cab', - 'props': [{'key': 'PeerSlots', 'value': '7'}], - 'rscDfnUuid': '03623665-35a3-4caa-aa92-0c8badbda84a', - 'rscFlags': ['DISKLESS'], - 'uuid': '23d3d331-ad0c-43f3-975b-d1048e09dc23', - 'vlms': [ - { - 'backingDisk': 'none', - 'devicePath': '/dev/drbd1000', - 'metaDisk': 'internal', - 'storPoolName': 'DfltStorPool', - 'storPoolUuid': '85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlmDfnUuid': '0eedabe4-3c20-4eff-af74-b2ec2304ab0c', - 'vlmMinorNr': 1000, - 'vlmNr': 0, - 'vlmUuid': 'd25b6c91-680f-4aa6-97c3-533e4bf4e659' - } - ] + 'type': 'STORAGE' } - ] -} + ], + 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'props': {'DrbdPrimarySetOn': 'node-1'}, + 'uuid': 
'9a684294-6db4-40c8-bfeb-e5351200b9db' +}] -RESOURCE_LIST_RESP = ['node_two', 'node_one'] +RESOURCE_DFN_LIST_RESP = [{ + 'rd_name': u'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'rd_uuid': u'9a684294-6db4-40c8-bfeb-e5351200b9db', +}] -SNAPSHOT_LIST_RESP = ['node_two'] - -RESOURCE_DFN_LIST = { - 'rscDfns': [ - { - 'rscDfnPort': 7002, - 'rscDfnProps': [{'key': u'DrbdPrimarySetOn', - 'value': u'NODE_TWO'}], - 'rscDfnSecret': u'syxflfoMqj84cUUcsqta', - 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', - 'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'vlmDfns': [ - { - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinor': 1001, - 'vlmNr': 0, - 'vlmProps': [{'key': u'DrbdCurrentGi', - 'value': u'2286D24524D26AA'}], - 'vlmSize': '1044480'} - ] - }, - ] -} - -RESOURCE_DFN_LIST_RESP = [ +NODES_LIST = [ { - 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'rd_port': 7002, - 'rd_size': 1.0, - 'rd_uuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', - 'vlm_dfn_uuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b' + 'connection_status': 'ONLINE', + 'name': 'node-1', + 'net_interfaces': [{ + 'address': '192.168.8.63', + 'name': 'default', + 'satellite_encryption_type': 'PLAIN', + 'satellite_port': 3366, + 'uuid': '9c5b727f-0c62-4040-9a33-96a4fd4aaac3'}], + 'props': {'CurStltConnName': 'default'}, + 'type': 'COMBINED', + 'uuid': '69b88ffb-50d9-4576-9843-d7bf4724d043' + }, + { + 'connection_status': 'ONLINE', + 'name': 'node-2', + 'net_interfaces': [{ + 'address': '192.168.8.102', + 'name': 'default', + 'satellite_encryption_type': 'PLAIN', + 'satellite_port': 3366, + 'uuid': '3f911fc9-4f9b-4155-b9da-047d5242484c'}], + 'props': {'CurStltConnName': 'default'}, + 'type': 'SATELLITE', + 'uuid': '26bde754-0f05-499c-a63c-9f4e5f30556e' } ] -NODES_LIST = { - 'nodes': [ - { - 'connectionStatus': 2, - 'name': u'node_two', - 'netInterfaces': [ - { - 'address': u'192.168.66.113', - 'name': u'default', - 'stltEncryptionType': u'PLAIN', - 'stltPort': 3366, - 'uuid': 
u'224e50c3-09a8-4cf8-b701-13663a66aecd' - } - ], - 'props': [{'key': u'CurStltConnName', 'value': u'default'}], - 'type': u'COMBINED', - 'uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef' - }, - { - 'connectionStatus': 2, - 'name': u'node_one', - 'netInterfaces': [ - { - 'address': u'192.168.66.115', - 'name': u'default', - 'stltEncryptionType': u'PLAIN', - 'stltPort': 3366, - 'uuid': u'36f42ec9-9999-4ad7-a889-8d7dbb498163' - } - ], - 'props': [{'key': u'CurStltConnName', 'value': u'default'}], - 'type': u'COMBINED', - 'uuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab' - } - ] -} - NODES_RESP = [ + {'node_address': '192.168.8.63', 'node_name': 'node-1'}, + {'node_address': '192.168.8.102', 'node_name': 'node-2'} +] + +STORAGE_POOL_DEF = [{'storage_pool_name': 'DfltStorPool'}] + +STORAGE_POOL_DEF_RESP = ['DfltStorPool'] + +STORAGE_POOL_LIST = [ { - 'node_address': u'192.168.66.113', - 'node_name': u'node_two', - 'node_uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef' + 'free_capacity': 104815656, + 'free_space_mgr_name': 'node-2:DfltStorPool', + 'node_name': 'node-2', + 'props': { + 'StorDriver/LvmVg': 'vol', + 'StorDriver/ThinPool': 'thin_pool' + }, + 'provider_kind': 'LVM_THIN', + 'static_traits': { + 'Provisioning': 'Thin', + 'SupportsSnapshots': 'true' + }, + 'storage_pool_name': 'DfltStorPool', + 'total_capacity': 104857600, + 'uuid': '004faf29-be1a-4d74-9470-038bcee2c611' }, { - 'node_address': u'192.168.66.115', - 'node_name': u'node_one', - 'node_uuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab' + 'free_capacity': 9223372036854775807, + 'free_space_mgr_name': 'node-1:DfltStorPool', + 'node_name': 'node-1', + 'provider_kind': 'DISKLESS', + 'static_traits': {'SupportsSnapshots': 'false'}, + 'storage_pool_name': 'DfltStorPool', + 'total_capacity': 9223372036854775807, + 'uuid': '897da09e-1316-45c0-a308-c07008af42df' } ] -STORAGE_POOL_DEF = { - 'storPoolDfns': [ - { - 'storPoolName': u'DfltStorPool', - 'uuid': u'f51611c6-528f-4793-a87a-866d09e6733a' - } - ] -} - 
-STORAGE_POOL_DEF_RESP = [ - { - 'spd_name': u'DfltStorPool', - 'spd_uuid': u'f51611c6-528f-4793-a87a-866d09e6733a' - } -] - -STORAGE_POOL_LIST = { - 'storPools': [ - { - 'driver': u'LvmThinDriver', - 'freeSpace': { - 'freeCapacity': '36700160', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'totalCapacity': '36700160' - }, - 'nodeName': u'node_two', - 'nodeUuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', - 'props': [{'key': u'StorDriver/LvmVg', 'value': u'vg-35'}, - {'key': u'StorDriver/ThinPool', - 'value': u'thinpool'}], - 'staticTraits': [{'key': u'Provisioning', 'value': u'Thin'}, - {'key': u'SupportsSnapshots', - 'value': u'true'}], - 'storPoolDfnUuid': u'f51611c6-528f-4793-a87a-866d09e6733a', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlms': [ - { - 'backingDisk': - u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' - }, - { - 'backingDisk': u'/dev/vg-35/f1_00000', - 'devicePath': u'/dev/drbd1000', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlmDfnUuid': u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c', - 'vlmMinorNr': 1000, - 'vlmNr': 0, - 'vlmUuid': u'38e48fb8-e0af-4317-8aab-aabb46db4cf8' - } - ] - }, - { - 'driver': u'DisklessDriver', - 'freeSpace': { - 'freeCapacity': '9223372036854775807', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'totalCapacity': '9223372036854775807' - }, - 'nodeName': u'node_one', - 'nodeUuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', - 'staticTraits': [{'key': u'SupportsSnapshots', - 'value': u'false'}], - 
'storPoolDfnUuid': u'f51611c6-528f-4793-a87a-866d09e6733a', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlms': [ - { - 'backingDisk': u'none', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' - }, - { - 'backingDisk': u'none', - 'devicePath': u'/dev/drbd1000', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlmDfnUuid': u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c', - 'vlmMinorNr': 1000, - 'vlmNr': 0, - 'vlmUuid': u'd25b6c91-680f-4aa6-97c3-533e4bf4e659' - } - ] - } - ] -} - STORAGE_POOL_LIST_RESP = [ { - 'driver_name': 'LvmThin', - 'node_name': u'node_two', - 'node_uuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', - 'sp_cap': 35.0, - 'sp_free': 35.0, - 'sp_name': u'DfltStorPool', - 'sp_uuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'sp_vlms_uuid': [u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c'] + 'driver_name': 'LVM_THIN', + 'node_name': 'node-2', + 'sp_uuid': '004faf29-be1a-4d74-9470-038bcee2c611', + 'sp_cap': 100.0, + 'sp_free': 100, + 'sp_name': u'DfltStorPool' }, { - 'driver_name': u'DisklessDriver', - 'node_name': u'node_one', - 'node_uuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', - 'sp_cap': 0.0, + 'driver_name': 'DISKLESS', + 'node_name': 'node-1', + 'sp_uuid': '897da09e-1316-45c0-a308-c07008af42df', + 'sp_allocated': 0.0, + 'sp_cap': -1.0, 'sp_free': -1.0, - 'sp_name': u'DfltStorPool', - 'sp_uuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'sp_vlms_uuid': [u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - u'0eedabe4-3c20-4eff-af74-b2ec2304ab0c'] - } -] - -VOLUME_LIST = { - 'resourceStates': [ - { - 'inUse': False, - 'nodeName': u'wp-u16-cinder-dev-lg', - 
'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'vlmStates': [{'diskState': u'Diskless', 'vlmNr': 0}] - }, - { - 'nodeName': u'wp-u16-cinder-dev-1', 'rscName': u'foo' - }, - { - 'inUse': False, - 'nodeName': u'wp-u16-cinder-dev-1', - 'rscName': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'vlmStates': [{'diskState': u'UpToDate', 'vlmNr': 0}] - } - ], - 'resources': [ - { - 'name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'nodeId': 0, - 'nodeName': u'wp-u16-cinder-dev-1', - 'nodeUuid': u'67939f68-2b26-41b7-b32e-a20b77664aef', - 'props': [{'key': u'PeerSlots', 'value': u'7'}], - 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', - 'uuid': u'2da61a7a-83b7-41d1-8a96-3a1a118dfba2', - 'vlms': [ - { - 'backingDisk': - u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-' + - u'1deb4321e4f0_00000', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' - } - ] - }, - { - 'name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'nodeId': 1, - 'nodeName': u'wp-u16-cinder-dev-lg', - 'nodeUuid': u'82c4c5a5-8290-481e-9e35-1c71094b0cab', - 'props': [{'key': u'PeerSlots', 'value': u'7'}], - 'rscDfnUuid': u'f55f0c28-455b-458f-a05d-b5f7f16b5c22', - 'rscFlags': [u'DISKLESS'], - 'uuid': u'bd6472d1-dc3c-4d41-a5f0-f44271c05680', - 'vlms': [ - { - 'backingDisk': u'none', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' - } - ] - } - ] -} - -VOLUME_LIST_RESP = [ - { - 'node_name': u'wp-u16-cinder-dev-1', - 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'volume': [ - { - 'backingDisk': 
u'/dev/vg-35/CV_bc3015e6-695f-4688-91f2-' + - u'1deb4321e4f0_00000', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'd2f293f5-5d73-4447-a14b-70efe01302be', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'b91392ae-904a-4bc6-862f-9c7aca629b35' - } - ] - }, - { - 'node_name': u'wp-u16-cinder-dev-lg', - 'rd_name': u'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'volume': [ - { - 'backingDisk': u'none', - 'devicePath': u'/dev/drbd1001', - 'metaDisk': u'internal', - 'storPoolName': u'DfltStorPool', - 'storPoolUuid': u'85ef7894-0682-4019-b95a-1b25e81c0cb5', - 'vlmDfnUuid': u'89f6eff2-c4cd-4586-9ab8-8e850568b93b', - 'vlmMinorNr': 1001, - 'vlmNr': 0, - 'vlmUuid': u'4c63ee46-acb0-4aa5-8758-8fa8f65fdd5a' - } - ] + 'sp_name': 'DfltStorPool' } ] VOLUME_STATS_RESP = { 'driver_version': '0.0.7', - 'pools': [ - { - 'QoS_support': False, - 'backend_state': 'up', - 'filter_function': None, - 'free_capacity_gb': 35.0, - 'goodness_function': None, - 'location_info': 'linstor://localhost', - 'max_over_subscription_ratio': 0, - 'multiattach': False, - 'pool_name': 'lin-test-driver', - 'provisioned_capacity_gb': 1.0, - 'reserved_percentage': 0, - 'thick_provisioning_support': False, - 'thin_provisioning_support': True, - 'total_capacity_gb': 35.0, - 'total_volumes': 1, - } - ], + 'pools': [{ + 'QoS_support': False, + 'backend_state': 'up', + 'filter_function': None, + 'free_capacity_gb': 100, + 'goodness_function': None, + 'location_info': 'linstor://localhost', + 'max_over_subscription_ratio': 0, + 'multiattach': False, + 'pool_name': 'lin-test-driver', + 'provisioned_capacity_gb': 0.0, + 'reserved_percentage': 0, + 'thick_provisioning_support': False, + 'thin_provisioning_support': True, + 'total_capacity_gb': 100.0, + 'total_volumes': 1, + }], 'vendor_name': 'LINBIT', 'volume_backend_name': 'lin-test-driver' } CINDER_VOLUME = { - 'id': 
'bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'name': 'test-lin-vol', 'size': 1, 'volume_type_id': 'linstor', @@ -469,15 +330,15 @@ CINDER_VOLUME = { } SNAPSHOT = { - 'id': 'bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'volume_id': 'bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'volume_id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'volume_size': 1 } VOLUME_NAMES = { - 'linstor': 'CV_bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'cinder': 'bc3015e6-695f-4688-91f2-1deb4321e4f0', - 'snap': 'SN_bc3015e6-695f-4688-91f2-1deb4321e4f0', + 'linstor': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'cinder': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', + 'snap': 'SN_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', } @@ -498,9 +359,6 @@ class LinstorAPIFakeDriver(object): def fake_api_storage_pool_list(self): return STORAGE_POOL_LIST - def fake_api_volume_list(self): - return VOLUME_LIST - def fake_api_resource_dfn_list(self): return RESOURCE_DFN_LIST @@ -508,6 +366,22 @@ class LinstorAPIFakeDriver(object): return SNAPSHOT_LIST_RESP +class LinstorFakeResource(object): + + def __init__(self): + self.volumes = [{'size': 1069547520}] + self.id = 0 + + def delete(self): + return True + + def is_diskless(self, host): + if host in DISKLESS_LIST_RESP: + return True + else: + return False + + class LinstorBaseDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): @@ -530,13 +404,14 @@ class LinstorBaseDriverTestCase(test.TestCase): self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') - self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name'] - self.driver.host_name = 'node_one' + self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] + self.driver.host_name = 'node-1' self.driver.diskless = True self.driver.default_uri = 'linstor://localhost' self.driver.default_backend_name = 'lin-test-driver' self.driver.configuration.reserved_percentage = 0 
self.driver.configuration.max_over_subscription_ratio = 0 + self.driver.ap_count = 0 @mock.patch(DRIVER + 'LinstorBaseDriver._ping') def test_ping(self, m_ping): @@ -554,23 +429,50 @@ class LinstorBaseDriverTestCase(test.TestCase): expected = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680' self.assertEqual(expected, val) + @mock.patch('uuid.uuid4') + def test_clean_uuid_with_braces(self, m_uuid): + m_uuid.return_value = u'{bd6472d1-dc3c-4d41-a5f0-f44271c05680}' + + val = self.driver._clean_uuid() + expected = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680' + + m_uuid.assert_called_once() + self.assertEqual(expected, val) + # Test volume size conversions - def test_unit_conversions_to_linstor(self): + def test_unit_conversions_to_linstor_1GiB(self): val = self.driver._vol_size_to_linstor(1) expected = 1044480 # 1048575 - 4096 self.assertEqual(expected, val) + def test_unit_conversions_to_linstor_2GiB(self): + val = self.driver._vol_size_to_linstor(2) + expected = 2093056 # 2097152 - 4096 + self.assertEqual(expected, val) + def test_unit_conversions_to_cinder(self): val = self.driver._vol_size_to_cinder(1048576) expected = 1 self.assertEqual(expected, val) + def test_unit_conversions_to_cinder_2GiB(self): + val = self.driver._vol_size_to_cinder(2097152) + expected = 2 + self.assertEqual(expected, val) + def test_is_clean_volume_name(self): val = self.driver._is_clean_volume_name(VOLUME_NAMES['cinder'], drv.DM_VN_PREFIX) expected = VOLUME_NAMES['linstor'] self.assertEqual(expected, val) + def test_is_clean_volume_name_invalid(self): + wrong_uuid = 'bc3015e6-695f-4688-91f2-invaliduuid1' + val = self.driver._is_clean_volume_name(wrong_uuid, + drv.DM_VN_PREFIX) + expected = None + self.assertEqual(expected, val) + def test_snapshot_name_from_cinder_snapshot(self): val = self.driver._snapshot_name_from_cinder_snapshot( SNAPSHOT) @@ -600,7 +502,9 @@ class LinstorBaseDriverTestCase(test.TestCase): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() val = 
self.driver._get_rsc_path(VOLUME_NAMES['linstor']) - expected = '/dev/drbd1000' + expected = '/dev/drbd1005' + + m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') @@ -608,7 +512,9 @@ class LinstorBaseDriverTestCase(test.TestCase): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() val = self.driver._get_local_path(CINDER_VOLUME) - expected = '/dev/drbd1000' + expected = '/dev/drbd1005' + + m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') @@ -618,6 +524,8 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver._get_spd() expected = STORAGE_POOL_DEF_RESP + + m_spd_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') @@ -627,6 +535,8 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver._get_storage_pool() expected = STORAGE_POOL_LIST_RESP + + m_sp_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') @@ -636,6 +546,8 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver._get_resource_definitions() expected = RESOURCE_DFN_LIST_RESP + + m_rscd_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') @@ -644,24 +556,43 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver._get_snapshot_nodes(VOLUME_NAMES['linstor']) expected = SNAPSHOT_LIST_RESP + + m_rsc_list.assert_called_once() self.assertEqual(expected, val) - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_get_diskless_nodes(self, m_rsc_list): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + + val = self.driver._get_diskless_nodes(RESOURCE['name']) + 
expected = DISKLESS_LIST_RESP + + m_rsc_list.assert_called_once() + self.assertEqual(expected, val) + + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') def test_get_linstor_nodes(self, m_node_list): m_node_list.return_value = self._fake_driver.fake_api_node_list() val = self.driver._get_linstor_nodes() expected = RESOURCE_LIST_RESP + + m_node_list.assert_called_once() self.assertEqual(expected, val) - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') def test_get_nodes(self, m_node_list): m_node_list.return_value = self._fake_driver.fake_api_node_list() val = self.driver._get_nodes() expected = NODES_RESP + + m_node_list.assert_called_once() self.assertEqual(expected, val) + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_size') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_is_diskless') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') @mock.patch(DRIVER + 'LinstorBaseDriver.get_goodness_function') @mock.patch(DRIVER + 'LinstorBaseDriver.get_filter_function') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') @@ -670,81 +601,79 @@ class LinstorBaseDriverTestCase(test.TestCase): m_sp_list, m_rscd_list, m_filter, - m_goodness): + m_goodness, + m_rsc_list, + m_diskless, + m_rsc_size): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) m_rscd_list.return_value = ( self._fake_driver.fake_api_resource_dfn_list()) m_filter.return_value = None m_goodness.return_value = None + m_rsc_list.return_value = RESOURCE_LIST + m_diskless.return_value = True + m_rsc_size.return_value = 1069547520 val = self.driver._get_volume_stats() expected = VOLUME_STATS_RESP + + m_sp_list.assert_called_once() + m_rscd_list.assert_called_once() self.assertEqual(expected, val) - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 
'LinstorBaseDriver._api_snapshot_create') def test_create_snapshot_fail(self, - m_snap_create, - m_api_reply, - m_rsc_list): - m_snap_create.return_value = None - m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() - m_api_reply.return_value = False + m_snap_create): + m_snap_create.return_value = False self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_snapshot, SNAPSHOT) - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_create') def test_create_snapshot_success(self, - m_snap_create, - m_api_reply, - m_rsc_list): - m_snap_create.return_value = None - m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() - m_api_reply.return_value = True + m_snap_create): + m_snap_create.return_value = True # No exception should be raised self.assertIsNone(self.driver.create_snapshot(SNAPSHOT)) - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') def test_delete_snapshot_fail(self, - m_snap_delete, - m_api_reply, - m_rsc_list, - m_rsc_dfn_list): - m_snap_delete.return_value = None - m_api_reply.return_value = False - m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() - m_rsc_dfn_list.return_value = ( - self._fake_driver.fake_api_resource_dfn_list()) + m_snap_delete): + m_snap_delete.return_value = False self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_snapshot, SNAPSHOT) - @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') 
@mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') def test_delete_snapshot_success(self, m_snap_delete, - m_api_reply, - m_rsc_list, - m_rsc_dfn_delete): - m_snap_delete.return_value = None - m_api_reply.return_value = True - m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() - m_rsc_dfn_delete.return_value = True + m_snap_nodes): + m_snap_delete.return_value = True + m_snap_nodes.return_value = self._fake_driver.fake_api_snapshot_list() # No exception should be raised self.driver.delete_snapshot(SNAPSHOT) + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') + def test_delete_snapshot_success_cleanup_rd(self, + m_snap_delete, + m_snap_nodes, + m_rd_delete): + m_snap_delete.return_value = True + m_snap_nodes.return_value = [] + m_rd_delete.return_value = None + + # No exception should be raised + self.driver.delete_snapshot(SNAPSHOT) + + # Resource Definition Delete should run once + m_rd_delete.assert_called_once() + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') @@ -768,8 +697,6 @@ class LinstorBaseDriverTestCase(test.TestCase): m_api_reply.return_value = True m_snap_vd_restore.return_value = True m_nodes = [] - m_nodes.append('for test') - m_nodes.remove('for test') m_lin_nodes.return_value = m_nodes m_snap_rsc_restore.return_value = True m_rsc_create.return_value = True @@ -778,16 +705,95 @@ class LinstorBaseDriverTestCase(test.TestCase): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) + # No exception should be raised self.assertIsNone(self.driver.create_volume_from_snapshot( CINDER_VOLUME, SNAPSHOT)) + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + @mock.patch(DRIVER + 
'LinstorBaseDriver._api_volume_dfn_set_sp') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + def test_create_volume_from_snapshot_fail_restore(self, + m_rsc_dfn_create, + m_api_reply, + m_snap_vd_restore, + m_lin_nodes, + m_snap_rsc_restore, + m_rsc_create, + m_vol_extend, + m_vol_dfn, + m_sp_list): + m_rsc_dfn_create.return_value = True + m_api_reply.return_value = True + m_snap_vd_restore.return_value = True + m_nodes = [] + m_lin_nodes.return_value = m_nodes + m_snap_rsc_restore.return_value = False + m_rsc_create.return_value = True + m_vol_extend.return_value = True + m_vol_dfn.return_value = True + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + + # Failing to restore a snapshot should raise an exception + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + CINDER_VOLUME, SNAPSHOT) + + @mock.patch(DRIVER + 'LinstorBaseDriver.delete_volume') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_set_sp') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') + def 
test_create_volume_from_snapshot_fail_extend(self, + m_rsc_dfn_create, + m_api_reply, + m_snap_vd_restore, + m_lin_nodes, + m_snap_rsc_restore, + m_rsc_create, + m_vol_extend, + m_vol_dfn, + m_sp_list, + m_delete_volume): + m_rsc_dfn_create.return_value = True + m_api_reply.return_value = False + m_snap_vd_restore.return_value = True + m_nodes = [] + m_lin_nodes.return_value = m_nodes + m_snap_rsc_restore.return_value = True + m_rsc_create.return_value = True + m_vol_extend.return_value = True + m_vol_dfn.return_value = True + m_sp_list.return_value = ( + self._fake_driver.fake_api_storage_pool_list()) + m_delete_volume.return_value = True + + # Failing to extend the volume after a snapshot restoration should + # raise an exception + new_volume = CINDER_VOLUME + new_volume['size'] = 2 + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.create_volume_from_snapshot, + new_volume, SNAPSHOT) + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume_fail_no_linstor_nodes(self, m_sp_list, @@ -824,7 +830,7 @@ class LinstorBaseDriverTestCase(test.TestCase): @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 
'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume_fail_rsc_create(self, m_sp_list, @@ -862,7 +868,7 @@ class LinstorBaseDriverTestCase(test.TestCase): @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') - @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_nodes_list') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume(self, m_sp_list, @@ -893,8 +899,9 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver.create_volume(test_volume) expected = {} - self.assertEqual(val, expected) + self.assertEqual(expected, val) + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @@ -905,12 +912,14 @@ class LinstorBaseDriverTestCase(test.TestCase): m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, - m_api_reply): + m_api_reply, + m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = True m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = False + m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' @@ -920,6 +929,109 @@ class LinstorBaseDriverTestCase(test.TestCase): self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_volume, test_volume) + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 
'LinstorBaseDriver._api_volume_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_delete_volume_fail_diskless_remove(self, + m_rsc_list, + m_rsc_delete, + m_vol_dfn_delete, + m_rsc_dfn_delete, + m_api_reply, + m_diskless, + m_rsc_auto_delete): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_delete.return_value = False + m_vol_dfn_delete.return_value = True + m_rsc_dfn_delete.return_value = True + m_api_reply.return_value = False + m_diskless.return_value = ['foo'] + m_rsc_auto_delete.return_value = True + + test_volume = CINDER_VOLUME + test_volume['display_name'] = 'linstor_test' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + # Raises exception for failing to delete a diskless resource + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.delete_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_delete_volume_fail_diskful_remove(self, + m_rsc_list, + m_rsc_delete, + m_vol_dfn_delete, + m_rsc_dfn_delete, + m_api_reply, + m_diskless, + m_snap_nodes, + m_rsc_auto_delete): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_delete.return_value = False + m_vol_dfn_delete.return_value = True + m_rsc_dfn_delete.return_value = True + m_api_reply.return_value = False + m_diskless.return_value = [] + m_snap_nodes.return_value = ['foo'] + m_rsc_auto_delete.return_value = True + + test_volume = 
CINDER_VOLUME + test_volume['display_name'] = 'linstor_test' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + # Raises exception for failing to delete a diskful resource + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.delete_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') + @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') + @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') + def test_delete_volume_fail_volume_definition(self, + m_rsc_list, + m_rsc_delete, + m_vol_dfn_delete, + m_rsc_dfn_delete, + m_api_reply, + m_diskless, + m_snap_nodes, + m_rsc_auto_delete): + m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() + m_rsc_delete.return_value = True + m_vol_dfn_delete.return_value = False + m_rsc_dfn_delete.return_value = True + m_api_reply.return_value = False + m_diskless.return_value = [] + m_snap_nodes.return_value = [] + m_rsc_auto_delete.return_value = True + + test_volume = CINDER_VOLUME + test_volume['display_name'] = 'linstor_test' + test_volume['host'] = 'node_one' + test_volume['size'] = 1 + + # Raises exception for failing to delete a volume definition + self.assertRaises(cinder_exception.VolumeBackendAPIException, + self.driver.delete_volume, test_volume) + + @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @@ -930,12 +1042,14 @@ class LinstorBaseDriverTestCase(test.TestCase): m_rsc_delete, m_vol_dfn_delete, 
m_rsc_dfn_delete, - m_api_reply): + m_api_reply, + m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = True m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = True + m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' @@ -944,7 +1058,7 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver.delete_volume(test_volume) expected = True - self.assertEqual(val, expected) + self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') @@ -973,7 +1087,7 @@ class LinstorBaseDriverTestCase(test.TestCase): val = self.driver.migrate_volume(m_ctxt, m_volume, m_host) expected = (False, None) - self.assertEqual(val, expected) + self.assertEqual(expected, val) class LinstorIscsiDriverTestCase(test.TestCase): @@ -996,7 +1110,7 @@ class LinstorIscsiDriverTestCase(test.TestCase): self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') - self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name'] + self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] self.driver.host_name = 'node_one' self.driver.diskless = True self.driver.location_info = 'LinstorIscsi:linstor://localhost' @@ -1004,22 +1118,22 @@ class LinstorIscsiDriverTestCase(test.TestCase): self.driver.configuration.reserved_percentage = int('0') self.driver.configuration.max_over_subscription_ratio = int('0') + @mock.patch(DRIVER + 'LinstorIscsiDriver._get_api_resource_list') @mock.patch(DRIVER + 'LinstorIscsiDriver._get_volume_stats') - def test_iscsi_get_volume_stats(self, m_vol_stats): + def test_iscsi_get_volume_stats(self, m_vol_stats, m_rsc_list): m_vol_stats.return_value = VOLUME_STATS_RESP + m_rsc_list.return_value = RESOURCE_LIST val = self.driver.get_volume_stats() expected = 
VOLUME_STATS_RESP expected["storage_protocol"] = 'iSCSI' - self.assertEqual(val, expected) + self.assertEqual(expected, val) - @mock.patch(DRIVER + 'proto') @mock.patch(DRIVER + 'linstor') - def test_iscsi_check_for_setup_error_pass(self, m_linstor, m_proto): + def test_iscsi_check_for_setup_error_pass(self, m_linstor): m_linstor.return_value = True - m_proto.return_value = True # No exception should be raised self.driver.check_for_setup_error() @@ -1044,7 +1158,7 @@ class LinstorDrbdDriverTestCase(test.TestCase): self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') - self.driver.default_pool = STORAGE_POOL_DEF_RESP[0]['spd_name'] + self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] self.driver.host_name = 'node_one' self.driver.diskless = True self.driver.location_info = 'LinstorDrbd:linstor://localhost' @@ -1054,7 +1168,7 @@ class LinstorDrbdDriverTestCase(test.TestCase): @mock.patch(DRIVER + 'LinstorDrbdDriver._get_rsc_path') def test_drbd_return_drbd_config(self, m_rsc_path): - m_rsc_path.return_value = '/dev/drbd1000' + m_rsc_path.return_value = '/dev/drbd1005' val = self.driver._return_drbd_config(CINDER_VOLUME) @@ -1064,14 +1178,14 @@ class LinstorDrbdDriverTestCase(test.TestCase): "device_path": str(m_rsc_path.return_value) } } - self.assertEqual(val, expected) + self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorDrbdDriver._get_api_storage_pool_list') def test_drbd_node_in_sp(self, m_sp_list): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) - val = self.driver._node_in_sp('node_two') + val = self.driver._node_in_sp('node-1') self.assertTrue(val) @mock.patch(DRIVER + 'LinstorDrbdDriver._get_volume_stats') @@ -1081,13 +1195,11 @@ class LinstorDrbdDriverTestCase(test.TestCase): val = self.driver.get_volume_stats() expected = VOLUME_STATS_RESP expected["storage_protocol"] = 'DRBD' - self.assertEqual(val, expected) + self.assertEqual(expected, val) - 
@mock.patch(DRIVER + 'proto') @mock.patch(DRIVER + 'linstor') - def test_drbd_check_for_setup_error_pass(self, m_linstor, m_proto): + def test_drbd_check_for_setup_error_pass(self, m_linstor): m_linstor.return_value = True - m_proto.return_value = True # No exception should be raised self.driver.check_for_setup_error() @@ -1117,7 +1229,7 @@ class LinstorDrbdDriverTestCase(test.TestCase): "device_path": str(m_rsc_path.return_value) } } - self.assertEqual(val, expected) + self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorDrbdDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorDrbdDriver._api_rsc_delete') diff --git a/cinder/volume/drivers/linstordrv.py b/cinder/volume/drivers/linstordrv.py index b8ef69af735..9beb820456e 100644 --- a/cinder/volume/drivers/linstordrv.py +++ b/cinder/volume/drivers/linstordrv.py @@ -1,4 +1,4 @@ -# Copyright (c) 2014-2018 LINBIT HA Solutions GmbH +# Copyright (c) 2014-2019 LINBIT HA Solutions GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,7 +15,7 @@ """This driver connects Cinder to an installed LINSTOR instance. -See https://docs.linbit.com/docs/users-guide-9.0/#ch-openstack +See https://docs.linbit.com/docs/users-guide-9.0/#ch-openstack-linstor for more details. """ @@ -34,11 +34,6 @@ from cinder import interface from cinder.volume import configuration from cinder.volume import driver -try: - import google.protobuf.json_format as proto -except ImportError: - proto = None - try: import linstor lin_drv = linstor.Linstor @@ -69,7 +64,14 @@ linstor_opts = [ default=4096, help='Default Block size for Image restoration. ' 'When using iSCSI transport, this option ' - 'specifies the block size'), + 'specifies the block size.'), + + cfg.IntOpt('linstor_autoplace_count', + default=0, + help='Autoplace replication count on volume deployment. 
' + '0 = Full cluster replication without autoplace, ' + '1 = Single node deployment without replication, ' + '2 or greater = Replicated deployment with autoplace.'), cfg.BoolOpt('linstor_controller_diskless', default=True, @@ -84,14 +86,25 @@ CONF.register_opts(linstor_opts, group=configuration.SHARED_CONF_GROUP) CINDER_UNKNOWN = 'unknown' DM_VN_PREFIX = 'CV_' DM_SN_PREFIX = 'SN_' -LVM = 'Lvm' -LVMTHIN = 'LvmThin' +DISKLESS = 'DISKLESS' +LVM = 'LVM' +LVM_THIN = 'LVM_THIN' +ZFS = 'ZFS' +ZFS_THIN = 'ZFS_THIN' class LinstorBaseDriver(driver.VolumeDriver): - """Cinder driver that uses Linstor for storage.""" + """Cinder driver that uses LINSTOR for storage. - VERSION = '1.0.0' + Version history: + + .. code-block:: none + + 1.0.0 - Initial driver + 1.0.1 - Added support for LINSTOR 0.9.12 + """ + + VERSION = '1.0.1' # ThirdPartySystems wiki page CI_WIKI_NAME = 'LINBIT_LINSTOR_CI' @@ -113,6 +126,8 @@ class LinstorBaseDriver(driver.VolumeDriver): 'linstor_default_blocksize') self.diskless = self.configuration.safe_get( 'linstor_controller_diskless') + self.ap_count = self.configuration.safe_get( + 'linstor_autoplace_count') self.default_backend_name = self.configuration.safe_get( 'volume_backend_name') self.host_name = socket.gethostname() @@ -176,44 +191,36 @@ class LinstorBaseDriver(driver.VolumeDriver): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() - - response = proto.MessageToDict(lin.resource_list()[0].proto_msg) - return response + api_reply = lin.resource_list()[0].__dict__['_rest_data'] + return api_reply def _get_api_resource_dfn_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() + api_reply = lin.resource_dfn_list()[0].__dict__['_rest_data'] + return api_reply - response = proto.MessageToDict( - lin.resource_dfn_list()[0].proto_msg) - return response - - def _get_api_nodes_list(self): + def _get_api_node_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() - - 
response = proto.MessageToDict(lin.node_list()[0].proto_msg) - return response + api_reply = lin.node_list()[0].__dict__['_rest_data'] + return api_reply def _get_api_storage_pool_dfn_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() - - response = proto.MessageToDict( - lin.storage_pool_dfn_list()[0].proto_msg) - return response + api_reply = lin.storage_pool_dfn_list()[0].__dict__['_rest_data'] + return api_reply def _get_api_storage_pool_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() - - response = proto.MessageToDict( - lin.storage_pool_list()[0].proto_msg) - return response + api_reply = lin.storage_pool_list()[0].__dict__['_rest_data'] + return api_reply def _get_api_volume_extend(self, rsc_target_name, new_size): with lin_drv(self.default_uri) as lin: @@ -226,25 +233,15 @@ class LinstorBaseDriver(driver.VolumeDriver): size=self._vol_size_to_linstor(new_size)) return vol_reply - def _api_snapshot_create(self, node_names, rsc_name, snapshot_name): - with lin_drv(self.default_uri) as lin: - if not lin.connected: - lin.connect() + def _api_snapshot_create(self, drbd_rsc_name, snapshot_name): + lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri) + snap_reply = lin.snapshot_create(snapshot_name) + return snap_reply - snap_reply = lin.snapshot_create(node_names=node_names, - rsc_name=rsc_name, - snapshot_name=snapshot_name, - async_msg=False) - return snap_reply - - def _api_snapshot_delete(self, drbd_rsc_name, snap_name): - with lin_drv(self.default_uri) as lin: - if not lin.connected: - lin.connect() - - snap_reply = lin.snapshot_delete(rsc_name=drbd_rsc_name, - snapshot_name=snap_name) - return snap_reply + def _api_snapshot_delete(self, drbd_rsc_name, snapshot_name): + lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri) + snap_reply = lin.snapshot_delete(snapshot_name) + return snap_reply def _api_rsc_dfn_delete(self, drbd_rsc_name): with lin_drv(self.default_uri) as 
lin: @@ -320,6 +317,18 @@ class LinstorBaseDriver(driver.VolumeDriver): rsc_reply = lin.resource_create([new_rsc], async_msg=False) return rsc_reply + def _api_rsc_autoplace(self, rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + new_rsc = linstor.Resource(name=rsc_name, uri=self.default_uri) + new_rsc.placement.redundancy = self.ap_count + new_rsc.placement.storage_pool = self.default_pool + rsc_reply = new_rsc.autoplace() + + return rsc_reply + def _api_rsc_delete(self, rsc_name, node_name): with lin_drv(self.default_uri) as lin: if not lin.connected: @@ -329,6 +338,36 @@ class LinstorBaseDriver(driver.VolumeDriver): rsc_name=rsc_name) return rsc_reply + def _api_rsc_auto_delete(self, rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc = linstor.Resource(str(rsc_name), self.default_uri) + return rsc.delete() + + def _api_rsc_is_diskless(self, rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc = linstor.Resource(str(rsc_name)) + return rsc.is_diskless(self.host_name) + + def _api_rsc_size(self, rsc_name): + with lin_drv(self.default_uri) as lin: + if not lin.connected: + lin.connect() + + rsc = linstor.Resource(str(rsc_name)) + if len(rsc.volumes): + if "size" in rsc.volumes: + return rsc.volumes[0].size + else: + return 0 + else: + return 0 + def _api_volume_dfn_delete(self, rsc_name, volume_nr): with lin_drv(self.default_uri) as lin: if not lin.connected: @@ -353,29 +392,39 @@ class LinstorBaseDriver(driver.VolumeDriver): return vol_reply def _api_snapshot_resource_restore(self, - nodes, src_rsc_name, src_snap_name, new_vol_name): + + lin = linstor.Resource(src_rsc_name, uri=self.default_uri) + new_rsc = lin.restore_from_snapshot(src_snap_name, new_vol_name) + + # Adds an aux/property KV for synchronous return from snapshot restore with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() - rsc_reply = 
lin.snapshot_resource_restore( - node_names=nodes, - from_resource=src_rsc_name, - from_snapshot=src_snap_name, - to_resource=new_vol_name) - return rsc_reply + aux_prop = {} + aux_prop["Aux/restore"] = "done" + lin.volume_dfn_modify( + rsc_name=new_vol_name, + volume_nr=0, + set_properties=aux_prop) + + if new_rsc.name == new_vol_name: + return True + return False def _get_rsc_path(self, rsc_name): rsc_list_reply = self._get_api_resource_list() - for rsc in rsc_list_reply['resources']: - if rsc['name'] == rsc_name and rsc['nodeName'] == self.host_name: - for volume in rsc['vlms']: - if volume['vlmNr'] == 0: - return volume['devicePath'] + if rsc_list_reply: + for rsc in rsc_list_reply: + if (rsc["name"] == rsc_name and + rsc["node_name"] == self.host_name): + for volume in rsc["volumes"]: + if volume["volume_number"] == 0: + return volume["device_path"] def _get_local_path(self, volume): try: @@ -391,13 +440,11 @@ class LinstorBaseDriver(driver.VolumeDriver): def _get_spd(self): # Storage Pool Definition List spd_list_reply = self._get_api_storage_pool_dfn_list() - spd_list = [] - for node in spd_list_reply['storPoolDfns']: - spd_item = {} - spd_item['spd_uuid'] = node['uuid'] - spd_item['spd_name'] = node['storPoolName'] - spd_list.append(spd_item) + + if spd_list_reply: + for spd in spd_list_reply: + spd_list.append(spd["storage_pool_name"]) return spd_list @@ -405,49 +452,39 @@ class LinstorBaseDriver(driver.VolumeDriver): # Fetch Storage Pool List sp_list_reply = self._get_api_storage_pool_list() - # Fetch Resource Definition List - sp_list = [] - # Separate the diskless nodes sp_diskless_list = [] + sp_list = [] node_count = 0 if sp_list_reply: - for node in sp_list_reply['storPools']: - if node['storPoolName'] == self.default_pool: + for node in sp_list_reply: + if node["storage_pool_name"] == self.default_pool: sp_node = {} - sp_node['node_uuid'] = node['nodeUuid'] - sp_node['node_name'] = node['nodeName'] - sp_node['sp_uuid'] = node['storPoolUuid'] - 
sp_node['sp_name'] = node['storPoolName'] - sp_node['sp_vlms_uuid'] = [] - if 'vlms' in node: - for vlm in node['vlms']: - sp_node['sp_vlms_uuid'].append(vlm['vlmDfnUuid']) + sp_node["node_name"] = node["node_name"] + sp_node["sp_uuid"] = node["uuid"] + sp_node["sp_name"] = node["storage_pool_name"] - if 'Diskless' in node['driver']: + if node["provider_kind"] == DISKLESS: diskless = True - sp_node['sp_free'] = -1.0 - sp_node['sp_cap'] = 0.0 + sp_node["sp_free"] = -1.0 + sp_node["sp_cap"] = -1.0 + sp_node["sp_allocated"] = 0.0 else: diskless = False - if 'freeSpace' in node: - sp_node['sp_free'] = round( - int(node['freeSpace']['freeCapacity']) / - units.Mi, - 2) - sp_node['sp_cap'] = round( - int(node['freeSpace']['totalCapacity']) / - units.Mi, - 2) + if "free_capacity" in node: + temp = float(node["free_capacity"]) / units.Mi + sp_node["sp_free"] = round(temp) + temp = float(node["total_capacity"]) / units.Mi + sp_node["sp_cap"] = round(temp) - # Driver - if node['driver'] == "LvmDriver": - sp_node['driver_name'] = LVM - elif node['driver'] == "LvmThinDriver": - sp_node['driver_name'] = LVMTHIN + drivers = [LVM, LVM_THIN, ZFS, ZFS_THIN, DISKLESS] + + # Driver selection + if node["provider_kind"] in drivers: + sp_node['driver_name'] = node["provider_kind"] else: - sp_node['driver_name'] = node['driver'] + sp_node['driver_name'] = str(node["provider_kind"]) if diskless: sp_diskless_list.append(sp_node) @@ -455,9 +492,9 @@ class LinstorBaseDriver(driver.VolumeDriver): sp_list.append(sp_node) node_count += 1 - # Add the diskless nodes to the end of the list - if sp_diskless_list: - sp_list.extend(sp_diskless_list) + # Add the diskless nodes to the end of the list + if sp_diskless_list: + sp_list.extend(sp_diskless_list) return sp_list @@ -465,7 +502,7 @@ class LinstorBaseDriver(driver.VolumeDriver): data = {} data["volume_backend_name"] = self.default_backend_name - data["vendor_name"] = 'LINBIT' + data["vendor_name"] = "LINBIT" data["driver_version"] = self.VERSION 
data["pools"] = [] @@ -477,40 +514,51 @@ class LinstorBaseDriver(driver.VolumeDriver): for rd in rd_list: num_vols += 1 - allocated_sizes_gb = [] - free_capacity_gb = [] - total_capacity_gb = [] + # allocated_sizes_gb = [] + free_gb = [] + total_gb = [] thin_enabled = False - # Free capacity for Local Node + # Total & Free capacity for Local Node single_pool = {} for sp in sp_data: - if 'Diskless' not in sp['driver_name']: - if 'LvmThin' in sp['driver_name']: + if "Diskless" not in sp["driver_name"]: + thin_backends = [LVM_THIN, ZFS_THIN] + if sp["driver_name"] in thin_backends: thin_enabled = True - if 'sp_cap' in sp: - if sp['sp_cap'] >= 0.0: - total_capacity_gb.append(sp['sp_cap']) - if 'sp_free' in sp: - if sp['sp_free'] >= 0.0: - free_capacity_gb.append(sp['sp_free']) - sp_allocated_size_gb = 0 - for vlm_uuid in sp['sp_vlms_uuid']: - for rd in rd_list: - if 'vlm_dfn_uuid' in rd: - if rd['vlm_dfn_uuid'] == vlm_uuid: - sp_allocated_size_gb += rd['rd_size'] - allocated_sizes_gb.append(sp_allocated_size_gb) + if "sp_cap" in sp: + if sp["sp_cap"] >= 0.0: + total_gb.append(sp["sp_cap"]) + if "sp_free" in sp: + if sp["sp_free"] >= 0.0: + free_gb.append(sp["sp_free"]) + + # Allocated capacity + sp_allocated_size_gb = 0.0 + local_resources = [] + + reply = self._get_api_resource_list() + + if reply: + for rsc in reply: + if rsc["node_name"] == self.host_name: + local_resources.append(rsc["name"]) + + for rsc_name in local_resources: + if not self._api_rsc_is_diskless(rsc_name): + rsc_size = self._api_rsc_size(rsc_name) + sp_allocated_size_gb += round( + int(rsc_size) / units.Gi, 2) single_pool["pool_name"] = data["volume_backend_name"] - single_pool["free_capacity_gb"] = min(free_capacity_gb) - single_pool["total_capacity_gb"] = min(total_capacity_gb) - single_pool['provisioned_capacity_gb'] = max(allocated_sizes_gb) + single_pool["free_capacity_gb"] = min(free_gb) if free_gb else 0 + single_pool["total_capacity_gb"] = min(total_gb) if total_gb else 0 + 
single_pool["provisioned_capacity_gb"] = sp_allocated_size_gb single_pool["reserved_percentage"] = ( self.configuration.reserved_percentage) - single_pool['thin_provisioning_support'] = thin_enabled - single_pool['thick_provisioning_support'] = not thin_enabled - single_pool['max_over_subscription_ratio'] = ( + single_pool["thin_provisioning_support"] = thin_enabled + single_pool["thick_provisioning_support"] = not thin_enabled + single_pool["max_over_subscription_ratio"] = ( self.configuration.max_over_subscription_ratio) single_pool["location_info"] = self.default_uri single_pool["total_volumes"] = num_vols @@ -518,7 +566,7 @@ class LinstorBaseDriver(driver.VolumeDriver): single_pool["goodness_function"] = self.get_goodness_function() single_pool["QoS_support"] = False single_pool["multiattach"] = False - single_pool["backend_state"] = 'up' + single_pool["backend_state"] = "up" data["pools"].append(single_pool) @@ -526,29 +574,16 @@ class LinstorBaseDriver(driver.VolumeDriver): def _get_resource_definitions(self): + rd_list_reply = self._get_api_resource_dfn_list() rd_list = [] - rd_list_reply = self._get_api_resource_dfn_list() - - # Only if resource definition present - if 'rscDfns' in rd_list_reply: - for node in rd_list_reply['rscDfns']: - + if rd_list_reply: + for node in rd_list_reply: # Count only Cinder volumes - if DM_VN_PREFIX in node['rscName']: + if DM_VN_PREFIX in node['name']: rd_node = {} - rd_node['rd_uuid'] = node['rscDfnUuid'] - rd_node['rd_name'] = node['rscName'] - rd_node['rd_port'] = node['rscDfnPort'] - - if 'vlmDfns' in node: - for vol in node['vlmDfns']: - if vol['vlmNr'] == 0: - rd_node['vlm_dfn_uuid'] = vol['vlmDfnUuid'] - rd_node['rd_size'] = round( - float(vol['vlmSize']) / units.Mi, 2) - break - + rd_node["rd_uuid"] = node['uuid'] + rd_node["rd_name"] = node['name'] rd_list.append(rd_node) return rd_list @@ -558,46 +593,62 @@ class LinstorBaseDriver(driver.VolumeDriver): However, it excludes diskless nodes. 
""" - - rsc_list_reply = self._get_api_resource_list() # reply in dict - + rsc_list_reply = self._get_api_resource_list() snap_list = [] - for rsc in rsc_list_reply['resources']: - if rsc['name'] != resource: - continue - # Diskless nodes are not available for snapshots - diskless = False - if 'rscFlags' in rsc: - if 'DISKLESS' in rsc['rscFlags']: - diskless = True - if not diskless: - snap_list.append(rsc['nodeName']) + if rsc_list_reply: + for rsc in rsc_list_reply: + if rsc["name"] != resource: + continue + + # Diskless nodes are not available for snapshots + diskless = False + if "flags" in rsc: + if 'DISKLESS' in rsc["flags"]: + diskless = True + if not diskless: + snap_list.append(rsc["node_name"]) return snap_list - def _get_linstor_nodes(self): - # Returns all available DRBD nodes - node_list_reply = self._get_api_nodes_list() + def _get_diskless_nodes(self, resource): + # Returns diskless nodes given a resource + rsc_list_reply = self._get_api_resource_list() + diskless_list = [] + if rsc_list_reply: + for rsc in rsc_list_reply: + if rsc["name"] != resource: + continue + + if "flags" in rsc: + if DISKLESS in rsc["flags"]: + diskless_list.append(rsc["node_name"]) + + return diskless_list + + def _get_linstor_nodes(self): + # Returns all available LINSTOR nodes + node_list_reply = self._get_api_node_list() node_list = [] - for node in node_list_reply['nodes']: - node_list.append(node['name']) + + if node_list_reply: + for node in node_list_reply: + node_list.append(node["name"]) return node_list def _get_nodes(self): - # Get Node List - node_list_reply = self._get_api_nodes_list() - + # Returns all LINSTOR nodes in a dict list + node_list_reply = self._get_api_node_list() node_list = [] + if node_list_reply: - for node in node_list_reply['nodes']: + for node in node_list_reply: node_item = {} - node_item['node_name'] = node['name'] - node_item['node_uuid'] = node['uuid'] - node_item['node_address'] = ( - node['netInterfaces'][0]['address']) + 
node_item["node_name"] = node["name"] + node_item["node_address"] = ( + node["net_interfaces"][0]["address"]) node_list.append(node_item) return node_list @@ -622,83 +673,57 @@ class LinstorBaseDriver(driver.VolumeDriver): # def create_snapshot(self, snapshot): snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) - drbd_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) - node_names = self._get_snapshot_nodes(drbd_rsc_name) + rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) - snap_reply = self._api_snapshot_create(node_names=node_names, - rsc_name=drbd_rsc_name, + snap_reply = self._api_snapshot_create(drbd_rsc_name=rsc_name, snapshot_name=snap_name) - if not self._check_api_reply(snap_reply, noerror_only=True): + if not snap_reply: msg = 'ERROR creating a LINSTOR snapshot {}'.format(snap_name) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) def delete_snapshot(self, snapshot): - snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) - drbd_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) + snapshot_name = self._snapshot_name_from_cinder_snapshot(snapshot) + rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) - snap_reply = self._api_snapshot_delete(drbd_rsc_name, snap_name) + snap_reply = self._api_snapshot_delete(rsc_name, snapshot_name) - if not self._check_api_reply(snap_reply, noerror_only=True): - msg = 'ERROR deleting a LINSTOR snapshot {}'.format(snap_name) + if not snap_reply: + msg = 'ERROR deleting a LINSTOR snapshot {}'.format(snapshot_name) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) # Delete RD if no other RSC are found - if not self._get_snapshot_nodes(drbd_rsc_name): - self._api_rsc_dfn_delete(drbd_rsc_name) + if not self._get_snapshot_nodes(rsc_name): + self._api_rsc_dfn_delete(rsc_name) def create_volume_from_snapshot(self, volume, snapshot): src_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) src_snap_name = 
self._snapshot_name_from_cinder_snapshot(snapshot) new_vol_name = self._drbd_resource_name_from_cinder_volume(volume) - # New RD - rsc_reply = self._api_rsc_dfn_create(new_vol_name) + # If no autoplace, manually build a cluster list + if self.ap_count == 0: + diskless_nodes = [] + nodes = [] + for node in self._get_storage_pool(): - if not self._check_api_reply(rsc_reply): - msg = _('Error on creating LINSTOR Resource Definition') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + if DISKLESS in node['driver_name']: + diskless_nodes.append(node['node_name']) + continue - # New VD from Snap - reply = self._api_snapshot_volume_dfn_restore(src_rsc_name, - src_snap_name, - new_vol_name) - if not self._check_api_reply(reply, noerror_only=True): - msg = _('Error on restoring LINSTOR Volume Definition') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + # Filter out controller node if it is diskless + if self.diskless and node['node_name'] == self.host_name: + continue + else: + nodes.append(node['node_name']) - # Set StorPoolName property on VD - reply = self._api_volume_dfn_set_sp(new_vol_name) - if not self._check_api_reply(reply): - msg = _('Error on restoring LINSTOR Volume StorPoolName property') - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - # New RSC from Snap - # Assumes restoring to all the nodes containing the storage pool - # unless diskless - nodes = [] - for node in self._get_storage_pool(): - - if 'Diskless' in node['driver_name']: - continue - - # Filter out controller node if LINSTOR is diskless - if self.diskless and node['node_name'] == self.host_name: - continue - else: - nodes.append(node['node_name']) - - reply = self._api_snapshot_resource_restore(nodes, - src_rsc_name, + reply = self._api_snapshot_resource_restore(src_rsc_name, src_snap_name, new_vol_name) - if not self._check_api_reply(reply, noerror_only=True): - msg = _('Error on restoring LINSTOR resources') + if not reply: 
+ msg = _('Error on restoring a LINSTOR volume') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -708,6 +733,13 @@ class LinstorBaseDriver(driver.VolumeDriver): node_name=self.host_name, diskless=self.diskless) + # Add any other diskless nodes only if not autoplaced + if self.ap_count == 0 and diskless_nodes: + for node in diskless_nodes: + self._api_rsc_create(rsc_name=new_vol_name, + node_name=node, + diskless=True) + # Upsize if larger volume than original snapshot src_rsc_size = int(snapshot['volume_size']) new_vol_size = int(volume['size']) @@ -722,7 +754,7 @@ class LinstorBaseDriver(driver.VolumeDriver): if not self._check_api_reply(reply, noerror_only=True): # Delete failed volume - failed_volume = [] + failed_volume = {} failed_volume['id'] = volume['id'] self.delete_volume(failed_volume) @@ -731,9 +763,9 @@ class LinstorBaseDriver(driver.VolumeDriver): raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): + # Check for Storage Pool List sp_data = self._get_storage_pool() - rsc_size = 1 rsc_size = volume['size'] # No existing Storage Pools found @@ -747,11 +779,11 @@ class LinstorBaseDriver(driver.VolumeDriver): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - # Create Storage Pool (definition is implicit) + # Create Storage Pool spd_list = self._get_spd() if spd_list: - spd_name = spd_list[0]['spd_name'] + spd_name = spd_list[0] for node in node_list: @@ -766,12 +798,12 @@ class LinstorBaseDriver(driver.VolumeDriver): storage_driver=node_driver, driver_pool_name=self.default_vg_name) - if not self._check_api_reply(sp_reply): + if not self._check_api_reply(sp_reply, noerror_only=True): msg = _('Could not create a LINSTOR storage pool') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - # # Check for RD + # Check for RD # If Retyping from another volume, use parent/origin uuid # as a name source if (volume['migration_status'] is not None and @@ -785,8 +817,8 @@ class 
LinstorBaseDriver(driver.VolumeDriver): # Create a New RD rsc_dfn_reply = self._api_rsc_dfn_create(rsc_name) - if not self._check_api_reply(rsc_dfn_reply, - noerror_only=True): + + if not self._check_api_reply(rsc_dfn_reply, noerror_only=True): msg = _("Error creating a LINSTOR resource definition") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -795,8 +827,8 @@ class LinstorBaseDriver(driver.VolumeDriver): vd_size = self._vol_size_to_linstor(rsc_size) vd_reply = self._api_volume_dfn_create(rsc_name=rsc_name, size=int(vd_size)) - if not self._check_api_reply(vd_reply, - noerror_only=True): + + if not self._check_api_reply(vd_reply, noerror_only=True): msg = _("Error creating a LINSTOR volume definition") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -804,26 +836,38 @@ class LinstorBaseDriver(driver.VolumeDriver): # Create LINSTOR Resources ctrl_in_sp = False for node in sp_data: - # Check if controller is in the pool if node['node_name'] == self.host_name: ctrl_in_sp = True - # Create resources and, - # Check only errors when creating diskless resources - if 'Diskless' in node['driver_name']: - diskless = True - else: - diskless = False - rsc_reply = self._api_rsc_create(rsc_name=rsc_name, - node_name=node['node_name'], - diskless=diskless) + # Use autoplace to deploy if set + if self.ap_count: + try: + self._api_rsc_autoplace(rsc_name=rsc_name) - if not self._check_api_reply(rsc_reply, noerror_only=True): - msg = _("Error creating a LINSTOR resource") + except Exception: + msg = _("Error creating autoplaces LINSTOR resource(s)") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) + # Otherwise deploy across the entire cluster + else: + for node in sp_data: + # Deploy resource on each node + if DISKLESS in node['driver_name']: + diskless = True + else: + diskless = False + + rsc_reply = self._api_rsc_create(rsc_name=rsc_name, + node_name=node['node_name'], + diskless=diskless) + + if not 
self._check_api_reply(rsc_reply, noerror_only=True): + msg = _("Error creating a LINSTOR resource") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + # If the controller is diskless and not in the pool, create a diskless # resource on it if not ctrl_in_sp and self.diskless: @@ -832,7 +876,7 @@ class LinstorBaseDriver(driver.VolumeDriver): diskless=True) if not self._check_api_reply(rsc_reply, noerror_only=True): - msg = _("Error creating a LINSTOR resource") + msg = _("Error creating a LINSTOR controller resource") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -841,32 +885,56 @@ class LinstorBaseDriver(driver.VolumeDriver): def delete_volume(self, volume): drbd_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_list_reply = self._get_api_resource_list() + diskful_nodes = self._get_snapshot_nodes(drbd_rsc_name) + diskless_nodes = self._get_diskless_nodes(drbd_rsc_name) - if rsc_list_reply: - # Delete Resources - for rsc in rsc_list_reply['resources']: - if rsc['name'] != drbd_rsc_name: - continue + # If autoplace was used, use Resource class + if self.ap_count: - rsc_reply = self._api_rsc_delete( - node_name=rsc['nodeName'], - rsc_name=drbd_rsc_name) - if not self._check_api_reply(rsc_reply, noerror_only=True): - msg = _("Error deleting a LINSTOR resource") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + rsc_reply = self._api_rsc_auto_delete(drbd_rsc_name) + if not rsc_reply: + msg = _("Error deleting an autoplaced LINSTOR resource") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) - # Delete VD - vd_reply = self._api_volume_dfn_delete(drbd_rsc_name, 0) - if not vd_reply: - if not self._check_api_reply(vd_reply): - msg = _("Error deleting a LINSTOR volume definition") - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) + # Delete all resources in a cluster manually if not autoplaced + else: + if rsc_list_reply: + # Remove diskless nodes 
first + if diskless_nodes: + for node in diskless_nodes: + rsc_reply = self._api_rsc_delete( + node_name=node, + rsc_name=drbd_rsc_name) + if not self._check_api_reply(rsc_reply, + noerror_only=True): + msg = _("Error deleting a diskless LINSTOR rsc") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) - # Delete RD - # Will fail if snapshot exists but expected - self._api_rsc_dfn_delete(drbd_rsc_name) + # Remove diskful nodes + if diskful_nodes: + for node in diskful_nodes: + rsc_reply = self._api_rsc_delete( + node_name=node, + rsc_name=drbd_rsc_name) + if not self._check_api_reply(rsc_reply, + noerror_only=True): + msg = _("Error deleting a LINSTOR resource") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Delete VD + vd_reply = self._api_volume_dfn_delete(drbd_rsc_name, 0) + if not vd_reply: + if not self._check_api_reply(vd_reply): + msg = _("Error deleting a LINSTOR volume definition") + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + # Delete RD + # Will fail if snapshot exists but expected + self._api_rsc_dfn_delete(drbd_rsc_name) return True @@ -877,7 +945,7 @@ class LinstorBaseDriver(driver.VolumeDriver): extend_reply = self._get_api_volume_extend(rsc_target_name, new_size) if not self._check_api_reply(extend_reply, noerror_only=True): - msg = _("ERROR Linstor Volume Extend") + msg = _("ERROR extending a LINSTOR volume") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -895,11 +963,10 @@ class LinstorBaseDriver(driver.VolumeDriver): self.delete_snapshot(snapshot) def copy_image_to_volume(self, context, volume, image_service, image_id): - # self.create_volume(volume) already called by Cinder, and works. - # Need to check return values + # self.create_volume(volume) already called by Cinder, and works full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) - # This creates a LINSTOR volume at the original size. 
+ # This creates a LINSTOR volume from the source image image_utils.fetch_to_raw(context, image_service, image_id, @@ -923,14 +990,10 @@ class LinstorBaseDriver(driver.VolumeDriver): return (False, None) def check_for_setup_error(self): - msg = None if linstor is None: msg = _('Linstor python package not found') - if proto is None: - msg = _('Protobuf python package not found') - if msg is not None: LOG.error(msg) raise exception.VolumeDriverException(message=msg) @@ -954,7 +1017,7 @@ class LinstorBaseDriver(driver.VolumeDriver): # Class with iSCSI interface methods @interface.volumedriver class LinstorIscsiDriver(LinstorBaseDriver): - """Cinder iSCSI driver that uses Linstor for storage.""" + """Cinder iSCSI driver that uses LINSTOR for storage.""" def __init__(self, *args, **kwargs): super(LinstorIscsiDriver, self).__init__(*args, **kwargs) @@ -965,7 +1028,7 @@ class LinstorIscsiDriver(LinstorBaseDriver): self.helper_driver = self.helper_name self.target_driver = None else: - self.helper_name = self.configuration.safe_get('target_helper') + self.helper_name = self.configuration.safe_get('iscsi_helper') self.helper_driver = self.target_mapping[self.helper_name] self.target_driver = importutils.import_object( self.helper_driver, @@ -1024,7 +1087,7 @@ class LinstorIscsiDriver(LinstorBaseDriver): # Class with DRBD transport mode @interface.volumedriver class LinstorDrbdDriver(LinstorBaseDriver): - """Cinder DRBD driver that uses Linstor for storage.""" + """Cinder DRBD driver that uses LINSTOR for storage.""" def __init__(self, *args, **kwargs): super(LinstorDrbdDriver, self).__init__(*args, **kwargs) diff --git a/driver-requirements.txt b/driver-requirements.txt index 647091becc7..255982f6530 100644 --- a/driver-requirements.txt +++ b/driver-requirements.txt @@ -23,9 +23,6 @@ pywbem>=0.7.0 # LGPLv2.1+ # IBM XIV pyxcli>=1.1.5 # Apache-2.0 -# LINSTOR -protobuf>=3.6.1 # BSD - # RBD rados # LGPLv2.1 rbd # LGPLv2.1 diff --git 
a/releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml b/releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml new file mode 100644 index 00000000000..b5e01dc205d --- /dev/null +++ b/releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The LINSTOR driver for Cinder now supports LINSTOR 0.9.12. The driver + supports a LINSTOR backend using the REST API. + + The new driver adds a 'linstor_autoplace_count' configuration option that + specifies the number of volume replicas. +features: + - | + The LINSTOR driver for Cinder now supports LINSTOR 0.9.12.