Support availability-zone type
Availability zones are now integrated into a volume type's extra specs: they are recognized when creating and retyping volumes, and volume types can now be filtered by extra specs.

Change-Id: I4e6aa7af707bd063e7edf2b0bf28e3071ad5c67a
Partial-Implements: bp support-az-in-volumetype
This commit is contained in:
parent
1f76e2cd7d
commit
306fa19079
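
For a sense of what the new 3.52 microversion enables, here is a minimal client-side sketch (not part of the commit; the endpoint, project ID and token are placeholders) that filters volume types by extra spec with plain requests:

    import requests

    CINDER = "http://controller:8776/v3/<project_id>"   # hypothetical endpoint
    HEADERS = {
        "X-Auth-Token": "<token>",                       # hypothetical token
        "OpenStack-API-Version": "volume 3.52",          # opt in to microversion 3.52
    }

    # Only types whose extra specs contain RESKEY:availability_zones=az1 are returned.
    resp = requests.get(
        CINDER + "/types",
        params={"extra_specs": '{"RESKEY:availability_zones": "az1"}'},
        headers=HEADERS,
    )
    print(resp.json())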
@@ -141,6 +141,8 @@ MULTIATTACH_VOLUMES = '3.50'
BACKUP_AZ = '3.51'
SUPPORT_VOLUME_TYPE_FILTER = '3.52'


def get_mv_header(version):
    """Gets a formatted HTTP microversion header.
@@ -115,6 +115,9 @@ REST_API_VERSION_HISTORY = """
    * 3.49 - Support report backend storage state in service list.
    * 3.50 - Add multiattach capability
    * 3.51 - Add support for cross AZ backups.
    * 3.52 - ``RESKEY:availability_zones`` is now a reserved spec key for AZ
             volume types, and volume types can be filtered by
             ``extra_specs``.
"""

# The minimum and maximum versions of the API supported
@@ -122,7 +125,7 @@ REST_API_VERSION_HISTORY = """
# minimum version of the API supported.
# Explicitly using /v2 endpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.51"
_MAX_API_VERSION = "3.52"
_LEGACY_API_VERSION2 = "2.0"
UPDATED = "2017-09-19T20:18:14Z"

@@ -404,3 +404,8 @@ be used as a way to query if the capability exists in the Cinder service.
3.51
----
Add support for cross AZ backups.

3.52
----
``RESKEY:availability_zones`` is now a reserved spec key for AZ volume types,
and volume types can be filtered by ``extra_specs``.
@@ -15,10 +15,14 @@

"""The volume type & volume types extra specs extension."""

from oslo_utils import strutils
import ast
from webob import exc

from oslo_log import log as logging
from oslo_utils import strutils

from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v2.views import types as views_types
from cinder import exception
@@ -26,6 +30,8 @@ from cinder.i18n import _
from cinder import utils
from cinder.volume import volume_types

LOG = logging.getLogger(__name__)


class VolumeTypesController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""
@@ -76,26 +82,42 @@ class VolumeTypesController(wsgi.Controller):
            msg = _('Invalid is_public filter [%s]') % is_public
            raise exc.HTTPBadRequest(explanation=msg)

    @common.process_general_filtering('volume_type')
    def _process_volume_type_filtering(self, context=None, filters=None,
                                       req_version=None):
        utils.remove_invalid_filter_options(context,
                                            filters,
                                            self._get_vol_type_filter_options()
                                            )

    def _get_volume_types(self, req):
        """Helper function that returns a list of type dicts."""
        params = req.params.copy()
        marker, limit, offset = common.get_pagination_params(params)
        sort_keys, sort_dirs = common.get_sort_params(params)
        # NOTE(wanghao): Currently, we still only support to filter by
        # is_public. If we want to filter by more args, we should set params
        # to filters.
        filters = {}
        filters = params
        context = req.environ['cinder.context']
        req_version = req.api_version_request
        if req_version.matches(mv.SUPPORT_VOLUME_TYPE_FILTER):
            self._process_volume_type_filtering(context=context,
                                                filters=filters,
                                                req_version=req_version)
        else:
            utils.remove_invalid_filter_options(
                context, filters, self._get_vol_type_filter_options())
        if context.is_admin:
            # Only admin has query access to all volume types
            filters['is_public'] = self._parse_is_public(
                req.params.get('is_public', None))
        else:
            filters['is_public'] = True
        utils.remove_invalid_filter_options(context,
                                            filters,
                                            self._get_vol_type_filter_options()
                                            )
        if 'extra_specs' in filters:
            try:
                filters['extra_specs'] = ast.literal_eval(
                    filters['extra_specs'])
            except (ValueError, SyntaxError):
                LOG.debug('Could not evaluate "extra_specs" %s, assuming '
                          'dictionary string.', filters['extra_specs'])
        limited_types = volume_types.get_all_types(context,
                                                   filters=filters,
                                                   marker=marker, limit=limit,
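
A side note on the ``extra_specs`` handling above: the filter value arrives as a dict-style string in the query and is evaluated with ast.literal_eval, falling back to the raw string on failure. A tiny standalone sketch of that parsing (illustrative values, not from the commit):

    import ast

    # Query string value as it would arrive from ?extra_specs=... (illustrative)
    raw = '{"RESKEY:availability_zones": "az1"}'

    try:
        extra_specs = ast.literal_eval(raw)   # -> {'RESKEY:availability_zones': 'az1'}
    except (ValueError, SyntaxError):
        extra_specs = raw                     # keep the plain string if it is not a dict literal

    print(extra_specs)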
@@ -3606,8 +3606,16 @@ def _process_volume_types_filters(query, filters):
        searchdict = filters.pop('extra_specs')
        extra_specs = getattr(models.VolumeTypes, 'extra_specs')
        for k, v in searchdict.items():
            the_filter.extend([extra_specs.any(key=k, value=v,
                                               deleted=False)])
            # NOTE(tommylikehu): We will use the 'LIKE' operator for the
            # 'availability_zones' extra spec as it always stores the
            # AZ list in the format: "az1, az2, ..."
            if k == 'RESKEY:availability_zones':
                the_filter.extend([extra_specs.any(
                    models.VolumeTypeExtraSpecs.value.like(u'%%%s%%' % v),
                    key=k, deleted=False)])
            else:
                the_filter.extend(
                    [extra_specs.any(key=k, value=v, deleted=False)])
        if len(the_filter) > 1:
            query = query.filter(and_(*the_filter))
        else:
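
The substring ('LIKE') match above is what lets a single-AZ query hit a comma-separated list; a toy Python illustration of the intent (not the SQLAlchemy expression itself):

    # Stored spec value on the volume type (comma-separated AZ list)
    stored = "az1,az2"

    # A filter for a single AZ should match the stored list,
    # which is what value LIKE '%az1%' expresses in SQL.
    requested = "az1"
    print(requested in stored)  # True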
@@ -207,6 +207,10 @@ class InvalidAvailabilityZone(Invalid):
    message = _("Availability zone '%(az)s' is invalid.")


class InvalidTypeAvailabilityZones(Invalid):
    message = _("Volume type's availability zones are invalid %(az)s.")


class InvalidVolumeType(Invalid):
    message = _("Invalid volume type: %(reason)s")
@@ -144,6 +144,7 @@ OBJ_VERSIONS.add('1.33', {'Volume': '1.8'})
OBJ_VERSIONS.add('1.34', {'VolumeAttachment': '1.3'})
OBJ_VERSIONS.add('1.35', {'Backup': '1.6', 'BackupImport': '1.6'})
OBJ_VERSIONS.add('1.36', {'RequestSpec': '1.4'})
OBJ_VERSIONS.add('1.37', {'RequestSpec': '1.5'})


class CinderObjectRegistry(base.VersionedObjectRegistry):
@@ -27,7 +27,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
    # Version 1.2 Added ``resource_backend``
    # Version 1.3: Added backup_id
    # Version 1.4: Add 'operation'
    VERSION = '1.4'
    # Version 1.5: Added 'availability_zones'
    VERSION = '1.5'

    fields = {
        'consistencygroup_id': fields.UUIDField(nullable=True),
@@ -47,6 +48,7 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
        'resource_backend': fields.StringField(nullable=True),
        'backup_id': fields.UUIDField(nullable=True),
        'operation': fields.StringField(nullable=True),
        'availability_zones': fields.ListOfStringsField(nullable=True),
    }

    obj_extra_fields = ['resource_properties']
@@ -100,7 +102,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
        added_fields = (((1, 1), ('group_id', 'group_backend')),
                        ((1, 2), ('resource_backend')),
                        ((1, 3), ('backup_id')),
                        ((1, 4), ('operation')))
                        ((1, 4), ('operation')),
                        ((1, 5), ('availability_zones')))
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
@@ -41,7 +41,8 @@ CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)


def volume_update_db(context, volume_id, host, cluster_name):
def volume_update_db(context, volume_id, host, cluster_name,
                     availability_zone=None):
    """Set the host, cluster_name, and set the scheduled_at field of a volume.

    :returns: A Volume with the updated fields set properly.
@@ -50,6 +51,7 @@ def volume_update_db(context, volume_id, host, cluster_name):
    volume.host = host
    volume.cluster_name = cluster_name
    volume.scheduled_at = timeutils.utcnow()
    volume.availability_zone = availability_zone
    volume.save()

    # A volume object is expected to be returned, as it is used by
@@ -96,9 +96,11 @@ class FilterScheduler(driver.Scheduler):
        backend = backend.obj
        volume_id = request_spec['volume_id']

        updated_volume = driver.volume_update_db(context, volume_id,
                                                 backend.host,
                                                 backend.cluster_name)
        updated_volume = driver.volume_update_db(
            context, volume_id,
            backend.host,
            backend.cluster_name,
            availability_zone=backend.service['availability_zone'])
        self._post_select_populate_filter_properties(filter_properties,
                                                     backend)
@@ -24,6 +24,12 @@ class AvailabilityZoneFilter(filters.BaseBackendFilter):

    def backend_passes(self, backend_state, filter_properties):
        spec = filter_properties.get('request_spec', {})
        availability_zones = spec.get('availability_zones')

        if availability_zones:
            return (backend_state.service['availability_zone']
                    in availability_zones)

        props = spec.get('resource_properties', {})
        availability_zone = props.get('availability_zone')
@@ -186,7 +186,8 @@ class VolumeTypesApiTest(test.TestCase):
    def test_volume_types_index_with_invalid_filter(self):
        req = fakes.HTTPRequest.blank(
            '/v2/%s/types?id=%s' % (fake.PROJECT_ID, self.type_id1))
        req.environ['cinder.context'] = self.ctxt
        req.environ['cinder.context'] = context.RequestContext(
            user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=False)
        res = self.controller.index(req)

        self.assertEqual(3, len(res['volume_types']))
cinder/tests/unit/api/v3/test_types.py  (new file, 89 lines)
@@ -0,0 +1,89 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api import microversions as mv
from cinder.api.v2 import types
from cinder import context
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake


class VolumeTypesApiTest(test.TestCase):

    def _create_volume_type(self, ctxt, volume_type_name, extra_specs=None,
                            is_public=True, projects=None):
        vol_type = objects.VolumeType(ctxt,
                                      name=volume_type_name,
                                      is_public=is_public,
                                      description='',
                                      extra_specs=extra_specs,
                                      projects=projects)
        vol_type.create()
        return vol_type

    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        self.controller = types.VolumeTypesController()
        self.ctxt = context.RequestContext(user_id=fake.USER_ID,
                                           project_id=fake.PROJECT_ID,
                                           is_admin=True)
        self.type1 = self._create_volume_type(
            self.ctxt, 'volume_type1',
            {'key1': 'value1', 'RESKEY:availability_zones': 'az1,az2'})
        self.type2 = self._create_volume_type(
            self.ctxt, 'volume_type2',
            {'key2': 'value2', 'RESKEY:availability_zones': 'az1,az3'})
        self.type3 = self._create_volume_type(
            self.ctxt, 'volume_type3',
            {'key3': 'value3'}, False, [fake.PROJECT_ID])
        self.addCleanup(self._cleanup)

    def _cleanup(self):
        self.type1.destroy()
        self.type2.destroy()
        self.type3.destroy()

    def test_volume_types_index_with_extra_specs(self):
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types?extra_specs={"key1":"value1"}' % fake.PROJECT_ID,
            use_admin_context=False)
        req.api_version_request = mv.get_api_version(mv.get_prior_version(
            mv.SUPPORT_VOLUME_TYPE_FILTER))
        res_dict = self.controller.index(req)

        self.assertEqual(3, len(res_dict['volume_types']))

        # Test filter volume type with extra specs
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types?extra_specs={"key1":"value1"}' % fake.PROJECT_ID,
            use_admin_context=True)
        req.api_version_request = mv.get_api_version(
            mv.SUPPORT_VOLUME_TYPE_FILTER)
        res_dict = self.controller.index(req)
        self.assertEqual(1, len(res_dict['volume_types']))
        self.assertDictEqual({'key1': 'value1',
                              'RESKEY:availability_zones': 'az1,az2'},
                             res_dict['volume_types'][0]['extra_specs'])

        # Test filter volume type with 'availability_zones'
        req = fakes.HTTPRequest.blank(
            '/v3/%s/types?extra_specs={"RESKEY:availability_zones":"az1"}'
            % fake.PROJECT_ID, use_admin_context=True)
        req.api_version_request = mv.get_api_version(
            mv.SUPPORT_VOLUME_TYPE_FILTER)
        res_dict = self.controller.index(req)
        self.assertEqual(2, len(res_dict['volume_types']))
        self.assertEqual(
            ['volume_type1', 'volume_type2'],
            sorted([az['name'] for az in res_dict['volume_types']]))
@@ -42,7 +42,7 @@ object_data = {
    'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
    'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'RequestSpec': '1.4-2f858ebf18fa1dfe00fba7c3ec5cf303',
    'RequestSpec': '1.5-2f6efbb86107ee70cc1bb07f4bdb4ec7',
    'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
    'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
@@ -1737,6 +1737,24 @@ class BasicFiltersTestCase(BackendFiltersTestCase):
        host = fakes.FakeBackendState('host1', {'service': service})
        self.assertTrue(filt_cls.backend_passes(host, request))

    def test_availability_zone_filter_with_AZs(self):
        filt_cls = self.class_map['AvailabilityZoneFilter']()
        ctxt = context.RequestContext('fake', 'fake', is_admin=False)
        request = {
            'context': ctxt,
            'request_spec': {'availability_zones': ['nova1', 'nova2']}
        }

        host1 = fakes.FakeBackendState(
            'host1', {'service': {'availability_zone': 'nova1'}})
        host2 = fakes.FakeBackendState(
            'host2', {'service': {'availability_zone': 'nova2'}})
        host3 = fakes.FakeBackendState(
            'host3', {'service': {'availability_zone': 'nova3'}})
        self.assertTrue(filt_cls.backend_passes(host1, request))
        self.assertTrue(filt_cls.backend_passes(host2, request))
        self.assertFalse(filt_cls.backend_passes(host3, request))

    def test_availability_zone_filter_different(self):
        filt_cls = self.class_map['AvailabilityZoneFilter']()
        service = {'availability_zone': 'nova'}
@@ -620,4 +620,5 @@ class SchedulerDriverModuleTestCase(test.TestCase):
        _mock_vol_update.assert_called_once_with(
            self.context, volume.id, {'host': 'fake_host',
                                      'cluster_name': 'fake_cluster',
                                      'scheduled_at': scheduled_at})
                                      'scheduled_at': scheduled_at,
                                      'availability_zone': None})
@@ -14,7 +14,9 @@
# under the License.
""" Tests for create_volume TaskFlow """

import six
import sys
import uuid

import ddt
import mock
@@ -268,7 +270,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
@@ -294,7 +296,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'availability_zones': ['nova'],
                           'volume_type': volume_type,
                           'volume_type_id': 1,
                           'encryption_key_id': None,
@@ -312,7 +314,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
    @mock.patch('cinder.volume.flows.api.create_volume.'
                'ExtractVolumeRequestTask.'
                '_get_volume_type_id')
    def test_extract_availability_zone_without_fallback(
    def test_extract_availability_zones_without_fallback(
            self,
            fake_get_type_id,
            fake_get_qos,
@@ -325,7 +327,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
@@ -356,7 +358,117 @@ class CreateVolumeFlowTestCase(test.TestCase):
    @mock.patch('cinder.volume.flows.api.create_volume.'
                'ExtractVolumeRequestTask.'
                '_get_volume_type_id')
    def test_extract_availability_zone_with_fallback(
    def test_extract_availability_zones_with_azs_not_matched(
            self,
            fake_get_type_id,
            fake_get_qos,
            fake_is_encrypted):
        fake_image_service = fake_image.FakeImageService()
        image_id = six.text_type(uuid.uuid4())
        image_meta = {}
        image_meta['id'] = image_id
        image_meta['status'] = 'active'
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = {'name': 'type1',
                       'extra_specs':
                           {'RESKEY:availability_zones': 'nova3'}}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service, {'nova1', 'nova2'})

        fake_is_encrypted.return_value = False
        fake_get_type_id.return_value = 1
        fake_get_qos.return_value = {'qos_specs': None}
        self.assertRaises(exception.InvalidTypeAvailabilityZones,
                          task.execute,
                          self.ctxt,
                          size=1,
                          snapshot=None,
                          image_id=image_id,
                          source_volume=None,
                          availability_zone='notnova',
                          volume_type=volume_type,
                          metadata=None,
                          key_manager=fake_key_manager,
                          consistencygroup=None,
                          cgsnapshot=None,
                          group=None,
                          group_snapshot=None,
                          backup=None)

    @ddt.data({'type_azs': 'nova3',
               'self_azs': ['nova3'],
               'expected': ['nova3']},
              {'type_azs': 'nova3, nova2',
               'self_azs': ['nova3'],
               'expected': ['nova3']},
              {'type_azs': 'nova3,,,',
               'self_azs': ['nova3'],
               'expected': ['nova3']},
              {'type_azs': 'nova3',
               'self_azs': ['nova2'],
               'expected': exception.InvalidTypeAvailabilityZones},
              {'type_azs': ',,',
               'self_azs': ['nova2'],
               'expected': exception.InvalidTypeAvailabilityZones}
              )
    @ddt.unpack
    def test__extract_availability_zones_az_not_specified(self, type_azs,
                                                          self_azs, expected):
        fake_image_service = fake_image.FakeImageService()
        image_id = six.text_type(uuid.uuid4())
        image_meta = {}
        image_meta['id'] = image_id
        image_meta['status'] = 'active'
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        volume_type = {'name': 'type1',
                       'extra_specs':
                           {'RESKEY:availability_zones': type_azs}}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
            {'nova'})
        task.availability_zones = self_azs
        if isinstance(expected, list):
            result = task._extract_availability_zones(
                None, {}, {}, {}, volume_type=volume_type)
            self.assertEqual(expected, result[0])
        else:
            self.assertRaises(
                expected, task._extract_availability_zones,
                None, {}, {}, {}, volume_type=volume_type)

    def test__extract_availability_zones_az_not_in_type_azs(self):
        self.override_config('allow_availability_zone_fallback', False)
        fake_image_service = fake_image.FakeImageService()
        image_id = six.text_type(uuid.uuid4())
        image_meta = {}
        image_meta['id'] = image_id
        image_meta['status'] = 'active'
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        volume_type = {'name': 'type1',
                       'extra_specs':
                           {'RESKEY:availability_zones': 'nova1, nova2'}}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
            {'nova'})
        task.availability_zones = ['nova1']

        self.assertRaises(exception.InvalidAvailabilityZone,
                          task._extract_availability_zones,
                          'nova2', {}, {}, {}, volume_type=volume_type)

    @mock.patch('cinder.volume.volume_types.is_encrypted')
    @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
    @mock.patch('cinder.volume.flows.api.create_volume.'
                'ExtractVolumeRequestTask.'
                '_get_volume_type_id')
    def test_extract_availability_zones_with_fallback(
            self,
            fake_get_type_id,
            fake_get_qos,
@@ -372,7 +484,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
@@ -398,7 +510,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'availability_zones': ['nova'],
                           'volume_type': volume_type,
                           'volume_type_id': 1,
                           'encryption_key_id': None,
@@ -434,7 +546,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
                      'size': 1}
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        with mock.patch.object(fake_key_manager, 'create_key',
                               side_effect=castellan_exc.KeyManagerError):
@@ -483,7 +595,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
@@ -509,7 +621,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': (sys.maxsize + 1),
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'availability_zones': ['nova'],
                           'volume_type': volume_type,
                           'volume_type_id': 1,
                           'encryption_key_id': None,
@@ -541,7 +653,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        image_meta['size'] = 1
        fake_image_service.create(self.ctxt, image_meta)
        fake_key_manager = mock_key_manager.MockKeyManager()
        volume_type = 'type1'
        volume_type = {'name': 'type1'}

        task = create_volume.ExtractVolumeRequestTask(
            fake_image_service,
@@ -568,7 +680,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'availability_zones': ['nova'],
                           'volume_type': volume_type,
                           'volume_type_id': 1,
                           'encryption_key_id': None,
@@ -596,7 +708,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
            fake_get_qos,
            fake_is_encrypted):

        image_volume_type = 'type_from_image'
        image_volume_type = {'name': 'type_from_image'}
        fake_image_service = fake_image.FakeImageService()
        image_id = 6
        image_meta = {}
@@ -634,7 +746,7 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'availability_zones': ['nova'],
                           'volume_type': image_volume_type,
                           'volume_type_id': 1,
                           'encryption_key_id': None,
@@ -680,7 +792,7 @@ class CreateVolumeFlowTestCase(test.TestCase):

        fake_is_encrypted.return_value = False
        fake_get_type_id.return_value = 1
        fake_get_def_vol_type.return_value = 'fake_vol_type'
        fake_get_def_vol_type.return_value = {'name': 'fake_vol_type'}
        fake_db_get_vol_type.side_effect = (
            exception.VolumeTypeNotFoundByName(volume_type_name='invalid'))
        fake_get_qos.return_value = {'qos_specs': None}
@@ -701,8 +813,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'volume_type': 'fake_vol_type',
                           'availability_zones': ['nova'],
                           'volume_type': {'name': 'fake_vol_type'},
                           'volume_type_id': 1,
                           'encryption_key_id': None,
                           'qos_specs': None,
@@ -748,7 +860,7 @@ class CreateVolumeFlowTestCase(test.TestCase):

        fake_is_encrypted.return_value = False
        fake_get_type_id.return_value = 1
        fake_get_def_vol_type.return_value = 'fake_vol_type'
        fake_get_def_vol_type.return_value = {'name': 'fake_vol_type'}
        fake_get_qos.return_value = {'qos_specs': None}
        result = task.execute(self.ctxt,
                              size=1,
@@ -767,8 +879,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
        expected_result = {'size': 1,
                           'snapshot_id': None,
                           'source_volid': None,
                           'availability_zone': 'nova',
                           'volume_type': 'fake_vol_type',
                           'availability_zones': ['nova'],
                           'volume_type': {'name': 'fake_vol_type'},
                           'volume_type_id': 1,
                           'encryption_key_id': None,
                           'qos_specs': None,
@@ -1714,6 +1714,11 @@ class API(base.Base):
                        'quota_reservations': reservations,
                        'old_reservations': old_reservations}

        type_azs = volume_utils.extract_availability_zones_from_volume_type(
            new_type)
        if type_azs is not None:
            request_spec['availability_zones'] = type_azs

        self.scheduler_rpcapi.retype(context, volume,
                                     request_spec=request_spec,
                                     filter_properties={})
@@ -65,11 +65,11 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
    # This task will produce the following outputs (said outputs can be
    # saved to durable storage in the future so that the flow can be
    # reconstructed elsewhere and continued).
    default_provides = set(['availability_zone', 'size', 'snapshot_id',
    default_provides = set(['size', 'snapshot_id',
                            'source_volid', 'volume_type', 'volume_type_id',
                            'encryption_key_id', 'consistencygroup_id',
                            'cgsnapshot_id', 'qos_specs', 'group_id',
                            'refresh_az', 'backup_id'])
                            'refresh_az', 'backup_id', 'availability_zones'])

    def __init__(self, image_service, availability_zones, **kwargs):
        super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
@@ -291,17 +291,27 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                  'volume_type': volume_type})
        return volume_type

    def _extract_availability_zone(self, availability_zone, snapshot,
                                   source_volume, group):
        """Extracts and returns a validated availability zone.
    def _extract_availability_zones(self, availability_zone, snapshot,
                                    source_volume, group, volume_type=None):
        """Extracts and returns a validated availability zone list.

        This function will extract the availability zone (if not provided)
        from the snapshot or source_volume, then perform a set of validation
        checks on the provided or extracted availability zone, and return
        the validated availability zone.
        """

        refresh_az = False
        type_azs = vol_utils.extract_availability_zones_from_volume_type(
            volume_type)
        type_az_configured = type_azs is not None
        if type_az_configured:
            safe_azs = list(
                set(type_azs).intersection(self.availability_zones))
            if not safe_azs:
                raise exception.InvalidTypeAvailabilityZones(az=type_azs)
        else:
            safe_azs = self.availability_zones

        # If the volume will be created in a group, it should be placed in
        # in same availability zone as the group.
        if group:
@@ -325,14 +335,14 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
            except (TypeError, KeyError):
                pass

        if availability_zone is None:
        if availability_zone is None and not type_az_configured:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        if availability_zone not in self.availability_zones:
        if availability_zone and availability_zone not in safe_azs:
            refresh_az = True
            if CONF.allow_availability_zone_fallback:
                original_az = availability_zone
@@ -349,7 +359,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):

        # If the configuration only allows cloning to the same availability
        # zone then we need to enforce that.
        if CONF.cloned_volume_same_az:
        if availability_zone and CONF.cloned_volume_same_az:
            snap_az = None
            try:
                snap_az = snapshot['volume']['availability_zone']
@@ -369,7 +379,10 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                       "availability zone as the source volume")
                raise exception.InvalidInput(reason=msg)

        return availability_zone, refresh_az
        if availability_zone:
            return [availability_zone], refresh_az
        else:
            return safe_azs, refresh_az

    def _get_encryption_key_id(self, key_manager, context, volume_type_id,
                               snapshot, source_volume,
@@ -439,9 +452,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                                               image_id,
                                               size)

        availability_zone, refresh_az = self._extract_availability_zone(
            availability_zone, snapshot, source_volume, group)

        # TODO(joel-coffman): This special handling of snapshots to ensure that
        # their volume type matches the source volume is too convoluted. We
        # should copy encryption metadata from the encrypted volume type to the
@@ -453,6 +463,10 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
            volume_type = (image_volume_type if image_volume_type else
                           def_vol_type)

        availability_zones, refresh_az = self._extract_availability_zones(
            availability_zone, snapshot, source_volume, group,
            volume_type=volume_type)

        volume_type_id = self._get_volume_type_id(volume_type,
                                                  source_volume, snapshot)

@@ -487,7 +501,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
            'size': size,
            'snapshot_id': snapshot_id,
            'source_volid': source_volid,
            'availability_zone': availability_zone,
            'volume_type': volume_type,
            'volume_type_id': volume_type_id,
            'encryption_key_id': encryption_key_id,
@@ -498,6 +511,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
            'replication_status': replication_status,
            'refresh_az': refresh_az,
            'backup_id': backup_id,
            'availability_zones': availability_zones
        }


@@ -510,11 +524,11 @@ class EntryCreateTask(flow_utils.CinderTask):
    default_provides = set(['volume_properties', 'volume_id', 'volume'])

    def __init__(self):
        requires = ['availability_zone', 'description', 'metadata',
        requires = ['description', 'metadata',
                    'name', 'reservations', 'size', 'snapshot_id',
                    'source_volid', 'volume_type_id', 'encryption_key_id',
                    'consistencygroup_id', 'cgsnapshot_id', 'multiattach',
                    'qos_specs', 'group_id', ]
                    'qos_specs', 'group_id', 'availability_zones']
        super(EntryCreateTask, self).__init__(addons=[ACTION],
                                              requires=requires)

@@ -536,6 +550,7 @@ class EntryCreateTask(flow_utils.CinderTask):
        if src_vol is not None:
            bootable = src_vol.bootable

        availability_zones = kwargs.pop('availability_zones')
        volume_properties = {
            'size': kwargs.pop('size'),
            'user_id': context.user_id,
@@ -549,6 +564,8 @@ class EntryCreateTask(flow_utils.CinderTask):
            'multiattach': kwargs.pop('multiattach'),
            'bootable': bootable,
        }
        if len(availability_zones) == 1:
            volume_properties['availability_zone'] = availability_zones[0]

        # Merge in the other required arguments which should provide the rest
        # of the volume property fields (if applicable).
@@ -732,7 +749,8 @@ class VolumeCastTask(flow_utils.CinderTask):
        requires = ['image_id', 'scheduler_hints', 'snapshot_id',
                    'source_volid', 'volume_id', 'volume', 'volume_type',
                    'volume_properties', 'consistencygroup_id',
                    'cgsnapshot_id', 'group_id', 'backup_id', ]
                    'cgsnapshot_id', 'group_id', 'backup_id',
                    'availability_zones']
        super(VolumeCastTask, self).__init__(addons=[ACTION],
                                             requires=requires)
        self.volume_rpcapi = volume_rpcapi
@@ -639,6 +639,17 @@ def get_all_volume_groups(vg_name=None):
                                     utils.get_root_helper(),
                                     vg_name)


def extract_availability_zones_from_volume_type(volume_type):
    if not volume_type:
        return None
    extra_specs = volume_type.get('extra_specs', {})
    if 'RESKEY:availability_zones' not in extra_specs:
        return None
    azs = extra_specs.get('RESKEY:availability_zones', '').split(',')
    return [az.strip() for az in azs if az != '']


# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
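
A quick illustration of how this helper behaves, assuming the function above is in scope (the volume type values are made up for the example):

    # Illustrative only: a volume type dict shaped like the ones Cinder passes in.
    vtype = {'name': 'ssd',
             'extra_specs': {'RESKEY:availability_zones': 'az1, az2,,'}}

    print(extract_availability_zones_from_volume_type(vtype))            # ['az1', 'az2']
    print(extract_availability_zones_from_volume_type({'name': 'hdd'}))  # None (spec not set)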
doc/source/admin/blockstorage-availability-zone-type.rst  (new file, 52 lines)
@@ -0,0 +1,52 @@
=======================
Availability-zone types
=======================

Background
----------

In a newly deployed region, a volume type (SSD, HDD or others) may only be
available in some of the AZs, but end users have no way of knowing which AZs
a specific volume type is allowed in; they only find out when the volume
fails to be scheduled to a backend. To address this, availability-zone
volume types were added in the Rocky cycle, and administrators can take
advantage of them.

How to configure availability zone types?
-----------------------------------------

The volume type's extra specs are used to store this additional information.
Administrators can turn it on by setting the volume type key
``RESKEY:availability_zones`` as below::

    "RESKEY:availability_zones": "az1,az2,az3"

The value is a comma-separated list of AZ names stored as a single string.
Once an availability-zone type is configured, any UI component or client
can filter out invalid volume types based on the chosen availability
zone::

    Request example:
    /v3/{project_id}/types?extra_specs={'RESKEY:availability_zones':'az1'}

Remember, Cinder always performs an inexact match on this spec value: for
instance, when the extra spec ``RESKEY:availability_zones`` is configured
with the value ``az1,az2``, both ``az1`` and ``az2`` are valid query inputs.
This spec is not used by the capability filters; it is only used to choose
suitable availability zones in the two cases below.

1. Create volume. With this feature, the availability zone can come from the
   ``availability_zone`` parameter, the volume source (volume, snapshot,
   group), the volume type, or the configuration options
   ``default_availability_zone`` and ``storage_availability_zone``. When
   creating a new volume, Cinder reads the AZ(s) in this order of priority::

       source group > parameter availability_zone > source snapshot (or volume) > volume type > configuration default_availability_zone > storage_availability_zone

   If any of them conflict, 400 BadRequest is raised. Also, an AZ list,
   instead of a single AZ, is now delivered to the
   ``AvailabilityZoneFilter``.

2. Retype volume. This flow has also been updated: if the new type has
   ``RESKEY:availability_zones`` configured, the Cinder scheduler validates
   it as well.
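
For operators, one possible way to set the key from Python is sketched below with python-cinderclient (the session object and the type name 'ssd' are assumptions for the example, not part of this commit):

    from cinderclient import client

    # my_session is an assumed keystoneauth1 session with admin credentials.
    cinder = client.Client('3', session=my_session)

    # Attach the AZ restriction to an existing volume type named 'ssd'.
    vtype = cinder.volume_types.find(name='ssd')
    vtype.set_keys({'RESKEY:availability_zones': 'az1,az2'})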
@@ -78,3 +78,5 @@ valid for first. The supported APIs are marked with "*" below in the table.
+-----------------+-------------------------------------------------------------------------+
| get pools       | name, volume_type                                                       |
+-----------------+-------------------------------------------------------------------------+
| list types(3.52)| is_public, extra_specs                                                  |
+-----------------+-------------------------------------------------------------------------+
@@ -27,6 +27,7 @@ Amazon EC2 Elastic Block Storage (EBS) offering.
   blockstorage-api-throughput.rst
   blockstorage-manage-volumes.rst
   blockstorage-troubleshoot.rst
   blockstorage-availability-zone-type.rst
   generalized_filters.rst
   blockstorage-backup-disks.rst
   blockstorage-boot-from-volume.rst
@@ -10,5 +10,6 @@
    "attachment": ["volume_id", "status", "instance_id", "attach_status"],
    "message": ["resource_uuid", "resource_type", "event_id",
                "request_id", "message_level"],
    "pool": ["name", "volume_type"]
    "pool": ["name", "volume_type"],
    "volume_type": []
}
@@ -0,0 +1,12 @@
---
features: |
  Availability zones are now supported in volume types, as described below.

  * ``RESKEY:availability_zones`` is now a reserved spec key for AZ volume
    types. An administrator can create an AZ-restricted volume type by adding
    a list of AZs to the extra specs, similar to:
    ``RESKEY:availability_zones: az1,az2``.
  * The extra spec ``RESKEY:availability_zones`` is only used for filtering
    backends when creating and retyping volumes.
  * Volume types can be filtered by extra specs since microversion "3.52":
    ``/types?extra_specs={"key":"value"}``.