From 135e094f33e39e4c80e32b0237910c355513a66f Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Wed, 7 Oct 2015 12:08:56 +0200 Subject: [PATCH] Remove API races from migrate and retype We have a window of opportunity for races in API's migrate_volume and retype methods. This patch removes these windows of opportunity using compare-and-swap for DB updates in the API. Specs: https://review.openstack.org/232599/ Implements: blueprint cinder-volume-active-active-support Closes-Bug: #1493476 Closes-Bug: #1494466 Change-Id: I2de07b9cf7f0d670e4c531e8be9ae1c879c890f6 --- cinder/db/api.py | 8 + cinder/db/sqlalchemy/api.py | 67 ++++ .../unit/api/contrib/test_volume_actions.py | 291 ++++++++++++++---- cinder/tests/unit/utils.py | 33 ++ cinder/volume/api.py | 163 ++++------ 5 files changed, 384 insertions(+), 178 deletions(-) diff --git a/cinder/db/api.py b/cinder/db/api.py index 2a94f5151bb..2e0db12c86e 100644 --- a/cinder/db/api.py +++ b/cinder/db/api.py @@ -266,6 +266,14 @@ def volume_has_attachments_filter(): return IMPL.volume_has_attachments_filter() +def volume_has_same_encryption_type(new_vol_type): + return IMPL.volume_has_same_encryption_type(new_vol_type) + + +def volume_qos_allows_retype(new_vol_type): + return IMPL.volume_qos_allows_retype(new_vol_type) + + #################### diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 19434e186ab..e083072860d 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -42,6 +42,7 @@ import six import sqlalchemy from sqlalchemy import MetaData from sqlalchemy import or_, and_, case +from sqlalchemy.orm import aliased from sqlalchemy.orm import joinedload, joinedload_all from sqlalchemy.orm import RelationshipProperty from sqlalchemy.schema import Table @@ -1902,6 +1903,72 @@ def volume_has_attachments_filter(): ~models.VolumeAttachment.deleted)) +def volume_has_same_encryption_type(new_vol_type): + """Filter to check that encryption matches with new volume type. 
+ + They match if both don't have encryption or both have the same Encryption. + """ + # Query for the encryption in the new volume type + encryption_alias = aliased(models.Encryption) + new_enc = sql.select((encryption_alias.encryption_id,)).where(and_( + ~encryption_alias.deleted, + encryption_alias.volume_type_id == new_vol_type)) + + # Query for the encryption in the old volume type + old_enc = sql.select((models.Encryption.encryption_id,)).where(and_( + ~models.Encryption.deleted, + models.Encryption.volume_type_id == models.Volume.volume_type_id)) + + # NOTE(geguileo): This query is optimizable, but at this moment I can't + # figure out how. + return or_(and_(new_enc.as_scalar().is_(None), + old_enc.as_scalar().is_(None)), + new_enc.as_scalar() == old_enc.as_scalar()) + + +def volume_qos_allows_retype(new_vol_type): + """Filter to check that qos allows retyping the volume to new_vol_type. + + Returned sqlalchemy filter will evaluate to True when volume's status is + available or when it's 'in-use' but the qos in new_vol_type is the same as + the qos of the volume or when it doesn't exist a consumer spec key that + specifies anything other than the back-end in any of the 2 volume_types. + """ + # Query to get the qos of the volume type new_vol_type + q = sql.select([models.VolumeTypes.qos_specs_id]).where(and_( + ~models.VolumeTypes.deleted, + models.VolumeTypes.id == new_vol_type)) + # Construct the filter to check qos when volume is 'in-use' + return or_( + # If volume is available + models.Volume.status == 'available', + # Or both volume types have the same qos specs + sql.exists().where(and_( + ~models.VolumeTypes.deleted, + models.VolumeTypes.id == models.Volume.volume_type_id, + models.VolumeTypes.qos_specs_id == q.as_scalar())), + # Or they are different specs but they are handled by the backend or + # it is not specified. 
The way SQL evaluatels value != 'back-end' + # makes it result in False not only for 'back-end' values but for + # NULL as well, and with the double negation we ensure that we only + # allow QoS with 'consumer' values of 'back-end' and NULL. + and_( + ~sql.exists().where(and_( + ~models.VolumeTypes.deleted, + models.VolumeTypes.id == models.Volume.volume_type_id, + (models.VolumeTypes.qos_specs_id == + models.QualityOfServiceSpecs.specs_id), + models.QualityOfServiceSpecs.key == 'consumer', + models.QualityOfServiceSpecs.value != 'back-end')), + ~sql.exists().where(and_( + ~models.VolumeTypes.deleted, + models.VolumeTypes.id == new_vol_type, + (models.VolumeTypes.qos_specs_id == + models.QualityOfServiceSpecs.specs_id), + models.QualityOfServiceSpecs.key == 'consumer', + models.QualityOfServiceSpecs.value != 'back-end')))) + + #################### diff --git a/cinder/tests/unit/api/contrib/test_volume_actions.py b/cinder/tests/unit/api/contrib/test_volume_actions.py index d897c7dbf46..2aad59d100f 100644 --- a/cinder/tests/unit/api/contrib/test_volume_actions.py +++ b/cinder/tests/unit/api/contrib/test_volume_actions.py @@ -467,43 +467,28 @@ class VolumeActionsTest(test.TestCase): make_set_bootable_test(self, None, 400) -class VolumeRetypeActionsTest(VolumeActionsTest): +class VolumeRetypeActionsTest(test.TestCase): def setUp(self): - def get_vol_type(*args, **kwargs): - d1 = {'id': fake.VOLUME_TYPE_ID, - 'qos_specs_id': fake.QOS_SPEC_ID, - 'extra_specs': {}} - d2 = {'id': fake.VOLUME_TYPE2_ID, - 'qos_specs_id': fake.QOS_SPEC2_ID, - 'extra_specs': {}} - return d1 if d1['id'] == args[1] else d2 + super(VolumeRetypeActionsTest, self).setUp() + + self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, + is_admin=False) + self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake') - self.retype_patchers = {} self.retype_mocks = {} - paths = ['cinder.volume.volume_types.get_volume_type', - 'cinder.volume.volume_types.get_volume_type_by_name', - 
'cinder.volume.qos_specs.get_qos_specs', - 'cinder.quota.QUOTAS.add_volume_type_opts', - 'cinder.quota.QUOTAS.reserve'] + paths = ('cinder.quota.QUOTAS.add_volume_type_opts', + 'cinder.quota.QUOTAS.reserve') for path in paths: name = path.split('.')[-1] - self.retype_patchers[name] = mock.patch(path) - self.retype_mocks[name] = self.retype_patchers[name].start() - self.addCleanup(self.retype_patchers[name].stop) + patcher = mock.patch(path, return_value=None) + self.retype_mocks[name] = patcher.start() + self.addCleanup(patcher.stop) - self.retype_mocks['get_volume_type'].side_effect = get_vol_type - self.retype_mocks['get_volume_type_by_name'].side_effect = get_vol_type - self.retype_mocks['add_volume_type_opts'].return_value = None - self.retype_mocks['reserve'].return_value = None - - super(VolumeRetypeActionsTest, self).setUp() - self.context = context.RequestContext( - fake.USER_ID, fake.PROJECT_ID, auth_token=True) - - def _retype_volume_exec(self, expected_status, - new_type=fake.VOLUME_TYPE2_ID): + def _retype_volume_exec(self, expected_status, new_type='foo', + vol_id=None): + vol_id = vol_id or fake.VOLUME_ID req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) + (fake.PROJECT_ID, vol_id)) req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': new_type, 'migration_policy': 'never'} @@ -511,20 +496,13 @@ class VolumeRetypeActionsTest(VolumeActionsTest): res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(expected_status, res.status_int) - @mock.patch('cinder.volume.qos_specs.get_qos_specs') - def test_retype_volume_success(self, _mock_get_qspecs): - # Test that the retype API works for both available and in-use - self._retype_volume_exec(202) - self.mock_volume_get.return_value['status'] = 'in-use' - specs = {'id': fake.QOS_SPEC_ID, 'name': 'fake_name1', - 'consumer': 'back-end', 'specs': {'key1': 'value1'}} - _mock_get_qspecs.return_value = 
specs - self._retype_volume_exec(202) - def test_retype_volume_no_body(self): # Request with no body should fail + vol = utils.create_volume(self.context, + status='available', + testcase_instance=self) req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) + (fake.PROJECT_ID, vol.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-retype': None}) @@ -533,8 +511,11 @@ class VolumeRetypeActionsTest(VolumeActionsTest): def test_retype_volume_bad_policy(self): # Request with invalid migration policy should fail + vol = utils.create_volume(self.context, + status='available', + testcase_instance=self) req = webob.Request.blank('/v2/%s/volumes/%s/action' % - (fake.PROJECT_ID, fake.VOLUME_ID)) + (fake.PROJECT_ID, vol.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'} @@ -544,54 +525,228 @@ class VolumeRetypeActionsTest(VolumeActionsTest): def test_retype_volume_bad_status(self): # Should fail if volume does not have proper status - self.mock_volume_get.return_value['status'] = 'error' - self._retype_volume_exec(400) + vol_type_old = utils.create_volume_type(context.get_admin_context(), + self, name='old') + vol_type_new = utils.create_volume_type(context.get_admin_context(), + self, name='new') + vol = utils.create_volume(self.context, + status='error', + volume_type_id=vol_type_old.id, + testcase_instance=self) + + self._retype_volume_exec(400, vol_type_new.id, vol.id) def test_retype_type_no_exist(self): # Should fail if new type does not exist - exc = exception.VolumeTypeNotFound('exc') - self.retype_mocks['get_volume_type'].side_effect = exc - self._retype_volume_exec(404) + vol_type_old = utils.create_volume_type(context.get_admin_context(), + self, name='old') + vol = utils.create_volume(self.context, + status='available', + volume_type_id=vol_type_old.id, + testcase_instance=self) 
+ self._retype_volume_exec(404, 'fake_vol_type', vol.id) def test_retype_same_type(self): # Should fail if new type and old type are the same - self._retype_volume_exec(400, new_type=fake.VOLUME_TYPE_ID) + vol_type_old = utils.create_volume_type(context.get_admin_context(), + self, name='old') + vol = utils.create_volume(self.context, + status='available', + volume_type_id=vol_type_old.id, + testcase_instance=self) + self._retype_volume_exec(400, vol_type_old.id, vol.id) def test_retype_over_quota(self): # Should fail if going over quota for new type + vol_type_new = utils.create_volume_type(context.get_admin_context(), + self, name='old') + vol = utils.create_volume(self.context, + status='available', + testcase_instance=self) + exc = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.retype_mocks['reserve'].side_effect = exc - self._retype_volume_exec(413) + self._retype_volume_exec(413, vol_type_new.id, vol.id) - @mock.patch('cinder.volume.qos_specs.get_qos_specs') - def _retype_volume_diff_qos(self, vol_status, consumer, expected_status, - _mock_get_qspecs): - def fake_get_qos(ctxt, qos_id): - d1 = {'id': fake.QOS_SPEC_ID, 'name': 'fake_name1', - 'consumer': consumer, 'specs': {'key1': 'value1'}} - d2 = {'id': fake.QOS_SPEC2_ID, 'name': 'fake_name2', - 'consumer': consumer, 'specs': {'key1': 'value1'}} - return d1 if d1['id'] == qos_id else d2 + def _retype_volume_qos(self, vol_status, consumer, expected_status, + same_qos=False, has_qos=True, has_type=True): + admin_ctxt = context.get_admin_context() + if has_qos: + qos_old = utils.create_qos(admin_ctxt, self, + name='old', + qos_specs={'consumer': consumer})['id'] + else: + qos_old = None - self.mock_volume_get.return_value['status'] = vol_status - _mock_get_qspecs.side_effect = fake_get_qos - self._retype_volume_exec(expected_status) + if same_qos: + qos_new = qos_old + else: + qos_new = utils.create_qos(admin_ctxt, self, + name='new', 
+ qos_specs={'consumer': consumer})['id'] + + if has_type: + vol_type_old = utils.create_volume_type(admin_ctxt, self, + name='old', + qos_specs_id=qos_old).id + else: + vol_type_old = None + + vol_type_new = utils.create_volume_type(admin_ctxt, self, + name='new', + qos_specs_id=qos_new).id + + vol = utils.create_volume(self.context, + status=vol_status, + volume_type_id=vol_type_old, + testcase_instance=self) + + self._retype_volume_exec(expected_status, vol_type_new, vol.id) def test_retype_volume_diff_qos_fe_in_use(self): - # should fail if changing qos enforced by front-end for in-use volumes - self._retype_volume_diff_qos('in-use', 'front-end', 400) + # should fail if changing qos enforced by front-end for in-use volume + self._retype_volume_qos('in-use', 'front-end', 400) + + def test_retype_volume_diff_qos_be_in_use(self): + # should NOT fail for in-use if changing qos enforced by back-end + self._retype_volume_qos('in-use', 'back-end', 202) def test_retype_volume_diff_qos_fe_available(self): # should NOT fail if changing qos enforced by FE for available volumes - self._retype_volume_diff_qos('available', 'front-end', 202) + self._retype_volume_qos('available', 'front-end', 202) - def test_retype_volume_diff_qos_be(self): - # should NOT fail if changing qos enforced by back-end - self._retype_volume_diff_qos('available', 'back-end', 202) - self._retype_volume_diff_qos('in-use', 'back-end', 202) + def test_retype_volume_diff_qos_be_available(self): + # should NOT fail if changing qos enforced by back-end for available + # volumes + self._retype_volume_qos('available', 'back-end', 202) + + def test_retype_volume_same_qos_fe_in_use(self): + # should NOT fail if changing qos enforced by front-end for in-use + # volumes if the qos is the same + self._retype_volume_qos('in-use', 'front-end', 202, True) + + def test_retype_volume_same_qos_be_in_use(self): + # should NOT fail if changing qos enforced by back-end for in-use + # volumes if the qos is the same + 
self._retype_volume_qos('in-use', 'back-end', 202, True)
+
+    def test_retype_volume_same_qos_fe_available(self):
+        # should NOT fail if changing qos enforced by front-end for available
+        # volumes if the qos is the same
+        self._retype_volume_qos('available', 'front-end', 202, True)
+
+    def test_retype_volume_same_qos_be_available(self):
+        # should NOT fail if changing qos enforced by back-end for available
+        # volumes if the qos is the same
+        self._retype_volume_qos('available', 'back-end', 202, True)
+
+    def test_retype_volume_orig_no_qos_dest_fe_in_use(self):
+        # should fail if changing qos enforced by front-end on the new type
+        # and volume originally had no qos and was in-use
+        self._retype_volume_qos('in-use', 'front-end', 400, False, False)
+
+    def test_retype_volume_orig_no_qos_dest_be_in_use(self):
+        # should NOT fail if changing qos enforced by back-end on the new type
+        # and volume originally had no qos and was in-use
+        self._retype_volume_qos('in-use', 'back-end', 202, False, False)
+
+    def test_retype_volume_same_no_qos_in_use(self):
+        # should NOT fail if original and destination types had no qos for
+        # in-use volumes
+        self._retype_volume_qos('in-use', '', 202, True, False)
+
+    def test_retype_volume_orig_no_qos_dest_fe_available(self):
+        # should NOT fail if changing qos enforced by front-end on the new type
+        # and volume originally had no qos and was available
+        self._retype_volume_qos('available', 'front-end', 202, False, False)
+
+    def test_retype_volume_orig_no_qos_dest_be_available(self):
+        # should NOT fail if changing qos enforced by back-end on the new type
+        # and volume originally had no qos and was available
+        self._retype_volume_qos('available', 'back-end', 202, False, False)
+
+    def test_retype_volume_same_no_qos_available(self):
+        # should NOT fail if original and destination types had no qos for
+        # available volumes
+        self._retype_volume_qos('available', '', 202, True, False)
+
+    def test_retype_volume_orig_no_type_dest_fe_in_use(self):
+ # should fail if changing volume had no type, was in-use and + # destination type qos was enforced by front-end + self._retype_volume_qos('in-use', 'front-end', 400, False, False, + False) + + def test_retype_volume_orig_no_type_dest_no_qos_in_use(self): + # should NOT fail if changing volume had no type, was in-use and + # destination type had no qos + # and volume originally had no type and was in-use + self._retype_volume_qos('in-use', '', 202, True, False, False) + + def test_retype_volume_orig_no_type_dest_be_in_use(self): + # should NOT fail if changing volume had no type, was in-use and + # destination type qos was enforced by back-end + self._retype_volume_qos('in-use', 'back-end', 202, False, False, + False) + + def test_retype_volume_orig_no_type_dest_fe_available(self): + # should NOT fail if changing volume had no type, was in-use and + # destination type qos was enforced by front-end + self._retype_volume_qos('available', 'front-end', 202, False, False, + False) + + def test_retype_volume_orig_no_type_dest_no_qos_available(self): + # should NOT fail if changing volume had no type, was available and + # destination type had no qos + # and volume originally had no type and was in-use + self._retype_volume_qos('in-use', '', 202, True, False, False) + + def test_retype_volume_orig_no_type_dest_be_available(self): + # should NOT fail if changing volume had no type, was available and + # destination type qos was enforced by back-end + self._retype_volume_qos('in-use', 'back-end', 202, False, False, + False) + + def _retype_volume_encryption(self, vol_status, expected_status, + has_type=True, enc_orig=True, enc_dest=True): + enc_orig = None + admin_ctxt = context.get_admin_context() + if has_type: + vol_type_old = utils.create_volume_type(admin_ctxt, self, + name='old').id + if enc_orig: + utils.create_encryption(admin_ctxt, vol_type_old, self) + else: + vol_type_old = None + + vol_type_new = utils.create_volume_type(admin_ctxt, self, + name='new').id + if 
enc_dest: + utils.create_encryption(admin_ctxt, vol_type_new, self) + + vol = utils.create_volume(self.context, + status=vol_status, + volume_type_id=vol_type_old, + testcase_instance=self) + + self._retype_volume_exec(expected_status, vol_type_new, vol.id) + + def test_retype_volume_orig_no_type_dest_no_enc(self): + self._retype_volume_encryption('available', 202, False, False, False) + + def test_retype_volume_orig_no_type_dest_enc(self): + self._retype_volume_encryption('available', 400, False, False) + + def test_retype_volume_orig_type_no_enc_dest_no_enc(self): + self._retype_volume_encryption('available', 202, True, False, False) + + def test_retype_volume_orig_type_no_enc_dest_enc(self): + self._retype_volume_encryption('available', 400, True, False) + + def test_retype_volume_orig_type_enc_dest_enc(self): + self._retype_volume_encryption('available', 400) def stub_volume_get(self, context, volume_id): diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py index 1bcef3a5497..301b90a9abc 100644 --- a/cinder/tests/unit/utils.py +++ b/cinder/tests/unit/utils.py @@ -47,6 +47,7 @@ def create_volume(ctxt, replication_driver_data=None, consistencygroup_id=None, previous_status=None, + testcase_instance=None, **kwargs): """Create a volume object in the DB.""" vol = {} @@ -68,6 +69,7 @@ def create_volume(ctxt, for key in kwargs: vol[key] = kwargs[key] vol['replication_status'] = replication_status + if replication_extended_status: vol['replication_extended_status'] = replication_extended_status if replication_driver_data: @@ -77,6 +79,10 @@ def create_volume(ctxt, volume = objects.Volume(ctxt, **vol) volume.create() + + # If we get a TestCase instance we add cleanup + if testcase_instance: + testcase_instance.addCleanup(volume.destroy) return volume @@ -219,6 +225,33 @@ def create_message(ctxt, return db.message_create(ctxt, message_record) +def create_volume_type(ctxt, testcase_instance=None, **kwargs): + vol_type = db.volume_type_create(ctxt, kwargs) 
+ + # If we get a TestCase instance we add cleanup + if testcase_instance: + testcase_instance.addCleanup(db.volume_type_destroy, ctxt, vol_type.id) + + return vol_type + + +def create_encryption(ctxt, vol_type_id, testcase_instance=None, **kwargs): + encrypt = db.volume_type_encryption_create(ctxt, vol_type_id, kwargs) + + # If we get a TestCase instance we add cleanup + if testcase_instance: + testcase_instance.addCleanup(db.volume_type_encryption_delete, ctxt, + vol_type_id) + return encrypt + + +def create_qos(ctxt, testcase_instance=None, **kwargs): + qos = db.qos_specs_create(ctxt, kwargs) + if testcase_instance: + testcase_instance.addCleanup(db.qos_specs_delete, ctxt, qos['id']) + return qos + + class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall): def start(self, interval, **kwargs): kwargs['initial_delay'] = 0 diff --git a/cinder/volume/api.py b/cinder/volume/api.py index 803bbc32c2d..76f0fa4bf7a 100644 --- a/cinder/volume/api.py +++ b/cinder/volume/api.py @@ -49,7 +49,6 @@ from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils from cinder.volume.flows.api import create_volume from cinder.volume.flows.api import manage_existing -from cinder.volume import qos_specs from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import utils as volume_utils from cinder.volume import volume_types @@ -1293,42 +1292,6 @@ class API(base.Base): def migrate_volume(self, context, volume, host, force_host_copy, lock_volume): """Migrate the volume to the specified host.""" - - if volume.status not in ['available', 'in-use']: - msg = _('Volume %(vol_id)s status must be available or in-use, ' - 'but current status is: ' - '%(vol_status)s.') % {'vol_id': volume.id, - 'vol_status': volume.status} - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - # Make sure volume is not part of a migration. 
- if self._is_volume_migrating(volume): - msg = _("Volume %s is already part of an active " - "migration.") % volume.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - # We only handle volumes without snapshots for now - snaps = objects.SnapshotList.get_all_for_volume(context, volume.id) - if snaps: - msg = _("Volume %s must not have snapshots.") % volume.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - # We only handle non-replicated volumes for now - if (volume.replication_status is not None and - volume.replication_status != 'disabled'): - msg = _("Volume %s must not be replicated.") % volume.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - if volume.consistencygroup_id: - msg = _("Volume %s must not be part of a consistency " - "group.") % volume.id - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - # Make sure the host is in the list of available hosts elevated = context.elevated() topic = CONF.volume_topic @@ -1344,12 +1307,17 @@ class API(base.Base): LOG.error(msg) raise exception.InvalidHost(reason=msg) - # Make sure the destination host is different than the current one - if host == volume.host: - msg = _('Destination host must be different ' - 'than the current host.') - LOG.error(msg) - raise exception.InvalidHost(reason=msg) + # Build required conditions for conditional update + expected = {'status': ('available', 'in-use'), + 'migration_status': self.AVAILABLE_MIGRATION_STATUS, + 'replication_status': (None, 'disabled'), + 'consistencygroup_id': (None, ''), + 'host': db.Not(host)} + + filters = [~db.volume_has_snapshots_filter()] + + updates = {'migration_status': 'starting', + 'previous_status': volume.model.status} # When the migration of an available volume starts, both the status # and the migration status of the volume will be changed. 
@@ -1357,11 +1325,20 @@
         # status is changed to 'maintenance', telling users
         # that this volume is in maintenance mode, and no action is allowed
         # on this volume, e.g. attach, detach, retype, migrate, etc.
-        updates = {'migration_status': 'starting',
-                   'previous_status': volume.status}
-        if lock_volume and volume.status == 'available':
-            updates['status'] = 'maintenance'
-        self.update(context, volume, updates)
+        if lock_volume:
+            updates['status'] = db.Case(
+                [(volume.model.status == 'available', 'maintenance')],
+                else_=volume.model.status)
+
+        result = volume.conditional_update(updates, expected, filters)
+
+        if not result:
+            msg = _('Volume %(vol_id)s status must be available or in-use, '
+                    'must not '
+                    'be migrating, have snapshots, be replicated, be part of '
+                    'a consistency group and destination host must be '
+                    'different than the current host') % {'vol_id': volume.id}
+            LOG.error(msg)
+            raise exception.InvalidVolume(reason=msg)
 
         # Call the scheduler to ensure that the host exists and that it can
         # accept the volume
@@ -1440,32 +1417,12 @@
     @wrap_check_policy
     def retype(self, context, volume, new_type, migration_policy=None):
         """Attempt to modify the type associated with an existing volume."""
-        if volume.status not in ['available', 'in-use']:
-            msg = _('Unable to update type due to incorrect status: '
-                    '%(vol_status)s on volume: %(vol_id)s. 
Volume status ' - 'must be available or ' - 'in-use.') % {'vol_status': volume.status, - 'vol_id': volume.id} - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - if self._is_volume_migrating(volume): - msg = (_("Volume %s is already part of an active migration.") - % volume.id) - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - - if migration_policy and migration_policy not in ['on-demand', 'never']: + if migration_policy and migration_policy not in ('on-demand', 'never'): msg = _('migration_policy must be \'on-demand\' or \'never\', ' 'passed: %s') % new_type LOG.error(msg) raise exception.InvalidInput(reason=msg) - if volume.consistencygroup_id: - msg = _("Volume must not be part of a consistency group.") - LOG.error(msg) - raise exception.InvalidVolume(reason=msg) - # Support specifying volume type by ID or name try: if uuidutils.is_uuid_like(new_type): @@ -1480,44 +1437,6 @@ class API(base.Base): raise exception.InvalidInput(reason=msg) vol_type_id = vol_type['id'] - vol_type_qos_id = vol_type['qos_specs_id'] - - old_vol_type = None - old_vol_type_id = volume.volume_type_id - old_vol_type_qos_id = None - - # Error if the original and new type are the same - if volume.volume_type_id == vol_type_id: - msg = _('New volume_type same as original: %s.') % new_type - LOG.error(msg) - raise exception.InvalidInput(reason=msg) - - if volume.volume_type_id: - old_vol_type = volume_types.get_volume_type( - context, old_vol_type_id) - old_vol_type_qos_id = old_vol_type['qos_specs_id'] - - # We don't support changing encryption requirements yet - old_enc = volume_types.get_volume_type_encryption(context, - old_vol_type_id) - new_enc = volume_types.get_volume_type_encryption(context, - vol_type_id) - if old_enc != new_enc: - msg = _('Retype cannot change encryption requirements.') - raise exception.InvalidInput(reason=msg) - - # We don't support changing QoS at the front-end yet for in-use volumes - # TODO(avishay): Call Nova to change QoS setting 
(libvirt has support - # - virDomainSetBlockIoTune() - Nova does not have support yet). - if (volume.status != 'available' and - old_vol_type_qos_id != vol_type_qos_id): - for qos_id in [old_vol_type_qos_id, vol_type_qos_id]: - if qos_id: - specs = qos_specs.get_qos_specs(context.elevated(), qos_id) - if specs['consumer'] != 'back-end': - msg = _('Retype cannot change front-end qos specs for ' - 'in-use volume: %s.') % volume.id - raise exception.InvalidInput(reason=msg) # We're checking here in so that we can report any quota issues as # early as possible, but won't commit until we change the type. We @@ -1530,7 +1449,7 @@ class API(base.Base): reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, - old_vol_type_id) + volume.volume_type_id) # NOTE(wanghao): We don't need to reserve volumes and gigabytes # quota for retyping operation since they didn't changed, just # reserve volume_type and type gigabytes is fine. @@ -1546,8 +1465,32 @@ class API(base.Base): LOG.exception(msg, resource=volume) raise exception.CinderException(msg) - self.update(context, volume, {'status': 'retyping', - 'previous_status': volume.status}) + # Build required conditions for conditional update + expected = {'status': ('available', 'in-use'), + 'migration_status': self.AVAILABLE_MIGRATION_STATUS, + 'consistencygroup_id': (None, ''), + 'volume_type_id': db.Not(vol_type_id)} + + # We don't support changing encryption requirements yet + # We don't support changing QoS at the front-end yet for in-use volumes + # TODO(avishay): Call Nova to change QoS setting (libvirt has support + # - virDomainSetBlockIoTune() - Nova does not have support yet). 
+        filters = [db.volume_has_same_encryption_type(vol_type_id),
+                   db.volume_qos_allows_retype(vol_type_id)]
+
+        updates = {'status': 'retyping',
+                   'previous_status': objects.Volume.model.status}
+
+        if not volume.conditional_update(updates, expected, filters):
+            msg = _('Retype needs volume to be in available or in-use state, '
+                    'have same encryption requirements, not be part of an '
+                    'active migration or a consistency group, requested type '
+                    'has to be different from the one from the volume, and '
+                    'for in-use volumes front-end qos specs cannot change.')
+            LOG.error(msg)
+            QUOTAS.rollback(context, reservations + old_reservations,
+                            project_id=volume.project_id)
+            raise exception.InvalidVolume(reason=msg)
 
         request_spec = {'volume_properties': volume,
                         'volume_id': volume.id,