From e55043ff0087b508bc53fcc0b47658782c554a91 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Tue, 2 Mar 2021 13:14:08 +0100 Subject: [PATCH] Fix automatic quota sync for migrating volumes When using the automatic quota refresh via `until_refresh` and `max_age` configuration options the calculated quota usage by the refresh will not be correct if there are volumes that are migrating, since source and destination volumes are counted in the refresh. Normal quota usage calculation does not work like this, it only counts it once, and so should the automatic quota calculation. This patch fixes this by adding a filter to the query that skips migration destination volumes. It also updates the DB implementation of volume_data_get_for_project that didn't match the signature in cinder.db.api Closes-Bug: #1917450 Change-Id: Ifff092917abe07726367a953f5ed420626c53bb9 --- cinder/db/sqlalchemy/api.py | 25 +++++++---- cinder/tests/unit/test_db_api.py | 41 +++++++++++++++++++ ...quota-sync-migrating-2c99e134e117a945.yaml | 7 ++++ 3 files changed, 65 insertions(+), 8 deletions(-) create mode 100644 releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index d369b6607ed..5e05a080b5f 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -1579,15 +1579,25 @@ def volume_data_get_for_host(context, host, count_only=False): @require_admin_context def _volume_data_get_for_project(context, project_id, volume_type_id=None, - session=None, host=None): + session=None, host=None, skip_internal=True): + model = models.Volume query = model_query(context, - func.count(models.Volume.id), - func.sum(models.Volume.size), + func.count(model.id), + func.sum(model.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) + + # When calling the method for quotas we don't count volumes that are the + # destination of a migration since they were not accounted for quotas or + 
reservations in the first place. + if skip_internal: + query = query.filter( + or_(model.migration_status.is_(None), + ~model.migration_status.startswith('target:'))) + if host: - query = query.filter(_filter_host(models.Volume.host, host)) + query = query.filter(_filter_host(model.host, host)) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) @@ -1618,10 +1628,9 @@ def _backup_data_get_for_project(context, project_id, volume_type_id=None, @require_admin_context -def volume_data_get_for_project(context, project_id, - volume_type_id=None, host=None): - return _volume_data_get_for_project(context, project_id, - volume_type_id, host=host) +def volume_data_get_for_project(context, project_id, host=None): + return _volume_data_get_for_project(context, project_id, host=host, + skip_internal=False) VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata, diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index d1bd63031a0..77cd15581ac 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -513,6 +513,47 @@ class DBAPIVolumeTestCase(BaseTest): db.volume_data_get_for_project( self.ctxt, 'p%d' % i)) + @mock.patch.object(sqlalchemy_api, '_volume_data_get_for_project') + def test_volume_data_get_for_project_migrating(self, mock_vol_data): + expected = (mock.sentinel.count, mock.sentinel.gb) + mock_vol_data.return_value = expected + res = db.volume_data_get_for_project(self.ctxt, + mock.sentinel.project_id, + mock.sentinel.host) + self.assertEqual(expected, res) + mock_vol_data.assert_called_once_with(self.ctxt, + mock.sentinel.project_id, + host=mock.sentinel.host, + skip_internal=False) + + @ddt.data((True, THREE_HUNDREDS, THREE), + (False, THREE_HUNDREDS + ONE_HUNDREDS, THREE + 1)) + @ddt.unpack + def test__volume_data_get_for_project_migrating(self, skip_internal, + gigabytes, count): + for i in range(2): + db.volume_create(self.ctxt, + {'project_id': 'project', + 'size': ONE_HUNDREDS, 
+ 'host': 'h-%d' % i, + 'volume_type_id': fake.VOLUME_TYPE_ID}) + # This volume is migrating and will be counted + db.volume_create(self.ctxt, {'project_id': 'project', + 'size': ONE_HUNDREDS, + 'host': 'h-%d' % i, + 'volume_type_id': fake.VOLUME_TYPE_ID, + 'migration_status': 'migrating'}) + # This one will not be counted + db.volume_create(self.ctxt, {'project_id': 'project', + 'size': ONE_HUNDREDS, + 'host': 'h-%d' % i, + 'volume_type_id': fake.VOLUME_TYPE_ID, + 'migration_status': 'target:vol-id'}) + + result = sqlalchemy_api._volume_data_get_for_project( + self.ctxt, 'project', skip_internal=skip_internal) + self.assertEqual((count, gigabytes), result) + def test_volume_data_get_for_project_with_host(self): db.volume_create(self.ctxt, {'project_id': fake.PROJECT_ID, diff --git a/releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml b/releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml new file mode 100644 index 00000000000..70c67e05604 --- /dev/null +++ b/releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + `Bug #1917450 <https://bugs.launchpad.net/cinder/+bug/1917450>`_: Fix + automatic quota refresh to correctly account for migrating volumes. During + volume migration we'll have 2 volumes in cinder and only one will be + accounted for in quota usage.