diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/136_make_vol_type_col_non_nullable.py b/cinder/db/sqlalchemy/migrate_repo/versions/136_make_vol_type_col_non_nullable.py
new file mode 100644
index 00000000000..e6a5b8e400d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/136_make_vol_type_col_non_nullable.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+
+from cinder import exception
+from cinder.i18n import _
+
+
+def upgrade(migrate_engine):
+    """Make volume_type columns non-nullable"""
+
+    meta = MetaData(bind=migrate_engine)
+
+    # Update volume_type columns in tables to not allow null values
+
+    volumes = Table('volumes', meta, autoload=True)
+
+    try:
+        volumes.c.volume_type_id.alter(nullable=False)
+    except Exception:
+        msg = (_('Migration cannot continue until all volumes have '
+                 'been migrated to the `__DEFAULT__` volume type. Please '
+                 'run `cinder-manage db online_data_migrations`. '
+                 'There are still untyped volumes unmigrated.'))
+        raise exception.ValidationError(msg)
+
+    snapshots = Table('snapshots', meta, autoload=True)
+
+    try:
+        snapshots.c.volume_type_id.alter(nullable=False)
+    except Exception:
+        msg = (_('Migration cannot continue until all snapshots have '
+                 'been migrated to the `__DEFAULT__` volume type. Please '
+                 'run `cinder-manage db online_data_migrations`. '
+                 'There are still untyped snapshots unmigrated.'))
+        raise exception.ValidationError(msg)
+
+    encryption = Table('encryption', meta, autoload=True)
+    # Since volume_type is a mandatory argument when creating encryption,
+    # the volume_type_id column won't contain any null values, so we can
+    # alter it directly.
+    encryption.c.volume_type_id.alter(nullable=False)
diff --git a/cinder/tests/unit/cmd/test_status.py b/cinder/tests/unit/cmd/test_status.py
index e1c5bf2c3b5..0cd33f4c11f 100644
--- a/cinder/tests/unit/cmd/test_status.py
+++ b/cinder/tests/unit/cmd/test_status.py
@@ -27,6 +27,7 @@ from cinder import db
 from cinder.db.sqlalchemy import api as sqla_api
 from cinder import exception
 from cinder import test
+from cinder.tests.unit import fake_constants as fakes
 import cinder.volume.manager as volume_manager
 
 
@@ -262,22 +263,24 @@ class TestCinderStatus(testtools.TestCase):
     def test__check_service_uuid_ok(self):
         self._create_service()
         self._create_service()
-        self._create_volume()
+        self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID)
         # Confirm that we ignored deleted entries
-        self._create_volume(service_uuid=None, deleted=True)
+        self._create_volume(service_uuid=None, deleted=True,
+                            volume_type_id=fakes.VOLUME_TYPE_ID)
         result = self.checks._check_service_uuid()
         self.assertEqual(uc.Code.SUCCESS, result.code)
 
     def test__check_service_uuid_fail_service(self):
         self._create_service()
         self._create_service(uuid=None)
-        self._create_volume()
+        self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID)
         result = self.checks._check_service_uuid()
         self.assertEqual(uc.Code.FAILURE, result.code)
 
     def test__check_service_uuid_fail_volume(self):
         self._create_service()
-        self._create_volume(service_uuid=None)
+        self._create_volume(service_uuid=None,
+                            volume_type_id=fakes.VOLUME_TYPE_ID)
         result = self.checks._check_service_uuid()
         self.assertEqual(uc.Code.FAILURE, result.code)
 
diff --git a/cinder/tests/unit/db/test_migrations.py b/cinder/tests/unit/db/test_migrations.py
index 498871e6dd3..f74fc902c93 100644
--- a/cinder/tests/unit/db/test_migrations.py
+++ b/cinder/tests/unit/db/test_migrations.py
@@ -114,6 +114,9 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
             # it should be add the additional prefix before volume_type_name,
             # which we of course allow *this* size to 300.
             127,
+            # 136 modifies the tables that have a volume_type_id column to
+            # make that column non-nullable
+            136,
         ]
 
         if version not in exceptions:
@@ -188,6 +191,15 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
                      .execute().first())
         self.assertIsNotNone(vtype)
 
+    def _check_136(self, engine, data):
+        """Test alter volume_type_id columns."""
+        vol_table = db_utils.get_table(engine, 'volumes')
+        snap_table = db_utils.get_table(engine, 'snapshots')
+        encrypt_table = db_utils.get_table(engine, 'encryption')
+        self.assertFalse(vol_table.c.volume_type_id.nullable)
+        self.assertFalse(snap_table.c.volume_type_id.nullable)
+        self.assertFalse(encrypt_table.c.volume_type_id.nullable)
+
     # NOTE: this test becomes slower with each addition of new DB migration.
     # 'pymysql' works much slower on slow nodes than 'psycopg2'. And such
     # timeout mostly required for testing of 'mysql' backend.
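Reviewer note (not part of the patch): as a rough illustration of what would block this migration, the sketch below counts rows that still have a NULL volume_type_id. It assumes direct SQLAlchemy 1.x access to the Cinder database and uses a placeholder connection URL; the supported remediation path remains `cinder-manage db online_data_migrations`.

    # Pre-flight sketch: count untyped volumes/snapshots that would make the
    # alter(nullable=False) calls in migration 136 fail.
    from sqlalchemy import MetaData, Table, create_engine, func, select

    # Placeholder URL; point this at the real Cinder database.
    engine = create_engine('mysql+pymysql://cinder:password@localhost/cinder')
    meta = MetaData(bind=engine)

    for name in ('volumes', 'snapshots'):
        table = Table(name, meta, autoload=True)
        count = engine.execute(
            select([func.count()])
            .select_from(table)
            .where(table.c.volume_type_id.is_(None))
        ).scalar()
        print('%s: %d rows with NULL volume_type_id' % (name, count))

Any non-zero count means the corresponding alter(nullable=False) call would fail and the migration would raise ValidationError until the online data migrations have been run.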