From 2fa6fdd7846df05c3d81e78c137ec2976499a7f9 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Thu, 16 Nov 2017 18:36:55 +0000 Subject: [PATCH] Add shared_targets flag to Volumes This adds a bool column to volumes to notify consumers if the backend hosting the volume utilizes shared_targets or not. We use the volume-drivers capabilities report to determine this and default to True if a driver doesn't report anything. The purpose of the column is to notify Nova that it needs to do some sort of locking around connect/disconnect to be sure other volumes on the same node aren't sharing the iscsi connection. Using a default of "True" is safe because although locking and doing the extra checks might be somewhat inefficient it works fine because it will just appear that there's never any other volumes in use. So this change adds the column to the DB as well as an online migration to go through and update any existing volumes. With this and the service_uuid column consumers will have everything they need to: 1. determine if they need to lock 2. use the service_uuid as a unique lock name That last remaining change in this set will be to add the fields to the view-builder and bump the API version. 
Change-Id: If600c28c86511cfb83f38d92cf6418954fb4975e --- cinder/cmd/manage.py | 44 +++++++++ .../115_add_shared_targets_to_volumes.py | 27 ++++++ cinder/db/sqlalchemy/models.py | 1 + cinder/objects/base.py | 1 + cinder/objects/volume.py | 4 +- cinder/tests/unit/db/test_migrations.py | 5 + cinder/tests/unit/objects/test_objects.py | 2 +- cinder/tests/unit/test_cmd.py | 97 +++++++++++++++++++ cinder/volume/drivers/lvm.py | 1 + cinder/volume/drivers/solidfire.py | 1 + cinder/volume/manager.py | 8 +- 11 files changed, 188 insertions(+), 3 deletions(-) create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/115_add_shared_targets_to_volumes.py diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py index 6a832e06b13..26ef27cff51 100644 --- a/cinder/cmd/manage.py +++ b/cinder/cmd/manage.py @@ -73,6 +73,7 @@ from cinder import context from cinder import db from cinder.db import migration as db_migration from cinder.db.sqlalchemy import api as db_api +from cinder.db.sqlalchemy import models from cinder import exception from cinder.i18n import _ from cinder import objects @@ -85,6 +86,48 @@ from cinder.volume import utils as vutils CONF = cfg.CONF +def _get_non_shared_target_hosts(ctxt): + hosts = [] + numvols_needing_update = 0 + rpc.init(CONF) + rpcapi = volume_rpcapi.VolumeAPI() + + services = objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume') + for service in services: + capabilities = rpcapi.get_capabilities(ctxt, service.host, True) + if not capabilities.get('shared_targets', True): + hosts.append(service.host) + numvols_needing_update += db_api.model_query( + ctxt, models.Volume).filter_by( + shared_targets=True, + service_uuid=service.uuid).count() + return hosts, numvols_needing_update + + +def shared_targets_online_data_migration(ctxt, max_count): + """Update existing volumes shared_targets flag based on capabilities.""" + non_shared_hosts = [] + completed = 0 + + non_shared_hosts, total_vols_to_update = _get_non_shared_target_hosts(ctxt) + for 
host in non_shared_hosts: + # We use the api call here instead of going direct to + # db query to take advantage of parsing out the host + # correctly + vrefs = db_api.volume_get_all_by_host( + ctxt, host, + filters={'shared_targets': True}) + if len(vrefs) > max_count: + del vrefs[-(len(vrefs) - max_count):] + max_count -= len(vrefs) + for v in vrefs: + db.volume_update( + ctxt, v['id'], + {'shared_targets': 0}) + completed += 1 + return total_vols_to_update, completed + + # Decorators for actions def args(*args, **kwargs): def _decorator(func): @@ -209,6 +252,7 @@ class DbCommands(object): db.service_uuids_online_data_migration, db.backup_service_online_migration, db.volume_service_uuids_online_data_migration, + shared_targets_online_data_migration, ) def __init__(self): diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/115_add_shared_targets_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/115_add_shared_targets_to_volumes.py new file mode 100644 index 00000000000..5a8bd0c6230 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/115_add_shared_targets_to_volumes.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Boolean, Column, MetaData, Table + + +def upgrade(migrate_engine): + """Add shared_targets column to Volumes.""" + meta = MetaData() + meta.bind = migrate_engine + volumes = Table('volumes', meta, autoload=True) + + # NOTE(jdg): We use a default of True because it's harmless for a device + # that does NOT use shared_targets to be treated as if it does + shared_targets = Column('shared_targets', + Boolean, + default=True) + volumes.create_column(shared_targets) diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py index 15a736745fb..c3c4955d766 100644 --- a/cinder/db/sqlalchemy/models.py +++ b/cinder/db/sqlalchemy/models.py @@ -320,6 +320,7 @@ class Volume(BASE, CinderBase): backref="volumes", foreign_keys=service_uuid, primaryjoin='Volume.service_uuid == Service.uuid') + shared_targets = Column(Boolean, default=True) # make an FK of service? class VolumeMetadata(BASE, CinderBase): diff --git a/cinder/objects/base.py b/cinder/objects/base.py index 9176a6f2368..6a156048324 100644 --- a/cinder/objects/base.py +++ b/cinder/objects/base.py @@ -140,6 +140,7 @@ OBJ_VERSIONS.add('1.29', {'Service': '1.6'}) OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'}) OBJ_VERSIONS.add('1.31', {'Volume': '1.7'}) OBJ_VERSIONS.add('1.32', {'RequestSpec': '1.3'}) +OBJ_VERSIONS.add('1.33', {'Volume': '1.8'}) class CinderObjectRegistry(base.VersionedObjectRegistry): diff --git a/cinder/objects/volume.py b/cinder/objects/volume.py index b0d25334f3d..4314aba688e 100644 --- a/cinder/objects/volume.py +++ b/cinder/objects/volume.py @@ -61,7 +61,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject, # Version 1.5: Added group # Version 1.6: This object is now cleanable (adds rows to workers table) # Version 1.7: Added service_uuid - VERSION = '1.7' + # Version 1.8: Added shared_targets + VERSION = '1.8' OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup', @@ -126,6 
+127,7 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject, 'snapshots': fields.ObjectField('SnapshotList', nullable=True), 'group': fields.ObjectField('Group', nullable=True), 'service_uuid': fields.StringField(nullable=True), + 'shared_targets': fields.BooleanField(default=True, nullable=True), } # NOTE(thangp): obj_extra_fields is used to hold properties that are not diff --git a/cinder/tests/unit/db/test_migrations.py b/cinder/tests/unit/db/test_migrations.py index ba96f8fdffe..dc3f98de5ab 100644 --- a/cinder/tests/unit/db/test_migrations.py +++ b/cinder/tests/unit/db/test_migrations.py @@ -384,6 +384,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin): self.assertEqual(sorted(['deleted', 'service_uuid']), sorted(index_columns)) + def _check_115(self, engine, data): + volumes = db_utils.get_table(engine, 'volumes') + self.assertIsInstance(volumes.c.shared_targets.type, + self.BOOL_TYPE) + def test_walk_versions(self): self.walk_versions(False, False) self.assert_each_foreign_key_is_part_of_an_index() diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py index 0dddd9c99e0..85d53a2d7b5 100644 --- a/cinder/tests/unit/objects/test_objects.py +++ b/cinder/tests/unit/objects/test_objects.py @@ -47,7 +47,7 @@ object_data = { 'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201', 'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', - 'Volume': '1.7-0845c5b7b826a4e9019f3684c2f9b132', + 'Volume': '1.8-6cf615b72269cef48702a2a5c2940159', 'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'VolumeAttachment': '1.2-b68b357a1756582b706006ea9de40c9a', 'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', diff --git a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py index 4276bada0ea..64e4dfa0346 100644 --- a/cinder/tests/unit/test_cmd.py +++ b/cinder/tests/unit/test_cmd.py @@ -38,6 +38,7 @@ from cinder.cmd import scheduler as 
cinder_scheduler from cinder.cmd import volume as cinder_volume from cinder.cmd import volume_usage_audit from cinder import context +from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import fields from cinder import test @@ -45,7 +46,9 @@ from cinder.tests.unit import fake_cluster from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import fake_volume +from cinder.tests.unit import utils from cinder import version +from cinder.volume import rpcapi CONF = cfg.CONF @@ -2008,3 +2011,97 @@ class TestCinderVolumeUsageAuditCmd(test.TestCase): mock.call(ctxt, backup1, 'delete.end', extra_usage_info=extra_info_backup_delete) ]) + + +class TestVolumeSharedTargetsOnlineMigration(test.TestCase): + """Unit tests for cinder.db.api.service_*.""" + + def setUp(self): + super(TestVolumeSharedTargetsOnlineMigration, self).setUp() + + def _get_minimum_rpc_version_mock(ctxt, binary): + binary_map = { + 'cinder-volume': rpcapi.VolumeAPI, + } + return binary_map[binary].RPC_API_VERSION + + self.patch('cinder.objects.Service.get_minimum_rpc_version', + side_effect=_get_minimum_rpc_version_mock) + + @mock.patch('cinder.objects.Service.get_minimum_obj_version', + return_value='1.8') + def test_shared_targets_migrations(self, mock_version): + """Ensure we can update the column.""" + ctxt = context.get_admin_context() + sqlalchemy_api.volume_create( + ctxt, + {'host': 'host1@lvm-driver1#lvm-driver1', + 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}) + + # Create another one correct setting + sqlalchemy_api.volume_create( + ctxt, + {'host': 'host1@lvm-driver1#lvm-driver1', + 'shared_targets': False, + 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}) + + # Need a service to query + values = { + 'host': 'host1@lvm-driver1', + 'binary': 'cinder-volume', + 'topic': 'cinder-volume', + 'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'} + utils.create_service(ctxt, 
values) + + # Run the migration and verify that we updated 1 entry + with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities', + return_value={'shared_targets': False}): + total, updated = ( + cinder_manage.shared_targets_online_data_migration( + ctxt, 10)) + self.assertEqual(1, total) + self.assertEqual(1, updated) + + @mock.patch('cinder.objects.Service.get_minimum_obj_version', + return_value='1.8') + def test_shared_targets_migrations_with_limit(self, mock_version): + """Ensure we update in batches.""" + ctxt = context.get_admin_context() + # default value in db for shared_targets on a volume + # is True, so don't need to set it here explicitly + sqlalchemy_api.volume_create( + ctxt, + {'host': 'host1@lvm-driver1#lvm-driver1', + 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}) + + sqlalchemy_api.volume_create( + ctxt, + {'host': 'host1@lvm-driver1#lvm-driver1', + 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}) + + sqlalchemy_api.volume_create( + ctxt, + {'host': 'host1@lvm-driver1#lvm-driver1', + 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}) + + values = { + 'host': 'host1@lvm-driver1', + 'binary': 'cinder-volume', + 'topic': 'cinder-volume', + 'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'} + utils.create_service(ctxt, values) + + # Run the migration and verify that we updated 1 entry + with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities', + return_value={'shared_targets': False}): + total, updated = ( + cinder_manage.shared_targets_online_data_migration( + ctxt, 2)) + self.assertEqual(3, total) + self.assertEqual(2, updated) + + total, updated = ( + cinder_manage.shared_targets_online_data_migration( + ctxt, 2)) + self.assertEqual(1, total) + self.assertEqual(1, updated) diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index cb8a44fd4ff..8dbf154ff27 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -279,6 +279,7 @@ class 
LVMVolumeDriver(driver.VolumeDriver): multiattach=False )) data["pools"].append(single_pool) + data["shared_targets"] = False # Check availability of sparse volume copy. data['sparse_copy_volume'] = self._sparse_copy_volume diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py index 5793e81f0cb..34da2e855a5 100644 --- a/cinder/volume/drivers/solidfire.py +++ b/cinder/volume/drivers/solidfire.py @@ -1845,6 +1845,7 @@ class SolidFireDriver(san.SanISCSIDriver): results['deDuplicationPercent']) data['thin_provision_percent'] = ( results['thinProvisioningPercent']) + data['shared_targets'] = False self.cluster_stats = data def initialize_connection(self, volume, connector): diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 0a2046cacb5..a1efefdae45 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -680,7 +680,13 @@ class VolumeManager(manager.CleanableManager, # volume stats as these are decremented on delete. self._update_allocated_capacity(volume) - updates = {'service_uuid': self.service_uuid} + shared_targets = ( + 1 + if self.driver.capabilities.get('shared_targets', True) + else 0) + updates = {'service_uuid': self.service_uuid, + 'shared_targets': shared_targets} + volume.update(updates) volume.save()