Add shared_targets flag to Volumes
This adds a bool column to volumes to notify consumers whether the backend hosting the volume utilizes shared_targets or not. We use the volume driver's capabilities report to determine this, and default to True if a driver doesn't report anything.

The purpose of the column is to notify Nova that it needs to do some sort of locking around connect/disconnect, to be sure other volumes on the same node aren't sharing the iSCSI connection. Using a default of True is safe: although locking and doing the extra checks may be somewhat inefficient, it works fine because it will simply appear that no other volumes are ever in use.

This change adds the column to the DB as well as an online migration to go through and update any existing volumes. With this and the service_uuid column, consumers will have everything they need to:

1. determine if they need to lock
2. use the service_uuid as a unique lock name

The last remaining change in this set will be to add the fields to the view builder and bump the API version.

Change-Id: If600c28c86511cfb83f38d92cf6418954fb4975e
parent 94dba4647d
commit 2fa6fdd784
cinder/cmd/manage.py
@@ -73,6 +73,7 @@ from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import objects
@@ -85,6 +86,48 @@ from cinder.volume import utils as vutils
CONF = cfg.CONF


def _get_non_shared_target_hosts(ctxt):
    hosts = []
    numvols_needing_update = 0
    rpc.init(CONF)
    rpcapi = volume_rpcapi.VolumeAPI()

    services = objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume')
    for service in services:
        capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
        if not capabilities.get('shared_targets', True):
            hosts.append(service.host)
            numvols_needing_update += db_api.model_query(
                ctxt, models.Volume).filter_by(
                    shared_targets=True,
                    service_uuid=service.uuid).count()
    return hosts, numvols_needing_update


def shared_targets_online_data_migration(ctxt, max_count):
    """Update existing volumes shared_targets flag based on capabilities."""
    non_shared_hosts = []
    completed = 0

    non_shared_hosts, total_vols_to_update = _get_non_shared_target_hosts(ctxt)
    for host in non_shared_hosts:
        # We use the api call here instead of going direct to
        # db query to take advantage of parsing out the host
        # correctly
        vrefs = db_api.volume_get_all_by_host(
            ctxt, host,
            filters={'shared_targets': True})
        if len(vrefs) > max_count:
            del vrefs[-(len(vrefs) - max_count):]
        max_count -= len(vrefs)
        for v in vrefs:
            db.volume_update(
                ctxt, v['id'],
                {'shared_targets': 0})
            completed += 1
    return total_vols_to_update, completed


# Decorators for actions
def args(*args, **kwargs):
    def _decorator(func):
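The (total, completed) pair returned above is what lets callers batch the work until nothing is left. For illustration only, a minimal sketch of such a driver loop, assuming an admin context is available (this loop is not part of the patch; cinder-manage provides the real one):

    from cinder import context

    def run_shared_targets_migration(batch_size=50):
        # Repeatedly apply the migration until a pass updates nothing.
        ctxt = context.get_admin_context()
        while True:
            total, completed = shared_targets_online_data_migration(
                ctxt, batch_size)
            if completed == 0:
                break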
@@ -209,6 +252,7 @@ class DbCommands(object):
        db.service_uuids_online_data_migration,
        db.backup_service_online_migration,
        db.volume_service_uuids_online_data_migration,
        shared_targets_online_data_migration,
    )

    def __init__(self):
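Once registered in this tuple, the migration runs as part of the standard online migration pass; operators would typically drive it with `cinder-manage db online_data_migrations`, optionally batched via its max-count argument.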
cinder/db/sqlalchemy/migrate_repo/versions/115_add_shared_targets_to_volumes.py (new file)
@@ -0,0 +1,27 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Boolean, Column, MetaData, Table


def upgrade(migrate_engine):
    """Add shared_targets column to Volumes."""
    meta = MetaData()
    meta.bind = migrate_engine
    volumes = Table('volumes', meta, autoload=True)

    # NOTE(jdg): We use a default of True because it's harmless for a device
    # that does NOT use shared_targets to be treated as if it does
    shared_targets = Column('shared_targets',
                            Boolean,
                            default=True)
    volumes.create_column(shared_targets)
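To sanity-check the upgrade above, one could inspect the table afterwards; a hedged sketch using SQLAlchemy inspection, where the SQLite URL is purely an example and not from the patch:

    from sqlalchemy import create_engine, inspect

    # Example engine URL only; point this at the real Cinder database.
    engine = create_engine('sqlite:///cinder.db')
    column_names = {col['name']
                    for col in inspect(engine).get_columns('volumes')}
    assert 'shared_targets' in column_names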
cinder/db/sqlalchemy/models.py
@@ -320,6 +320,7 @@ class Volume(BASE, CinderBase):
                           backref="volumes",
                           foreign_keys=service_uuid,
                           primaryjoin='Volume.service_uuid == Service.uuid')
    shared_targets = Column(Boolean, default=True)  # make an FK of service?


class VolumeMetadata(BASE, CinderBase):
cinder/objects/base.py
@@ -140,6 +140,7 @@ OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})
OBJ_VERSIONS.add('1.31', {'Volume': '1.7'})
OBJ_VERSIONS.add('1.32', {'RequestSpec': '1.3'})
OBJ_VERSIONS.add('1.33', {'Volume': '1.8'})


class CinderObjectRegistry(base.VersionedObjectRegistry):
cinder/objects/volume.py
@@ -61,7 +61,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    VERSION = '1.8'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
@@ -126,6 +127,7 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'service_uuid': fields.StringField(nullable=True),
        'shared_targets': fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
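Bumping a versioned object usually also implies backlevel handling for older RPC peers. This hunk doesn't show one, so the following obj_make_compatible sketch is an assumption about what that handling could look like, not part of the patch:

    from oslo_utils import versionutils

    def obj_make_compatible(self, primitive, target_version):
        # Hypothetical shim: drop shared_targets when serializing for
        # peers that only understand Volume <= 1.7.
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 8):
            primitive.pop('shared_targets', None)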
cinder/tests/unit/test_migrations.py
@@ -384,6 +384,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
        self.assertEqual(sorted(['deleted', 'service_uuid']),
                         sorted(index_columns))

    def _check_115(self, engine, data):
        volumes = db_utils.get_table(engine, 'volumes')
        self.assertIsInstance(volumes.c.shared_targets.type,
                              self.BOOL_TYPE)

    def test_walk_versions(self):
        self.walk_versions(False, False)
        self.assert_each_foreign_key_is_part_of_an_index()
cinder/tests/unit/objects/test_objects.py
@@ -47,7 +47,7 @@ object_data = {
    'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
    'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'Volume': '1.8-6cf615b72269cef48702a2a5c2940159',
    'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'VolumeAttachment': '1.2-b68b357a1756582b706006ea9de40c9a',
    'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
cinder/tests/unit/test_cmd.py
@@ -38,6 +38,7 @@ from cinder.cmd import scheduler as cinder_scheduler
from cinder.cmd import volume as cinder_volume
from cinder.cmd import volume_usage_audit
from cinder import context
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder.objects import fields
from cinder import test
@@ -45,7 +46,9 @@ from cinder.tests.unit import fake_cluster
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
from cinder import version
from cinder.volume import rpcapi

CONF = cfg.CONF
@@ -2008,3 +2011,97 @@ class TestCinderVolumeUsageAuditCmd(test.TestCase):
            mock.call(ctxt, backup1, 'delete.end',
                      extra_usage_info=extra_info_backup_delete)
        ])


class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
    """Unit tests for the shared_targets online data migration."""

    def setUp(self):
        super(TestVolumeSharedTargetsOnlineMigration, self).setUp()

        def _get_minimum_rpc_version_mock(ctxt, binary):
            binary_map = {
                'cinder-volume': rpcapi.VolumeAPI,
            }
            return binary_map[binary].RPC_API_VERSION

        self.patch('cinder.objects.Service.get_minimum_rpc_version',
                   side_effect=_get_minimum_rpc_version_mock)

    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
                return_value='1.8')
    def test_shared_targets_migrations(self, mock_version):
        """Ensure we can update the column."""
        ctxt = context.get_admin_context()
        sqlalchemy_api.volume_create(
            ctxt,
            {'host': 'host1@lvm-driver1#lvm-driver1',
             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})

        # Create another one that already has the correct setting
        sqlalchemy_api.volume_create(
            ctxt,
            {'host': 'host1@lvm-driver1#lvm-driver1',
             'shared_targets': False,
             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})

        # Need a service to query
        values = {
            'host': 'host1@lvm-driver1',
            'binary': 'cinder-volume',
            'topic': 'cinder-volume',
            'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
        utils.create_service(ctxt, values)

        # Run the migration and verify that we updated 1 entry
        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
                        return_value={'shared_targets': False}):
            total, updated = (
                cinder_manage.shared_targets_online_data_migration(
                    ctxt, 10))
            self.assertEqual(1, total)
            self.assertEqual(1, updated)

    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
                return_value='1.8')
    def test_shared_targets_migrations_with_limit(self, mock_version):
        """Ensure we update in batches."""
        ctxt = context.get_admin_context()
        # The default value in the db for shared_targets is True,
        # so we don't need to set it explicitly on these volumes
        sqlalchemy_api.volume_create(
            ctxt,
            {'host': 'host1@lvm-driver1#lvm-driver1',
             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})

        sqlalchemy_api.volume_create(
            ctxt,
            {'host': 'host1@lvm-driver1#lvm-driver1',
             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})

        sqlalchemy_api.volume_create(
            ctxt,
            {'host': 'host1@lvm-driver1#lvm-driver1',
             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})

        values = {
            'host': 'host1@lvm-driver1',
            'binary': 'cinder-volume',
            'topic': 'cinder-volume',
            'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
        utils.create_service(ctxt, values)

        # Run the migration in batches of 2 and verify the counts:
        # the first pass updates 2 of 3, the second finishes the last one
        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
                        return_value={'shared_targets': False}):
            total, updated = (
                cinder_manage.shared_targets_online_data_migration(
                    ctxt, 2))
            self.assertEqual(3, total)
            self.assertEqual(2, updated)

            total, updated = (
                cinder_manage.shared_targets_online_data_migration(
                    ctxt, 2))
            self.assertEqual(1, total)
            self.assertEqual(1, updated)
cinder/volume/drivers/lvm.py
@@ -279,6 +279,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
                multiattach=False
            ))
        data["pools"].append(single_pool)
        data["shared_targets"] = False

        # Check availability of sparse volume copy.
        data['sparse_copy_volume'] = self._sparse_copy_volume
cinder/volume/drivers/solidfire.py
@@ -1845,6 +1845,7 @@ class SolidFireDriver(san.SanISCSIDriver):
            results['deDuplicationPercent'])
        data['thin_provision_percent'] = (
            results['thinProvisioningPercent'])
        data['shared_targets'] = False
        self.cluster_stats = data

    def initialize_connection(self, volume, connector):
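Both drivers above report False because they export per-volume targets. A backend whose single iSCSI target serves many volumes would opt in from the same stats-reporting path; a hedged sketch for a hypothetical driver (the class and backend names are illustrative, not from the patch):

    class ExampleSharedTargetDriver(driver.VolumeDriver):
        """Hypothetical driver whose one iSCSI target serves many volumes."""

        def _update_volume_stats(self):
            data = {
                'volume_backend_name': 'example_backend',
                'storage_protocol': 'iSCSI',
                # Consumers must serialize connect/disconnect per host.
                'shared_targets': True,
            }
            self._stats = data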
cinder/volume/manager.py
@@ -680,7 +680,13 @@ class VolumeManager(manager.CleanableManager,
        # volume stats as these are decremented on delete.
        self._update_allocated_capacity(volume)

        shared_targets = (
            1
            if self.driver.capabilities.get('shared_targets', True)
            else 0)
        updates = {'service_uuid': self.service_uuid,
                   'shared_targets': shared_targets}

        volume.update(updates)
        volume.save()
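With shared_targets and service_uuid both persisted at create time, a consumer such as Nova has everything it needs to lock, per items 1 and 2 of the commit message. A minimal sketch using oslo.concurrency, where the disconnect callback and lock scope are assumptions rather than anything this patch defines:

    from oslo_concurrency import lockutils

    def guarded_disconnect(volume, disconnect):
        # Only serialize when the backend multiplexes volumes on one target.
        if volume.shared_targets:
            # service_uuid is stable and unique per backend, so it makes a
            # natural lock name shared by all volumes on that backend.
            with lockutils.lock(volume.service_uuid):
                disconnect(volume)
        else:
            disconnect(volume)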