Publish backup capabilities to a scheduler
It's the first patch in the series to implement creating backups via the scheduler, along with a generic backup implementation. Related blueprint: backup-host-selection-algorigthm Change-Id: Ie03eb8e5f6191595ce53206ce1a685cfd9b71a2e
This commit is contained in:
parent
031e43be43
commit
acfc87e201
@ -71,9 +71,10 @@ backup_manager_opts = [
|
||||
help='Time in seconds between checks to see if the backup '
|
||||
'driver has been successfully initialized, any time '
|
||||
'the driver is restarted.'),
|
||||
cfg.IntOpt('backup_driver_status_check_interval',
|
||||
cfg.IntOpt('backup_driver_stats_polling_interval',
|
||||
default=60,
|
||||
min=10,
|
||||
deprecated_name='backup_driver_status_check_interval',
|
||||
help='Time in seconds between checks of the backup driver '
|
||||
'status. If does not report as working, it is '
|
||||
'restarted.'),
|
||||
@ -111,7 +112,7 @@ SERVICE_PGRP = '' if os.name == 'nt' else os.getpgrp()
|
||||
# writes/reads and the compression/decompression calls.
|
||||
# (https://github.com/eventlet/eventlet/issues/432)
|
||||
|
||||
class BackupManager(manager.ThreadPoolManager):
|
||||
class BackupManager(manager.SchedulerDependentManager):
|
||||
"""Manages backup of block storage devices."""
|
||||
|
||||
RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION
|
||||
@ -157,6 +158,7 @@ class BackupManager(manager.ThreadPoolManager):
|
||||
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
|
||||
self._add_to_threadpool(key_migration.migrate_fixed_key,
|
||||
backups=backups)
|
||||
self.publish_service_capabilities(ctxt)
|
||||
|
||||
def _setup_backup_driver(self, ctxt):
|
||||
backup_service = self.service(context=ctxt, db=self.db)
|
||||
@ -1073,7 +1075,18 @@ class BackupManager(manager.ThreadPoolManager):
|
||||
return self.is_initialized
|
||||
|
||||
@periodic_task.periodic_task(
|
||||
spacing=CONF.backup_driver_status_check_interval)
|
||||
spacing=CONF.backup_driver_stats_polling_interval)
|
||||
def publish_service_capabilities(self, context):
|
||||
"""Collect driver status and then publish."""
|
||||
self._report_driver_status(context)
|
||||
self._publish_service_capabilities(context)
|
||||
|
||||
def _report_driver_status(self, context):
|
||||
if not self.is_working():
|
||||
self.setup_backup_backend(context)
|
||||
backup_stats = {
|
||||
'backend_state': self.is_working(),
|
||||
'driver_name': self.driver_name,
|
||||
'availability_zone': self.az
|
||||
}
|
||||
self.update_service_capabilities(backup_stats)
|
||||
|
@ -46,9 +46,10 @@ class BackupAPI(rpc.RPCAPI):
|
||||
|
||||
2.0 - Remove 1.x compatibility
|
||||
2.1 - Adds set_log_levels and get_log_levels
|
||||
2.2 - Adds publish_service_capabilities
|
||||
"""
|
||||
|
||||
RPC_API_VERSION = '2.1'
|
||||
RPC_API_VERSION = '2.2'
|
||||
RPC_DEFAULT_VERSION = '2.0'
|
||||
TOPIC = constants.BACKUP_TOPIC
|
||||
BINARY = 'cinder-backup'
|
||||
@ -111,3 +112,8 @@ class BackupAPI(rpc.RPCAPI):
|
||||
def get_log_levels(self, context, service, log_request):
|
||||
cctxt = self._get_cctxt(server=service.host, version='2.1')
|
||||
return cctxt.call(context, 'get_log_levels', log_request=log_request)
|
||||
|
||||
@rpc.assert_min_rpc_version('2.2')
|
||||
def publish_service_capabilities(self, ctxt):
|
||||
cctxt = self._get_cctxt(version='2.2', fanout=True)
|
||||
cctxt.cast(ctxt, 'publish_service_capabilities')
|
||||
|
@ -69,6 +69,7 @@ def _launch_backup_process(launcher, num_process):
|
||||
try:
|
||||
server = service.Service.create(binary='cinder-backup',
|
||||
coordination=True,
|
||||
service_name='backup',
|
||||
process_number=num_process)
|
||||
except Exception:
|
||||
LOG.exception('Backup service %s failed to start.', CONF.host)
|
||||
|
@ -177,12 +177,13 @@ class SchedulerDependentManager(ThreadPoolManager):
|
||||
"""
|
||||
|
||||
def __init__(self, host=None, db_driver=None, service_name='undefined',
|
||||
cluster=None):
|
||||
cluster=None, *args, **kwargs):
|
||||
self.last_capabilities = None
|
||||
self.service_name = service_name
|
||||
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
|
||||
super(SchedulerDependentManager, self).__init__(host, db_driver,
|
||||
cluster=cluster)
|
||||
cluster=cluster,
|
||||
*args, **kwargs)
|
||||
|
||||
def update_service_capabilities(self, capabilities):
|
||||
"""Remember these capabilities to send on next periodic update."""
|
||||
|
@ -400,6 +400,8 @@ class HostManager(object):
|
||||
|
||||
backend_state_cls = BackendState
|
||||
|
||||
ALLOWED_SERVICE_NAMES = ('volume', 'backup')
|
||||
|
||||
REQUIRED_KEYS = frozenset([
|
||||
'pool_name',
|
||||
'total_capacity_gb',
|
||||
@ -414,6 +416,7 @@ class HostManager(object):
|
||||
def __init__(self):
|
||||
self.service_states = {} # { <host|cluster>: {<service>: {cap k : v}}}
|
||||
self.backend_state_map = {}
|
||||
self.backup_service_states = {}
|
||||
self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.'
|
||||
'filters')
|
||||
self.filter_classes = self.filter_handler.get_all_classes()
|
||||
@ -506,7 +509,7 @@ class HostManager(object):
|
||||
def update_service_capabilities(self, service_name, host, capabilities,
|
||||
cluster_name, timestamp):
|
||||
"""Update the per-service capabilities based on this notification."""
|
||||
if service_name != 'volume':
|
||||
if service_name not in HostManager.ALLOWED_SERVICE_NAMES:
|
||||
LOG.debug('Ignoring %(service_name)s service update '
|
||||
'from %(host)s',
|
||||
{'service_name': service_name, 'host': host})
|
||||
@ -525,6 +528,15 @@ class HostManager(object):
|
||||
|
||||
# Set the default capabilities in case None is set.
|
||||
backend = cluster_name or host
|
||||
|
||||
if service_name == 'backup':
|
||||
self.backup_service_states[backend] = capabilities
|
||||
LOG.debug("Received %(service_name)s service update from "
|
||||
"%(host)s: %(cap)s",
|
||||
{'service_name': service_name, 'host': host,
|
||||
'cap': capabilities})
|
||||
return
|
||||
|
||||
capab_old = self.service_states.get(backend, {"timestamp": 0})
|
||||
capab_last_update = self.service_states_last_update.get(
|
||||
backend, {"timestamp": 0})
|
||||
|
@ -33,6 +33,7 @@ from oslo_utils import timeutils
|
||||
from oslo_utils import versionutils
|
||||
import six
|
||||
|
||||
from cinder.backup import rpcapi as backup_rpcapi
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
@ -101,6 +102,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
|
||||
self.driver = importutils.import_object(scheduler_driver)
|
||||
super(SchedulerManager, self).__init__(*args, **kwargs)
|
||||
self._startup_delay = True
|
||||
self.backup_api = backup_rpcapi.BackupAPI()
|
||||
self.volume_api = volume_rpcapi.VolumeAPI()
|
||||
self.sch_api = scheduler_rpcapi.SchedulerAPI()
|
||||
self.message_api = mess_api.API()
|
||||
@ -261,6 +263,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
|
||||
|
||||
def request_service_capabilities(self, context):
|
||||
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
|
||||
try:
|
||||
self.backup_api.publish_service_capabilities(context)
|
||||
except exception.ServiceTooOld as e:
|
||||
# cinder-backup has publish_service_capabilities starting Stein
|
||||
# release only.
|
||||
msg = ("Failed to notify about cinder-backup service "
|
||||
"capabilities for host %(host)s. This is normal "
|
||||
"during a live upgrade. Error: %(e)s")
|
||||
LOG.warning(msg, {'host': self.host, 'e': e})
|
||||
|
||||
@append_operation_type()
|
||||
def migrate_volume(self, context, volume, backend, force_copy,
|
||||
|
@ -121,7 +121,8 @@ class SchedulerManagerTestCase(test.TestCase):
|
||||
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
|
||||
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
|
||||
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': '1.4',
|
||||
'cinder-scheduler': '1.4'})
|
||||
'cinder-scheduler': '1.4',
|
||||
'cinder-backup': '1.5'})
|
||||
def test_reset(self, get_min_obj, get_min_rpc):
|
||||
mgr = self.manager_cls()
|
||||
|
||||
|
@ -109,10 +109,12 @@ class TestCinderBackupCmd(test.TestCase):
|
||||
|
||||
c1 = mock.call(binary=constants.BACKUP_BINARY,
|
||||
coordination=True,
|
||||
process_number=1)
|
||||
process_number=1,
|
||||
service_name='backup')
|
||||
c2 = mock.call(binary=constants.BACKUP_BINARY,
|
||||
coordination=True,
|
||||
process_number=2)
|
||||
process_number=2,
|
||||
service_name='backup')
|
||||
service_create.assert_has_calls([c1, c2])
|
||||
|
||||
launcher = get_launcher.return_value
|
||||
|
@ -0,0 +1,11 @@
|
||||
---
|
||||
upgrade:
|
||||
- |
|
||||
If you have the ``backup_driver_status_check_interval`` option in your
cinder.conf, we recommend using ``backup_driver_stats_polling_interval``
instead to avoid deprecation warnings in the logs.
|
||||
deprecations:
|
||||
- |
|
||||
The ``backup_driver_status_check_interval`` config option has been renamed to
``backup_driver_stats_polling_interval`` for consistency with the volume
driver configuration options. Support for the old option name will be dropped
in the U release.
|
Loading…
x
Reference in New Issue
Block a user