diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf index 9e2a54104f0f..8c49bad80709 100644 --- a/etc/nova/nova-config-generator.conf +++ b/etc/nova/nova-config-generator.conf @@ -8,14 +8,14 @@ namespace = nova.compute namespace = nova.network namespace = nova.scheduler namespace = nova.virt -namespace = nova.openstack.common.eventlet_backdoor namespace = nova.openstack.common.memorycache -namespace = nova.openstack.common.periodic_task namespace = nova.openstack.common.policy -namespace = nova.openstack.common.sslutils namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy +namespace = oslo.service.periodic_task +namespace = oslo.service.service +namespace = oslo.service.sslutils namespace = oslo.db namespace = oslo.middleware namespace = oslo.concurrency diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 7acfdc4f6fe0..9f4df67a6974 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -24,6 +24,7 @@ from oslo_config import cfg from oslo_context import context as common_context from oslo_log import log as logging from oslo_serialization import jsonutils +from oslo_service import sslutils from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import timeutils @@ -80,7 +81,7 @@ ec2_opts = [ CONF = cfg.CONF CONF.register_opts(ec2_opts) CONF.import_opt('use_forwarded_for', 'nova.api.auth') -CONF.import_group('ssl', 'nova.openstack.common.sslutils') +sslutils.is_enabled(CONF) # Fault Wrapper around all EC2 requests diff --git a/nova/cells/manager.py b/nova/cells/manager.py index d19f11a4b5e5..51091c27742a 100644 --- a/nova/cells/manager.py +++ b/nova/cells/manager.py @@ -22,6 +22,7 @@ import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import periodic_task from oslo_utils import importutils from oslo_utils import timeutils import six @@ -37,7 +38,6 @@ from nova import manager from nova import objects from nova.objects import base as base_obj from nova.objects import instance as instance_obj -from nova.openstack.common import periodic_task cell_manager_opts = [ cfg.StrOpt('driver', diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7d3c92d4cc91..9a4edd6e0cea 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -44,6 +44,8 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils +from oslo_service import loopingcall +from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils @@ -78,8 +80,6 @@ from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as obj_base -from nova.openstack.common import loopingcall -from nova.openstack.common import periodic_task from nova import paths from nova import rpc from nova import safe_utils diff --git a/nova/image/glance.py b/nova/image/glance.py index f0f67942f8f7..2add21eb43b7 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -28,6 +28,7 @@ import glanceclient.exc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils +from oslo_service import sslutils from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import timeutils @@ -76,7 +77,6 @@ CONF = cfg.CONF CONF.register_opts(glance_opts, 'glance') 
CONF.import_opt('auth_strategy', 'nova.api.auth') CONF.import_opt('my_ip', 'nova.netconf') -CONF.import_group('ssl', 'nova.openstack.common.sslutils') def generate_glance_url(): @@ -128,6 +128,7 @@ def _create_glance_client(context, host, port, use_ssl, version=1): # https specific params params['insecure'] = CONF.glance.api_insecure params['ssl_compression'] = False + sslutils.is_enabled(CONF) if CONF.ssl.cert_file: params['cert_file'] = CONF.ssl.cert_file if CONF.ssl.key_file: diff --git a/nova/manager.py b/nova/manager.py index 97fe80fdab91..cb05dff8882b 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -53,9 +53,9 @@ This module provides Manager, a base class for managers. from oslo_config import cfg from oslo_log import log as logging +from oslo_service import periodic_task from nova.db import base -from nova.openstack.common import periodic_task from nova import rpc @@ -64,7 +64,12 @@ CONF.import_opt('host', 'nova.netconf') LOG = logging.getLogger(__name__) -class Manager(base.Base, periodic_task.PeriodicTasks): +class PeriodicTasks(periodic_task.PeriodicTasks): + def __init__(self): + super(PeriodicTasks, self).__init__(CONF) + + +class Manager(base.Base, PeriodicTasks): def __init__(self, host=None, db_driver=None, service_name='undefined'): if not host: diff --git a/nova/network/manager.py b/nova/network/manager.py index 1d960cd2725c..8cb1570fe895 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -37,6 +37,7 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging +from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import netutils @@ -58,7 +59,6 @@ from nova.network import rpcapi as network_rpcapi from nova import objects from nova.objects import base as obj_base from nova.objects import quotas as quotas_obj -from nova.openstack.common import periodic_task from nova import servicegroup from nova import utils diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py deleted file mode 100644 index bf79feba4603..000000000000 --- a/nova/openstack/common/eventlet_backdoor.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function - -import copy -import errno -import gc -import logging -import os -import pprint -import socket -import sys -import traceback - -import eventlet.backdoor -import greenlet -from oslo_config import cfg - -from nova.openstack.common._i18n import _LI - -help_for_backdoor_port = ( - "Acceptable values are 0, , and :, where 0 results " - "in listening on a random tcp port number; results in listening " - "on the specified port number (and not enabling backdoor if that port " - "is in use); and : results in listening on the smallest " - "unused port number within the specified range of port numbers. The " - "chosen port is displayed in the service's log file.") -eventlet_backdoor_opts = [ - cfg.StrOpt('backdoor_port', - help="Enable eventlet backdoor. %s" % help_for_backdoor_port) -] - -CONF = cfg.CONF -CONF.register_opts(eventlet_backdoor_opts) -LOG = logging.getLogger(__name__) - - -def list_opts(): - """Entry point for oslo-config-generator. - """ - return [(None, copy.deepcopy(eventlet_backdoor_opts))] - - -class EventletBackdoorConfigValueError(Exception): - def __init__(self, port_range, help_msg, ex): - msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. ' - '%(help)s' % - {'range': port_range, 'ex': ex, 'help': help_msg}) - super(EventletBackdoorConfigValueError, self).__init__(msg) - self.port_range = port_range - - -def _dont_use_this(): - print("Don't use this, just disconnect instead") - - -def _find_objects(t): - return [o for o in gc.get_objects() if isinstance(o, t)] - - -def _print_greenthreads(): - for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print(i, gt) - traceback.print_stack(gt.gr_frame) - print() - - -def _print_nativethreads(): - for threadId, stack in sys._current_frames().items(): - print(threadId) - traceback.print_stack(stack) - print() - - -def _parse_port_range(port_range): - if ':' not in port_range: - start, end = port_range, port_range - else: - start, end = port_range.split(':', 1) - try: - start, end = int(start), int(end) - if end < start: - raise ValueError - return start, end - except ValueError as ex: - raise EventletBackdoorConfigValueError(port_range, ex, - help_for_backdoor_port) - - -def _listen(host, start_port, end_port, listen_func): - try_port = start_port - while True: - try: - return listen_func((host, try_port)) - except socket.error as exc: - if (exc.errno != errno.EADDRINUSE or - try_port >= end_port): - raise - try_port += 1 - - -def initialize_if_enabled(): - backdoor_locals = { - 'exit': _dont_use_this, # So we don't exit the entire process - 'quit': _dont_use_this, # So we don't exit the entire process - 'fo': _find_objects, - 'pgt': _print_greenthreads, - 'pnt': _print_nativethreads, - } - - if CONF.backdoor_port is None: - return None - - start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) - - # NOTE(johannes): The standard sys.displayhook will print the value of - # the last expression and set it to __builtin__._, which overwrites - # the __builtin__._ that gettext sets. Let's switch to using pprint - # since it won't interact poorly with gettext, and it's easier to - # read the output too. - def displayhook(val): - if val is not None: - pprint.pprint(val) - sys.displayhook = displayhook - - sock = _listen('localhost', start_port, end_port, eventlet.listen) - - # In the case of backdoor port being zero, a port number is assigned by - # listen(). In any case, pull the port number out here. 
- port = sock.getsockname()[1] - LOG.info( - _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'), - {'port': port, 'pid': os.getpid()} - ) - eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, - locals=backdoor_locals) - return port diff --git a/nova/openstack/common/loopingcall.py b/nova/openstack/common/loopingcall.py deleted file mode 100644 index 15f5405ffadf..000000000000 --- a/nova/openstack/common/loopingcall.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import sys -import time - -from eventlet import event -from eventlet import greenthread - -from nova.openstack.common._i18n import _LE, _LW - -LOG = logging.getLogger(__name__) - -# NOTE(zyluo): This lambda function was declared to avoid mocking collisions -# with time.time() called in the standard logging module -# during unittests. -_ts = lambda: time.time() - - -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCallBase. - - The poll-function passed to LoopingCallBase can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. - - An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCallBase.wait() - - """ - - def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCallBase.wait() should return.""" - self.retvalue = retvalue - - -class LoopingCallBase(object): - def __init__(self, f=None, *args, **kw): - self.args = args - self.kw = kw - self.f = f - self._running = False - self.done = None - - def stop(self): - self._running = False - - def wait(self): - return self.done.wait() - - -class FixedIntervalLoopingCall(LoopingCallBase): - """A fixed interval looping call.""" - - def start(self, interval, initial_delay=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - start = _ts() - self.f(*self.args, **self.kw) - end = _ts() - if not self._running: - break - delay = end - start - interval - if delay > 0: - LOG.warning(_LW('task %(func_name)r run outlasted ' - 'interval by %(delay).2f sec'), - {'func_name': self.f, 'delay': delay}) - greenthread.sleep(-delay if delay < 0 else 0) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in fixed duration looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn_n(_inner) - return self.done - - -class DynamicLoopingCall(LoopingCallBase): - """A looping call which sleeps until the next known event. - - The function called should return how long to sleep for before being - called again. 
- """ - - def start(self, initial_delay=None, periodic_interval_max=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - idle = self.f(*self.args, **self.kw) - if not self._running: - break - - if periodic_interval_max is not None: - idle = min(idle, periodic_interval_max) - LOG.debug('Dynamic looping call %(func_name)r sleeping ' - 'for %(idle).02f seconds', - {'func_name': self.f, 'idle': idle}) - greenthread.sleep(idle) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in dynamic looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn(_inner) - return self.done diff --git a/nova/openstack/common/periodic_task.py b/nova/openstack/common/periodic_task.py deleted file mode 100644 index 14f9fefcc8f0..000000000000 --- a/nova/openstack/common/periodic_task.py +++ /dev/null @@ -1,232 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import logging -import random -import time - -from oslo_config import cfg -import six - -from nova.openstack.common._i18n import _, _LE, _LI - - -periodic_opts = [ - cfg.BoolOpt('run_external_periodic_tasks', - default=True, - help='Some periodic tasks can be run in a separate process. ' - 'Should we run them here?'), -] - -CONF = cfg.CONF -CONF.register_opts(periodic_opts) - -LOG = logging.getLogger(__name__) - -DEFAULT_INTERVAL = 60.0 - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(None, copy.deepcopy(periodic_opts))] - - -class InvalidPeriodicTaskArg(Exception): - message = _("Unexpected argument for periodic task creation: %(arg)s.") - - -def periodic_task(*args, **kwargs): - """Decorator to indicate that a method is a periodic task. - - This decorator can be used in two ways: - - 1. Without arguments '@periodic_task', this will be run on the default - interval of 60 seconds. - - 2. With arguments: - @periodic_task(spacing=N [, run_immediately=[True|False]] - [, name=[None|"string"]) - this will be run on approximately every N seconds. If this number is - negative the periodic task will be disabled. If the run_immediately - argument is provided and has a value of 'True', the first run of the - task will be shortly after task scheduler starts. If - run_immediately is omitted or set to 'False', the first time the - task runs will be approximately N seconds after the task scheduler - starts. If name is not provided, __name__ of function is used. 
- """ - def decorator(f): - # Test for old style invocation - if 'ticks_between_runs' in kwargs: - raise InvalidPeriodicTaskArg(arg='ticks_between_runs') - - # Control if run at all - f._periodic_task = True - f._periodic_external_ok = kwargs.pop('external_process_ok', False) - if f._periodic_external_ok and not CONF.run_external_periodic_tasks: - f._periodic_enabled = False - else: - f._periodic_enabled = kwargs.pop('enabled', True) - f._periodic_name = kwargs.pop('name', f.__name__) - - # Control frequency - f._periodic_spacing = kwargs.pop('spacing', 0) - f._periodic_immediate = kwargs.pop('run_immediately', False) - if f._periodic_immediate: - f._periodic_last_run = None - else: - f._periodic_last_run = time.time() - return f - - # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parenthesis. - # - # In the 'with-parenthesis' case (with kwargs present), this function needs - # to return a decorator function since the interpreter will invoke it like: - # - # periodic_task(*args, **kwargs)(f) - # - # In the 'without-parenthesis' case, the original function will be passed - # in as the first argument, like: - # - # periodic_task(f) - if kwargs: - return decorator - else: - return decorator(args[0]) - - -class _PeriodicTasksMeta(type): - def _add_periodic_task(cls, task): - """Add a periodic task to the list of periodic tasks. - - The task should already be decorated by @periodic_task. - - :return: whether task was actually enabled - """ - name = task._periodic_name - - if task._periodic_spacing < 0: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'its interval is negative'), - {'task': name}) - return False - if not task._periodic_enabled: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'it is disabled'), - {'task': name}) - return False - - # A periodic spacing of zero indicates that this task should - # be run on the default interval to avoid running too - # frequently. - if task._periodic_spacing == 0: - task._periodic_spacing = DEFAULT_INTERVAL - - cls._periodic_tasks.append((name, task)) - cls._periodic_spacing[name] = task._periodic_spacing - return True - - def __init__(cls, names, bases, dict_): - """Metaclass that allows us to collect decorated periodic tasks.""" - super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) - - # NOTE(sirp): if the attribute is not present then we must be the base - # class, so, go ahead an initialize it. If the attribute is present, - # then we're a subclass so make a copy of it so we don't step on our - # parent's toes. - try: - cls._periodic_tasks = cls._periodic_tasks[:] - except AttributeError: - cls._periodic_tasks = [] - - try: - cls._periodic_spacing = cls._periodic_spacing.copy() - except AttributeError: - cls._periodic_spacing = {} - - for value in cls.__dict__.values(): - if getattr(value, '_periodic_task', False): - cls._add_periodic_task(value) - - -def _nearest_boundary(last_run, spacing): - """Find nearest boundary which is in the past, which is a multiple of the - spacing with the last run as an offset. - - Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, - 31, 38... - - 0% to 5% of the spacing value will be added to this value to ensure tasks - do not synchronize. This jitter is rounded to the nearest second, this - means that spacings smaller than 20 seconds will not have jitter. 
- """ - current_time = time.time() - if last_run is None: - return current_time - delta = current_time - last_run - offset = delta % spacing - # Add up to 5% jitter - jitter = int(spacing * (random.random() / 20)) - return current_time - offset + jitter - - -@six.add_metaclass(_PeriodicTasksMeta) -class PeriodicTasks(object): - def __init__(self): - super(PeriodicTasks, self).__init__() - self._periodic_last_run = {} - for name, task in self._periodic_tasks: - self._periodic_last_run[name] = task._periodic_last_run - - def add_periodic_task(self, task): - """Add a periodic task to the list of periodic tasks. - - The task should already be decorated by @periodic_task. - """ - if self.__class__._add_periodic_task(task): - self._periodic_last_run[task._periodic_name] = ( - task._periodic_last_run) - - def run_periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - idle_for = DEFAULT_INTERVAL - for task_name, task in self._periodic_tasks: - full_task_name = '.'.join([self.__class__.__name__, task_name]) - - spacing = self._periodic_spacing[task_name] - last_run = self._periodic_last_run[task_name] - - # Check if due, if not skip - idle_for = min(idle_for, spacing) - if last_run is not None: - delta = last_run + spacing - time.time() - if delta > 0: - idle_for = min(idle_for, delta) - continue - - LOG.debug("Running periodic task %(full_task_name)s", - {"full_task_name": full_task_name}) - self._periodic_last_run[task_name] = _nearest_boundary( - last_run, spacing) - - try: - task(self, context) - except Exception: - if raise_on_error: - raise - LOG.exception(_LE("Error during %(full_task_name)s"), - {"full_task_name": full_task_name}) - time.sleep(0) - - return idle_for diff --git a/nova/openstack/common/service.py b/nova/openstack/common/service.py deleted file mode 100644 index 126ac6c215a4..000000000000 --- a/nova/openstack/common/service.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Generic Node base class for all workers that run on hosts.""" - -import errno -import io -import logging -import os -import random -import signal -import sys -import time - -import eventlet -from eventlet import event -from oslo_config import cfg - -from nova.openstack.common import eventlet_backdoor -from nova.openstack.common._i18n import _LE, _LI, _LW -from nova.openstack.common import systemd -from nova.openstack.common import threadgroup - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _sighup_supported(): - return hasattr(signal, 'SIGHUP') - - -def _is_daemon(): - # The process group for a foreground process will match the - # process group of the controlling terminal. 
If those values do - # not match, or ioctl() fails on the stdout file handle, we assume - # the process is running in the background as a daemon. - # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics - try: - is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) - except io.UnsupportedOperation: - # Could not get the fileno for stdout, so we must be a daemon. - is_daemon = True - except OSError as err: - if err.errno == errno.ENOTTY: - # Assume we are a daemon because there is no terminal. - is_daemon = True - else: - raise - return is_daemon - - -def _is_sighup_and_daemon(signo): - if not (_sighup_supported() and signo == signal.SIGHUP): - # Avoid checking if we are a daemon, because the signal isn't - # SIGHUP. - return False - return _is_daemon() - - -def _signo_to_signame(signo): - signals = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'} - if _sighup_supported(): - signals[signal.SIGHUP] = 'SIGHUP' - return signals[signo] - - -def _set_signals_handler(handler): - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - if _sighup_supported(): - signal.signal(signal.SIGHUP, handler) - - -class Launcher(object): - """Launch one or more services and wait for them to complete.""" - - def __init__(self): - """Initialize the service launcher. - - :returns: None - - """ - self.services = Services() - self.backdoor_port = eventlet_backdoor.initialize_if_enabled() - - def launch_service(self, service): - """Load and start the given service. - - :param service: The service you would like to start. - :returns: None - - """ - service.backdoor_port = self.backdoor_port - self.services.add(service) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - self.services.stop() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - self.services.wait() - - def restart(self): - """Reload config files and restart service. 
- - :returns: None - - """ - cfg.CONF.reload_config_files() - self.services.restart() - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -class ServiceLauncher(Launcher): - def _handle_signal(self, signo, frame): - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - raise SignalExit(signo) - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _wait_for_exit_or_signal(self, ready_callback=None): - status = None - signo = 0 - - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, logging.DEBUG) - - try: - if ready_callback: - ready_callback() - super(ServiceLauncher, self).wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - finally: - self.stop() - - return status, signo - - def wait(self, ready_callback=None): - systemd.notify_once() - while True: - self.handle_signal() - status, signo = self._wait_for_exit_or_signal(ready_callback) - if not _is_sighup_and_daemon(signo): - return status - self.restart() - - -class ServiceWrapper(object): - def __init__(self, service, workers): - self.service = service - self.workers = workers - self.children = set() - self.forktimes = [] - - -class ProcessLauncher(object): - _signal_handlers_set = set() - - @classmethod - def _handle_class_signals(cls, *args, **kwargs): - for handler in cls._signal_handlers_set: - handler(*args, **kwargs) - - def __init__(self, wait_interval=0.01): - """Constructor. - - :param wait_interval: The interval to sleep for between checks - of child process exit. - """ - self.children = {} - self.sigcaught = None - self.running = True - self.wait_interval = wait_interval - rfd, self.writepipe = os.pipe() - self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') - self.handle_signal() - - def handle_signal(self): - self._signal_handlers_set.add(self._handle_signal) - _set_signals_handler(self._handle_class_signals) - - def _handle_signal(self, signo, frame): - self.sigcaught = signo - self.running = False - - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - - def _pipe_watcher(self): - # This will block until the write end is closed when the parent - # dies unexpectedly - self.readpipe.read(1) - - LOG.info(_LI('Parent process has died unexpectedly, exiting')) - - sys.exit(1) - - def _child_process_handle_signal(self): - # Setup child signal handlers differently - def _sighup(*args): - signal.signal(signal.SIGHUP, signal.SIG_DFL) - raise SignalExit(signal.SIGHUP) - - # Parent signals with SIGTERM when it wants us to go away. - signal.signal(signal.SIGTERM, signal.SIG_DFL) - if _sighup_supported(): - signal.signal(signal.SIGHUP, _sighup) - # Block SIGINT and let the parent send us a SIGTERM - signal.signal(signal.SIGINT, signal.SIG_IGN) - - def _child_wait_for_exit_or_signal(self, launcher): - status = 0 - signo = 0 - - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. 
- try: - launcher.wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Child caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_LE('Unhandled exception')) - status = 2 - finally: - launcher.stop() - - return status, signo - - def _child_process(self, service): - self._child_process_handle_signal() - - # Reopen the eventlet hub to make sure we don't share an epoll - # fd with parent and/or siblings, which would be bad - eventlet.hubs.use_hub() - - # Close write to ensure only parent has it open - os.close(self.writepipe) - # Create greenthread to watch for parent to close pipe - eventlet.spawn_n(self._pipe_watcher) - - # Reseed random number generator - random.seed() - - launcher = Launcher() - launcher.launch_service(service) - return launcher - - def _start_child(self, wrap): - if len(wrap.forktimes) > wrap.workers: - # Limit ourselves to one process a second (over the period of - # number of workers * 1 second). This will allow workers to - # start up quickly but ensure we don't fork off children that - # die instantly too quickly. - if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_LI('Forking too fast, sleeping')) - time.sleep(1) - - wrap.forktimes.pop(0) - - wrap.forktimes.append(time.time()) - - pid = os.fork() - if pid == 0: - launcher = self._child_process(wrap.service) - while True: - self._child_process_handle_signal() - status, signo = self._child_wait_for_exit_or_signal(launcher) - if not _is_sighup_and_daemon(signo): - break - launcher.restart() - - os._exit(status) - - LOG.info(_LI('Started child %d'), pid) - - wrap.children.add(pid) - self.children[pid] = wrap - - return pid - - def launch_service(self, service, workers=1): - wrap = ServiceWrapper(service, workers) - - LOG.info(_LI('Starting %d workers'), wrap.workers) - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def _wait_child(self): - try: - # Don't block if no child processes have exited - pid, status = os.waitpid(0, os.WNOHANG) - if not pid: - return None - except OSError as exc: - if exc.errno not in (errno.EINTR, errno.ECHILD): - raise - return None - - if os.WIFSIGNALED(status): - sig = os.WTERMSIG(status) - LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), - dict(pid=pid, sig=sig)) - else: - code = os.WEXITSTATUS(status) - LOG.info(_LI('Child %(pid)s exited with status %(code)d'), - dict(pid=pid, code=code)) - - if pid not in self.children: - LOG.warning(_LW('pid %d not in child list'), pid) - return None - - wrap = self.children.pop(pid) - wrap.children.remove(pid) - return wrap - - def _respawn_children(self): - while self.running: - wrap = self._wait_child() - if not wrap: - # Yield to other threads if no children have exited - # Sleep for a short time to avoid excessive CPU usage - # (see bug #1095346) - eventlet.greenthread.sleep(self.wait_interval) - continue - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def wait(self): - """Loop waiting on children to die and respawning as necessary.""" - - systemd.notify_once() - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, logging.DEBUG) - - try: - while True: - self.handle_signal() - self._respawn_children() - # No signal means that stop was called. Don't clean up here. 
- if not self.sigcaught: - return - - signame = _signo_to_signame(self.sigcaught) - LOG.info(_LI('Caught %s, stopping children'), signame) - if not _is_sighup_and_daemon(self.sigcaught): - break - - cfg.CONF.reload_config_files() - for service in set( - [wrap.service for wrap in self.children.values()]): - service.reset() - - for pid in self.children: - os.kill(pid, signal.SIGHUP) - - self.running = True - self.sigcaught = None - except eventlet.greenlet.GreenletExit: - LOG.info(_LI("Wait called after thread killed. Cleaning up.")) - - self.stop() - - def stop(self): - """Terminate child processes and wait on each.""" - self.running = False - for pid in self.children: - try: - os.kill(pid, signal.SIGTERM) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - - # Wait for children to die - if self.children: - LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) - while self.children: - self._wait_child() - - -class Service(object): - """Service object for binaries running on hosts.""" - - def __init__(self, threads=1000): - self.tg = threadgroup.ThreadGroup(threads) - - # signal that the service is done shutting itself down: - self._done = event.Event() - - def reset(self): - # NOTE(Fengqian): docs for Event.reset() recommend against using it - self._done = event.Event() - - def start(self): - pass - - def stop(self, graceful=False): - self.tg.stop(graceful) - self.tg.wait() - # Signal that service cleanup is done: - if not self._done.ready(): - self._done.send() - - def wait(self): - self._done.wait() - - -class Services(object): - - def __init__(self): - self.services = [] - self.tg = threadgroup.ThreadGroup() - self.done = event.Event() - - def add(self, service): - self.services.append(service) - self.tg.add_thread(self.run_service, service, self.done) - - def stop(self): - # wait for graceful shutdown of services: - for service in self.services: - service.stop() - service.wait() - - # Each service has performed cleanup, now signal that the run_service - # wrapper threads can now die: - if not self.done.ready(): - self.done.send() - - # reap threads: - self.tg.stop() - - def wait(self): - self.tg.wait() - - def restart(self): - self.stop() - self.done = event.Event() - for restart_service in self.services: - restart_service.reset() - self.tg.add_thread(self.run_service, restart_service, self.done) - - @staticmethod - def run_service(service, done): - """Service start wrapper. - - :param service: service to run - :param done: event to wait on until a shutdown is triggered - :returns: None - - """ - service.start() - done.wait() - - -def launch(service, workers=1): - if workers is None or workers == 1: - launcher = ServiceLauncher() - launcher.launch_service(service) - else: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - - return launcher diff --git a/nova/openstack/common/sslutils.py b/nova/openstack/common/sslutils.py deleted file mode 100644 index 86c6f0af94e7..000000000000 --- a/nova/openstack/common/sslutils.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import os -import ssl - -from oslo_config import cfg - -from nova.openstack.common._i18n import _ - - -ssl_opts = [ - cfg.StrOpt('ca_file', - help="CA certificate file to use to verify " - "connecting clients."), - cfg.StrOpt('cert_file', - help="Certificate file to use when starting " - "the server securely."), - cfg.StrOpt('key_file', - help="Private key file to use when starting " - "the server securely."), -] - -CONF = cfg.CONF -config_section = 'ssl' -CONF.register_opts(ssl_opts, config_section) - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(config_section, copy.deepcopy(ssl_opts))] - - -def is_enabled(): - cert_file = CONF.ssl.cert_file - key_file = CONF.ssl.key_file - ca_file = CONF.ssl.ca_file - use_ssl = cert_file or key_file - - if cert_file and not os.path.exists(cert_file): - raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) - - if ca_file and not os.path.exists(ca_file): - raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) - - if key_file and not os.path.exists(key_file): - raise RuntimeError(_("Unable to find key_file : %s") % key_file) - - if use_ssl and (not cert_file or not key_file): - raise RuntimeError(_("When running server in SSL mode, you must " - "specify both a cert_file and key_file " - "option value in your configuration file")) - - return use_ssl - - -def wrap(sock): - ssl_kwargs = { - 'server_side': True, - 'certfile': CONF.ssl.cert_file, - 'keyfile': CONF.ssl.key_file, - 'cert_reqs': ssl.CERT_NONE, - } - - if CONF.ssl.ca_file: - ssl_kwargs['ca_certs'] = CONF.ssl.ca_file - ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED - - return ssl.wrap_socket(sock, **ssl_kwargs) diff --git a/nova/openstack/common/systemd.py b/nova/openstack/common/systemd.py deleted file mode 100644 index 36243b342abd..000000000000 --- a/nova/openstack/common/systemd.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2012-2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helper module for systemd service readiness notification. -""" - -import logging -import os -import socket -import sys - - -LOG = logging.getLogger(__name__) - - -def _abstractify(socket_name): - if socket_name.startswith('@'): - # abstract namespace socket - socket_name = '\0%s' % socket_name[1:] - return socket_name - - -def _sd_notify(unset_env, msg): - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - try: - sock.connect(_abstractify(notify_socket)) - sock.sendall(msg) - if unset_env: - del os.environ['NOTIFY_SOCKET'] - except EnvironmentError: - LOG.debug("Systemd notification failed", exc_info=True) - finally: - sock.close() - - -def notify(): - """Send notification to Systemd that service is ready. 
- - For details see - http://www.freedesktop.org/software/systemd/man/sd_notify.html - """ - _sd_notify(False, 'READY=1') - - -def notify_once(): - """Send notification once to Systemd that service is ready. - - Systemd sets NOTIFY_SOCKET environment variable with the name of the - socket listening for notifications from services. - This method removes the NOTIFY_SOCKET environment variable to ensure - notification is sent only once. - """ - _sd_notify(True, 'READY=1') - - -def onready(notify_socket, timeout): - """Wait for systemd style notification on the socket. - - :param notify_socket: local socket address - :type notify_socket: string - :param timeout: socket timeout - :type timeout: float - :returns: 0 service ready - 1 service not ready - 2 timeout occurred - """ - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - sock.settimeout(timeout) - sock.bind(_abstractify(notify_socket)) - try: - msg = sock.recv(512) - except socket.timeout: - return 2 - finally: - sock.close() - if 'READY=1' in msg: - return 0 - else: - return 1 - - -if __name__ == '__main__': - # simple CLI for testing - if len(sys.argv) == 1: - notify() - elif len(sys.argv) >= 2: - timeout = float(sys.argv[1]) - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - retval = onready(notify_socket, timeout) - sys.exit(retval) diff --git a/nova/openstack/common/threadgroup.py b/nova/openstack/common/threadgroup.py deleted file mode 100644 index a2119f9a769a..000000000000 --- a/nova/openstack/common/threadgroup.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import threading - -import eventlet -from eventlet import greenpool - -from nova.openstack.common._i18n import _LE -from nova.openstack.common import loopingcall - - -LOG = logging.getLogger(__name__) - - -def _thread_done(gt, *args, **kwargs): - """Callback function to be passed to GreenThread.link() when we spawn() - Calls the :class:`ThreadGroup` to notify if. - - """ - kwargs['group'].thread_done(kwargs['thread']) - - -class Thread(object): - """Wrapper around a greenthread, that holds a reference to the - :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when - it has done so it can be removed from the threads list. - """ - def __init__(self, thread, group): - self.thread = thread - self.thread.link(_thread_done, group=group, thread=self) - - def stop(self): - self.thread.kill() - - def wait(self): - return self.thread.wait() - - def link(self, func, *args, **kwargs): - self.thread.link(func, *args, **kwargs) - - -class ThreadGroup(object): - """The point of the ThreadGroup class is to: - - * keep track of timers and greenthreads (making it easier to stop them - when need be). - * provide an easy API to add timers. 
- """ - def __init__(self, thread_pool_size=10): - self.pool = greenpool.GreenPool(thread_pool_size) - self.threads = [] - self.timers = [] - - def add_dynamic_timer(self, callback, initial_delay=None, - periodic_interval_max=None, *args, **kwargs): - timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) - timer.start(initial_delay=initial_delay, - periodic_interval_max=periodic_interval_max) - self.timers.append(timer) - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) - pulse.start(interval=interval, - initial_delay=initial_delay) - self.timers.append(pulse) - - def add_thread(self, callback, *args, **kwargs): - gt = self.pool.spawn(callback, *args, **kwargs) - th = Thread(gt, self) - self.threads.append(th) - return th - - def thread_done(self, thread): - self.threads.remove(thread) - - def _stop_threads(self): - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - # don't kill the current thread. - continue - try: - x.stop() - except eventlet.greenlet.GreenletExit: - pass - except Exception: - LOG.exception(_LE('Error stopping thread.')) - - def stop_timers(self): - for x in self.timers: - try: - x.stop() - except Exception: - LOG.exception(_LE('Error stopping timer.')) - self.timers = [] - - def stop(self, graceful=False): - """stop function has the option of graceful=True/False. - - * In case of graceful=True, wait for all threads to be finished. - Never kill threads. - * In case of graceful=False, kill threads immediately. - """ - self.stop_timers() - if graceful: - # In case of graceful=True, wait for all threads to be - # finished, never kill threads - self.wait() - else: - # In case of graceful=False(Default), kill threads - # immediately - self._stop_threads() - - def wait(self): - for x in self.timers: - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception: - LOG.exception(_LE('Error waiting on ThreadGroup.')) - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - continue - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 9f8928251400..adee62a63fb6 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -23,12 +23,12 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils +from oslo_service import periodic_task from oslo_utils import importutils from nova import exception from nova import manager from nova import objects -from nova.openstack.common import periodic_task from nova import quota diff --git a/nova/service.py b/nova/service.py index e264193cbd0b..24c6a4e66225 100644 --- a/nova/service.py +++ b/nova/service.py @@ -25,6 +25,7 @@ from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging +from oslo_service import service from oslo_utils import importutils from nova import baserpc @@ -35,7 +36,6 @@ from nova import exception from nova.i18n import _, _LE, _LI, _LW from nova import objects from nova.objects import base as objects_base 
-from nova.openstack.common import service from nova import rpc from nova import servicegroup from nova import utils @@ -305,7 +305,7 @@ class Service(service.Service): sys.exit(1) -class WSGIService(object): +class WSGIService(service.Service): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None, use_ssl=False, max_url_len=None): @@ -412,7 +412,7 @@ class WSGIService(object): def process_launcher(): - return service.ProcessLauncher() + return service.ProcessLauncher(CONF) # NOTE(vish): the global launcher is to maintain the existing @@ -426,7 +426,7 @@ def serve(server, workers=None): if _launcher: raise RuntimeError(_('serve() can only be called once')) - _launcher = service.launch(server, workers=workers) + _launcher = service.launch(CONF, server, workers=workers) def wait(): diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py index 285dda96f6c7..214f4a6585bf 100644 --- a/nova/storage/linuxscsi.py +++ b/nova/storage/linuxscsi.py @@ -16,9 +16,9 @@ from oslo_concurrency import processutils from oslo_log import log as logging +from oslo_service import loopingcall from nova.i18n import _LW -from nova.openstack.common import loopingcall from nova import utils import os diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py index 8d7e75631fe8..62e752fa59ed 100644 --- a/nova/tests/unit/image/test_glance.py +++ b/nova/tests/unit/image/test_glance.py @@ -417,8 +417,10 @@ class TestGlanceClientWrapper(test.NoDBTestCase): ) sleep_mock.assert_called_once_with(1) + @mock.patch('oslo_service.sslutils.is_enabled') @mock.patch('glanceclient.Client') - def test_create_glance_client_with_ssl(self, client_mock): + def test_create_glance_client_with_ssl(self, client_mock, + ssl_enable_mock): self.flags(ca_file='foo.cert', cert_file='bar.cert', key_file='wut.key', group='ssl') ctxt = mock.sentinel.ctx diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py index e970380cd228..c3f53617b846 100644 --- a/nova/tests/unit/test_service.py +++ b/nova/tests/unit/test_service.py @@ -24,12 +24,12 @@ import mock from mox3 import mox from oslo_concurrency import processutils from oslo_config import cfg +from oslo_service import service as _service import testtools from nova import exception from nova import manager from nova import objects -from nova.openstack.common import service as _service from nova import rpc from nova import service from nova import test @@ -341,14 +341,16 @@ class TestLauncher(test.NoDBTestCase): def test_launch_app(self, mock_launch): service._launcher = None service.serve(mock.sentinel.service) - mock_launch.assert_called_once_with(mock.sentinel.service, + mock_launch.assert_called_once_with(mock.ANY, + mock.sentinel.service, workers=None) @mock.patch.object(_service, 'launch') def test_launch_app_with_workers(self, mock_launch): service._launcher = None service.serve(mock.sentinel.service, workers=mock.sentinel.workers) - mock_launch.assert_called_once_with(mock.sentinel.service, + mock_launch.assert_called_once_with(mock.ANY, + mock.sentinel.service, workers=mock.sentinel.workers) @mock.patch.object(_service, 'launch') diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py index 28882f54a58a..7cf41d721f30 100644 --- a/nova/tests/unit/virt/ironic/test_driver.py +++ b/nova/tests/unit/virt/ironic/test_driver.py @@ -19,6 +19,7 @@ from ironicclient import exc as ironic_exception import mock from oslo_config import cfg from 
oslo_serialization import jsonutils +from oslo_service import loopingcall from oslo_utils import uuidutils import six @@ -29,7 +30,6 @@ from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova import objects -from nova.openstack.common import loopingcall from nova import test from nova.tests.unit import fake_instance from nova.tests.unit import utils diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 81884ddb05f0..3d2b2164f36f 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -37,6 +37,7 @@ from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils +from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import importutils from oslo_utils import timeutils @@ -59,7 +60,6 @@ from nova import exception from nova.network import model as network_model from nova import objects from nova.openstack.common import fileutils -from nova.openstack.common import loopingcall from nova.pci import manager as pci_manager from nova import test from nova.tests.unit import fake_block_device @@ -8144,7 +8144,7 @@ class LibvirtConnTestCase(test.NoDBTestCase): block_device_info) @mock.patch('nova.openstack.common.fileutils.ensure_tree') - @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall') + @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') @mock.patch('nova.pci.manager.get_instance_pci_devs') @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index d8a142e87972..b40e4fc50f18 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -25,6 +25,7 @@ from eventlet import timeout as etimeout from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging +from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import units @@ -34,7 +35,6 @@ from nova.api.metadata import base as instance_metadata from nova import exception from nova.i18n import _, _LI, _LE, _LW from nova.openstack.common import fileutils -from nova.openstack.common import loopingcall from nova import utils from nova.virt import configdrive from nova.virt import hardware diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py index 84c69ab5a51e..a3c92e42ca32 100644 --- a/nova/virt/ironic/driver.py +++ b/nova/virt/ironic/driver.py @@ -30,6 +30,7 @@ import time from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils +from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import importutils import six @@ -48,7 +49,6 @@ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW from nova import objects -from nova.openstack.common import loopingcall from nova.virt import configdrive from nova.virt import driver as virt_driver from nova.virt import firewall diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index b7f97e488579..b50a4d161cf4 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -47,6 +47,7 @@ from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import 
log as logging from oslo_serialization import jsonutils +from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils @@ -75,7 +76,6 @@ from nova import image from nova.network import model as network_model from nova import objects from nova.openstack.common import fileutils -from nova.openstack.common import loopingcall from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils from nova import utils diff --git a/nova/virt/libvirt/rbd_utils.py b/nova/virt/libvirt/rbd_utils.py index 715941b127bf..1f9afe4c967b 100644 --- a/nova/virt/libvirt/rbd_utils.py +++ b/nova/virt/libvirt/rbd_utils.py @@ -25,6 +25,7 @@ except ImportError: from oslo_log import log as logging from oslo_serialization import jsonutils +from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units @@ -32,7 +33,6 @@ from nova import exception from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LW -from nova.openstack.common import loopingcall from nova import utils LOG = logging.getLogger(__name__) diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index b4a05760e261..6217b176bc2d 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -26,6 +26,7 @@ import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging +from oslo_service import loopingcall from oslo_utils import strutils import six from six.moves import urllib @@ -37,7 +38,6 @@ from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW -from nova.openstack.common import loopingcall from nova import paths from nova.storage import linuxscsi from nova import utils diff --git a/nova/wsgi.py b/nova/wsgi.py index fc3141f2b732..30a41bb8db89 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,7 @@ import eventlet.wsgi import greenlet from oslo_config import cfg from oslo_log import log as logging +from oslo_service import service from oslo_utils import excutils from paste import deploy import routes.middleware @@ -86,7 +87,7 @@ CONF.register_opts(wsgi_opts) LOG = logging.getLogger(__name__) -class Server(object): +class Server(service.ServiceBase): """Server class to manage a WSGI server, serving a WSGI application.""" default_pool_size = CONF.wsgi_default_pool_size diff --git a/openstack-common.conf b/openstack-common.conf index a586529190dc..9866d490346f 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -4,9 +4,7 @@ module=cliutils module=fileutils module=imageutils -module=loopingcall module=memorycache -module=periodic_task module=report module=report.generators module=report.models @@ -14,8 +12,6 @@ module=report.views module=report.views.json module=report.views.text module=report.views.xml -module=service -module=sslutils # The base module to hold the copy of openstack.common base=nova diff --git a/requirements.txt b/requirements.txt index f35882fb5bc1..16f1f7f5c3d7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -43,6 +43,7 @@ oslo.db>=1.12.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.messaging!=1.12.0,>=1.8.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 +oslo.service>=0.1.0 # Apache-2.0 rfc3986>=0.2.0 # Apache-2.0 oslo.middleware!=2.0.0,>=1.2.0 # Apache-2.0 psutil<2.0.0,>=1.1.1 diff --git a/setup.cfg b/setup.cfg index e0327553fc8a..62194413adf5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,11 +34,8 @@ oslo.config.opts = nova.network = 
nova.network.opts:list_opts nova.scheduler = nova.scheduler.opts:list_opts nova.virt = nova.virt.opts:list_opts - nova.openstack.common.eventlet_backdoor = nova.openstack.common.eventlet_backdoor:list_opts nova.openstack.common.memorycache = nova.openstack.common.memorycache:list_opts - nova.openstack.common.periodic_task = nova.openstack.common.periodic_task:list_opts nova.openstack.common.policy = nova.openstack.common.policy:list_opts - nova.openstack.common.sslutils = nova.openstack.common.sslutils:list_opts nova.openstack.common.versionutils = nova.openstack.common.versionutils:list_opts nova.compute.resources =
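
For reference, a minimal sketch of the calling conventions this patch moves Nova onto. It is illustrative only and not part of the change: DummyManager, DummyService and the spacing/interval values are made-up names, while the oslo.service entry points (periodic_task.PeriodicTasks, sslutils.is_enabled, loopingcall.FixedIntervalLoopingCall, service.launch) are the ones the diff switches to. The key differences from the removed incubator copies are that PeriodicTasks and the launcher helpers take a ConfigOpts object explicitly, and sslutils.is_enabled(CONF) replaces CONF.import_group('ssl', ...).

# Illustrative sketch only -- DummyManager/DummyService and the values
# below are made up; the oslo.service APIs are the ones used in the patch.
from oslo_config import cfg
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_service import service
from oslo_service import sslutils

CONF = cfg.CONF


class DummyManager(periodic_task.PeriodicTasks):
    def __init__(self):
        # Unlike the removed incubator copy, oslo.service's PeriodicTasks
        # requires the ConfigOpts object to be passed in explicitly, which
        # is what the new nova.manager.PeriodicTasks shim does.
        super(DummyManager, self).__init__(CONF)

    @periodic_task.periodic_task(spacing=60)
    def _heartbeat(self, context):
        pass


class DummyService(service.Service):
    def start(self):
        # Same interface as the old nova.openstack.common.loopingcall.
        self.timer = loopingcall.FixedIntervalLoopingCall(lambda: None)
        self.timer.start(interval=10)


if __name__ == '__main__':
    CONF([], project='example')
    # is_enabled(CONF) registers the [ssl] options as a side effect, which
    # is why it replaces CONF.import_group('ssl', ...) in the patch.
    use_ssl = sslutils.is_enabled(CONF)
    manager = DummyManager()
    manager.run_periodic_tasks(context=None)
    # launch() and ProcessLauncher() now take CONF as their first argument.
    launcher = service.launch(CONF, DummyService(), workers=1)
    launcher.wait()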