Merge "Remove translation of log messages"
commit 09ed3b5626
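Every hunk below applies the same mechanical change: the oslo.i18n lazy-translation markers _LW, _LE, and _LI are dropped from log calls, the plain message string is passed straight to the logger, and the _() marker is kept only for user-facing exception messages. A minimal before/after sketch of the pattern (an illustrative fragment assembled from the hunks below, not a line of the diff itself):

    from oslo_log import log as logging
    from nova.i18n import _LW  # lazy-translation marker this commit removes

    LOG = logging.getLogger(__name__)
    hostname = 'a' * 70  # stand-in value so the sketch runs on its own

    # old style: the message is wrapped in a translation marker
    LOG.warning(_LW('hostname %s too long, truncating.'), hostname)

    # new style: the plain string, with the same positional arguments
    LOG.warning('hostname %s too long, truncating.', hostname)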
@@ -19,7 +19,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils

 import nova.conf
-from nova.i18n import _LW

 LOG = logging.getLogger(__name__)
 CONF = nova.conf.CONF
@@ -30,12 +29,12 @@ api_class = None
 try:
     api_class = CONF.key_manager.api_class
 except cfg.NoSuchOptError:
-    LOG.warning(_LW("key_manager.api_class is not set, will use deprecated "
-                    "option keymgr.api_class if set"))
+    LOG.warning("key_manager.api_class is not set, will use deprecated "
+                "option keymgr.api_class if set")
     try:
         api_class = CONF.keymgr.api_class
     except cfg.NoSuchOptError:
-        LOG.warning(_LW("keymgr.api_class is not set"))
+        LOG.warning("keymgr.api_class is not set")

 deprecated_barbican = 'nova.keymgr.barbican.BarbicanKeyManager'
 barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager'
@@ -45,8 +44,8 @@ castellan_mock = ('castellan.tests.unit.key_manager.mock_key_manager.'


 def log_deprecated_warning(deprecated, castellan):
-    LOG.warning(_LW("key manager api_class set to use deprecated option "
-                    "%(deprecated)s, using %(castellan)s instead"),
+    LOG.warning("key manager api_class set to use deprecated option "
+                "%(deprecated)s, using %(castellan)s instead",
                 {'deprecated': deprecated, 'castellan': castellan})

 if api_class == deprecated_barbican:
@@ -58,7 +57,7 @@ elif api_class == deprecated_mock:
 elif api_class is None:
     # TODO(kfarr): key_manager.api_class should be set in DevStack, and this
     # block can be removed
-    LOG.warning(_LW("key manager not set, using insecure default %s"),
+    LOG.warning("key manager not set, using insecure default %s",
                 castellan_mock)
     api_class = castellan_mock

@@ -39,7 +39,7 @@ from oslo_log import log as logging

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _


 CONF = nova.conf.CONF
@@ -55,8 +55,8 @@ class ConfKeyManager(key_manager.KeyManager):
     """

     def __init__(self, configuration):
-        LOG.warning(_LW('This key manager is insecure and is not recommended '
-                        'for production deployments'))
+        LOG.warning('This key manager is insecure and is not recommended '
+                    'for production deployments')
         super(ConfKeyManager, self).__init__(configuration)

         self.key_id = '00000000-0000-0000-0000-000000000000'
@@ -128,4 +128,4 @@ class ConfKeyManager(key_manager.KeyManager):
             raise exception.KeyManagerError(
                 reason="cannot delete non-existent key")

-        LOG.warning(_LW("Not deleting key %s"), managed_object_id)
+        LOG.warning("Not deleting key %s", managed_object_id)

@@ -21,7 +21,6 @@ from oslo_log import log as logging
 from oslo_utils import strutils

 from nova import exception
-from nova.i18n import _LI
 from nova.network import base_api
 from nova.network import floating_ips
 from nova.network import model as network_model
@@ -182,8 +181,8 @@ class API(base_api.NetworkAPI):
         if orig_instance_uuid:
             msg_dict = dict(address=floating_address,
                             instance_id=orig_instance_uuid)
-            LOG.info(_LI('re-assign floating IP %(address)s from '
-                         'instance %(instance_id)s'), msg_dict)
+            LOG.info('re-assign floating IP %(address)s from '
+                     'instance %(instance_id)s', msg_dict)
             orig_instance = objects.Instance.get_by_uuid(
                 context, orig_instance_uuid, expected_attrs=['flavor'])

@@ -22,7 +22,7 @@ from oslo_utils import excutils

 from nova.db import base
 from nova import hooks
-from nova.i18n import _, _LE
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import objects

@@ -56,7 +56,7 @@ def update_instance_cache_with_nw_info(impl, context, instance,
             instance.info_cache = ic
     except Exception:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE('Failed storing info cache'), instance=instance)
+            LOG.exception('Failed storing info cache', instance=instance)


 def refresh_cache(f):

@@ -18,7 +18,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils

 import nova.conf
-from nova.i18n import _LE, _LI


 CONF = nova.conf.CONF
@@ -30,9 +29,9 @@ def load_network_driver(network_driver=None):
         network_driver = CONF.network_driver

     if not network_driver:
-        LOG.error(_LE("Network driver option required, but not specified"))
+        LOG.error("Network driver option required, but not specified")
         sys.exit(1)

-    LOG.info(_LI("Loading network driver '%s'"), network_driver)
+    LOG.info("Loading network driver '%s'", network_driver)

     return importutils.import_module(network_driver)

@@ -27,7 +27,6 @@ import nova.conf
 from nova import context
 from nova.db import base
 from nova import exception
-from nova.i18n import _LE, _LI, _LW
 from nova.network import rpcapi as network_rpcapi
 from nova import objects
 from nova import rpc
@@ -149,7 +148,7 @@ class FloatingIP(object):
                     address,
                     affect_auto_assigned=True)
             except exception.FloatingIpNotAssociated:
-                LOG.info(_LI("Floating IP %s is not associated. Ignore."),
+                LOG.info("Floating IP %s is not associated. Ignore.",
                          address)
             # deallocate if auto_assigned
             if floating_ip.auto_assigned:
@@ -168,12 +167,12 @@ class FloatingIP(object):

         if floating_ip.project_id != context.project_id:
             if floating_ip.project_id is None:
-                LOG.warning(_LW('Address |%(address)s| is not allocated'),
+                LOG.warning('Address |%(address)s| is not allocated',
                             {'address': floating_ip.address})
                 raise exception.Forbidden()
             else:
-                LOG.warning(_LW('Address |%(address)s| is not allocated '
-                                'to your project |%(project)s|'),
+                LOG.warning('Address |%(address)s| is not allocated '
+                            'to your project |%(project)s|',
                             {'address': floating_ip.address,
                              'project': context.project_id})
                 raise exception.Forbidden()
@@ -206,8 +205,8 @@ class FloatingIP(object):
             objects.Quotas.check_deltas(context, {'floating_ips': 1},
                                         project_id)
         except exception.OverQuota:
-            LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
-                            "floating IP"), context.project_id)
+            LOG.warning("Quota exceeded for %s, tried to allocate "
+                        "floating IP", context.project_id)
             raise exception.FloatingIpLimitExceeded()

         floating_ip = objects.FloatingIP.allocate_address(
@@ -223,8 +222,8 @@ class FloatingIP(object):
                                             project_id)
             except exception.OverQuota:
                 objects.FloatingIP.deallocate(context, floating_ip.address)
-                LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
-                                "floating IP"), context.project_id)
+                LOG.warning("Quota exceeded for %s, tried to allocate "
+                            "floating IP", context.project_id)
                 raise exception.FloatingIpLimitExceeded()

         payload = dict(project_id=project_id, floating_ip=floating_ip)
@@ -344,12 +343,12 @@ class FloatingIP(object):
                     objects.FloatingIP.disassociate(context,
                                                     floating_address)
                 except Exception:
-                    LOG.warning(_LW('Failed to disassociated floating '
-                                    'address: %s'), floating_address)
+                    LOG.warning('Failed to disassociated floating '
+                                'address: %s', floating_address)
                     pass
             if "Cannot find device" in six.text_type(e):
                 try:
-                    LOG.error(_LE('Interface %s not found'), interface)
+                    LOG.error('Interface %s not found', interface)
                 except Exception:
                     pass
                 raise exception.NoFloatingIpInterface(
@@ -503,15 +502,14 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return

-        LOG.info(_LI("Starting migration network for instance %s"),
-                 instance_uuid)
+        LOG.info("Starting migration network for instance %s", instance_uuid)
         for address in floating_addresses:
             floating_ip = objects.FloatingIP.get_by_address(context, address)

             if self._is_stale_floating_ip_address(context, floating_ip):
-                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
+                LOG.warning("Floating IP address |%(address)s| no longer "
                             "belongs to instance %(instance_uuid)s. "
-                            "Will not migrate it "),
+                            "Will not migrate it ",
                             {'address': address,
                              'instance_uuid': instance_uuid})
                 continue
@@ -539,16 +537,15 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return

-        LOG.info(_LI("Finishing migration network for instance %s"),
-                 instance_uuid)
+        LOG.info("Finishing migration network for instance %s", instance_uuid)

         for address in floating_addresses:
             floating_ip = objects.FloatingIP.get_by_address(context, address)

             if self._is_stale_floating_ip_address(context, floating_ip):
-                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
+                LOG.warning("Floating IP address |%(address)s| no longer "
                             "belongs to instance %(instance_uuid)s. "
-                            "Will not setup it."),
+                            "Will not setup it.",
                             {'address': address,
                              'instance_uuid': instance_uuid})
                 continue
@@ -589,10 +586,10 @@ class FloatingIP(object):
             if domain_entry:
                 domains.append(domain_entry)
             else:
-                LOG.warning(_LW('Database inconsistency: DNS domain |%s| is '
+                LOG.warning('Database inconsistency: DNS domain |%s| is '
                             'registered in the Nova db but not visible to '
                             'either the floating or instance DNS driver. '
-                            'It will be ignored.'), dns_domain.domain)
+                            'It will be ignored.', dns_domain.domain)

         return domains

@@ -629,8 +626,8 @@ class FloatingIP(object):
         try:
             self.instance_dns_manager.create_domain(domain)
         except exception.FloatingIpDNSExists:
-            LOG.warning(_LW('Domain |%(domain)s| already exists, '
-                            'changing zone to |%(av_zone)s|.'),
+            LOG.warning('Domain |%(domain)s| already exists, '
+                        'changing zone to |%(av_zone)s|.',
                         {'domain': domain, 'av_zone': av_zone})

     def create_public_dns_domain(self, context, domain, project):
@@ -638,8 +635,8 @@ class FloatingIP(object):
         try:
             self.floating_dns_manager.create_domain(domain)
         except exception.FloatingIpDNSExists:
-            LOG.warning(_LW('Domain |%(domain)s| already exists, '
-                            'changing project to |%(project)s|.'),
+            LOG.warning('Domain |%(domain)s| already exists, '
+                        'changing project to |%(project)s|.',
                         {'domain': domain, 'project': project})

     def delete_dns_domain(self, context, domain):

@@ -24,7 +24,7 @@ from oslo_log import log as logging

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova.network import dns_driver
 from nova import utils

@@ -65,8 +65,8 @@ class DNSEntry(object):
         if not entry:
             return None
         if len(entry) > 1:
-            LOG.warning(_LW("Found multiple matches for domain "
-                            "%(domain)s.\n%(entry)s"),
+            LOG.warning("Found multiple matches for domain "
+                        "%(domain)s.\n%(entry)s",
                         domain, entry)
         return entry[0]

@@ -92,8 +92,8 @@ class DNSEntry(object):
         if name.endswith(z):
             dequalified = name[0:name.rfind(z)]
         else:
-            LOG.warning(_LW("Unable to dequalify. %(name)s is not in "
-                            "%(domain)s.\n"),
+            LOG.warning("Unable to dequalify. %(name)s is not in "
+                        "%(domain)s.\n",
                         {'name': name,
                          'domain': self.qualified_domain})
             dequalified = None
@@ -333,6 +333,5 @@ class LdapDNS(dns_driver.DNSDriver):
         dEntry.delete()

     def delete_dns_file(self):
-        LOG.warning(_LW("This shouldn't be getting called except during "
-                        "testing."))
+        LOG.warning("This shouldn't be getting called except during testing.")
         pass

@@ -36,7 +36,7 @@ import six

 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import objects
 from nova.pci import utils as pci_utils
@@ -141,8 +141,8 @@ class IptablesTable(object):
             chain_set = self.unwrapped_chains

         if name not in chain_set:
-            LOG.warning(_LW('Attempted to remove chain %s which does not '
-                            'exist'), name)
+            LOG.warning('Attempted to remove chain %s which does not exist',
                        name)
             return
         self.dirty = True

@@ -210,8 +210,8 @@ class IptablesTable(object):
             self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
             self.dirty = True
         except ValueError:
-            LOG.warning(_LW('Tried to remove rule that was not there:'
-                            ' %(chain)r %(rule)r %(wrap)r %(top)r'),
+            LOG.warning('Tried to remove rule that was not there:'
+                        ' %(chain)r %(rule)r %(wrap)r %(top)r',
                         {'chain': chain, 'rule': rule,
                          'top': top, 'wrap': wrap})

@@ -714,8 +714,8 @@ def ensure_floating_forward(floating_ip, fixed_ip, device, network):
     regex = '.*\s+%s(/32|\s+|$)' % floating_ip
     num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
     if num_rules:
-        msg = _LW('Removed %(num)d duplicate rules for floating IP %(float)s')
-        LOG.warning(msg, {'num': num_rules, 'float': floating_ip})
+        LOG.warning('Removed %(num)d duplicate rules for floating IP '
+                    '%(float)s', {'num': num_rules, 'float': floating_ip})
     for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
         iptables_manager.ipv4['nat'].add_rule(chain, rule)
     iptables_manager.apply()
@@ -762,7 +762,7 @@ def clean_conntrack(fixed_ip):
         _execute('conntrack', '-D', '-r', fixed_ip, run_as_root=True,
                  check_exit_code=[0, 1])
     except processutils.ProcessExecutionError:
-        LOG.exception(_LE('Error deleting conntrack entries for %s'), fixed_ip)
+        LOG.exception('Error deleting conntrack entries for %s', fixed_ip)


 def _enable_ipv4_forwarding():
@@ -1013,7 +1013,7 @@ def restart_dhcp(context, dev, network_ref, fixedips):
             _add_dnsmasq_accept_rules(dev)
             return
         except Exception as exc:
-            LOG.error(_LE('kill -HUP dnsmasq threw %s'), exc)
+            LOG.error('kill -HUP dnsmasq threw %s', exc)
     else:
         LOG.debug('Pid %d is stale, relaunching dnsmasq', pid)

@@ -1091,7 +1091,7 @@ interface %s
     try:
         _execute('kill', pid, run_as_root=True)
     except Exception as exc:
-        LOG.error(_LE('killing radvd threw %s'), exc)
+        LOG.error('killing radvd threw %s', exc)
     else:
         LOG.debug('Pid %d is stale, relaunching radvd', pid)

@@ -1123,7 +1123,7 @@ def _host_dhcp(fixedip):
     # to truncate the hostname to only 63 characters.
     hostname = fixedip.instance.hostname
     if len(hostname) > 63:
-        LOG.warning(_LW('hostname %s too long, truncating.'), hostname)
+        LOG.warning('hostname %s too long, truncating.', hostname)
         hostname = fixedip.instance.hostname[:2] + '-' +\
                    fixedip.instance.hostname[-60:]
     if CONF.use_single_default_gateway:
@@ -1258,7 +1258,7 @@ def _ovs_vsctl(args):
     try:
         return utils.execute(*full_args, run_as_root=True)
     except Exception as e:
-        LOG.error(_LE("Unable to execute %(cmd)s. Exception: %(exception)s"),
+        LOG.error("Unable to execute %(cmd)s. Exception: %(exception)s",
                   {'cmd': full_args, 'exception': e})
         raise exception.OvsConfigurationFailure(inner_exception=e)

@@ -1322,9 +1322,9 @@ def create_tap_dev(dev, mac_address=None, multiqueue=False):
     except processutils.ProcessExecutionError:
         if multiqueue:
             LOG.warning(
-                _LW('Failed to create a tap device with ip tuntap. '
+                'Failed to create a tap device with ip tuntap. '
                 'tunctl does not support creation of multi-queue '
-                'enabled devices, skipping fallback.'))
+                'enabled devices, skipping fallback.')
             raise

     # Second option: tunctl
@@ -1359,7 +1359,7 @@ def delete_net_dev(dev):
         LOG.debug("Net device removed: '%s'", dev)
     except processutils.ProcessExecutionError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_LE("Failed removing net device: '%s'"), dev)
+            LOG.error("Failed removing net device: '%s'", dev)


 def delete_bridge_dev(dev):
@@ -1370,7 +1370,7 @@ def delete_bridge_dev(dev):
         utils.execute('brctl', 'delbr', dev, run_as_root=True)
     except processutils.ProcessExecutionError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_LE("Failed removing bridge device: '%s'"), dev)
+            LOG.error("Failed removing bridge device: '%s'", dev)


 # Similar to compute virt layers, the Linux network node
@@ -1681,18 +1681,18 @@ def _exec_ebtables(*cmd, **kwargs):
             # See if we can retry the error.
             if any(error in exc.stderr for error in retry_strings):
                 if count > attempts and check_exit_code:
-                    LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
+                    LOG.warning('%s failed. Not Retrying.', ' '.join(cmd))
                     raise
                 else:
                     # We need to sleep a bit before retrying
-                    LOG.warning(_LW("%(cmd)s failed. Sleeping %(time)s "
-                                    "seconds before retry."),
+                    LOG.warning("%(cmd)s failed. Sleeping %(time)s "
+                                "seconds before retry.",
                                 {'cmd': ' '.join(cmd), 'time': sleep})
                     time.sleep(sleep)
             else:
                 # Not eligible for retry
                 if check_exit_code:
-                    LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
+                    LOG.warning('%s failed. Not Retrying.', ' '.join(cmd))
                     raise
                 else:
                     return

@@ -45,7 +45,7 @@ import six
 import nova.conf
 from nova import context
 from nova import exception
-from nova.i18n import _, _LI, _LE, _LW
+from nova.i18n import _
 from nova import ipv6
 from nova import manager
 from nova.network import api as network_api
@@ -250,7 +250,7 @@ class NetworkManager(manager.Manager):
                     network['uuid'], self.host)
                 dev = self.driver.get_dev(network)
                 self.driver.update_dns(ctxt, dev, network)
-                LOG.info(_LI('Configured network %(network)s on host %(host)s'),
+                LOG.info('Configured network %(network)s on host %(host)s',
                          {'network': network['uuid'], 'host': self.host})

     @periodic_task.periodic_task
@@ -424,7 +424,7 @@ class NetworkManager(manager.Manager):

         net_info = self.get_instance_nw_info(admin_context, instance_uuid,
                                              rxtx_factor, host)
-        LOG.info(_LI("Allocated network: '%s' for instance"), net_info,
+        LOG.info("Allocated network: '%s' for instance", net_info,
                  instance_uuid=instance_uuid)
         return net_info

@@ -486,7 +486,7 @@ class NetworkManager(manager.Manager):
         # deallocate vifs (mac addresses)
         objects.VirtualInterface.delete_by_instance_uuid(
             read_deleted_context, instance_uuid)
-        LOG.info(_LI("Network deallocated for instance (fixed IPs: '%s')"),
+        LOG.info("Network deallocated for instance (fixed IPs: '%s')",
                  fixed_ips, instance_uuid=instance_uuid)

     @messaging.expected_exceptions(exception.InstanceNotFound)
@@ -521,12 +521,12 @@ class NetworkManager(manager.Manager):
         for fixed_ip in fixed_ips:
             vif = fixed_ip.virtual_interface
             if not vif:
-                LOG.warning(_LW('No VirtualInterface for FixedIP: %s'),
+                LOG.warning('No VirtualInterface for FixedIP: %s',
                             str(fixed_ip.address), instance_uuid=instance_uuid)
                 continue

             if not fixed_ip.network:
-                LOG.warning(_LW('No Network for FixedIP: %s'),
+                LOG.warning('No Network for FixedIP: %s',
                             str(fixed_ip.address), instance_uuid=instance_uuid)
                 continue

@@ -756,17 +756,17 @@ class NetworkManager(manager.Manager):

         domainref = objects.DNSDomain.get_by_domain(context, instance_domain)
         if domainref is None:
-            LOG.warning(_LW('instance-dns-zone not found |%s|.'),
+            LOG.warning('instance-dns-zone not found |%s|.',
                         instance_domain, instance=instance)
             return True
         dns_zone = domainref.availability_zone

         instance_zone = instance.get('availability_zone')
         if dns_zone and (dns_zone != instance_zone):
-            LOG.warning(_LW('instance-dns-zone is |%(domain)s|, '
+            LOG.warning('instance-dns-zone is |%(domain)s|, '
                         'which is in availability zone |%(zone)s|. '
                         'Instance is in zone |%(zone2)s|. '
-                        'No DNS record will be created.'),
+                        'No DNS record will be created.',
                         {'domain': instance_domain,
                          'zone': dns_zone,
                          'zone2': instance_zone},
@@ -801,9 +801,9 @@ class NetworkManager(manager.Manager):
             quotas.check_deltas(context, {'fixed_ips': 1}, quota_project)
         except exception.OverQuota as exc:
             count = exc.kwargs['usages']['fixed_ips']
-            LOG.warning(_LW("Quota exceeded for project %(pid)s, tried to "
+            LOG.warning("Quota exceeded for project %(pid)s, tried to "
                         "allocate fixed IP. %(used)s of %(allowed)s are "
-                        "in use or are already reserved."),
+                        "in use or are already reserved.",
                         {'pid': quota_project, 'used': count,
                          'allowed': exc.kwargs['quotas']['fixed_ips']},
                         instance_uuid=instance_id)
@@ -865,10 +865,10 @@ class NetworkManager(manager.Manager):
                     # outermost catch-all except block.
                     count = exc.kwargs['usages']['fixed_ips']
                     allowed = exc.kwargs['quotas']['fixed_ips']
-                    LOG.warning(_LW("Quota exceeded for project %(pid)s, "
+                    LOG.warning("Quota exceeded for project %(pid)s, "
                                 "tried to allocate fixed IP. %(used)s "
                                 "of %(allowed)s are in use or are "
-                                "already reserved."),
+                                "already reserved.",
                                 {'pid': quota_project, 'used': count,
                                  'allowed': allowed},
                                 instance_uuid=instance_id)
@@ -922,9 +922,9 @@ class NetworkManager(manager.Manager):
             try:
                 f()
             except Exception:
-                LOG.warning(_LW('Error cleaning up fixed IP '
+                LOG.warning('Error cleaning up fixed IP '
                             'allocation. Manual cleanup may '
-                            'be required.'), exc_info=True)
+                            'be required.', exc_info=True)

     def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
                             instance=None):
@@ -969,15 +969,15 @@ class NetworkManager(manager.Manager):
             # so we log a message to help track down
             # the possible race.
             if not vif_id:
-                LOG.info(_LI("Unable to release %s because vif "
-                             "doesn't exist"), address)
+                LOG.info("Unable to release %s because vif doesn't exist",
+                         address)
                 return

             vif = objects.VirtualInterface.get_by_id(context, vif_id)

             if not vif:
-                LOG.info(_LI("Unable to release %s because vif "
-                             "object doesn't exist"), address)
+                LOG.info("Unable to release %s because vif "
+                         "object doesn't exist", address)
                 return

             # NOTE(cfb): Call teardown before release_dhcp to ensure
@@ -997,8 +997,8 @@ class NetworkManager(manager.Manager):
                     # release_dhcp on the local driver
                     self.driver.release_dhcp(dev, address, vif.address)
                 except exception.NetworkDhcpReleaseFailed:
-                    LOG.error(_LE("Error releasing DHCP for IP %(address)s"
-                                  " with MAC %(mac_address)s"),
+                    LOG.error("Error releasing DHCP for IP %(address)s"
+                              " with MAC %(mac_address)s",
                               {'address': address,
                                'mac_address': vif.address},
                               instance=instance)
@@ -1028,12 +1028,12 @@ class NetworkManager(manager.Manager):
         fixed_ip = objects.FixedIP.get_by_address(context, address)

         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip)
+            LOG.warning('IP %s leased that is not associated', fixed_ip)
             return
         fixed_ip.leased = True
         fixed_ip.save()
         if not fixed_ip.allocated:
-            LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), fixed_ip,
+            LOG.warning('IP |%s| leased that isn\'t allocated', fixed_ip,
                         instance_uuid=fixed_ip.instance_uuid)

     def release_fixed_ip(self, context, address, mac=None):
@@ -1042,10 +1042,10 @@ class NetworkManager(manager.Manager):
         fixed_ip = objects.FixedIP.get_by_address(context, address)

         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s released that is not associated'), fixed_ip)
+            LOG.warning('IP %s released that is not associated', fixed_ip)
             return
         if not fixed_ip.leased:
-            LOG.warning(_LW('IP %s released that was not leased'), fixed_ip,
+            LOG.warning('IP %s released that was not leased', fixed_ip,
                         instance_uuid=fixed_ip.instance_uuid)
         else:
             fixed_ip.leased = False
@@ -1067,11 +1067,11 @@ class NetworkManager(manager.Manager):
                 LOG.debug('Found VIF: %s', vif,
                           instance_uuid=fixed_ip.instance_uuid)
                 if vif.instance_uuid != fixed_ip.instance_uuid:
-                    LOG.info(_LI("Ignoring request to release fixed IP "
+                    LOG.info("Ignoring request to release fixed IP "
                              "%(address)s with MAC %(mac)s since it "
                              "is now associated with a new instance "
                             "that is in the process of allocating "
-                             "it's network."),
+                             "it's network.",
                              {'address': address, 'mac': mac},
                              instance_uuid=fixed_ip.instance_uuid)
                     return
@@ -1155,10 +1155,9 @@ class NetworkManager(manager.Manager):
             each_subnet_size = fixnet.size / kwargs["num_networks"]
             if each_subnet_size > CONF.network_size:
                 subnet = 32 - int(math.log(CONF.network_size, 2))
-                oversize_msg = _LW(
+                LOG.warning(
                     'Subnet(s) too large, defaulting to /%s.'
-                    ' To override, specify network_size flag.') % subnet
-                LOG.warning(oversize_msg)
+                    ' To override, specify network_size flag.', subnet)
                 kwargs["network_size"] = CONF.network_size
             else:
                 kwargs["network_size"] = fixnet.size

@@ -20,7 +20,7 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _
 from nova.network import dns_driver

 CONF = cfg.CONF
@@ -115,8 +115,7 @@ class MiniDNS(dns_driver.DNSDriver):
         outfile.close()
         shutil.move(outfile.name, self.filename)
         if not deleted:
-            LOG.warning(_LW('Cannot delete entry |%s|'),
-                        self.qualify(name, domain))
+            LOG.warning('Cannot delete entry |%s|', self.qualify(name, domain))
             raise exception.NotFound

     def modify_address(self, name, address, domain):
@@ -190,10 +189,10 @@ class MiniDNS(dns_driver.DNSDriver):
                     entry['domain'] != fqdomain.lower()):
                 outfile.write(line)
             else:
-                LOG.info(_LI("deleted %s"), entry)
+                LOG.info("deleted %s", entry)
                 deleted = True
         outfile.close()
         shutil.move(outfile.name, self.filename)
         if not deleted:
-            LOG.warning(_LW('Cannot delete domain |%s|'), fqdomain)
+            LOG.warning('Cannot delete domain |%s|', fqdomain)
             raise exception.NotFound

@@ -28,7 +28,7 @@ import six
 from nova.compute import utils as compute_utils
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova.network import base_api
 from nova.network import model as network_model
 from nova.network.neutronv2 import constants
@@ -109,9 +109,9 @@ class ClientWrapper(clientv20.Client):
                 # should be able to regenerate a valid by using the
                 # Neutron admin credential configuration located in
                 # nova.conf.
-                LOG.error(_LE("Neutron client was not able to generate a "
+                LOG.error("Neutron client was not able to generate a "
                           "valid admin token, please verify Neutron "
-                          "admin credential located in nova.conf"))
+                          "admin credential located in nova.conf")
                 raise exception.NeutronAdminCredentialConfigurationInvalid()
             except neutron_client_exc.Forbidden as e:
                 raise exception.Forbidden(e)
@@ -162,8 +162,8 @@ def _is_not_duplicate(item, items, items_list_name, instance):
     # item is not part of the items list so if it is part of it
     # we should at least log it as a warning
     if present:
-        LOG.warning(_LW("%(item)s already exists in list: %(list_name)s "
-                        "containing: %(items)s. ignoring it"),
+        LOG.warning("%(item)s already exists in list: %(list_name)s "
+                    "containing: %(items)s. ignoring it",
                     {'item': item,
                      'list_name': items_list_name,
                      'items': items},
@@ -246,8 +246,8 @@ class API(base_api.NetworkAPI):
             return updated_port
         except Exception as ex:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Unable to update binding profile "
-                              "for port: %(port)s due to failure: %(error)s"),
+                LOG.error("Unable to update binding profile "
+                          "for port: %(port)s due to failure: %(error)s",
                           {'port': port_id, 'error': ex},
                           instance=instance)

@@ -397,8 +397,8 @@ class API(base_api.NetworkAPI):
                      instance=instance)
             return port
         except neutron_client_exc.InvalidIpForNetworkClient:
-            LOG.warning(_LW('Neutron error: %(ip)s is not a valid IP address '
-                            'for network %(network_id)s.'),
+            LOG.warning('Neutron error: %(ip)s is not a valid IP address '
+                        'for network %(network_id)s.',
                         {'ip': fixed_ip, 'network_id': network_id},
                         instance=instance)
             msg = (_('Fixed IP %(ip)s is not a valid ip address for '
@@ -406,22 +406,22 @@ class API(base_api.NetworkAPI):
                    {'ip': fixed_ip, 'network_id': network_id})
             raise exception.InvalidInput(reason=msg)
         except neutron_client_exc.IpAddressInUseClient:
-            LOG.warning(_LW('Neutron error: Fixed IP %s is '
-                            'already in use.'), fixed_ip, instance=instance)
+            LOG.warning('Neutron error: Fixed IP %s is '
+                        'already in use.', fixed_ip, instance=instance)
             msg = _("Fixed IP %s is already in use.") % fixed_ip
             raise exception.FixedIpAlreadyInUse(message=msg)
         except neutron_client_exc.OverQuotaClient:
-            LOG.warning(_LW(
-                'Neutron error: Port quota exceeded in tenant: %s'),
+            LOG.warning(
+                'Neutron error: Port quota exceeded in tenant: %s',
                 port_req_body['port']['tenant_id'], instance=instance)
             raise exception.PortLimitExceeded()
         except neutron_client_exc.IpAddressGenerationFailureClient:
-            LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'),
+            LOG.warning('Neutron error: No more fixed IPs in network: %s',
                         network_id, instance=instance)
             raise exception.NoMoreFixedIps(net=network_id)
         except neutron_client_exc.NeutronClientException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Neutron error creating port on network %s'),
+                LOG.exception('Neutron error creating port on network %s',
                               network_id, instance=instance)

     def _update_port(self, port_client, instance, port_id,
@@ -436,16 +436,16 @@ class API(base_api.NetworkAPI):
         except neutron_client_exc.MacAddressInUseClient:
             mac_address = port_req_body['port'].get('mac_address')
             network_id = port_req_body['port'].get('network_id')
-            LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
-                            'in use on network %(network)s.'),
+            LOG.warning('Neutron error: MAC address %(mac)s is already '
+                        'in use on network %(network)s.',
                         {'mac': mac_address, 'network': network_id},
                         instance=instance)
             raise exception.PortInUse(port_id=mac_address)
         except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
             network_id = port_req_body['port'].get('network_id')
-            LOG.warning(_LW('Neutron error: Tried to bind a port with '
+            LOG.warning('Neutron error: Tried to bind a port with '
                         'fixed_ips to a host in the wrong segment on '
-                        'network %(network)s.'),
+                        'network %(network)s.',
                         {'network': network_id}, instance=instance)
             raise exception.FixedIpInvalidOnHost(port_id=port_id)

@@ -507,8 +507,8 @@ class API(base_api.NetworkAPI):
             LOG.debug('Unable to unbind port %s as it no longer exists.',
                       port_id)
         except Exception:
-            LOG.exception(_LE("Unable to clear device ID "
-                              "for port '%s'"), port_id)
+            LOG.exception("Unable to clear device ID for port '%s'",
+                          port_id)

     def _validate_requested_port_ids(self, context, instance, neutron,
                                      requested_networks):
@@ -1069,9 +1069,9 @@ class API(base_api.NetworkAPI):
             # only ever index a single device, which needs to be
             # successfully claimed for this to be called as part of
             # allocate_networks method
-            LOG.error(_LE("PCI request %s does not have a "
+            LOG.error("PCI request %s does not have a "
                       "unique device associated with it. Unable to "
-                      "determine MAC address"),
+                      "determine MAC address",
                       pci_request, instance=instance)
             return
         pci_dev = pci_devs[0]
@@ -1080,8 +1080,8 @@ class API(base_api.NetworkAPI):
             mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
         except exception.PciDeviceNotFoundById as e:
             LOG.error(
-                _LE("Could not determine MAC address for %(addr)s, "
-                    "error: %(e)s"),
+                "Could not determine MAC address for %(addr)s, "
+                "error: %(e)s",
                 {"addr": pci_dev.address, "e": e}, instance=instance)
         else:
             port_req_body['port']['mac_address'] = mac
@@ -1140,8 +1140,8 @@ class API(base_api.NetworkAPI):
             port_req_body = {'port': {'dns_name': instance.hostname}}
             neutron.update_port(port_id, port_req_body)
         except neutron_client_exc.BadRequest:
-            LOG.warning(_LW('Neutron error: Instance hostname '
-                            '%(hostname)s is not a valid DNS name'),
+            LOG.warning('Neutron error: Instance hostname '
+                        '%(hostname)s is not a valid DNS name',
                         {'hostname': instance.hostname}, instance=instance)
             msg = (_('Instance hostname %(hostname)s is not a valid DNS '
                      'name') % {'hostname': instance.hostname})
@@ -1154,12 +1154,11 @@ class API(base_api.NetworkAPI):
                 neutron.delete_port(port)
             except neutron_client_exc.NeutronClientException as e:
                 if e.status_code == 404:
-                    LOG.warning(_LW("Port %s does not exist"), port,
+                    LOG.warning("Port %s does not exist", port,
                                 instance=instance)
                 else:
                     exceptions.append(e)
-                    LOG.warning(
-                        _LW("Failed to delete port %s for instance."),
+                    LOG.warning("Failed to delete port %s for instance.",
                                 port, instance=instance, exc_info=True)
         if len(exceptions) > 0 and raise_if_fail:
             raise exceptions[0]
@@ -1562,8 +1561,8 @@ class API(base_api.NetworkAPI):
         except neutron_client_exc.NetworkNotFoundClient:
             # This shouldn't happen since we just created the network, but
             # handle it anyway.
-            LOG.error(_LE('Automatically allocated network %(network_id)s '
-                          'was not found.'), {'network_id': topology['id']},
+            LOG.error('Automatically allocated network %(network_id)s '
+                      'was not found.', {'network_id': topology['id']},
                       instance=instance)
             raise exception.UnableToAutoAllocateNetwork(project_id=project_id)

@@ -1767,8 +1766,8 @@ class API(base_api.NetworkAPI):

         msg_dict = dict(address=floating_address,
                         instance_id=orig_instance_uuid)
-        LOG.info(_LI('re-assign floating IP %(address)s from '
-                     'instance %(instance_id)s'), msg_dict,
+        LOG.info('re-assign floating IP %(address)s from '
+                 'instance %(instance_id)s', msg_dict,
                  instance=instance)
         orig_instance = objects.Instance.get_by_uuid(context,
                                                      orig_instance_uuid)
@@ -1862,7 +1861,7 @@ class API(base_api.NetworkAPI):
             raise exception.FloatingIpNotFound(id=id)
         else:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Unable to access floating IP %s'), id)
+                LOG.exception('Unable to access floating IP %s', id)
         pool_dict = self._setup_net_dict(client,
                                          fip['floating_network_id'])
         port_dict = self._setup_port_dict(context, client, fip['port_id'])
@@ -1982,12 +1981,12 @@ class API(base_api.NetworkAPI):
         use the [neutron] option only.
         """
         if CONF.default_floating_pool != 'nova':
-            LOG.warning(_LW("Config option 'default_floating_pool' is set to "
+            LOG.warning("Config option 'default_floating_pool' is set to "
                         "a non-default value. Falling back to this value "
                         "for now but this behavior will change in a "
                         "future release. You should unset this value "
                         "and set the '[neutron] default_floating_pool' "
-                        "option instead."))
+                        "option instead.")
             return CONF.default_floating_pool

         return CONF.neutron.default_floating_pool
@@ -2025,7 +2024,7 @@ class API(base_api.NetworkAPI):
             if e.status_code == 404:
                 return []
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Unable to access floating IP for %s'),
+                LOG.exception('Unable to access floating IP for %s',
                               ', '.join(['%s %s' % (k, v)
                                          for k, v in kwargs.items()]))

@@ -2154,9 +2153,9 @@ class API(base_api.NetworkAPI):
                 break
         else:
             tenant_id = port['tenant_id']
-            LOG.warning(_LW("Network %(id)s not matched with the tenants "
+            LOG.warning("Network %(id)s not matched with the tenants "
                         "network! The ports tenant %(tenant_id)s will be "
-                        "used."),
+                        "used.",
                         {'id': port['network_id'], 'tenant_id': tenant_id})

         bridge = None
@@ -2311,9 +2310,9 @@ class API(base_api.NetworkAPI):
                     preserve_on_delete=preserve_on_delete))

             elif nw_info_refresh:
-                LOG.info(_LI('Port %s from network info_cache is no '
+                LOG.info('Port %s from network info_cache is no '
                          'longer associated with instance in Neutron. '
-                         'Removing from network info_cache.'), port_id,
+                         'Removing from network info_cache.', port_id,
                          instance=instance)

         return nw_info
@@ -2513,16 +2512,16 @@ class API(base_api.NetworkAPI):
         # TODO(lbeliveau): Batch up the port updates in one neutron call.
         for port_id, updates in port_updates:
             if updates:
-                LOG.info(_LI("Updating port %(port)s with "
-                             "attributes %(attributes)s"),
+                LOG.info("Updating port %(port)s with "
+                         "attributes %(attributes)s",
                          {"port": port_id, "attributes": updates},
                          instance=instance)
                 try:
                     neutron.update_port(port_id, {'port': updates})
                 except Exception:
                     with excutils.save_and_reraise_exception():
-                        LOG.exception(_LE("Unable to update binding details "
-                                          "for port %s"),
+                        LOG.exception("Unable to update binding details "
+                                      "for port %s",
                                       port_id, instance=instance)

     def update_instance_vnic_index(self, context, instance, vif, index):
@@ -2539,8 +2538,8 @@ class API(base_api.NetworkAPI):
             neutron.update_port(vif['id'], port_req_body)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Unable to update instance VNIC index '
-                                  'for port %s.'),
+                LOG.exception('Unable to update instance VNIC index '
+                              'for port %s.',
                               vif['id'], instance=instance)

@@ -24,7 +24,7 @@ import six
 from webob import exc

 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova.network.neutronv2 import api as neutronapi
 from nova.network.security_group import security_group_base
 from nova import utils
@@ -52,8 +52,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             raise exception.Invalid(six.text_type(e))
         except n_exc.NeutronClientException as e:
             exc_info = sys.exc_info()
-            LOG.exception(_LE("Neutron Error creating security group %s"),
-                          name)
+            LOG.exception("Neutron Error creating security group %s", name)
             if e.status_code == 401:
                 # TODO(arosen) Cannot raise generic response from neutron here
                 # as this error code could be related to bad input or over
@@ -73,8 +72,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                 security_group['id'], body).get('security_group')
         except n_exc.NeutronClientException as e:
             exc_info = sys.exc_info()
-            LOG.exception(_LE("Neutron Error updating security group %s"),
-                          name)
+            LOG.exception("Neutron Error updating security group %s", name)
             if e.status_code == 401:
                 # TODO(arosen) Cannot raise generic response from neutron here
                 # as this error code could be related to bad input or over
@@ -152,10 +150,10 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                 LOG.debug("Neutron security group %s not found", name)
                 raise exception.SecurityGroupNotFound(six.text_type(e))
             else:
-                LOG.error(_LE("Neutron Error: %s"), e)
+                LOG.error("Neutron Error: %s", e)
                 six.reraise(*exc_info)
         except TypeError as e:
-            LOG.error(_LE("Neutron Error: %s"), e)
+            LOG.error("Neutron Error: %s", e)
             msg = _("Invalid security group name: %(name)s.") % {"name": name}
             raise exception.SecurityGroupNotFound(six.text_type(msg))

@@ -188,7 +186,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                 'security_groups')
         except n_exc.NeutronClientException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Neutron Error getting security groups"))
+                LOG.exception("Neutron Error getting security groups")
         converted_rules = []
         for security_group in security_groups:
             converted_rules.append(
@@ -214,7 +212,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             elif e.status_code == 409:
                 self.raise_invalid_property(six.text_type(e))
             else:
-                LOG.error(_LE("Neutron Error: %s"), e)
+                LOG.error("Neutron Error: %s", e)
                 six.reraise(*exc_info)

     def add_rules(self, context, id, name, vals):
@@ -234,18 +232,17 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
         except n_exc.NeutronClientException as e:
             exc_info = sys.exc_info()
             if e.status_code == 404:
-                LOG.exception(_LE("Neutron Error getting security group %s"),
-                              name)
+                LOG.exception("Neutron Error getting security group %s", name)
                 self.raise_not_found(six.text_type(e))
             elif e.status_code == 409:
-                LOG.exception(_LE("Neutron Error adding rules to security "
-                                  "group %s"), name)
+                LOG.exception("Neutron Error adding rules to security "
+                              "group %s", name)
                 self.raise_over_quota(six.text_type(e))
             elif e.status_code == 400:
-                LOG.exception(_LE("Neutron Error: %s"), e)
+                LOG.exception("Neutron Error: %s", e)
                 self.raise_invalid_property(six.text_type(e))
             else:
-                LOG.exception(_LE("Neutron Error:"))
+                LOG.exception("Neutron Error:")
                 six.reraise(*exc_info)
         converted_rules = []
         for rule in rules:
@@ -299,8 +296,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                 neutron.delete_security_group_rule(rule_ids.pop())
         except n_exc.NeutronClientException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Neutron Error unable to delete %s"),
-                              rule_ids)
+                LOG.exception("Neutron Error unable to delete %s", rule_ids)

     def get_rule(self, context, id):
         neutron = neutronapi.get_client(context)
@@ -313,7 +309,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                 LOG.debug("Neutron security group rule %s not found", id)
                 self.raise_not_found(six.text_type(e))
             else:
-                LOG.error(_LE("Neutron Error: %s"), e)
+                LOG.error("Neutron Error: %s", e)
                 six.reraise(*exc_info)
         return self._convert_to_nova_security_group_rule_format(rule)

@@ -443,14 +439,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                        'project': context.project_id})
                 self.raise_not_found(msg)
             else:
-                LOG.exception(_LE("Neutron Error:"))
+                LOG.exception("Neutron Error:")
                 six.reraise(*exc_info)
         params = {'device_id': instance.uuid}
         try:
             ports = neutron.list_ports(**params).get('ports')
         except n_exc.NeutronClientException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Neutron Error:"))
+                LOG.exception("Neutron Error:")

         if not ports:
             msg = (_("instance_id %s could not be found as device id on"
@@ -459,9 +455,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):

         for port in ports:
             if not self._has_security_group_requirements(port):
-                LOG.warning(_LW("Cannot add security group %(name)s to "
+                LOG.warning("Cannot add security group %(name)s to "
                             "%(instance)s since the port %(port_id)s "
-                            "does not meet security requirements"),
+                            "does not meet security requirements",
                             {'name': security_group_name,
                              'instance': instance.uuid,
                              'port_id': port['id']})
@@ -471,14 +467,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
             port['security_groups'].append(security_group_id)
             updated_port = {'security_groups': port['security_groups']}
             try:
-                LOG.info(_LI("Adding security group %(security_group_id)s to "
-                             "port %(port_id)s"),
+                LOG.info("Adding security group %(security_group_id)s to "
+                         "port %(port_id)s",
                          {'security_group_id': security_group_id,
                           'port_id': port['id']})
                 neutron.update_port(port['id'], {'port': updated_port})
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("Neutron Error:"))
+                    LOG.exception("Neutron Error:")

     def remove_from_instance(self, context, instance, security_group_name):
         """Remove the security group associated with the instance."""
@@ -497,14 +493,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
                        'project': context.project_id})
                 self.raise_not_found(msg)
             else:
-                LOG.exception(_LE("Neutron Error:"))
+                LOG.exception("Neutron Error:")
                 six.reraise(*exc_info)
         params = {'device_id': instance.uuid}
         try:
             ports = neutron.list_ports(**params).get('ports')
         except n_exc.NeutronClientException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Neutron Error:"))
+                LOG.exception("Neutron Error:")

         if not ports:
             msg = (_("instance_id %s could not be found as device id on"
@@ -525,15 +521,15 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):

             updated_port = {'security_groups': port['security_groups']}
             try:
-                LOG.info(_LI("Adding security group %(security_group_id)s to "
-                             "port %(port_id)s"),
+                LOG.info("Adding security group %(security_group_id)s to "
+                         "port %(port_id)s",
                          {'security_group_id': security_group_id,
                           'port_id': port['id']})
                 neutron.update_port(port['id'], {'port': updated_port})
                 found_security_group = True
             except Exception:
                 with excutils.save_and_reraise_exception():
                    LOG.exception("Neutron Error:")
         if not found_security_group:
             msg = (_("Security group %(security_group_name)s not associated "
                      "with the instance %(instance)s") %

@@ -29,7 +29,6 @@ import six
 import nova.conf
 import nova.context
 from nova import exception
-from nova.i18n import _LE
 from nova.image import glance
 from nova import network
 from nova.network import model as network_model
@@ -142,7 +141,7 @@ def send_update(context, old_instance, new_instance, service="compute",
                   'instance could not be found and was most likely '
                   'deleted.', instance=new_instance)
     except Exception:
-        LOG.exception(_LE("Failed to send state update notification"),
+        LOG.exception("Failed to send state update notification",
                       instance=new_instance)


@@ -185,7 +184,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
                   'instance could not be found and was most likely '
                   'deleted.', instance=instance)
     except Exception:
-        LOG.exception(_LE("Failed to send state update notification"),
+        LOG.exception("Failed to send state update notification",
                       instance=instance)


@@ -336,7 +335,7 @@ def bandwidth_usage(instance_ref, audit_start,
     except Exception:
         try:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE('Failed to get nw_info'),
+                LOG.exception('Failed to get nw_info',
                               instance=instance_ref)
         except Exception:
             if ignore_missing_network_data:

@@ -28,7 +28,7 @@ from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import api_models
 from nova.db.sqlalchemy import models as main_models
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -317,7 +317,7 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
              filter_by(deleted=0).count()
     if result:
         LOG.warning(
-            _LW('Main database contains %(count)i unmigrated aggregates'),
+            'Main database contains %(count)i unmigrated aggregates',
             {'count': result})
     return result == 0

@@ -585,7 +585,7 @@ def migrate_aggregates(ctxt, count):
             db.aggregate_delete(ctxt, aggregate.id)
         except exception.AggregateNotFound:
             LOG.warning(
-                _LW('Aggregate id %(id)i disappeared during migration'),
+                'Aggregate id %(id)i disappeared during migration',
                 {'id': aggregate_id})
         except (exception.AggregateNameExists) as e:
             LOG.error(six.text_type(e))

@@ -20,7 +20,7 @@ from nova.cells import opts as cells_opts
 from nova.cells import rpcapi as cells_rpcapi
 from nova import db
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -221,8 +221,8 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
         if not db_bdms:
             raise exception.VolumeBDMNotFound(volume_id=volume_id)
         if len(db_bdms) > 1:
-            LOG.warning(_LW('Legacy get_by_volume_id() call found multiple '
-                            'BDMs for volume %(volume)s'),
+            LOG.warning('Legacy get_by_volume_id() call found multiple '
+                        'BDMs for volume %(volume)s',
                         {'volume': volume_id})
         db_bdm = db_bdms[0]
         # NOTE (ndipanov): Move this to the db layer into a

@@ -23,7 +23,6 @@ from sqlalchemy.sql import null
 from nova.db.sqlalchemy import api as db
 from nova.db.sqlalchemy import api_models
 from nova import exception
-from nova.i18n import _LE
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -82,8 +81,7 @@ class BuildRequest(base.NovaObject):
                       'which is not supported here.',
                       dict(instance_uuid=self.instance_uuid,
                            version=exc.objver))
-            LOG.exception(_LE('Could not deserialize instance in '
-                              'BuildRequest'))
+            LOG.exception('Could not deserialize instance in BuildRequest')
             raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
         # NOTE(sbauza): The instance primitive should already have the deleted
         # field being set, so when hydrating it back here, we should get the
@@ -155,7 +153,7 @@
             try:
                 getattr(req, '_load_%s' % key)(db_req[key])
             except AttributeError:
-                LOG.exception(_LE('No load handler for %s'), key)
+                LOG.exception('No load handler for %s', key)
             else:
                 setattr(req, key, db_req[key])
         # Load instance last because other fields on req may be referenced

@@ -30,7 +30,6 @@ from nova.db.sqlalchemy.api import require_context
 from nova.db.sqlalchemy import api_models
 from nova.db.sqlalchemy import models as main_models
 from nova import exception
-from nova.i18n import _LW
 from nova.notifications.objects import base as notification
 from nova.notifications.objects import flavor as flavor_notification
 from nova import objects
@@ -199,7 +198,7 @@ def _ensure_migrated(context):
     result = context.session.query(main_models.InstanceTypes).\
              filter_by(deleted=0).count()
     if result:
-        LOG.warning(_LW('Main database contains %(count)i unmigrated flavors'),
+        LOG.warning('Main database contains %(count)i unmigrated flavors',
                     {'count': result})
     return result == 0

@@ -760,7 +759,7 @@ def migrate_flavors(ctxt, count, hard_delete=False):
             else:
                 db.flavor_destroy(ctxt, flavor.flavorid)
         except exception.FlavorNotFound:
-            LOG.warning(_LW('Flavor id %(id)i disappeared during migration'),
+            LOG.warning('Flavor id %(id)i disappeared during migration',
                         {'id': flavor_id})
         except (exception.FlavorExists, exception.FlavorIdExists) as e:
             LOG.error(six.text_type(e))

@@ -34,7 +34,7 @@ from nova import db
 from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import models
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import notifications
 from nova import objects
@@ -311,8 +311,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
             # If the vm/task states don't indicate that it's being booted
             # then we have a bug here. Log an error and attempt to return
             # the uuid which is what an error above would return.
-            LOG.error(_LE('Could not lazy-load instance.id while '
-                          'attempting to generate the instance name.'))
+            LOG.error('Could not lazy-load instance.id while '
+                      'attempting to generate the instance name.')
             base_name = self.uuid
         return base_name

@@ -726,7 +726,7 @@
                 if field in _INSTANCE_EXTRA_FIELDS:
                     self._save_extra_generic(field)
                     continue
-                LOG.exception(_LE('No save handler for %s'), field,
+                LOG.exception('No save handler for %s', field,
                               instance=self)
         except db_exc.DBReferenceError as exp:
             if exp.key != 'instance_uuid':
@@ -1489,7 +1489,7 @@ def _migrate_instance_keypairs(ctxt, count):
                 keypairs.objects.append(key)
             except exception.KeypairNotFound:
                 LOG.warning(
-                    _LW('Instance %(uuid)s keypair %(keyname)s not found'),
+                    'Instance %(uuid)s keypair %(keyname)s not found',
                     {'uuid': db_extra.instance_uuid, 'keyname': key_name})
         db_extra.keypairs = jsonutils.dumps(keypairs.obj_to_primitive())
         db_extra.save(ctxt.session)

@ -20,7 +20,6 @@ from nova.cells import opts as cells_opts
|
||||
from nova.cells import rpcapi as cells_rpcapi
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.objects import base
|
||||
from nova.objects import fields
|
||||
@ -88,7 +87,7 @@ class InstanceFault(base.NovaPersistentObject, base.NovaObject,
|
||||
cells_rpcapi.CellsAPI().instance_fault_create_at_top(
|
||||
self._context, db_fault)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Failed to notify cells of instance fault"))
|
||||
LOG.exception("Failed to notify cells of instance fault")
|
||||
|
||||
|
||||
@base.NovaObjectRegistry.register
|
||||
|
@ -18,7 +18,6 @@ from nova.cells import opts as cells_opts
|
||||
from nova.cells import rpcapi as cells_rpcapi
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
from nova.objects import base
|
||||
from nova.objects import fields
|
||||
|
||||
@ -81,8 +80,8 @@ class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject):
|
||||
try:
|
||||
cells_api.instance_info_cache_update_at_top(ctxt, info_cache)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Failed to notify cells of instance info "
|
||||
"cache update"))
|
||||
LOG.exception("Failed to notify cells of instance info "
|
||||
"cache update")
|
||||
|
||||
@base.remotable
|
||||
def save(self, update_cells=True):
|
||||
|
@ -22,7 +22,6 @@ from nova.db.sqlalchemy import api as db_api
|
||||
from nova.db.sqlalchemy import api_models
|
||||
from nova.db.sqlalchemy import models as main_models
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
from nova import objects
|
||||
from nova.objects import base
|
||||
from nova.objects import fields
|
||||
@ -258,9 +257,9 @@ def _get_main_keypairs(context, limit):
|
||||
def migrate_keypairs_to_api_db(context, count):
|
||||
bad_instances = _count_unmigrated_instances(context)
|
||||
if bad_instances:
|
||||
LOG.error(_LE('Some instances are still missing keypair '
|
||||
LOG.error('Some instances are still missing keypair '
|
||||
'information. Unable to run keypair migration '
|
||||
'at this time.'))
|
||||
'at this time.')
|
||||
return 0, 0
|
||||
|
||||
main_keypairs = _get_main_keypairs(context, count)
|
||||
|
@ -33,7 +33,7 @@ from nova.db.sqlalchemy import api as db_api
|
||||
from nova.db.sqlalchemy import api_models as models
|
||||
from nova.db.sqlalchemy import resource_class_cache as rc_cache
|
||||
from nova import exception
|
||||
from nova.i18n import _, _LW
|
||||
from nova.i18n import _
|
||||
from nova import objects
|
||||
from nova.objects import base
|
||||
from nova.objects import fields
|
||||
@ -452,8 +452,8 @@ class ResourceProvider(base.NovaObject):
|
||||
"""Set all resource provider Inventory to be the provided list."""
|
||||
exceeded = _set_inventory(self._context, self, inv_list)
|
||||
for uuid, rclass in exceeded:
|
||||
LOG.warning(_LW('Resource provider %(uuid)s is now over-'
|
||||
'capacity for %(resource)s'),
|
||||
LOG.warning('Resource provider %(uuid)s is now over-'
|
||||
'capacity for %(resource)s',
|
||||
{'uuid': uuid, 'resource': rclass})
|
||||
self.obj_reset_changes()
|
||||
|
||||
@ -464,8 +464,8 @@ class ResourceProvider(base.NovaObject):
|
||||
"""
|
||||
exceeded = _update_inventory(self._context, self, inventory)
|
||||
for uuid, rclass in exceeded:
|
||||
LOG.warning(_LW('Resource provider %(uuid)s is now over-'
|
||||
'capacity for %(resource)s'),
|
||||
LOG.warning('Resource provider %(uuid)s is now over-'
|
||||
'capacity for %(resource)s',
|
||||
{'uuid': uuid, 'resource': rclass})
|
||||
self.obj_reset_changes()
|
||||
|
||||
@ -1643,10 +1643,10 @@ def _check_capacity_exceeded(conn, allocs):
|
||||
if (amount_needed < min_unit or amount_needed > max_unit or
|
||||
amount_needed % step_size != 0):
|
||||
LOG.warning(
|
||||
_LW("Allocation for %(rc)s on resource provider %(rp)s "
|
||||
"Allocation for %(rc)s on resource provider %(rp)s "
|
||||
"violates min_unit, max_unit, or step_size. "
|
||||
"Requested: %(requested)s, min_unit: %(min_unit)s, "
|
||||
"max_unit: %(max_unit)s, step_size: %(step_size)s"),
|
||||
"max_unit: %(max_unit)s, step_size: %(step_size)s",
|
||||
{'rc': alloc.resource_class,
|
||||
'rp': rp_uuid,
|
||||
'requested': amount_needed,
|
||||
@ -1662,8 +1662,8 @@ def _check_capacity_exceeded(conn, allocs):
|
||||
capacity = (usage['total'] - usage['reserved']) * allocation_ratio
|
||||
if capacity < (used + amount_needed):
|
||||
LOG.warning(
|
||||
_LW("Over capacity for %(rc)s on resource provider %(rp)s. "
|
||||
"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s"),
|
||||
"Over capacity for %(rc)s on resource provider %(rp)s. "
|
||||
"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s",
|
||||
{'rc': alloc.resource_class,
|
||||
'rp': rp_uuid,
|
||||
'needed': amount_needed,
|
||||
@ -2081,8 +2081,8 @@ class ResourceClass(base.NovaObject):
|
||||
# limit to be exceeded. We set it high in the hope that we never
|
||||
# hit this point, but added this log message so we know that this
|
||||
# specific situation occurred.
|
||||
LOG.warning(_LW("Exceeded retry limit on ID generation while "
|
||||
"creating ResourceClass %(name)s"),
|
||||
LOG.warning("Exceeded retry limit on ID generation while "
|
||||
"creating ResourceClass %(name)s",
|
||||
{'name': self.name})
|
||||
msg = _("creating resource class %s") % self.name
|
||||
raise exception.MaxDBRetriesExceeded(action=msg)
|
||||
|
@ -20,7 +20,6 @@ from nova import availability_zones
|
||||
from nova import context as nova_context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova.i18n import _LW
|
||||
from nova.notifications.objects import base as notification
|
||||
from nova.notifications.objects import service as service_notification
|
||||
from nova import objects
|
||||
@ -402,8 +401,8 @@ class Service(base.NovaPersistentObject, base.NovaObject,
|
||||
@base.remotable_classmethod
|
||||
def get_minimum_version_multi(cls, context, binaries, use_slave=False):
|
||||
if not all(binary.startswith('nova-') for binary in binaries):
|
||||
LOG.warning(_LW('get_minimum_version called with likely-incorrect '
|
||||
'binaries `%s\''), ','.join(binaries))
|
||||
LOG.warning('get_minimum_version called with likely-incorrect '
|
||||
'binaries `%s\'', ','.join(binaries))
|
||||
raise exception.ObjectActionError(action='get_minimum_version',
|
||||
reason='Invalid binary prefix')
|
||||
|
||||
|
@ -21,7 +21,6 @@ from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _LW
|
||||
from nova import objects
|
||||
from nova.objects import fields
|
||||
from nova.pci import stats
|
||||
@ -168,9 +167,9 @@ class PciDevTracker(object):
|
||||
try:
|
||||
existed.remove()
|
||||
except exception.PciDeviceInvalidStatus as e:
|
||||
LOG.warning(_LW("Trying to remove device with %(status)s "
|
||||
LOG.warning("Trying to remove device with %(status)s "
|
||||
"ownership %(instance_uuid)s because of "
|
||||
"%(pci_exception)s"),
|
||||
"%(pci_exception)s",
|
||||
{'status': existed.status,
|
||||
'instance_uuid': existed.instance_uuid,
|
||||
'pci_exception': e.format_message()})
|
||||
@ -228,8 +227,8 @@ class PciDevTracker(object):
|
||||
dev.claim(instance_uuid)
|
||||
if instance_numa_topology and any(
|
||||
dev.numa_node is None for dev in devs):
|
||||
LOG.warning(_LW("Assigning a pci device without numa affinity to"
|
||||
"instance %(instance)s which has numa topology"),
|
||||
LOG.warning("Assigning a pci device without numa affinity to "
|
||||
"instance %(instance)s which has numa topology",
|
||||
{'instance': instance_uuid})
|
||||
return devs
|
||||
|
||||
|
@ -21,7 +21,6 @@ from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _LE
|
||||
from nova.objects import fields
|
||||
from nova.objects import pci_device_pool
|
||||
from nova.pci import utils
|
||||
@ -157,12 +156,12 @@ class PciDeviceStats(object):
|
||||
# Failed to allocate the required number of devices
|
||||
# Return the devices already allocated back to their pools
|
||||
if sum([pool['count'] for pool in pools]) < count:
|
||||
LOG.error(_LE("Failed to allocate PCI devices for instance."
|
||||
LOG.error("Failed to allocate PCI devices for instance."
|
||||
" Unassigning devices back to pools."
|
||||
" This should not happen, since the scheduler"
|
||||
" should have accurate information, and allocation"
|
||||
" during claims is controlled via a hold"
|
||||
" on the compute node semaphore"))
|
||||
" on the compute node semaphore")
|
||||
for d in range(len(alloc_devices)):
|
||||
self.add_device(alloc_devices.pop())
|
||||
return None
|
||||
|
@ -23,7 +23,6 @@ from oslo_log import log as logging
|
||||
|
||||
|
||||
from nova import exception
|
||||
from nova.i18n import _LW
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -152,9 +151,9 @@ def get_mac_by_pci_address(pci_addr, pf_interface=False):
|
||||
mac = next(f).strip()
|
||||
return mac
|
||||
except (IOError, StopIteration) as e:
|
||||
LOG.warning(_LW("Could not find the expected sysfs file for "
|
||||
LOG.warning("Could not find the expected sysfs file for "
|
||||
"determining the MAC address of the PCI device "
|
||||
"%(addr)s. May not be a NIC. Error: %(e)s"),
|
||||
"%(addr)s. May not be a NIC. Error: %(e)s",
|
||||
{'addr': pci_addr, 'e': e})
|
||||
raise exception.PciDeviceNotFoundById(id=pci_addr)
|
||||
|
||||
|