Merge "Remove translation of log messages"
commit 09ed3b5626
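The change applies one mechanical pattern across the affected modules: log messages are no longer wrapped in the oslo.i18n marker functions (_LW, _LI, _LE), while user-facing exception messages keep _. A minimal before/after sketch of the pattern (illustrative names only, not taken from the diff; it assumes oslo.log is installed):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def remove_chain(name, chains):
        if name not in chains:
            # Before: LOG.warning(_LW('Attempted to remove chain %s which '
            #                         'does not exist'), name)
            # After: the translation marker is dropped; the message string and
            # the lazy %-interpolation arguments are unchanged.
            LOG.warning('Attempted to remove chain %s which does not exist',
                        name)
            return
        chains.remove(name)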
@@ -19,7 +19,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 import nova.conf
-from nova.i18n import _LW
 
 LOG = logging.getLogger(__name__)
 CONF = nova.conf.CONF
@@ -30,12 +29,12 @@ api_class = None
 try:
     api_class = CONF.key_manager.api_class
 except cfg.NoSuchOptError:
-    LOG.warning(_LW("key_manager.api_class is not set, will use deprecated "
-                    "option keymgr.api_class if set"))
+    LOG.warning("key_manager.api_class is not set, will use deprecated "
+                "option keymgr.api_class if set")
     try:
         api_class = CONF.keymgr.api_class
     except cfg.NoSuchOptError:
-        LOG.warning(_LW("keymgr.api_class is not set"))
+        LOG.warning("keymgr.api_class is not set")
 
 deprecated_barbican = 'nova.keymgr.barbican.BarbicanKeyManager'
 barbican = 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager'
@@ -45,8 +44,8 @@ castellan_mock = ('castellan.tests.unit.key_manager.mock_key_manager.'
 
 
 def log_deprecated_warning(deprecated, castellan):
-    LOG.warning(_LW("key manager api_class set to use deprecated option "
-                    "%(deprecated)s, using %(castellan)s instead"),
+    LOG.warning("key manager api_class set to use deprecated option "
+                "%(deprecated)s, using %(castellan)s instead",
                 {'deprecated': deprecated, 'castellan': castellan})
 
 if api_class == deprecated_barbican:
@@ -58,7 +57,7 @@ elif api_class == deprecated_mock:
 elif api_class is None:
     # TODO(kfarr): key_manager.api_class should be set in DevStack, and this
     # block can be removed
-    LOG.warning(_LW("key manager not set, using insecure default %s"),
+    LOG.warning("key manager not set, using insecure default %s",
                 castellan_mock)
     api_class = castellan_mock
 
@@ -39,7 +39,7 @@ from oslo_log import log as logging
 
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 
 
 CONF = nova.conf.CONF
@@ -55,8 +55,8 @@ class ConfKeyManager(key_manager.KeyManager):
     """
 
     def __init__(self, configuration):
-        LOG.warning(_LW('This key manager is insecure and is not recommended '
-                        'for production deployments'))
+        LOG.warning('This key manager is insecure and is not recommended '
+                    'for production deployments')
         super(ConfKeyManager, self).__init__(configuration)
 
         self.key_id = '00000000-0000-0000-0000-000000000000'
@@ -128,4 +128,4 @@ class ConfKeyManager(key_manager.KeyManager):
             raise exception.KeyManagerError(
                 reason="cannot delete non-existent key")
 
-        LOG.warning(_LW("Not deleting key %s"), managed_object_id)
+        LOG.warning("Not deleting key %s", managed_object_id)
@@ -21,7 +21,6 @@ from oslo_log import log as logging
 from oslo_utils import strutils
 
 from nova import exception
-from nova.i18n import _LI
 from nova.network import base_api
 from nova.network import floating_ips
 from nova.network import model as network_model
@@ -182,8 +181,8 @@ class API(base_api.NetworkAPI):
         if orig_instance_uuid:
             msg_dict = dict(address=floating_address,
                             instance_id=orig_instance_uuid)
-            LOG.info(_LI('re-assign floating IP %(address)s from '
-                         'instance %(instance_id)s'), msg_dict)
+            LOG.info('re-assign floating IP %(address)s from '
+                     'instance %(instance_id)s', msg_dict)
             orig_instance = objects.Instance.get_by_uuid(
                 context, orig_instance_uuid, expected_attrs=['flavor'])
 
@@ -22,7 +22,7 @@ from oslo_utils import excutils
 
 from nova.db import base
 from nova import hooks
-from nova.i18n import _, _LE
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import objects
 
@@ -56,7 +56,7 @@ def update_instance_cache_with_nw_info(impl, context, instance,
         instance.info_cache = ic
     except Exception:
         with excutils.save_and_reraise_exception():
-            LOG.exception(_LE('Failed storing info cache'), instance=instance)
+            LOG.exception('Failed storing info cache', instance=instance)
 
 
 def refresh_cache(f):
@@ -18,7 +18,6 @@ from oslo_log import log as logging
 from oslo_utils import importutils
 
 import nova.conf
-from nova.i18n import _LE, _LI
 
 
 CONF = nova.conf.CONF
@@ -30,9 +29,9 @@ def load_network_driver(network_driver=None):
         network_driver = CONF.network_driver
 
     if not network_driver:
-        LOG.error(_LE("Network driver option required, but not specified"))
+        LOG.error("Network driver option required, but not specified")
         sys.exit(1)
 
-    LOG.info(_LI("Loading network driver '%s'"), network_driver)
+    LOG.info("Loading network driver '%s'", network_driver)
 
     return importutils.import_module(network_driver)
@@ -27,7 +27,6 @@ import nova.conf
 from nova import context
 from nova.db import base
 from nova import exception
-from nova.i18n import _LE, _LI, _LW
 from nova.network import rpcapi as network_rpcapi
 from nova import objects
 from nova import rpc
@@ -149,7 +148,7 @@ class FloatingIP(object):
                         address,
                         affect_auto_assigned=True)
                 except exception.FloatingIpNotAssociated:
-                    LOG.info(_LI("Floating IP %s is not associated. Ignore."),
+                    LOG.info("Floating IP %s is not associated. Ignore.",
                              address)
                 # deallocate if auto_assigned
                 if floating_ip.auto_assigned:
@@ -168,12 +167,12 @@ class FloatingIP(object):
 
         if floating_ip.project_id != context.project_id:
             if floating_ip.project_id is None:
-                LOG.warning(_LW('Address |%(address)s| is not allocated'),
+                LOG.warning('Address |%(address)s| is not allocated',
                             {'address': floating_ip.address})
                 raise exception.Forbidden()
             else:
-                LOG.warning(_LW('Address |%(address)s| is not allocated '
-                                'to your project |%(project)s|'),
+                LOG.warning('Address |%(address)s| is not allocated '
+                            'to your project |%(project)s|',
                             {'address': floating_ip.address,
                              'project': context.project_id})
                 raise exception.Forbidden()
@@ -206,8 +205,8 @@ class FloatingIP(object):
             objects.Quotas.check_deltas(context, {'floating_ips': 1},
                                         project_id)
         except exception.OverQuota:
-            LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
-                            "floating IP"), context.project_id)
+            LOG.warning("Quota exceeded for %s, tried to allocate "
+                        "floating IP", context.project_id)
             raise exception.FloatingIpLimitExceeded()
 
         floating_ip = objects.FloatingIP.allocate_address(
@@ -223,8 +222,8 @@ class FloatingIP(object):
                                             project_id)
             except exception.OverQuota:
                 objects.FloatingIP.deallocate(context, floating_ip.address)
-                LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
-                                "floating IP"), context.project_id)
+                LOG.warning("Quota exceeded for %s, tried to allocate "
+                            "floating IP", context.project_id)
                 raise exception.FloatingIpLimitExceeded()
 
         payload = dict(project_id=project_id, floating_ip=floating_ip)
@@ -344,12 +343,12 @@ class FloatingIP(object):
                 objects.FloatingIP.disassociate(context,
                                                 floating_address)
             except Exception:
-                LOG.warning(_LW('Failed to disassociated floating '
-                                'address: %s'), floating_address)
+                LOG.warning('Failed to disassociated floating '
+                            'address: %s', floating_address)
                 pass
             if "Cannot find device" in six.text_type(e):
                 try:
-                    LOG.error(_LE('Interface %s not found'), interface)
+                    LOG.error('Interface %s not found', interface)
                 except Exception:
                     pass
                 raise exception.NoFloatingIpInterface(
@@ -503,15 +502,14 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return
 
-        LOG.info(_LI("Starting migration network for instance %s"),
-                 instance_uuid)
+        LOG.info("Starting migration network for instance %s", instance_uuid)
         for address in floating_addresses:
             floating_ip = objects.FloatingIP.get_by_address(context, address)
 
             if self._is_stale_floating_ip_address(context, floating_ip):
-                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
+                LOG.warning("Floating IP address |%(address)s| no longer "
                             "belongs to instance %(instance_uuid)s. "
-                            "Will not migrate it "),
+                            "Will not migrate it ",
                             {'address': address,
                              'instance_uuid': instance_uuid})
                 continue
@@ -539,16 +537,15 @@ class FloatingIP(object):
         if not floating_addresses or (source and source == dest):
             return
 
-        LOG.info(_LI("Finishing migration network for instance %s"),
-                 instance_uuid)
+        LOG.info("Finishing migration network for instance %s", instance_uuid)
 
         for address in floating_addresses:
            floating_ip = objects.FloatingIP.get_by_address(context, address)
 
            if self._is_stale_floating_ip_address(context, floating_ip):
-                LOG.warning(_LW("Floating IP address |%(address)s| no longer "
+                LOG.warning("Floating IP address |%(address)s| no longer "
                             "belongs to instance %(instance_uuid)s. "
-                            "Will not setup it."),
+                            "Will not setup it.",
                             {'address': address,
                              'instance_uuid': instance_uuid})
                 continue
@@ -589,10 +586,10 @@ class FloatingIP(object):
             if domain_entry:
                 domains.append(domain_entry)
             else:
-                LOG.warning(_LW('Database inconsistency: DNS domain |%s| is '
+                LOG.warning('Database inconsistency: DNS domain |%s| is '
                             'registered in the Nova db but not visible to '
                             'either the floating or instance DNS driver. '
-                            'It will be ignored.'), dns_domain.domain)
+                            'It will be ignored.', dns_domain.domain)
 
         return domains
 
@@ -629,8 +626,8 @@ class FloatingIP(object):
         try:
             self.instance_dns_manager.create_domain(domain)
         except exception.FloatingIpDNSExists:
-            LOG.warning(_LW('Domain |%(domain)s| already exists, '
-                            'changing zone to |%(av_zone)s|.'),
+            LOG.warning('Domain |%(domain)s| already exists, '
+                        'changing zone to |%(av_zone)s|.',
                         {'domain': domain, 'av_zone': av_zone})
 
     def create_public_dns_domain(self, context, domain, project):
@@ -638,8 +635,8 @@ class FloatingIP(object):
         try:
             self.floating_dns_manager.create_domain(domain)
         except exception.FloatingIpDNSExists:
-            LOG.warning(_LW('Domain |%(domain)s| already exists, '
-                            'changing project to |%(project)s|.'),
+            LOG.warning('Domain |%(domain)s| already exists, '
+                        'changing project to |%(project)s|.',
                         {'domain': domain, 'project': project})
 
     def delete_dns_domain(self, context, domain):
@@ -24,7 +24,7 @@ from oslo_log import log as logging
 
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova.network import dns_driver
 from nova import utils
 
@@ -65,8 +65,8 @@ class DNSEntry(object):
         if not entry:
             return None
         if len(entry) > 1:
-            LOG.warning(_LW("Found multiple matches for domain "
-                            "%(domain)s.\n%(entry)s"),
+            LOG.warning("Found multiple matches for domain "
+                        "%(domain)s.\n%(entry)s",
                         domain, entry)
         return entry[0]
 
@@ -92,8 +92,8 @@ class DNSEntry(object):
         if name.endswith(z):
             dequalified = name[0:name.rfind(z)]
         else:
-            LOG.warning(_LW("Unable to dequalify. %(name)s is not in "
-                            "%(domain)s.\n"),
+            LOG.warning("Unable to dequalify. %(name)s is not in "
+                        "%(domain)s.\n",
                         {'name': name,
                          'domain': self.qualified_domain})
             dequalified = None
@@ -333,6 +333,5 @@ class LdapDNS(dns_driver.DNSDriver):
         dEntry.delete()
 
     def delete_dns_file(self):
-        LOG.warning(_LW("This shouldn't be getting called except during "
-                        "testing."))
+        LOG.warning("This shouldn't be getting called except during testing.")
        pass
@@ -36,7 +36,7 @@ import six
 
 import nova.conf
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import objects
 from nova.pci import utils as pci_utils
@@ -141,8 +141,8 @@ class IptablesTable(object):
         chain_set = self.unwrapped_chains
 
         if name not in chain_set:
-            LOG.warning(_LW('Attempted to remove chain %s which does not '
-                            'exist'), name)
+            LOG.warning('Attempted to remove chain %s which does not exist',
+                        name)
             return
         self.dirty = True
 
@@ -210,8 +210,8 @@ class IptablesTable(object):
             self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
             self.dirty = True
         except ValueError:
-            LOG.warning(_LW('Tried to remove rule that was not there:'
-                            ' %(chain)r %(rule)r %(wrap)r %(top)r'),
+            LOG.warning('Tried to remove rule that was not there:'
+                        ' %(chain)r %(rule)r %(wrap)r %(top)r',
                         {'chain': chain, 'rule': rule,
                          'top': top, 'wrap': wrap})
 
@@ -714,8 +714,8 @@ def ensure_floating_forward(floating_ip, fixed_ip, device, network):
     regex = '.*\s+%s(/32|\s+|$)' % floating_ip
     num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
     if num_rules:
-        msg = _LW('Removed %(num)d duplicate rules for floating IP %(float)s')
-        LOG.warning(msg, {'num': num_rules, 'float': floating_ip})
+        LOG.warning('Removed %(num)d duplicate rules for floating IP '
+                    '%(float)s', {'num': num_rules, 'float': floating_ip})
     for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
         iptables_manager.ipv4['nat'].add_rule(chain, rule)
     iptables_manager.apply()
@@ -762,7 +762,7 @@ def clean_conntrack(fixed_ip):
         _execute('conntrack', '-D', '-r', fixed_ip, run_as_root=True,
                  check_exit_code=[0, 1])
     except processutils.ProcessExecutionError:
-        LOG.exception(_LE('Error deleting conntrack entries for %s'), fixed_ip)
+        LOG.exception('Error deleting conntrack entries for %s', fixed_ip)
 
 
 def _enable_ipv4_forwarding():
@@ -1013,7 +1013,7 @@ def restart_dhcp(context, dev, network_ref, fixedips):
                 _add_dnsmasq_accept_rules(dev)
                 return
             except Exception as exc:
-                LOG.error(_LE('kill -HUP dnsmasq threw %s'), exc)
+                LOG.error('kill -HUP dnsmasq threw %s', exc)
         else:
             LOG.debug('Pid %d is stale, relaunching dnsmasq', pid)
 
@@ -1091,7 +1091,7 @@ interface %s
             try:
                 _execute('kill', pid, run_as_root=True)
             except Exception as exc:
-                LOG.error(_LE('killing radvd threw %s'), exc)
+                LOG.error('killing radvd threw %s', exc)
         else:
             LOG.debug('Pid %d is stale, relaunching radvd', pid)
 
@@ -1123,7 +1123,7 @@ def _host_dhcp(fixedip):
         # to truncate the hostname to only 63 characters.
         hostname = fixedip.instance.hostname
         if len(hostname) > 63:
-            LOG.warning(_LW('hostname %s too long, truncating.'), hostname)
+            LOG.warning('hostname %s too long, truncating.', hostname)
             hostname = fixedip.instance.hostname[:2] + '-' +\
                        fixedip.instance.hostname[-60:]
     if CONF.use_single_default_gateway:
@@ -1258,7 +1258,7 @@ def _ovs_vsctl(args):
     try:
         return utils.execute(*full_args, run_as_root=True)
     except Exception as e:
-        LOG.error(_LE("Unable to execute %(cmd)s. Exception: %(exception)s"),
+        LOG.error("Unable to execute %(cmd)s. Exception: %(exception)s",
                   {'cmd': full_args, 'exception': e})
         raise exception.OvsConfigurationFailure(inner_exception=e)
 
@@ -1322,9 +1322,9 @@ def create_tap_dev(dev, mac_address=None, multiqueue=False):
     except processutils.ProcessExecutionError:
         if multiqueue:
             LOG.warning(
-                _LW('Failed to create a tap device with ip tuntap. '
+                'Failed to create a tap device with ip tuntap. '
                 'tunctl does not support creation of multi-queue '
-                'enabled devices, skipping fallback.'))
+                'enabled devices, skipping fallback.')
             raise
 
         # Second option: tunctl
@@ -1359,7 +1359,7 @@ def delete_net_dev(dev):
         LOG.debug("Net device removed: '%s'", dev)
     except processutils.ProcessExecutionError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_LE("Failed removing net device: '%s'"), dev)
+            LOG.error("Failed removing net device: '%s'", dev)
 
 
 def delete_bridge_dev(dev):
@@ -1370,7 +1370,7 @@ def delete_bridge_dev(dev):
         utils.execute('brctl', 'delbr', dev, run_as_root=True)
     except processutils.ProcessExecutionError:
         with excutils.save_and_reraise_exception():
-            LOG.error(_LE("Failed removing bridge device: '%s'"), dev)
+            LOG.error("Failed removing bridge device: '%s'", dev)
 
 
 # Similar to compute virt layers, the Linux network node
@@ -1681,18 +1681,18 @@ def _exec_ebtables(*cmd, **kwargs):
             # See if we can retry the error.
             if any(error in exc.stderr for error in retry_strings):
                 if count > attempts and check_exit_code:
-                    LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
+                    LOG.warning('%s failed. Not Retrying.', ' '.join(cmd))
                     raise
                 else:
                     # We need to sleep a bit before retrying
-                    LOG.warning(_LW("%(cmd)s failed. Sleeping %(time)s "
-                                    "seconds before retry."),
+                    LOG.warning("%(cmd)s failed. Sleeping %(time)s "
+                                "seconds before retry.",
                                 {'cmd': ' '.join(cmd), 'time': sleep})
                     time.sleep(sleep)
             else:
                 # Not eligible for retry
                 if check_exit_code:
-                    LOG.warning(_LW('%s failed. Not Retrying.'), ' '.join(cmd))
+                    LOG.warning('%s failed. Not Retrying.', ' '.join(cmd))
                     raise
                 else:
                     return
@@ -45,7 +45,7 @@ import six
 import nova.conf
 from nova import context
 from nova import exception
-from nova.i18n import _, _LI, _LE, _LW
+from nova.i18n import _
 from nova import ipv6
 from nova import manager
 from nova.network import api as network_api
@@ -250,7 +250,7 @@ class NetworkManager(manager.Manager):
                                                    network['uuid'], self.host)
                 dev = self.driver.get_dev(network)
                 self.driver.update_dns(ctxt, dev, network)
-                LOG.info(_LI('Configured network %(network)s on host %(host)s'),
+                LOG.info('Configured network %(network)s on host %(host)s',
                          {'network': network['uuid'], 'host': self.host})
 
     @periodic_task.periodic_task
@@ -424,7 +424,7 @@ class NetworkManager(manager.Manager):
 
         net_info = self.get_instance_nw_info(admin_context, instance_uuid,
                                              rxtx_factor, host)
-        LOG.info(_LI("Allocated network: '%s' for instance"), net_info,
+        LOG.info("Allocated network: '%s' for instance", net_info,
                  instance_uuid=instance_uuid)
         return net_info
 
@@ -486,7 +486,7 @@ class NetworkManager(manager.Manager):
         # deallocate vifs (mac addresses)
         objects.VirtualInterface.delete_by_instance_uuid(
             read_deleted_context, instance_uuid)
-        LOG.info(_LI("Network deallocated for instance (fixed IPs: '%s')"),
+        LOG.info("Network deallocated for instance (fixed IPs: '%s')",
                  fixed_ips, instance_uuid=instance_uuid)
 
     @messaging.expected_exceptions(exception.InstanceNotFound)
@@ -521,13 +521,13 @@ class NetworkManager(manager.Manager):
         for fixed_ip in fixed_ips:
             vif = fixed_ip.virtual_interface
             if not vif:
-                LOG.warning(_LW('No VirtualInterface for FixedIP: %s'),
+                LOG.warning('No VirtualInterface for FixedIP: %s',
                             str(fixed_ip.address), instance_uuid=instance_uuid)
                 continue
 
             if not fixed_ip.network:
-                LOG.warning(_LW('No Network for FixedIP: %s'),
+                LOG.warning('No Network for FixedIP: %s',
                             str(fixed_ip.address), instance_uuid=instance_uuid)
                 continue
 
             if vif.uuid in vifs:
@@ -756,17 +756,17 @@ class NetworkManager(manager.Manager):
 
         domainref = objects.DNSDomain.get_by_domain(context, instance_domain)
         if domainref is None:
-            LOG.warning(_LW('instance-dns-zone not found |%s|.'),
+            LOG.warning('instance-dns-zone not found |%s|.',
                         instance_domain, instance=instance)
             return True
         dns_zone = domainref.availability_zone
 
         instance_zone = instance.get('availability_zone')
         if dns_zone and (dns_zone != instance_zone):
-            LOG.warning(_LW('instance-dns-zone is |%(domain)s|, '
+            LOG.warning('instance-dns-zone is |%(domain)s|, '
                         'which is in availability zone |%(zone)s|. '
                         'Instance is in zone |%(zone2)s|. '
-                        'No DNS record will be created.'),
+                        'No DNS record will be created.',
                         {'domain': instance_domain,
                          'zone': dns_zone,
                          'zone2': instance_zone},
@@ -801,9 +801,9 @@ class NetworkManager(manager.Manager):
             quotas.check_deltas(context, {'fixed_ips': 1}, quota_project)
         except exception.OverQuota as exc:
             count = exc.kwargs['usages']['fixed_ips']
-            LOG.warning(_LW("Quota exceeded for project %(pid)s, tried to "
+            LOG.warning("Quota exceeded for project %(pid)s, tried to "
                         "allocate fixed IP. %(used)s of %(allowed)s are "
-                        "in use or are already reserved."),
+                        "in use or are already reserved.",
                         {'pid': quota_project, 'used': count,
                          'allowed': exc.kwargs['quotas']['fixed_ips']},
                         instance_uuid=instance_id)
@@ -865,10 +865,10 @@ class NetworkManager(manager.Manager):
                 # outermost catch-all except block.
                 count = exc.kwargs['usages']['fixed_ips']
                 allowed = exc.kwargs['quotas']['fixed_ips']
-                LOG.warning(_LW("Quota exceeded for project %(pid)s, "
+                LOG.warning("Quota exceeded for project %(pid)s, "
                             "tried to allocate fixed IP. %(used)s "
                             "of %(allowed)s are in use or are "
-                            "already reserved."),
+                            "already reserved.",
                             {'pid': quota_project, 'used': count,
                              'allowed': allowed},
                             instance_uuid=instance_id)
@@ -922,9 +922,9 @@ class NetworkManager(manager.Manager):
                 try:
                     f()
                 except Exception:
-                    LOG.warning(_LW('Error cleaning up fixed IP '
+                    LOG.warning('Error cleaning up fixed IP '
                                 'allocation. Manual cleanup may '
-                                'be required.'), exc_info=True)
+                                'be required.', exc_info=True)
 
     def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
                             instance=None):
@@ -969,15 +969,15 @@ class NetworkManager(manager.Manager):
             # so we log a message to help track down
            # the possible race.
            if not vif_id:
-                LOG.info(_LI("Unable to release %s because vif "
-                             "doesn't exist"), address)
+                LOG.info("Unable to release %s because vif doesn't exist",
+                         address)
                 return
 
            vif = objects.VirtualInterface.get_by_id(context, vif_id)
 
            if not vif:
-                LOG.info(_LI("Unable to release %s because vif "
-                             "object doesn't exist"), address)
+                LOG.info("Unable to release %s because vif "
+                         "object doesn't exist", address)
                 return
 
            # NOTE(cfb): Call teardown before release_dhcp to ensure
@@ -997,8 +997,8 @@ class NetworkManager(manager.Manager):
                     # release_dhcp on the local driver
                     self.driver.release_dhcp(dev, address, vif.address)
                 except exception.NetworkDhcpReleaseFailed:
-                    LOG.error(_LE("Error releasing DHCP for IP %(address)s"
-                                  " with MAC %(mac_address)s"),
+                    LOG.error("Error releasing DHCP for IP %(address)s"
+                              " with MAC %(mac_address)s",
                               {'address': address,
                                'mac_address': vif.address},
                               instance=instance)
@@ -1028,12 +1028,12 @@ class NetworkManager(manager.Manager):
         fixed_ip = objects.FixedIP.get_by_address(context, address)
 
         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s leased that is not associated'), fixed_ip)
+            LOG.warning('IP %s leased that is not associated', fixed_ip)
             return
         fixed_ip.leased = True
         fixed_ip.save()
         if not fixed_ip.allocated:
-            LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), fixed_ip,
+            LOG.warning('IP |%s| leased that isn\'t allocated', fixed_ip,
                         instance_uuid=fixed_ip.instance_uuid)
 
     def release_fixed_ip(self, context, address, mac=None):
@@ -1042,10 +1042,10 @@ class NetworkManager(manager.Manager):
         fixed_ip = objects.FixedIP.get_by_address(context, address)
 
         if fixed_ip.instance_uuid is None:
-            LOG.warning(_LW('IP %s released that is not associated'), fixed_ip)
+            LOG.warning('IP %s released that is not associated', fixed_ip)
             return
         if not fixed_ip.leased:
-            LOG.warning(_LW('IP %s released that was not leased'), fixed_ip,
+            LOG.warning('IP %s released that was not leased', fixed_ip,
                         instance_uuid=fixed_ip.instance_uuid)
         else:
             fixed_ip.leased = False
@@ -1067,11 +1067,11 @@ class NetworkManager(manager.Manager):
                 LOG.debug('Found VIF: %s', vif,
                           instance_uuid=fixed_ip.instance_uuid)
                 if vif.instance_uuid != fixed_ip.instance_uuid:
-                    LOG.info(_LI("Ignoring request to release fixed IP "
+                    LOG.info("Ignoring request to release fixed IP "
                              "%(address)s with MAC %(mac)s since it "
                              "is now associated with a new instance "
                             "that is in the process of allocating "
-                             "it's network."),
+                             "it's network.",
                              {'address': address, 'mac': mac},
                              instance_uuid=fixed_ip.instance_uuid)
                     return
@@ -1155,10 +1155,9 @@ class NetworkManager(manager.Manager):
                 each_subnet_size = fixnet.size / kwargs["num_networks"]
                 if each_subnet_size > CONF.network_size:
                     subnet = 32 - int(math.log(CONF.network_size, 2))
-                    oversize_msg = _LW(
+                    LOG.warning(
                         'Subnet(s) too large, defaulting to /%s.'
-                        ' To override, specify network_size flag.') % subnet
-                    LOG.warning(oversize_msg)
+                        ' To override, specify network_size flag.', subnet)
                     kwargs["network_size"] = CONF.network_size
                 else:
                     kwargs["network_size"] = fixnet.size
@@ -20,7 +20,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 
 from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _
 from nova.network import dns_driver
 
 CONF = cfg.CONF
@@ -115,8 +115,7 @@ class MiniDNS(dns_driver.DNSDriver):
         outfile.close()
         shutil.move(outfile.name, self.filename)
         if not deleted:
-            LOG.warning(_LW('Cannot delete entry |%s|'),
-                        self.qualify(name, domain))
+            LOG.warning('Cannot delete entry |%s|', self.qualify(name, domain))
             raise exception.NotFound
 
     def modify_address(self, name, address, domain):
@@ -190,10 +189,10 @@ class MiniDNS(dns_driver.DNSDriver):
                     entry['domain'] != fqdomain.lower()):
                 outfile.write(line)
             else:
-                LOG.info(_LI("deleted %s"), entry)
+                LOG.info("deleted %s", entry)
                 deleted = True
         outfile.close()
         shutil.move(outfile.name, self.filename)
         if not deleted:
-            LOG.warning(_LW('Cannot delete domain |%s|'), fqdomain)
+            LOG.warning('Cannot delete domain |%s|', fqdomain)
             raise exception.NotFound
@ -28,7 +28,7 @@ import six
|
|||||||
from nova.compute import utils as compute_utils
|
from nova.compute import utils as compute_utils
|
||||||
import nova.conf
|
import nova.conf
|
||||||
from nova import exception
|
from nova import exception
|
||||||
from nova.i18n import _, _LE, _LI, _LW
|
from nova.i18n import _
|
||||||
from nova.network import base_api
|
from nova.network import base_api
|
||||||
from nova.network import model as network_model
|
from nova.network import model as network_model
|
||||||
from nova.network.neutronv2 import constants
|
from nova.network.neutronv2 import constants
|
||||||
@ -109,9 +109,9 @@ class ClientWrapper(clientv20.Client):
|
|||||||
# should be able to regenerate a valid by using the
|
# should be able to regenerate a valid by using the
|
||||||
# Neutron admin credential configuration located in
|
# Neutron admin credential configuration located in
|
||||||
# nova.conf.
|
# nova.conf.
|
||||||
LOG.error(_LE("Neutron client was not able to generate a "
|
LOG.error("Neutron client was not able to generate a "
|
||||||
"valid admin token, please verify Neutron "
|
"valid admin token, please verify Neutron "
|
||||||
"admin credential located in nova.conf"))
|
"admin credential located in nova.conf")
|
||||||
raise exception.NeutronAdminCredentialConfigurationInvalid()
|
raise exception.NeutronAdminCredentialConfigurationInvalid()
|
||||||
except neutron_client_exc.Forbidden as e:
|
except neutron_client_exc.Forbidden as e:
|
||||||
raise exception.Forbidden(e)
|
raise exception.Forbidden(e)
|
||||||
@ -162,8 +162,8 @@ def _is_not_duplicate(item, items, items_list_name, instance):
|
|||||||
# item is not part of the items list so if it is part of it
|
# item is not part of the items list so if it is part of it
|
||||||
# we should at least log it as a warning
|
# we should at least log it as a warning
|
||||||
if present:
|
if present:
|
||||||
LOG.warning(_LW("%(item)s already exists in list: %(list_name)s "
|
LOG.warning("%(item)s already exists in list: %(list_name)s "
|
||||||
"containing: %(items)s. ignoring it"),
|
"containing: %(items)s. ignoring it",
|
||||||
{'item': item,
|
{'item': item,
|
||||||
'list_name': items_list_name,
|
'list_name': items_list_name,
|
||||||
'items': items},
|
'items': items},
|
||||||
@ -246,8 +246,8 @@ class API(base_api.NetworkAPI):
|
|||||||
return updated_port
|
return updated_port
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("Unable to update binding profile "
|
LOG.error("Unable to update binding profile "
|
||||||
"for port: %(port)s due to failure: %(error)s"),
|
"for port: %(port)s due to failure: %(error)s",
|
||||||
{'port': port_id, 'error': ex},
|
{'port': port_id, 'error': ex},
|
||||||
instance=instance)
|
instance=instance)
|
||||||
|
|
||||||
@ -397,8 +397,8 @@ class API(base_api.NetworkAPI):
|
|||||||
instance=instance)
|
instance=instance)
|
||||||
return port
|
return port
|
||||||
except neutron_client_exc.InvalidIpForNetworkClient:
|
except neutron_client_exc.InvalidIpForNetworkClient:
|
||||||
LOG.warning(_LW('Neutron error: %(ip)s is not a valid IP address '
|
LOG.warning('Neutron error: %(ip)s is not a valid IP address '
|
||||||
'for network %(network_id)s.'),
|
'for network %(network_id)s.',
|
||||||
{'ip': fixed_ip, 'network_id': network_id},
|
{'ip': fixed_ip, 'network_id': network_id},
|
||||||
instance=instance)
|
instance=instance)
|
||||||
msg = (_('Fixed IP %(ip)s is not a valid ip address for '
|
msg = (_('Fixed IP %(ip)s is not a valid ip address for '
|
||||||
@ -406,22 +406,22 @@ class API(base_api.NetworkAPI):
|
|||||||
{'ip': fixed_ip, 'network_id': network_id})
|
{'ip': fixed_ip, 'network_id': network_id})
|
||||||
raise exception.InvalidInput(reason=msg)
|
raise exception.InvalidInput(reason=msg)
|
||||||
except neutron_client_exc.IpAddressInUseClient:
|
except neutron_client_exc.IpAddressInUseClient:
|
||||||
LOG.warning(_LW('Neutron error: Fixed IP %s is '
|
LOG.warning('Neutron error: Fixed IP %s is '
|
||||||
'already in use.'), fixed_ip, instance=instance)
|
'already in use.', fixed_ip, instance=instance)
|
||||||
msg = _("Fixed IP %s is already in use.") % fixed_ip
|
msg = _("Fixed IP %s is already in use.") % fixed_ip
|
||||||
raise exception.FixedIpAlreadyInUse(message=msg)
|
raise exception.FixedIpAlreadyInUse(message=msg)
|
||||||
except neutron_client_exc.OverQuotaClient:
|
except neutron_client_exc.OverQuotaClient:
|
||||||
LOG.warning(_LW(
|
LOG.warning(
|
||||||
'Neutron error: Port quota exceeded in tenant: %s'),
|
'Neutron error: Port quota exceeded in tenant: %s',
|
||||||
port_req_body['port']['tenant_id'], instance=instance)
|
port_req_body['port']['tenant_id'], instance=instance)
|
||||||
raise exception.PortLimitExceeded()
|
raise exception.PortLimitExceeded()
|
||||||
except neutron_client_exc.IpAddressGenerationFailureClient:
|
except neutron_client_exc.IpAddressGenerationFailureClient:
|
||||||
LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'),
|
LOG.warning('Neutron error: No more fixed IPs in network: %s',
|
||||||
network_id, instance=instance)
|
network_id, instance=instance)
|
||||||
raise exception.NoMoreFixedIps(net=network_id)
|
raise exception.NoMoreFixedIps(net=network_id)
|
||||||
except neutron_client_exc.NeutronClientException:
|
except neutron_client_exc.NeutronClientException:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.exception(_LE('Neutron error creating port on network %s'),
|
LOG.exception('Neutron error creating port on network %s',
|
||||||
network_id, instance=instance)
|
network_id, instance=instance)
|
||||||
|
|
||||||
def _update_port(self, port_client, instance, port_id,
|
def _update_port(self, port_client, instance, port_id,
|
||||||
@ -436,16 +436,16 @@ class API(base_api.NetworkAPI):
|
|||||||
except neutron_client_exc.MacAddressInUseClient:
|
except neutron_client_exc.MacAddressInUseClient:
|
||||||
mac_address = port_req_body['port'].get('mac_address')
|
mac_address = port_req_body['port'].get('mac_address')
|
||||||
network_id = port_req_body['port'].get('network_id')
|
network_id = port_req_body['port'].get('network_id')
|
||||||
LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
|
LOG.warning('Neutron error: MAC address %(mac)s is already '
|
||||||
'in use on network %(network)s.'),
|
'in use on network %(network)s.',
|
||||||
{'mac': mac_address, 'network': network_id},
|
{'mac': mac_address, 'network': network_id},
|
||||||
instance=instance)
|
instance=instance)
|
||||||
raise exception.PortInUse(port_id=mac_address)
|
raise exception.PortInUse(port_id=mac_address)
|
||||||
except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
|
except neutron_client_exc.HostNotCompatibleWithFixedIpsClient:
|
||||||
network_id = port_req_body['port'].get('network_id')
|
network_id = port_req_body['port'].get('network_id')
|
||||||
LOG.warning(_LW('Neutron error: Tried to bind a port with '
|
LOG.warning('Neutron error: Tried to bind a port with '
|
||||||
'fixed_ips to a host in the wrong segment on '
|
'fixed_ips to a host in the wrong segment on '
|
||||||
'network %(network)s.'),
|
'network %(network)s.',
|
||||||
{'network': network_id}, instance=instance)
|
{'network': network_id}, instance=instance)
|
||||||
raise exception.FixedIpInvalidOnHost(port_id=port_id)
|
raise exception.FixedIpInvalidOnHost(port_id=port_id)
|
||||||
|
|
||||||
@ -507,8 +507,8 @@ class API(base_api.NetworkAPI):
|
|||||||
LOG.debug('Unable to unbind port %s as it no longer exists.',
|
LOG.debug('Unable to unbind port %s as it no longer exists.',
|
||||||
port_id)
|
port_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE("Unable to clear device ID "
|
LOG.exception("Unable to clear device ID for port '%s'",
|
||||||
"for port '%s'"), port_id)
|
port_id)
|
||||||
|
|
||||||
def _validate_requested_port_ids(self, context, instance, neutron,
|
def _validate_requested_port_ids(self, context, instance, neutron,
|
||||||
requested_networks):
|
requested_networks):
|
||||||
@ -1069,9 +1069,9 @@ class API(base_api.NetworkAPI):
|
|||||||
# only ever index a single device, which needs to be
|
# only ever index a single device, which needs to be
|
||||||
# successfully claimed for this to be called as part of
|
# successfully claimed for this to be called as part of
|
||||||
# allocate_networks method
|
# allocate_networks method
|
||||||
LOG.error(_LE("PCI request %s does not have a "
|
LOG.error("PCI request %s does not have a "
|
||||||
"unique device associated with it. Unable to "
|
"unique device associated with it. Unable to "
|
||||||
"determine MAC address"),
|
"determine MAC address",
|
||||||
pci_request, instance=instance)
|
pci_request, instance=instance)
|
||||||
return
|
return
|
||||||
pci_dev = pci_devs[0]
|
pci_dev = pci_devs[0]
|
||||||
@ -1080,8 +1080,8 @@ class API(base_api.NetworkAPI):
|
|||||||
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
|
mac = pci_utils.get_mac_by_pci_address(pci_dev.address)
|
||||||
except exception.PciDeviceNotFoundById as e:
|
except exception.PciDeviceNotFoundById as e:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
_LE("Could not determine MAC address for %(addr)s, "
|
"Could not determine MAC address for %(addr)s, "
|
||||||
"error: %(e)s"),
|
"error: %(e)s",
|
||||||
{"addr": pci_dev.address, "e": e}, instance=instance)
|
{"addr": pci_dev.address, "e": e}, instance=instance)
|
||||||
else:
|
else:
|
||||||
port_req_body['port']['mac_address'] = mac
|
port_req_body['port']['mac_address'] = mac
|
||||||
@ -1140,8 +1140,8 @@ class API(base_api.NetworkAPI):
|
|||||||
port_req_body = {'port': {'dns_name': instance.hostname}}
|
port_req_body = {'port': {'dns_name': instance.hostname}}
|
||||||
neutron.update_port(port_id, port_req_body)
|
neutron.update_port(port_id, port_req_body)
|
||||||
except neutron_client_exc.BadRequest:
|
except neutron_client_exc.BadRequest:
|
||||||
LOG.warning(_LW('Neutron error: Instance hostname '
|
LOG.warning('Neutron error: Instance hostname '
|
||||||
'%(hostname)s is not a valid DNS name'),
|
'%(hostname)s is not a valid DNS name',
|
||||||
{'hostname': instance.hostname}, instance=instance)
|
{'hostname': instance.hostname}, instance=instance)
|
||||||
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
|
msg = (_('Instance hostname %(hostname)s is not a valid DNS '
|
||||||
'name') % {'hostname': instance.hostname})
|
'name') % {'hostname': instance.hostname})
|
||||||
@ -1154,13 +1154,12 @@ class API(base_api.NetworkAPI):
|
|||||||
neutron.delete_port(port)
|
neutron.delete_port(port)
|
||||||
except neutron_client_exc.NeutronClientException as e:
|
except neutron_client_exc.NeutronClientException as e:
|
||||||
if e.status_code == 404:
|
if e.status_code == 404:
|
||||||
LOG.warning(_LW("Port %s does not exist"), port,
|
LOG.warning("Port %s does not exist", port,
|
||||||
instance=instance)
|
instance=instance)
|
||||||
else:
|
else:
|
||||||
exceptions.append(e)
|
exceptions.append(e)
|
||||||
LOG.warning(
|
LOG.warning("Failed to delete port %s for instance.",
|
||||||
_LW("Failed to delete port %s for instance."),
|
port, instance=instance, exc_info=True)
|
||||||
port, instance=instance, exc_info=True)
|
|
||||||
if len(exceptions) > 0 and raise_if_fail:
|
if len(exceptions) > 0 and raise_if_fail:
|
||||||
raise exceptions[0]
|
raise exceptions[0]
|
||||||
|
|
||||||
@ -1562,8 +1561,8 @@ class API(base_api.NetworkAPI):
|
|||||||
except neutron_client_exc.NetworkNotFoundClient:
|
except neutron_client_exc.NetworkNotFoundClient:
|
||||||
# This shouldn't happen since we just created the network, but
|
# This shouldn't happen since we just created the network, but
|
||||||
# handle it anyway.
|
# handle it anyway.
|
||||||
LOG.error(_LE('Automatically allocated network %(network_id)s '
|
LOG.error('Automatically allocated network %(network_id)s '
|
||||||
'was not found.'), {'network_id': topology['id']},
|
'was not found.', {'network_id': topology['id']},
|
||||||
instance=instance)
|
instance=instance)
|
||||||
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
|
raise exception.UnableToAutoAllocateNetwork(project_id=project_id)
|
||||||
|
|
||||||
@ -1767,8 +1766,8 @@ class API(base_api.NetworkAPI):
|
|||||||
|
|
||||||
msg_dict = dict(address=floating_address,
|
msg_dict = dict(address=floating_address,
|
||||||
 instance_id=orig_instance_uuid)
-LOG.info(_LI('re-assign floating IP %(address)s from '
-'instance %(instance_id)s'), msg_dict,
+LOG.info('re-assign floating IP %(address)s from '
+'instance %(instance_id)s', msg_dict,
 instance=instance)
 orig_instance = objects.Instance.get_by_uuid(context,
 orig_instance_uuid)
@@ -1862,7 +1861,7 @@ class API(base_api.NetworkAPI):
 raise exception.FloatingIpNotFound(id=id)
 else:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Unable to access floating IP %s'), id)
+LOG.exception('Unable to access floating IP %s', id)
 pool_dict = self._setup_net_dict(client,
 fip['floating_network_id'])
 port_dict = self._setup_port_dict(context, client, fip['port_id'])
@@ -1982,12 +1981,12 @@ class API(base_api.NetworkAPI):
 use the [neutron] option only.
 """
 if CONF.default_floating_pool != 'nova':
-LOG.warning(_LW("Config option 'default_floating_pool' is set to "
+LOG.warning("Config option 'default_floating_pool' is set to "
 "a non-default value. Falling back to this value "
 "for now but this behavior will change in a "
 "future release. You should unset this value "
 "and set the '[neutron] default_floating_pool' "
-"option instead."))
+"option instead.")
 return CONF.default_floating_pool

 return CONF.neutron.default_floating_pool
@@ -2025,9 +2024,9 @@ class API(base_api.NetworkAPI):
 if e.status_code == 404:
 return []
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Unable to access floating IP for %s'),
+LOG.exception('Unable to access floating IP for %s',
 ', '.join(['%s %s' % (k, v)
 for k, v in kwargs.items()]))

 def _get_floating_ip_by_address(self, client, address):
 """Get floating IP from floating IP address."""
@@ -2154,9 +2153,9 @@ class API(base_api.NetworkAPI):
 break
 else:
 tenant_id = port['tenant_id']
-LOG.warning(_LW("Network %(id)s not matched with the tenants "
+LOG.warning("Network %(id)s not matched with the tenants "
 "network! The ports tenant %(tenant_id)s will be "
-"used."),
+"used.",
 {'id': port['network_id'], 'tenant_id': tenant_id})

 bridge = None
@@ -2311,9 +2310,9 @@ class API(base_api.NetworkAPI):
 preserve_on_delete=preserve_on_delete))

 elif nw_info_refresh:
-LOG.info(_LI('Port %s from network info_cache is no '
+LOG.info('Port %s from network info_cache is no '
 'longer associated with instance in Neutron. '
-'Removing from network info_cache.'), port_id,
+'Removing from network info_cache.', port_id,
 instance=instance)

 return nw_info
@@ -2513,16 +2512,16 @@ class API(base_api.NetworkAPI):
 # TODO(lbeliveau): Batch up the port updates in one neutron call.
 for port_id, updates in port_updates:
 if updates:
-LOG.info(_LI("Updating port %(port)s with "
-"attributes %(attributes)s"),
+LOG.info("Updating port %(port)s with "
+"attributes %(attributes)s",
 {"port": port_id, "attributes": updates},
 instance=instance)
 try:
 neutron.update_port(port_id, {'port': updates})
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Unable to update binding details "
-"for port %s"),
+LOG.exception("Unable to update binding details "
+"for port %s",
 port_id, instance=instance)

 def update_instance_vnic_index(self, context, instance, vif, index):
@@ -2539,8 +2538,8 @@ class API(base_api.NetworkAPI):
 neutron.update_port(vif['id'], port_req_body)
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Unable to update instance VNIC index '
-'for port %s.'),
+LOG.exception('Unable to update instance VNIC index '
+'for port %s.',
 vif['id'], instance=instance)

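The hunks above all apply the same mechanical change: the _LI/_LW/_LE lazy-translation wrappers are removed from the log calls, while the format string and its arguments stay separate so interpolation is still deferred to the logging layer. A minimal before/after sketch of that pattern, using stdlib logging as a stand-in for oslo_log and invented sample values:

import logging

LOG = logging.getLogger(__name__)

# Before the change, the message was wrapped in a lazy-translation marker:
#     LOG.info(_LI('re-assign floating IP %(address)s from '
#                  'instance %(instance_id)s'), msg_dict)
# After the change, the plain string is passed directly; the %(...) arguments
# are still supplied separately, so they are only interpolated if the record
# is actually emitted.
msg_dict = {'address': '203.0.113.7', 'instance_id': 'example-instance-uuid'}
LOG.info('re-assign floating IP %(address)s from '
         'instance %(instance_id)s', msg_dict)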
@@ -24,7 +24,7 @@ import six
 from webob import exc

 from nova import exception
-from nova.i18n import _, _LE, _LI, _LW
+from nova.i18n import _
 from nova.network.neutronv2 import api as neutronapi
 from nova.network.security_group import security_group_base
 from nova import utils
@@ -52,8 +52,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 raise exception.Invalid(six.text_type(e))
 except n_exc.NeutronClientException as e:
 exc_info = sys.exc_info()
-LOG.exception(_LE("Neutron Error creating security group %s"),
-name)
+LOG.exception("Neutron Error creating security group %s", name)
 if e.status_code == 401:
 # TODO(arosen) Cannot raise generic response from neutron here
 # as this error code could be related to bad input or over
@@ -73,8 +72,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 security_group['id'], body).get('security_group')
 except n_exc.NeutronClientException as e:
 exc_info = sys.exc_info()
-LOG.exception(_LE("Neutron Error updating security group %s"),
-name)
+LOG.exception("Neutron Error updating security group %s", name)
 if e.status_code == 401:
 # TODO(arosen) Cannot raise generic response from neutron here
 # as this error code could be related to bad input or over
@@ -152,10 +150,10 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 LOG.debug("Neutron security group %s not found", name)
 raise exception.SecurityGroupNotFound(six.text_type(e))
 else:
-LOG.error(_LE("Neutron Error: %s"), e)
+LOG.error("Neutron Error: %s", e)
 six.reraise(*exc_info)
 except TypeError as e:
-LOG.error(_LE("Neutron Error: %s"), e)
+LOG.error("Neutron Error: %s", e)
 msg = _("Invalid security group name: %(name)s.") % {"name": name}
 raise exception.SecurityGroupNotFound(six.text_type(msg))

@@ -188,7 +186,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 'security_groups')
 except n_exc.NeutronClientException:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error getting security groups"))
+LOG.exception("Neutron Error getting security groups")
 converted_rules = []
 for security_group in security_groups:
 converted_rules.append(
@@ -214,7 +212,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 elif e.status_code == 409:
 self.raise_invalid_property(six.text_type(e))
 else:
-LOG.error(_LE("Neutron Error: %s"), e)
+LOG.error("Neutron Error: %s", e)
 six.reraise(*exc_info)

 def add_rules(self, context, id, name, vals):
@@ -234,18 +232,17 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 except n_exc.NeutronClientException as e:
 exc_info = sys.exc_info()
 if e.status_code == 404:
-LOG.exception(_LE("Neutron Error getting security group %s"),
-name)
+LOG.exception("Neutron Error getting security group %s", name)
 self.raise_not_found(six.text_type(e))
 elif e.status_code == 409:
-LOG.exception(_LE("Neutron Error adding rules to security "
-"group %s"), name)
+LOG.exception("Neutron Error adding rules to security "
+"group %s", name)
 self.raise_over_quota(six.text_type(e))
 elif e.status_code == 400:
-LOG.exception(_LE("Neutron Error: %s"), e)
+LOG.exception("Neutron Error: %s", e)
 self.raise_invalid_property(six.text_type(e))
 else:
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")
 six.reraise(*exc_info)
 converted_rules = []
 for rule in rules:
@@ -299,8 +296,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 neutron.delete_security_group_rule(rule_ids.pop())
 except n_exc.NeutronClientException:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error unable to delete %s"),
-rule_ids)
+LOG.exception("Neutron Error unable to delete %s", rule_ids)

 def get_rule(self, context, id):
 neutron = neutronapi.get_client(context)
@@ -313,7 +309,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 LOG.debug("Neutron security group rule %s not found", id)
 self.raise_not_found(six.text_type(e))
 else:
-LOG.error(_LE("Neutron Error: %s"), e)
+LOG.error("Neutron Error: %s", e)
 six.reraise(*exc_info)
 return self._convert_to_nova_security_group_rule_format(rule)

@@ -443,14 +439,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 'project': context.project_id})
 self.raise_not_found(msg)
 else:
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")
 six.reraise(*exc_info)
 params = {'device_id': instance.uuid}
 try:
 ports = neutron.list_ports(**params).get('ports')
 except n_exc.NeutronClientException:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")

 if not ports:
 msg = (_("instance_id %s could not be found as device id on"
@@ -459,9 +455,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):

 for port in ports:
 if not self._has_security_group_requirements(port):
-LOG.warning(_LW("Cannot add security group %(name)s to "
+LOG.warning("Cannot add security group %(name)s to "
 "%(instance)s since the port %(port_id)s "
-"does not meet security requirements"),
+"does not meet security requirements",
 {'name': security_group_name,
 'instance': instance.uuid,
 'port_id': port['id']})
@@ -471,14 +467,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 port['security_groups'].append(security_group_id)
 updated_port = {'security_groups': port['security_groups']}
 try:
-LOG.info(_LI("Adding security group %(security_group_id)s to "
-"port %(port_id)s"),
+LOG.info("Adding security group %(security_group_id)s to "
+"port %(port_id)s",
 {'security_group_id': security_group_id,
 'port_id': port['id']})
 neutron.update_port(port['id'], {'port': updated_port})
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")

 def remove_from_instance(self, context, instance, security_group_name):
 """Remove the security group associated with the instance."""
@@ -497,14 +493,14 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
 'project': context.project_id})
 self.raise_not_found(msg)
 else:
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")
 six.reraise(*exc_info)
 params = {'device_id': instance.uuid}
 try:
 ports = neutron.list_ports(**params).get('ports')
 except n_exc.NeutronClientException:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")

 if not ports:
 msg = (_("instance_id %s could not be found as device id on"
@@ -525,15 +521,15 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):

 updated_port = {'security_groups': port['security_groups']}
 try:
-LOG.info(_LI("Adding security group %(security_group_id)s to "
-"port %(port_id)s"),
+LOG.info("Adding security group %(security_group_id)s to "
+"port %(port_id)s",
 {'security_group_id': security_group_id,
 'port_id': port['id']})
 neutron.update_port(port['id'], {'port': updated_port})
 found_security_group = True
 except Exception:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE("Neutron Error:"))
+LOG.exception("Neutron Error:")
 if not found_security_group:
 msg = (_("Security group %(security_group_name)s not associated "
 "with the instance %(instance)s") %

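Many of the security-group hunks above pair LOG.exception() with oslo.utils' save_and_reraise_exception() context manager: the handler records the traceback and the original exception is re-raised when the block exits. A rough self-contained sketch of that idiom, assuming oslo.utils is installed; the failing update_port() here is a hypothetical stand-in for the Neutron client call:

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)


def update_port(port_id, body):
    # Hypothetical stand-in for the python-neutronclient call used above.
    raise RuntimeError('simulated Neutron failure')


def bind_port(port_id, updates):
    try:
        update_port(port_id, {'port': updates})
    except Exception:
        # Log the full traceback now; the context manager re-raises the
        # original exception when the with-block exits.
        with excutils.save_and_reraise_exception():
            LOG.exception('Unable to update binding details for port %s',
                          port_id)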
@@ -29,7 +29,6 @@ import six
 import nova.conf
 import nova.context
 from nova import exception
-from nova.i18n import _LE
 from nova.image import glance
 from nova import network
 from nova.network import model as network_model
@@ -142,8 +141,8 @@ def send_update(context, old_instance, new_instance, service="compute",
 'instance could not be found and was most likely '
 'deleted.', instance=new_instance)
 except Exception:
-LOG.exception(_LE("Failed to send state update notification"),
+LOG.exception("Failed to send state update notification",
 instance=new_instance)


 def send_update_with_states(context, instance, old_vm_state, new_vm_state,
@@ -185,8 +184,8 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
 'instance could not be found and was most likely '
 'deleted.', instance=instance)
 except Exception:
-LOG.exception(_LE("Failed to send state update notification"),
+LOG.exception("Failed to send state update notification",
 instance=instance)


 def _compute_states_payload(instance, old_vm_state=None,
@@ -336,7 +335,7 @@ def bandwidth_usage(instance_ref, audit_start,
 except Exception:
 try:
 with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Failed to get nw_info'),
+LOG.exception('Failed to get nw_info',
 instance=instance_ref)
 except Exception:
 if ignore_missing_network_data:

@@ -28,7 +28,7 @@ from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import api_models
 from nova.db.sqlalchemy import models as main_models
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -317,7 +317,7 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
 filter_by(deleted=0).count()
 if result:
 LOG.warning(
-_LW('Main database contains %(count)i unmigrated aggregates'),
+'Main database contains %(count)i unmigrated aggregates',
 {'count': result})
 return result == 0

@@ -585,7 +585,7 @@ def migrate_aggregates(ctxt, count):
 db.aggregate_delete(ctxt, aggregate.id)
 except exception.AggregateNotFound:
 LOG.warning(
-_LW('Aggregate id %(id)i disappeared during migration'),
+'Aggregate id %(id)i disappeared during migration',
 {'id': aggregate_id})
 except (exception.AggregateNameExists) as e:
 LOG.error(six.text_type(e))

@@ -20,7 +20,7 @@ from nova.cells import opts as cells_opts
 from nova.cells import rpcapi as cells_rpcapi
 from nova import db
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -221,8 +221,8 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
 if not db_bdms:
 raise exception.VolumeBDMNotFound(volume_id=volume_id)
 if len(db_bdms) > 1:
-LOG.warning(_LW('Legacy get_by_volume_id() call found multiple '
-'BDMs for volume %(volume)s'),
+LOG.warning('Legacy get_by_volume_id() call found multiple '
+'BDMs for volume %(volume)s',
 {'volume': volume_id})
 db_bdm = db_bdms[0]
 # NOTE (ndipanov): Move this to the db layer into a

@@ -23,7 +23,6 @@ from sqlalchemy.sql import null
 from nova.db.sqlalchemy import api as db
 from nova.db.sqlalchemy import api_models
 from nova import exception
-from nova.i18n import _LE
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -82,8 +81,7 @@ class BuildRequest(base.NovaObject):
 'which is not supported here.',
 dict(instance_uuid=self.instance_uuid,
 version=exc.objver))
-LOG.exception(_LE('Could not deserialize instance in '
-'BuildRequest'))
+LOG.exception('Could not deserialize instance in BuildRequest')
 raise exception.BuildRequestNotFound(uuid=self.instance_uuid)
 # NOTE(sbauza): The instance primitive should already have the deleted
 # field being set, so when hydrating it back here, we should get the
@@ -155,7 +153,7 @@ class BuildRequest(base.NovaObject):
 try:
 getattr(req, '_load_%s' % key)(db_req[key])
 except AttributeError:
-LOG.exception(_LE('No load handler for %s'), key)
+LOG.exception('No load handler for %s', key)
 else:
 setattr(req, key, db_req[key])
 # Load instance last because other fields on req may be referenced

@@ -30,7 +30,6 @@ from nova.db.sqlalchemy.api import require_context
 from nova.db.sqlalchemy import api_models
 from nova.db.sqlalchemy import models as main_models
 from nova import exception
-from nova.i18n import _LW
 from nova.notifications.objects import base as notification
 from nova.notifications.objects import flavor as flavor_notification
 from nova import objects
@@ -199,7 +198,7 @@ def _ensure_migrated(context):
 result = context.session.query(main_models.InstanceTypes).\
 filter_by(deleted=0).count()
 if result:
-LOG.warning(_LW('Main database contains %(count)i unmigrated flavors'),
+LOG.warning('Main database contains %(count)i unmigrated flavors',
 {'count': result})
 return result == 0

@@ -760,7 +759,7 @@ def migrate_flavors(ctxt, count, hard_delete=False):
 else:
 db.flavor_destroy(ctxt, flavor.flavorid)
 except exception.FlavorNotFound:
-LOG.warning(_LW('Flavor id %(id)i disappeared during migration'),
+LOG.warning('Flavor id %(id)i disappeared during migration',
 {'id': flavor_id})
 except (exception.FlavorExists, exception.FlavorIdExists) as e:
 LOG.error(six.text_type(e))

@@ -34,7 +34,7 @@ from nova import db
 from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import models
 from nova import exception
-from nova.i18n import _, _LE, _LW
+from nova.i18n import _
 from nova.network import model as network_model
 from nova import notifications
 from nova import objects
@@ -311,8 +311,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
 # If the vm/task states don't indicate that it's being booted
 # then we have a bug here. Log an error and attempt to return
 # the uuid which is what an error above would return.
-LOG.error(_LE('Could not lazy-load instance.id while '
-'attempting to generate the instance name.'))
+LOG.error('Could not lazy-load instance.id while '
+'attempting to generate the instance name.')
 base_name = self.uuid
 return base_name

@@ -726,7 +726,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
 if field in _INSTANCE_EXTRA_FIELDS:
 self._save_extra_generic(field)
 continue
-LOG.exception(_LE('No save handler for %s'), field,
+LOG.exception('No save handler for %s', field,
 instance=self)
 except db_exc.DBReferenceError as exp:
 if exp.key != 'instance_uuid':
@@ -1489,7 +1489,7 @@ def _migrate_instance_keypairs(ctxt, count):
 keypairs.objects.append(key)
 except exception.KeypairNotFound:
 LOG.warning(
-_LW('Instance %(uuid)s keypair %(keyname)s not found'),
+'Instance %(uuid)s keypair %(keyname)s not found',
 {'uuid': db_extra.instance_uuid, 'keyname': key_name})
 db_extra.keypairs = jsonutils.dumps(keypairs.obj_to_primitive())
 db_extra.save(ctxt.session)

@@ -20,7 +20,6 @@ from nova.cells import opts as cells_opts
 from nova.cells import rpcapi as cells_rpcapi
 from nova import db
 from nova import exception
-from nova.i18n import _LE
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -88,7 +87,7 @@ class InstanceFault(base.NovaPersistentObject, base.NovaObject,
 cells_rpcapi.CellsAPI().instance_fault_create_at_top(
 self._context, db_fault)
 except Exception:
-LOG.exception(_LE("Failed to notify cells of instance fault"))
+LOG.exception("Failed to notify cells of instance fault")


 @base.NovaObjectRegistry.register

@@ -18,7 +18,6 @@ from nova.cells import opts as cells_opts
 from nova.cells import rpcapi as cells_rpcapi
 from nova import db
 from nova import exception
-from nova.i18n import _LE
 from nova.objects import base
 from nova.objects import fields

@@ -81,8 +80,8 @@ class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject):
 try:
 cells_api.instance_info_cache_update_at_top(ctxt, info_cache)
 except Exception:
-LOG.exception(_LE("Failed to notify cells of instance info "
-"cache update"))
+LOG.exception("Failed to notify cells of instance info "
+"cache update")

 @base.remotable
 def save(self, update_cells=True):

@@ -22,7 +22,6 @@ from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import api_models
 from nova.db.sqlalchemy import models as main_models
 from nova import exception
-from nova.i18n import _LE
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -258,9 +257,9 @@ def _get_main_keypairs(context, limit):
 def migrate_keypairs_to_api_db(context, count):
 bad_instances = _count_unmigrated_instances(context)
 if bad_instances:
-LOG.error(_LE('Some instances are still missing keypair '
+LOG.error('Some instances are still missing keypair '
 'information. Unable to run keypair migration '
-'at this time.'))
+'at this time.')
 return 0, 0

 main_keypairs = _get_main_keypairs(context, count)

@@ -33,7 +33,7 @@ from nova.db.sqlalchemy import api as db_api
 from nova.db.sqlalchemy import api_models as models
 from nova.db.sqlalchemy import resource_class_cache as rc_cache
 from nova import exception
-from nova.i18n import _, _LW
+from nova.i18n import _
 from nova import objects
 from nova.objects import base
 from nova.objects import fields
@@ -452,8 +452,8 @@ class ResourceProvider(base.NovaObject):
 """Set all resource provider Inventory to be the provided list."""
 exceeded = _set_inventory(self._context, self, inv_list)
 for uuid, rclass in exceeded:
-LOG.warning(_LW('Resource provider %(uuid)s is now over-'
-'capacity for %(resource)s'),
+LOG.warning('Resource provider %(uuid)s is now over-'
+'capacity for %(resource)s',
 {'uuid': uuid, 'resource': rclass})
 self.obj_reset_changes()

@@ -464,8 +464,8 @@ class ResourceProvider(base.NovaObject):
 """
 exceeded = _update_inventory(self._context, self, inventory)
 for uuid, rclass in exceeded:
-LOG.warning(_LW('Resource provider %(uuid)s is now over-'
-'capacity for %(resource)s'),
+LOG.warning('Resource provider %(uuid)s is now over-'
+'capacity for %(resource)s',
 {'uuid': uuid, 'resource': rclass})
 self.obj_reset_changes()

@@ -1643,10 +1643,10 @@ def _check_capacity_exceeded(conn, allocs):
 if (amount_needed < min_unit or amount_needed > max_unit or
 amount_needed % step_size != 0):
 LOG.warning(
-_LW("Allocation for %(rc)s on resource provider %(rp)s "
+"Allocation for %(rc)s on resource provider %(rp)s "
 "violates min_unit, max_unit, or step_size. "
 "Requested: %(requested)s, min_unit: %(min_unit)s, "
-"max_unit: %(max_unit)s, step_size: %(step_size)s"),
+"max_unit: %(max_unit)s, step_size: %(step_size)s",
 {'rc': alloc.resource_class,
 'rp': rp_uuid,
 'requested': amount_needed,
@@ -1662,8 +1662,8 @@ def _check_capacity_exceeded(conn, allocs):
 capacity = (usage['total'] - usage['reserved']) * allocation_ratio
 if capacity < (used + amount_needed):
 LOG.warning(
-_LW("Over capacity for %(rc)s on resource provider %(rp)s. "
-"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s"),
+"Over capacity for %(rc)s on resource provider %(rp)s. "
+"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s",
 {'rc': alloc.resource_class,
 'rp': rp_uuid,
 'needed': amount_needed,
@@ -2081,8 +2081,8 @@ class ResourceClass(base.NovaObject):
 # limit to be exceeded. We set it high in the hope that we never
 # hit this point, but added this log message so we know that this
 # specific situation occurred.
-LOG.warning(_LW("Exceeded retry limit on ID generation while "
-"creating ResourceClass %(name)s"),
+LOG.warning("Exceeded retry limit on ID generation while "
+"creating ResourceClass %(name)s",
 {'name': self.name})
 msg = _("creating resource class %s") % self.name
 raise exception.MaxDBRetriesExceeded(action=msg)

@@ -20,7 +20,6 @@ from nova import availability_zones
 from nova import context as nova_context
 from nova import db
 from nova import exception
-from nova.i18n import _LW
 from nova.notifications.objects import base as notification
 from nova.notifications.objects import service as service_notification
 from nova import objects
@@ -402,8 +401,8 @@ class Service(base.NovaPersistentObject, base.NovaObject,
 @base.remotable_classmethod
 def get_minimum_version_multi(cls, context, binaries, use_slave=False):
 if not all(binary.startswith('nova-') for binary in binaries):
-LOG.warning(_LW('get_minimum_version called with likely-incorrect '
-'binaries `%s\''), ','.join(binaries))
+LOG.warning('get_minimum_version called with likely-incorrect '
+'binaries `%s\'', ','.join(binaries))
 raise exception.ObjectActionError(action='get_minimum_version',
 reason='Invalid binary prefix')

@@ -21,7 +21,6 @@ from oslo_log import log as logging
 from oslo_serialization import jsonutils

 from nova import exception
-from nova.i18n import _LW
 from nova import objects
 from nova.objects import fields
 from nova.pci import stats
@@ -168,9 +167,9 @@ class PciDevTracker(object):
 try:
 existed.remove()
 except exception.PciDeviceInvalidStatus as e:
-LOG.warning(_LW("Trying to remove device with %(status)s "
+LOG.warning("Trying to remove device with %(status)s "
 "ownership %(instance_uuid)s because of "
-"%(pci_exception)s"),
+"%(pci_exception)s",
 {'status': existed.status,
 'instance_uuid': existed.instance_uuid,
 'pci_exception': e.format_message()})
@@ -228,8 +227,8 @@ class PciDevTracker(object):
 dev.claim(instance_uuid)
 if instance_numa_topology and any(
 dev.numa_node is None for dev in devs):
-LOG.warning(_LW("Assigning a pci device without numa affinity to"
-"instance %(instance)s which has numa topology"),
+LOG.warning("Assigning a pci device without numa affinity to "
+"instance %(instance)s which has numa topology",
 {'instance': instance_uuid})
 return devs

@@ -21,7 +21,6 @@ from oslo_log import log as logging
 import six

 from nova import exception
-from nova.i18n import _LE
 from nova.objects import fields
 from nova.objects import pci_device_pool
 from nova.pci import utils
@@ -157,12 +156,12 @@ class PciDeviceStats(object):
 # Failed to allocate the required number of devices
 # Return the devices already allocated back to their pools
 if sum([pool['count'] for pool in pools]) < count:
-LOG.error(_LE("Failed to allocate PCI devices for instance."
+LOG.error("Failed to allocate PCI devices for instance."
 " Unassigning devices back to pools."
 " This should not happen, since the scheduler"
 " should have accurate information, and allocation"
 " during claims is controlled via a hold"
-" on the compute node semaphore"))
+" on the compute node semaphore")
 for d in range(len(alloc_devices)):
 self.add_device(alloc_devices.pop())
 return None

@@ -23,7 +23,6 @@ from oslo_log import log as logging


 from nova import exception
-from nova.i18n import _LW

 LOG = logging.getLogger(__name__)

@@ -152,9 +151,9 @@ def get_mac_by_pci_address(pci_addr, pf_interface=False):
 mac = next(f).strip()
 return mac
 except (IOError, StopIteration) as e:
-LOG.warning(_LW("Could not find the expected sysfs file for "
+LOG.warning("Could not find the expected sysfs file for "
 "determining the MAC address of the PCI device "
-"%(addr)s. May not be a NIC. Error: %(e)s"),
+"%(addr)s. May not be a NIC. Error: %(e)s",
 {'addr': pci_addr, 'e': e})
 raise exception.PciDeviceNotFoundById(id=pci_addr)

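One distinction is consistent across every file touched above: operator-facing log messages drop their translation markers entirely, while user-facing exception messages keep the _() translation call (the `from nova.i18n import _` imports are retained). A small sketch of the resulting convention; the _() helper and the exception class below are simplified stand-ins, not the real nova implementations:

import logging

LOG = logging.getLogger(__name__)


def _(message):
    # Simplified stand-in for nova.i18n._; the real helper returns a lazily
    # translatable message object.
    return message


class FloatingIpNotFound(Exception):
    # Simplified stand-in for nova.exception.FloatingIpNotFound.
    pass


def lookup_floating_ip(ip_store, ip_id):
    try:
        return ip_store[ip_id]
    except KeyError:
        # Log text: plain string, no translation marker, argument passed
        # separately for deferred interpolation.
        LOG.exception('Unable to access floating IP %s', ip_id)
        # Exception text shown to API users: still wrapped in _().
        raise FloatingIpNotFound(_('Floating IP %s could not be found.') % ip_id)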