Prevent compute crash on discovery failure
Keystoneauth1 may crash during discovery of the URL for the placement
API. This often happens when all services are started at once: compute
comes up first while keystone is not yet available. This patch fixes
the crash by catching DiscoveryFailure as an additional expected
failure.

Closes-Bug: #1656075
Change-Id: I48aa6802286b408260b9d10f9d13860b44bd7d34
This commit is contained in: parent 8d9325b525, commit 4dbc0ddc74
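For context, keystoneauth1's endpoint discovery is what fails here: when keystone is not yet answering, asking the session for the placement endpoint can raise DiscoveryFailure instead of returning an error value. Below is a minimal sketch of the failure mode and the catch this patch adds; it is not part of the patch itself, and the helper name and the way the session is obtained are illustrative assumptions.

from keystoneauth1 import exceptions as ks_exc


def get_placement_endpoint(session):
    # Hypothetical helper, for illustration only.
    try:
        # get_endpoint() drives version discovery through the auth
        # plugin; with keystone unreachable this can raise
        # DiscoveryFailure.
        return session.get_endpoint(service_type='placement')
    except ks_exc.DiscoveryFailure:
        # Expected when all services start at once and compute comes
        # up before keystone; treat as a transient, retryable failure.
        return None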
@@ -216,6 +216,9 @@ class UpgradeCommands(object):
         except ks_exc.EndpointNotFound:
             msg = _('Placement API endpoint not found.')
             return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
+        except ks_exc.DiscoveryFailure:
+            msg = _('Discovery for placement API URI failed.')
+            return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
         except ks_exc.NotFound:
             msg = _('Placement API does not seem to be running.')
             return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)
@@ -71,6 +71,13 @@ def safe_connect(f):
                            'Placement is optional in Newton, but required '
                            'in Ocata. Please enable the placement service '
                            'before upgrading.'))
+        except ks_exc.DiscoveryFailure:
+            # TODO(_gryf): Looks like DiscoveryFailure is not the only missing
+            # exception here. In Pike we should take care about keystoneauth1
+            # failures handling globally.
+            warn_limit(self,
+                       _LW('Discovering suitable URL for placement API '
+                           'failed.'))
         except ks_exc.ConnectFailure:
             msg = _LW('Placement API service is not responding.')
             LOG.warning(msg)
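The hunk above extends nova's safe_connect decorator, which wraps report-client calls and converts known keystoneauth1 failures into log warnings instead of letting them crash the compute service. Here is a condensed sketch of that pattern, assuming functools and a module-level logger; the real decorator handles more exception types and rate-limits its warnings through warn_limit().

import functools
import logging

from keystoneauth1 import exceptions as ks_exc

LOG = logging.getLogger(__name__)


def safe_connect_sketch(f):
    # Simplified stand-in for nova's safe_connect, illustration only.
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except ks_exc.DiscoveryFailure:
            # The case this patch adds: URL discovery failed, most
            # likely because keystone is not up yet.
            LOG.warning('Discovering suitable URL for placement API '
                        'failed.')
        except ks_exc.ConnectFailure:
            LOG.warning('Placement API service is not responding.')
    return wrapper

Returning None from the wrapper rather than re-raising is what keeps the compute service alive; callers are expected to cope with a missing result until placement becomes reachable.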
@@ -148,6 +148,19 @@ class TestPlacementCheck(test.NoDBTestCase):
         self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
         self.assertIn('Placement API endpoint not found', res.details)
 
+    @mock.patch.object(status.UpgradeCommands, "_placement_get")
+    def test_discovery_failure(self, get):
+        """Test failure when discovery for placement URL failed.
+
+        Replicate in devstack: start devstack with placement
+        engine, create valid placement service user and specify it
+        in auth section of [placement] in nova.conf. Stop keystone service.
+        """
+        get.side_effect = ks_exc.DiscoveryFailure()
+        res = self.cmd._check_placement()
+        self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
+        self.assertIn('Discovery for placement API URI failed.', res.details)
+
     @mock.patch.object(status.UpgradeCommands, "_placement_get")
     def test_down_endpoint(self, get):
         """Test failure when endpoint is down.
@@ -114,6 +114,21 @@ class SafeConnectedTestCase(test.NoDBTestCase):
         mock_log.warning.assert_has_calls([mock.call('warning'),
                                            mock.call('warning')])
 
+    @mock.patch('keystoneauth1.session.Session.request')
+    def test_failed_discovery(self, req):
+        """Test DiscoveryFailure behavior.
+
+        Failed discovery should not blow up.
+        """
+        req.side_effect = ks_exc.DiscoveryFailure()
+        self.client._get_resource_provider("fake")
+
+        # reset the call count to demonstrate that future calls still
+        # work
+        req.reset_mock()
+        self.client._get_resource_provider("fake")
+        self.assertTrue(req.called)
+
 
 class TestConstructor(test.NoDBTestCase):
     @mock.patch('keystoneauth1.session.Session')
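Since the commit message treats DiscoveryFailure as a transient start-up condition, a caller that needs the endpoint can simply poll until keystone answers. The following retry loop illustrates that idea under the same assumption; it is not part of the patch, and wait_for_placement, the attempt count, and the delay are all hypothetical.

import time

from keystoneauth1 import exceptions as ks_exc


def wait_for_placement(session, attempts=10, delay=3):
    # Hypothetical helper: poll endpoint discovery until it succeeds.
    for _ in range(attempts):
        try:
            return session.get_endpoint(service_type='placement')
        except ks_exc.DiscoveryFailure:
            # Keystone is probably still starting; back off and retry.
            time.sleep(delay)
    raise RuntimeError('placement endpoint discovery kept failing')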