Clean up ugly stub in TestLocalDeleteAllocations
With a fix in wsgi-intercept 1.7.0 we can properly use the PlacementFixture
as a context manager to simulate when placement is configured for a given
operation. This allows us to remove the ugly stub that one of the tests in
here had to rely on.

While in here, the CastAsCall fixture is removed since we shouldn't rely on
that in these tests where we're trying to simulate the user experience.

Change-Id: I2074b45126b839ea6307a8740364393e9dddd50b

parent 9706d89d52
commit ee6a257f58
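
For reference, the pattern this change enables looks roughly like the
following. This is a minimal sketch rather than part of the diff, and it
assumes only what the hunks below show: that nova_fixtures.PlacementFixture
can now be entered as a context manager (thanks to wsgi-intercept 1.7.0) and
exposes the fake placement API as its .api attribute:

    # Sketch only: scope the fake placement service to part of a test.
    from nova.tests import fixtures as nova_fixtures

    with nova_fixtures.PlacementFixture() as placement:
        # While the fixture is active, placement requests are intercepted
        # in-process and served by the fake placement WSGI app.
        resp = placement.api.get('/resource_providers')
    # Once the block exits the intercept is torn down, which simulates a
    # nova-api that isn't configured to talk to placement.
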
@@ -169,4 +169,4 @@ warlock==1.3.0
 WebOb==1.7.1
 websockify==0.8.0
 wrapt==1.10.11
-wsgi-intercept==1.4.1
+wsgi-intercept==1.7.0

@@ -10,11 +10,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from nova.scheduler.client import report as reportclient
 from nova import test
 from nova.tests import fixtures as nova_fixtures
 from nova.tests.functional import integrated_helpers
-from nova.tests.unit import cast_as_call
 import nova.tests.unit.image.fake
 from nova.tests.unit import policy_fixture

@@ -26,10 +24,6 @@ class TestLocalDeleteAllocations(test.TestCase,
         self.useFixture(policy_fixture.RealPolicyFixture())
         # The NeutronFixture is needed to show security groups for a server.
         self.useFixture(nova_fixtures.NeutronFixture(self))
-        # We need the computes reporting into placement for the filter
-        # scheduler to pick a host.
-        placement = self.useFixture(nova_fixtures.PlacementFixture())
-        self.placement_api = placement.api
         api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
             api_version='v2.1'))
         self.api = api_fixture.api

@@ -44,92 +38,71 @@ class TestLocalDeleteAllocations(test.TestCase,

         self.start_service('scheduler')

-        self.compute = self.start_service('compute')
-
-        self.useFixture(cast_as_call.CastAsCall(self))
-
         self.image_id = self.api.get_images()[0]['id']
         self.flavor_id = self.api.get_flavors()[0]['id']

-    def _get_usages(self, rp_uuid):
+    @staticmethod
+    def _get_usages(placement_api, rp_uuid):
         fmt = '/resource_providers/%(uuid)s/usages'
-        resp = self.placement_api.get(fmt % {'uuid': rp_uuid})
+        resp = placement_api.get(fmt % {'uuid': rp_uuid})
         return resp.body['usages']

-    # NOTE(mriedem): It would be preferable to use the PlacementFixture as
-    # a context manager but that causes some issues when trying to delete the
-    # server in test_local_delete_removes_allocations_after_compute_restart.
-    def _stub_compute_api_to_not_configure_placement(self):
-        """Prior to the compute API deleting allocations in the "local delete"
-        case, nova.conf for nova-api might not be configured for talking to
-        the placement service, so we can mock that behavior by stubbing out
-        the placement client in the compute API to no-op as if safe_connect
-        failed and returned None to the caller.
-        """
-        orig_delete_alloc = (
-            reportclient.SchedulerReportClient.delete_allocation_for_instance)
-        self.call_count = 0
-
-        def fake_delete_allocation_for_instance(*args, **kwargs):
-            # The first call will be from the API, so ignore that one and
-            # return None like the @safe_connect decorator would if nova-api
-            # wasn't configured to talk to placement.
-            if self.call_count:
-                orig_delete_alloc(*args, **kwargs)
-            else:
-                self.call_count += 1
-
-        self.stub_out('nova.scheduler.client.report.SchedulerReportClient.'
-                      'delete_allocation_for_instance',
-                      fake_delete_allocation_for_instance)
-
     def test_local_delete_removes_allocations_after_compute_restart(self):
         """Tests that allocations are removed after a local delete.

         This tests the scenario where a server is local deleted (because the
         compute host is down) and we want to make sure that its allocations
         have been cleaned up once the nova-compute service restarts.
+
+        In this scenario we conditionally use the PlacementFixture to simulate
+        the case that nova-api isn't configured to talk to placement.
         """
-        self._stub_compute_api_to_not_configure_placement()
         # Get allocations, make sure they are 0.
-        resp = self.placement_api.get('/resource_providers')
-        rp_uuid = resp.body['resource_providers'][0]['uuid']
-        usages_before = self._get_usages(rp_uuid)
-        for usage in usages_before.values():
-            self.assertEqual(0, usage)
-
-        # Create a server.
-        server = self._build_minimal_create_server_request(self.api,
-            'local-delete-test', self.image_id, self.flavor_id, 'none')
-        server = self.admin_api.post_server({'server': server})
-        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
-
-        # Assert usages are non zero now.
-        usages_during = self._get_usages(rp_uuid)
-        for usage in usages_during.values():
-            self.assertNotEqual(0, usage)
+        with nova_fixtures.PlacementFixture() as placement:
+            compute = self.start_service('compute')
+            placement_api = placement.api
+            resp = placement_api.get('/resource_providers')
+            rp_uuid = resp.body['resource_providers'][0]['uuid']
+            usages_before = self._get_usages(placement_api, rp_uuid)
+            for usage in usages_before.values():
+                self.assertEqual(0, usage)
+
+            # Create a server.
+            server = self._build_minimal_create_server_request(self.api,
+                'local-delete-test', self.image_id, self.flavor_id, 'none')
+            server = self.admin_api.post_server({'server': server})
+            server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+
+            # Assert usages are non zero now.
+            usages_during = self._get_usages(placement_api, rp_uuid)
+            for usage in usages_during.values():
+                self.assertNotEqual(0, usage)

         # Force-down compute to trigger local delete.
-        self.compute.stop()
+        compute.stop()
         compute_service_id = self.admin_api.get_services(
-            host=self.compute.host, binary='nova-compute')[0]['id']
-        self.admin_api.put_service(compute_service_id, {'forced_down': True})
+            host=compute.host, binary='nova-compute')[0]['id']
+        self.admin_api.put_service(compute_service_id,
+                                   {'forced_down': True})

         # Delete the server (will be a local delete because compute is down).
         self.api.delete_server(server['id'])
+        self._wait_until_deleted(server)

-        # Assert usages are still non-zero.
-        usages_during = self._get_usages(rp_uuid)
-        for usage in usages_during.values():
-            self.assertNotEqual(0, usage)
+        with nova_fixtures.PlacementFixture() as placement:
+            placement_api = placement.api
+            # Assert usages are still non-zero.
+            usages_during = self._get_usages(placement_api, rp_uuid)
+            for usage in usages_during.values():
+                self.assertNotEqual(0, usage)

-        # Start the compute service again. Before it comes up, it will call the
-        # update_available_resource code in the ResourceTracker which is what
-        # "heals" the allocations for the deleted instance.
-        self.compute.start()
+            # Start the compute service again. Before it comes up, it will
+            # call the update_available_resource code in the ResourceTracker
+            # which is what "heals" the allocations for the deleted instance.
+            compute.start()

-        # Get the allocations again to check against the original.
-        usages_after = self._get_usages(rp_uuid)
+            # Get the allocations again to check against the original.
+            usages_after = self._get_usages(placement_api, rp_uuid)

         # They should match.
         self.assertEqual(usages_before, usages_after)

@@ -138,10 +111,12 @@ class TestLocalDeleteAllocations(test.TestCase,
         """Tests that the compute API deletes allocations when the compute
         service on which the instance was running is down.
         """
+        placement_api = self.useFixture(nova_fixtures.PlacementFixture()).api
+        compute = self.start_service('compute')
         # Get allocations, make sure they are 0.
-        resp = self.placement_api.get('/resource_providers')
+        resp = placement_api.get('/resource_providers')
         rp_uuid = resp.body['resource_providers'][0]['uuid']
-        usages_before = self._get_usages(rp_uuid)
+        usages_before = self._get_usages(placement_api, rp_uuid)
         for usage in usages_before.values():
             self.assertEqual(0, usage)

@@ -152,21 +127,22 @@ class TestLocalDeleteAllocations(test.TestCase,
         server = self._wait_for_state_change(self.api, server, 'ACTIVE')

         # Assert usages are non zero now.
-        usages_during = self._get_usages(rp_uuid)
+        usages_during = self._get_usages(placement_api, rp_uuid)
         for usage in usages_during.values():
             self.assertNotEqual(0, usage)

         # Force-down compute to trigger local delete.
-        self.compute.stop()
+        compute.stop()
         compute_service_id = self.admin_api.get_services(
-            host=self.compute.host, binary='nova-compute')[0]['id']
+            host=compute.host, binary='nova-compute')[0]['id']
         self.admin_api.put_service(compute_service_id, {'forced_down': True})

         # Delete the server (will be a local delete because compute is down).
         self.api.delete_server(server['id'])
+        self._wait_until_deleted(server)

         # Get the allocations again to make sure they were deleted.
-        usages_after = self._get_usages(rp_uuid)
+        usages_after = self._get_usages(placement_api, rp_uuid)

         # They should match.
         self.assertEqual(usages_before, usages_after)

@@ -26,4 +26,4 @@ gabbi>=1.35.0 # Apache-2.0
 oslo.vmware>=2.17.0 # Apache-2.0

 # placement functional tests
-wsgi-intercept>=1.4.1 # MIT License
+wsgi-intercept>=1.7.0 # MIT License
|