Merge "nova-manage db archive_deleted_rows is not multi-cell aware"

This commit is contained in:
Zuul 2019-08-28 20:59:18 +00:00 committed by Gerrit Code Review
commit 9c2b1be0d6
11 changed files with 514 additions and 94 deletions


@@ -62,7 +62,7 @@ Nova Database
Returns exit code 0 if the database schema was synced successfully, or 1 if
cell0 cannot be accessed.
``nova-manage db archive_deleted_rows [--max_rows <number>] [--verbose] [--until-complete] [--before <date>] [--purge]``
``nova-manage db archive_deleted_rows [--max_rows <number>] [--verbose] [--until-complete] [--before <date>] [--purge] [--all-cells]``
Move deleted rows from production tables to shadow tables. Note that the
corresponding rows in the ``instance_mappings``, ``request_specs`` and
``instance_group_member`` tables of the API database are purged when
@@ -78,7 +78,8 @@ Nova Database
tables related to those instances. Specifying ``--purge`` will cause a
*full* DB purge to be completed after archival. If a date range is desired
for the purge, then run ``nova-manage db purge --before <date>`` manually
after archiving is complete.
after archiving is complete. Specifying ``--all-cells`` will
cause the process to run against all cell databases.
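A minimal, hypothetical wrapper (not part of this change, and assuming ``nova-manage`` is installed and on the PATH) showing how an operator might drive the new flag from Python; per the command's contract, exit code 0 means nothing was archived and 1 means some rows were archived:

    import subprocess

    # Hypothetical batch-archival helper; the flags are those documented
    # above. --until-complete loops internally until nothing is left.
    cmd = ['nova-manage', 'db', 'archive_deleted_rows',
           '--max_rows', '1000', '--until-complete', '--all-cells',
           '--verbose']
    rc = subprocess.call(cmd)
    print('archived some rows' if rc == 1 else 'nothing to archive')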
**Return Codes**


@@ -414,17 +414,19 @@ class DbCommands(object):
pass
@staticmethod
def _print_dict(dct, dict_property="Property", dict_value='Value'):
def _print_dict(dct, dict_property="Property", dict_value='Value',
sort_key=None):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
:param dict_value: header label for the value (second) column
:param sort_key: key used for sorting the dict
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items()):
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
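As an aside, the effect of passing a ``sort_key`` to ``sorted(dct.items(), key=...)`` can be seen in a tiny standalone example (illustrative values only; the key function mirrors the ``sort_func`` defined later in this change):

    items = {'cell2.instances': 1, 'cell1.instances': 1,
             'cell1.consoles': 5}

    def sort_func(item):
        # item is a (key, value) pair; keys look like '<cell>.<table>'
        cell_name, table = item[0].split('.')
        return cell_name, table

    for k, v in sorted(items.items(), key=sort_func):
        print(k, v)
    # cell1.consoles 5
    # cell1.instances 1
    # cell2.instances 1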
@@ -495,9 +497,11 @@ Error: %s""") % six.text_type(e))
'max_rows as a batch size for each iteration.'))
@args('--purge', action='store_true', dest='purge', default=False,
help='Purge all data from shadow tables after archive completes')
@args('--all-cells', action='store_true', dest='all_cells',
default=False, help='Run command across all cells.')
def archive_deleted_rows(self, max_rows=1000, verbose=False,
until_complete=False, purge=False,
before=None):
before=None, all_cells=False):
"""Move deleted rows from production tables to shadow tables.
Returns 0 if nothing was archived, 1 if some number of rows were
@@ -520,7 +524,7 @@ Error: %s""") % six.text_type(e))
# NOTE(tssurya): This check has been added to validate whether the
# API DB is reachable, as this is essential for purging the related
# API database records of the deleted instances.
objects.CellMappingList.get_all(ctxt)
cell_mappings = objects.CellMappingList.get_all(ctxt)
except db_exc.CantStartEngineError:
print(_('Failed to connect to API DB so aborting this archival '
'attempt. Please check your config file to make sure that '
@@ -538,59 +542,146 @@ Error: %s""") % six.text_type(e))
before_date = None
table_to_rows_archived = {}
deleted_instance_uuids = []
if until_complete and verbose:
sys.stdout.write(_('Archiving') + '..') # noqa
while True:
try:
run, deleted_instance_uuids = db.archive_deleted_rows(
max_rows, before=before_date)
except KeyboardInterrupt:
run = {}
if until_complete and verbose:
print('.' + _('stopped')) # noqa
interrupt = False
if all_cells:
# Sort first by cell name, then by table:
# +--------------------------------+-------------------------+
# | Table | Number of Rows Archived |
# +--------------------------------+-------------------------+
# | cell0.block_device_mapping | 1 |
# | cell1.block_device_mapping | 1 |
# | cell1.instance_actions | 2 |
# | cell1.instance_actions_events | 2 |
# | cell2.block_device_mapping | 1 |
# | cell2.instance_actions | 2 |
# | cell2.instance_actions_events | 2 |
# ...
def sort_func(item):
cell_name, table = item[0].split('.')
return cell_name, table
print_sort_func = sort_func
else:
cell_mappings = [None]
print_sort_func = None
total_rows_archived = 0
for cell_mapping in cell_mappings:
# NOTE(Kevin_Zheng): No need to calculate limit for each
# cell if until_complete=True.
# We need not adjust max rows to avoid exceeding a specified total
# limit because with until_complete=True, we have no total limit.
if until_complete:
max_rows_to_archive = max_rows
elif max_rows > total_rows_archived:
# We reduce the max rows to archive based on what we've
# archived so far to avoid potentially exceeding the specified
# total limit.
max_rows_to_archive = max_rows - total_rows_archived
else:
break
# If all_cells=False, cell_mapping is None
with context.target_cell(ctxt, cell_mapping) as cctxt:
cell_name = cell_mapping.name if cell_mapping else None
try:
rows_archived = self._do_archive(
table_to_rows_archived,
cctxt,
max_rows_to_archive,
until_complete,
verbose,
before_date,
cell_name)
except KeyboardInterrupt:
interrupt = True
break
for k, v in run.items():
table_to_rows_archived.setdefault(k, 0)
table_to_rows_archived[k] += v
if deleted_instance_uuids:
table_to_rows_archived.setdefault('instance_mappings', 0)
table_to_rows_archived.setdefault('request_specs', 0)
table_to_rows_archived.setdefault('instance_group_member', 0)
deleted_mappings = objects.InstanceMappingList.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived['instance_mappings'] += deleted_mappings
deleted_specs = objects.RequestSpec.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived['request_specs'] += deleted_specs
deleted_group_members = (
objects.InstanceGroup.destroy_members_bulk(
ctxt, deleted_instance_uuids))
table_to_rows_archived['instance_group_member'] += (
deleted_group_members)
if not until_complete:
break
elif not run:
if verbose:
print('.' + _('complete')) # noqa
break
if verbose:
sys.stdout.write('.')
# TODO(melwitt): Handle skip/warn for unreachable cells. Note
# that cell_mappings = [None] if not --all-cells
total_rows_archived += rows_archived
if until_complete and verbose:
if interrupt:
print('.' + _('stopped')) # noqa
else:
print('.' + _('complete')) # noqa
if verbose:
if table_to_rows_archived:
self._print_dict(table_to_rows_archived, _('Table'),
dict_value=_('Number of Rows Archived'))
dict_value=_('Number of Rows Archived'),
sort_key=print_sort_func)
else:
print(_('Nothing was archived.'))
if table_to_rows_archived and purge:
if verbose:
print(_('Rows were archived, running purge...'))
self.purge(purge_all=True, verbose=verbose)
self.purge(purge_all=True, verbose=verbose, all_cells=all_cells)
# NOTE(danms): Return nonzero if we archived something
return int(bool(table_to_rows_archived))
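Condensed, the budgeting rule above is: with ``--until-complete`` every cell gets the full ``max_rows`` per call, otherwise ``max_rows`` acts as a total budget shared across the cells. A standalone sketch (hypothetical ``archive_one_cell`` helper, not nova code):

    def archive_all_cells(cells, max_rows, until_complete, archive_one_cell):
        total_rows_archived = 0
        for cell in cells:
            if until_complete:
                budget = max_rows  # no total limit in this mode
            elif max_rows > total_rows_archived:
                budget = max_rows - total_rows_archived
            else:
                break  # budget exhausted; skip the remaining cells
            total_rows_archived += archive_one_cell(cell, budget)
        return total_rows_archived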
def _do_archive(self, table_to_rows_archived, cctxt, max_rows,
until_complete, verbose, before_date, cell_name):
"""Helper function for archiving deleted rows for a cell.
This will archive deleted rows for a cell database and remove the
associated API database records for deleted instances.
:param table_to_rows_archived: Dict tracking the number of rows
archived by <cell_name>.<table name>. Example:
{'cell0.instances': 2,
'cell1.instances': 5}
:param cctxt: Cell-targeted nova.context.RequestContext if archiving
across all cells
:param max_rows: Maximum number of deleted rows to archive
:param until_complete: Whether to run continuously until all deleted
rows are archived
:param verbose: Whether to print how many rows were archived per table
:param before_date: Archive rows that were deleted before this date
:param cell_name: Name of the cell or None if not archiving across all
cells
"""
ctxt = context.get_admin_context()
while True:
run, deleted_instance_uuids, total_rows_archived = \
db.archive_deleted_rows(cctxt, max_rows, before=before_date)
for table_name, rows_archived in run.items():
if cell_name:
table_name = cell_name + '.' + table_name
table_to_rows_archived.setdefault(table_name, 0)
table_to_rows_archived[table_name] += rows_archived
if deleted_instance_uuids:
table_to_rows_archived.setdefault(
'API_DB.instance_mappings', 0)
table_to_rows_archived.setdefault(
'API_DB.request_specs', 0)
table_to_rows_archived.setdefault(
'API_DB.instance_group_member', 0)
deleted_mappings = objects.InstanceMappingList.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.instance_mappings'] += deleted_mappings
deleted_specs = objects.RequestSpec.destroy_bulk(
ctxt, deleted_instance_uuids)
table_to_rows_archived[
'API_DB.request_specs'] += deleted_specs
deleted_group_members = (
objects.InstanceGroup.destroy_members_bulk(
ctxt, deleted_instance_uuids))
table_to_rows_archived[
'API_DB.instance_group_member'] += deleted_group_members
# If we're not archiving until there is nothing more to archive, we
# have reached max_rows in this cell DB or there was nothing to
# archive.
if not until_complete or not run:
break
if verbose:
sys.stdout.write('.')
return total_rows_archived
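The tallying convention is worth spelling out: rows archived from a cell database are recorded under ``<cell name>.<table>``, while API database rows use the fixed ``API_DB.`` prefix. A toy illustration (hypothetical ``tally`` helper):

    table_to_rows_archived = {}

    def tally(table_name, rows, prefix=None):
        if prefix:
            table_name = prefix + '.' + table_name
        table_to_rows_archived.setdefault(table_name, 0)
        table_to_rows_archived[table_name] += rows

    tally('instances', 2, prefix='cell1')
    tally('instance_mappings', 2, prefix='API_DB')
    # {'cell1.instances': 2, 'API_DB.instance_mappings': 2}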
@args('--before', metavar='<before>', dest='before',
help='If specified, purge rows from shadow tables that are older '
'than this. Accepts date strings in the default format output '


@@ -1772,14 +1772,15 @@ def task_log_get(context, task_name, period_beginning,
####################
def archive_deleted_rows(max_rows=None, before=None):
def archive_deleted_rows(context=None, max_rows=None, before=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:param context: nova.context.RequestContext for database access
:param max_rows: Maximum number of rows to archive (required)
:param before: optional datetime which when specified filters the records
to only archive those records deleted before the given date
:returns: 2-item tuple:
:returns: 3-item tuple:
- dict that maps table name to number of rows archived from that table,
for example::
@@ -1790,8 +1791,10 @@ def archive_deleted_rows(max_rows=None, before=None):
'pci_devices': 2,
}
- list of UUIDs of instances that were archived
- total number of rows that were archived
"""
return IMPL.archive_deleted_rows(max_rows=max_rows, before=before)
return IMPL.archive_deleted_rows(context=context, max_rows=max_rows,
before=before)
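Callers now unpack three values instead of two; a minimal usage sketch (assuming a deployed nova environment with a configured database):

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    run, deleted_instance_uuids, total = db.archive_deleted_rows(
        context=ctxt, max_rows=1000)
    # 'run' maps table name to rows archived; 'total' is their sum.
    assert total == sum(run.values())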
def pcidevice_online_data_migration(context, max_count):


@@ -5569,14 +5569,15 @@ def _archive_deleted_rows_for_table(metadata, tablename, max_rows, before):
return rows_archived, deleted_instance_uuids
def archive_deleted_rows(max_rows=None, before=None):
def archive_deleted_rows(context=None, max_rows=None, before=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:param context: nova.context.RequestContext for database access
:param max_rows: Maximum number of rows to archive (required)
:param before: optional datetime which when specified filters the records
to only archive those records deleted before the given date
:returns: 2-item tuple:
:returns: 3-item tuple:
- dict that maps table name to number of rows archived from that table,
for example::
@@ -5587,11 +5588,12 @@ def archive_deleted_rows(max_rows=None, before=None):
'pci_devices': 2,
}
- list of UUIDs of instances that were archived
- total number of rows that were archived
"""
table_to_rows_archived = {}
deleted_instance_uuids = []
total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True))
meta = MetaData(get_engine(use_slave=True, context=context))
meta.reflect()
# Reverse sort the tables so we get the leaf nodes first for processing.
for table in reversed(meta.sorted_tables):
@@ -5615,7 +5617,7 @@ def archive_deleted_rows(max_rows=None, before=None):
table_to_rows_archived[tablename] = rows_archived
if total_rows_archived >= max_rows:
break
return table_to_rows_archived, deleted_instance_uuids
return table_to_rows_archived, deleted_instance_uuids, total_rows_archived
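The table ordering relies on SQLAlchemy's dependency sort; a self-contained sketch (SQLAlchemy 1.x style, matching the code above):

    from sqlalchemy import MetaData, create_engine

    engine = create_engine('sqlite://')  # stand-in for a cell DB engine
    meta = MetaData(engine)
    meta.reflect()  # load table definitions from the live schema
    # sorted_tables lists each table after the tables it references, so
    # reversing it yields leaf (child) tables first, keeping foreign keys
    # satisfied as rows are moved.
    for table in reversed(meta.sorted_tables):
        print(table.name)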
def _purgeable_tables(metadata):


@@ -94,7 +94,8 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Now try and archive the soft deleted records.
results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
results, deleted_instance_uuids, archived = \
db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
@@ -105,6 +106,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
# by the archive
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
self.assertEqual(sum(results.values()), archived)
def test_archive_deleted_rows_with_undeleted_residue(self):
# Boots a server, deletes it, and then tries to archive it.
@@ -136,7 +138,8 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
self.assertTrue(len(instance.system_metadata),
'No system_metadata for instance: %s' % server_id)
# Now try and archive the soft deleted records.
results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
results, deleted_instance_uuids, archived = \
db.archive_deleted_rows(max_rows=100)
# verify system_metadata was dropped
self.assertIn('instance_system_metadata', results)
self.assertEqual(len(instance.system_metadata),
@@ -147,6 +150,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
# by the archive
self.assertIn('instance_actions', results)
self.assertIn('instance_actions_events', results)
self.assertEqual(sum(results.values()), archived)
def _get_table_counts(self):
engine = sqlalchemy_api.get_engine()
@@ -165,7 +169,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
lines = []
@@ -178,6 +182,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
None, status_fn=status)
self.assertNotEqual(0, deleted)
self.assertNotEqual(0, len(lines))
self.assertEqual(sum(results.values()), archived)
for line in lines:
self.assertIsNotNone(re.match(r'Deleted [1-9][0-9]* rows from .*',
line))
@@ -190,8 +195,9 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
self.assertEqual(sum(results.values()), archived)
pre_purge_results = self._get_table_counts()
@@ -224,9 +230,10 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
server = self._create_server()
server_id = server['id']
self._delete_server(server_id)
results, deleted_ids = db.archive_deleted_rows(max_rows=1000)
results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
self.assertEqual([server_id], deleted_ids)
date = dateutil_parser.parse('oct 21 2015', fuzzy=True)
admin_context = context.get_admin_context()
deleted = sqlalchemy_api.purge_shadow_tables(admin_context, date)
self.assertEqual(0, deleted)
self.assertEqual(sum(results.values()), archived)
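Stated once on its own, the invariant these assertions pin down (same test environment assumed):

    results, deleted_ids, archived = db.archive_deleted_rows(max_rows=1000)
    # The new third return value equals the sum of the per-table counts.
    assert archived == sum(results.values())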


@@ -69,7 +69,7 @@ class FillVirtualInterfaceListMigration(
# Now archive the deleted instance record.
# The following (archive stuff) is used to prove that the migration
# created a "fake instance". It is not necessary to trigger the bug.
table_to_rows_archived, deleted_instance_uuids = (
table_to_rows_archived, deleted_instance_uuids, total_rows_archived = (
db_api.archive_deleted_rows(max_rows=1000))
self.assertIn('instances', table_to_rows_archived)
self.assertEqual(1, table_to_rows_archived['instances'])


@@ -105,7 +105,7 @@ class PeriodicNodeRecreateTestCase(test.TestCase,
# Now that the node2 record was un-soft-deleted, archiving should not
# remove any compute_nodes.
LOG.info('Archiving the database.')
archived = db_api.archive_deleted_rows(1000)[0]
archived = db_api.archive_deleted_rows(max_rows=1000)[0]
self.assertNotIn('compute_nodes', archived)
cn2 = objects.ComputeNode.get_by_host_and_nodename(
ctxt, 'node1', 'node2')


@@ -28,8 +28,10 @@ from nova.network.neutronv2 import constants
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.functional import test_servers
from nova.tests.unit.image import fake as image_fake
CONF = config.CONF
INCOMPLETE_CONSUMER_ID = '00000000-0000-0000-0000-000000000000'
@@ -1423,3 +1425,93 @@ class TestDBArchiveDeletedRows(integrated_helpers._IntegratedTestBase):
# And that we still have one remaining group member.
self.assertEqual(
1, len(self.api.get_server_group(group['id'])['members']))
class TestDBArchiveDeletedRowsMultiCell(integrated_helpers.InstanceHelperMixin,
test.TestCase):
NUMBER_OF_CELLS = 2
def setUp(self):
super(TestDBArchiveDeletedRowsMultiCell, self).setUp()
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# We need the admin API to use forced_host during server create
self.api = api_fixture.admin_api
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
self.start_service('conductor')
self.start_service('scheduler')
self.context = context.RequestContext('fake-user', 'fake-project')
self.cli = manage.DbCommands()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
# Start two compute services, one per cell
self.compute1 = self.start_service('compute', host='host1',
cell='cell1')
self.compute2 = self.start_service('compute', host='host2',
cell='cell2')
def test_archive_deleted_rows(self):
admin_context = context.get_admin_context(read_deleted='yes')
# Boot a server to cell1
server_ids = {}
server = self._build_minimal_create_server_request(
self.api, 'cell1-server', az='nova:host1')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
server_ids['cell1'] = created_server['id']
# Boot a server to cell2
server = self._build_minimal_create_server_request(
self.api, 'cell2-server', az='nova:host2')
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ACTIVE')
server_ids['cell2'] = created_server['id']
# Boot a server to cell0 (force an ERROR state prior to scheduling)
server = self._build_minimal_create_server_request(
self.api, 'cell0-server')
# Flavor m1.xlarge cannot be fulfilled
server['flavorRef'] = 'http://fake.server/5'
created_server = self.api.post_server({'server': server})
self._wait_for_state_change(self.api, created_server, 'ERROR')
server_ids['cell0'] = created_server['id']
# Verify all the servers are in the databases
for cell_name, server_id in server_ids.items():
with context.target_cell(admin_context,
self.cell_mappings[cell_name]) as cctxt:
objects.Instance.get_by_uuid(cctxt, server_id)
# Delete the servers
for cell_name in server_ids.keys():
self.api.delete_server(server_ids[cell_name])
# Verify all the servers are in the databases still (as soft deleted)
for cell_name, server_id in server_ids.items():
with context.target_cell(admin_context,
self.cell_mappings[cell_name]) as cctxt:
objects.Instance.get_by_uuid(cctxt, server_id)
# Archive the deleted rows
self.cli.archive_deleted_rows(verbose=True, all_cells=True)
# Three instances should have been archived (cell0, cell1, cell2)
# Note: the leading '|' in each pattern must be escaped; an unescaped
# leading '|' is an empty alternation that matches any output.
self.assertRegex(self.output.getvalue(),
r"\| cell0\.instances.*\| 1.*")
self.assertRegex(self.output.getvalue(),
r"\| cell1\.instances.*\| 1.*")
self.assertRegex(self.output.getvalue(),
r"\| cell2\.instances.*\| 1.*")
self.assertRegex(self.output.getvalue(),
r"\| API_DB\.instance_mappings.*\| 3.*")
self.assertRegex(self.output.getvalue(),
r"\| API_DB\.request_specs.*\| 3.*")
# Verify all the servers are gone from the cell databases
for cell_name, server_id in server_ids.items():
with context.target_cell(admin_context,
self.cell_mappings[cell_name]) as cctxt:
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
cctxt, server_id)
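The per-cell verification leans on ``nova.context.target_cell``; in outline (assuming ``cell_mapping`` and ``server_id`` as in the test above):

    from nova import context
    from nova import objects

    admin_context = context.get_admin_context(read_deleted='yes')
    # target_cell yields a copy of the context whose database connection
    # points at the given cell for the duration of the with-block.
    with context.target_cell(admin_context, cell_mapping) as cctxt:
        objects.Instance.get_by_uuid(cctxt, server_id)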


@@ -8580,7 +8580,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertEqual(len(rows), 6)
# Make sure 'before' comparison is for < not <=, nothing deleted
before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
_, uuids = db.archive_deleted_rows(max_rows=1, before=before_date)
_, uuids, _ = db.archive_deleted_rows(max_rows=1, before=before_date)
self.assertEqual([], uuids)
# Archive rows deleted before 2017-01-02


@@ -385,6 +385,8 @@ class DBCommandsTestCase(test.NoDBTestCase):
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
self.commands = manage.DbCommands()
self.useFixture(nova_fixtures.Database())
self.useFixture(nova_fixtures.Database(database='api'))
def test_archive_deleted_rows_negative(self):
self.assertEqual(2, self.commands.archive_deleted_rows(-1))
@@ -393,13 +395,158 @@ class DBCommandsTestCase(test.NoDBTestCase):
large_number = '1' * 100
self.assertEqual(2, self.commands.archive_deleted_rows(large_number))
@mock.patch.object(manage.DbCommands, 'purge')
@mock.patch.object(db, 'archive_deleted_rows',
return_value=(dict(instances=10, consoles=5), list()))
@mock.patch.object(objects.CellMappingList, 'get_all')
def _test_archive_deleted_rows(self, mock_get_all, mock_db_archive,
verbose=False):
# Each call to archive in each cell returns
# total_rows_archived=15, so passing max_rows=30 will
# only iterate the first two cells.
return_value=(dict(instances=10, consoles=5),
list(), 15))
def _test_archive_deleted_rows_all_cells(self, mock_db_archive,
mock_purge, purge=False):
cell_dbs = nova_fixtures.CellDatabases()
cell_dbs.add_cell_database('fake:///db1')
cell_dbs.add_cell_database('fake:///db2')
cell_dbs.add_cell_database('fake:///db3')
self.useFixture(cell_dbs)
ctxt = context.RequestContext()
cell_mapping1 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db1',
transport_url='fake:///mq1',
name='cell1')
cell_mapping1.create()
cell_mapping2 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db2',
transport_url='fake:///mq2',
name='cell2')
cell_mapping2.create()
cell_mapping3 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db3',
transport_url='fake:///mq3',
name='cell3')
cell_mapping3.create()
# Archive with max_rows=30 so we test the case where, once the limit
# is exhausted, we do not move on to the remaining cell.
result = self.commands.archive_deleted_rows(30, verbose=True,
all_cells=True,
purge=purge)
mock_db_archive.assert_has_calls([
# Called with max_rows=30 but only 15 were archived.
mock.call(test.MatchType(context.RequestContext), 30, before=None),
# So the total from the last call was 15 and the new max_rows=15
# for the next call in the second cell.
mock.call(test.MatchType(context.RequestContext), 15, before=None)
])
output = self.output.getvalue()
expected = '''\
+-----------------+-------------------------+
| Table | Number of Rows Archived |
+-----------------+-------------------------+
| cell1.consoles | 5 |
| cell1.instances | 10 |
| cell2.consoles | 5 |
| cell2.instances | 10 |
+-----------------+-------------------------+
'''
if purge:
expected += 'Rows were archived, running purge...\n'
mock_purge.assert_called_once_with(purge_all=True, verbose=True,
all_cells=True)
else:
mock_purge.assert_not_called()
self.assertEqual(expected, output)
self.assertEqual(1, result)
def test_archive_deleted_rows_all_cells(self):
self._test_archive_deleted_rows_all_cells()
def test_archive_deleted_rows_all_cells_purge(self):
self._test_archive_deleted_rows_all_cells(purge=True)
@mock.patch.object(db, 'archive_deleted_rows')
def test_archive_deleted_rows_all_cells_until_complete(self,
mock_db_archive):
# In each of the first two cells, the first call archives 15 rows and
# the second call returns 0; the third cell returns 0 immediately
# (nothing left to archive).
fake_return = (dict(instances=10, consoles=5), list(), 15)
mock_db_archive.side_effect = [fake_return,
(dict(), list(), 0),
fake_return,
(dict(), list(), 0),
(dict(), list(), 0)]
cell_dbs = nova_fixtures.CellDatabases()
cell_dbs.add_cell_database('fake:///db1')
cell_dbs.add_cell_database('fake:///db2')
cell_dbs.add_cell_database('fake:///db3')
self.useFixture(cell_dbs)
ctxt = context.RequestContext()
cell_mapping1 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db1',
transport_url='fake:///mq1',
name='cell1')
cell_mapping1.create()
cell_mapping2 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db2',
transport_url='fake:///mq2',
name='cell2')
cell_mapping2.create()
cell_mapping3 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db3',
transport_url='fake:///mq3',
name='cell3')
cell_mapping3.create()
# Archive with max_rows=30 to verify that subsequent calls do not get
# a reduced max_rows when until_complete=True: there is no total limit.
result = self.commands.archive_deleted_rows(30, verbose=True,
all_cells=True,
until_complete=True)
mock_db_archive.assert_has_calls([
# Called with max_rows=30 but only 15 were archived.
mock.call(test.MatchType(context.RequestContext), 30, before=None),
# Called with max_rows=30 but 0 were archived (nothing left to
# archive in this cell)
mock.call(test.MatchType(context.RequestContext), 30, before=None),
# So the total from the last call was 0 and the new max_rows=30
# because until_complete=True.
mock.call(test.MatchType(context.RequestContext), 30, before=None),
# Called with max_rows=30 but 0 were archived (nothing left to
# archive in this cell)
mock.call(test.MatchType(context.RequestContext), 30, before=None),
# Called one final time with max_rows=30
mock.call(test.MatchType(context.RequestContext), 30, before=None)
])
output = self.output.getvalue()
expected = '''\
Archiving.....complete
+-----------------+-------------------------+
| Table | Number of Rows Archived |
+-----------------+-------------------------+
| cell1.consoles | 5 |
| cell1.instances | 10 |
| cell2.consoles | 5 |
| cell2.instances | 10 |
+-----------------+-------------------------+
'''
self.assertEqual(expected, output)
self.assertEqual(1, result)
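The call sequencing above works because ``mock`` consumes one ``side_effect`` entry per call; a minimal reminder:

    import mock  # these tests use the mock library; unittest.mock also works

    m = mock.Mock()
    m.side_effect = [15, 0, 15, 0, 0]
    print([m() for _ in range(5)])  # [15, 0, 15, 0, 0]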
@mock.patch.object(db, 'archive_deleted_rows',
return_value=(
dict(instances=10, consoles=5), list(), 15))
def _test_archive_deleted_rows(self, mock_db_archive, verbose=False):
result = self.commands.archive_deleted_rows(20, verbose=verbose)
mock_db_archive.assert_called_once_with(20, before=None)
mock_db_archive.assert_called_once_with(
test.MatchType(context.RequestContext), 20, before=None)
output = self.output.getvalue()
if verbose:
expected = '''\
@@ -429,9 +576,9 @@ class DBCommandsTestCase(test.NoDBTestCase):
mock_db_archive,
verbose=False):
mock_db_archive.side_effect = [
({'instances': 10, 'instance_extra': 5}, list()),
({'instances': 5, 'instance_faults': 1}, list()),
({}, list())]
({'instances': 10, 'instance_extra': 5}, list(), 15),
({'instances': 5, 'instance_faults': 1}, list(), 6),
({}, list(), 0)]
result = self.commands.archive_deleted_rows(20, verbose=verbose,
until_complete=True)
self.assertEqual(1, result)
@@ -450,9 +597,11 @@ Archiving.....complete
expected = ''
self.assertEqual(expected, self.output.getvalue())
mock_db_archive.assert_has_calls([mock.call(20, before=None),
mock.call(20, before=None),
mock.call(20, before=None)])
mock_db_archive.assert_has_calls([
mock.call(test.MatchType(context.RequestContext), 20, before=None),
mock.call(test.MatchType(context.RequestContext), 20, before=None),
mock.call(test.MatchType(context.RequestContext), 20, before=None),
])
def test_archive_deleted_rows_until_complete_quiet(self):
self.test_archive_deleted_rows_until_complete(verbose=False)
@@ -465,8 +614,8 @@ Archiving.....complete
mock_db_purge,
verbose=True):
mock_db_archive.side_effect = [
({'instances': 10, 'instance_extra': 5}, list()),
({'instances': 5, 'instance_faults': 1}, list()),
({'instances': 10, 'instance_extra': 5}, list(), 15),
({'instances': 5, 'instance_faults': 1}, list(), 6),
KeyboardInterrupt]
result = self.commands.archive_deleted_rows(20, verbose=verbose,
until_complete=True,
@@ -488,12 +637,72 @@ Rows were archived, running purge...
expected = ''
self.assertEqual(expected, self.output.getvalue())
mock_db_archive.assert_has_calls([mock.call(20, before=None),
mock.call(20, before=None),
mock.call(20, before=None)])
mock_db_archive.assert_has_calls([
mock.call(test.MatchType(context.RequestContext), 20, before=None),
mock.call(test.MatchType(context.RequestContext), 20, before=None),
mock.call(test.MatchType(context.RequestContext), 20, before=None),
])
mock_db_purge.assert_called_once_with(mock.ANY, None,
status_fn=mock.ANY)
@mock.patch.object(db, 'archive_deleted_rows')
def test_archive_deleted_rows_until_stopped_cells(self, mock_db_archive,
verbose=True):
# Test that when archiving with all_cells=True and until_complete=True,
# a KeyboardInterrupt makes the command return directly without
# processing the remaining cells.
mock_db_archive.side_effect = [
({'instances': 10, 'instance_extra': 5}, list(), 15),
KeyboardInterrupt]
cell_dbs = nova_fixtures.CellDatabases()
cell_dbs.add_cell_database('fake:///db1')
cell_dbs.add_cell_database('fake:///db2')
cell_dbs.add_cell_database('fake:///db3')
self.useFixture(cell_dbs)
ctxt = context.RequestContext()
cell_mapping1 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db1',
transport_url='fake:///mq1',
name='cell1')
cell_mapping1.create()
cell_mapping2 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db2',
transport_url='fake:///mq2',
name='cell2')
cell_mapping2.create()
cell_mapping3 = objects.CellMapping(context=ctxt,
uuid=uuidutils.generate_uuid(),
database_connection='fake:///db3',
transport_url='fake:///mq3',
name='cell3')
cell_mapping3.create()
result = self.commands.archive_deleted_rows(20, verbose=verbose,
until_complete=True,
all_cells=True)
self.assertEqual(1, result)
if verbose:
expected = '''\
Archiving....stopped
+----------------------+-------------------------+
| Table | Number of Rows Archived |
+----------------------+-------------------------+
| cell1.instance_extra | 5 |
| cell1.instances | 10 |
+----------------------+-------------------------+
'''
else:
expected = ''
self.assertEqual(expected, self.output.getvalue())
mock_db_archive.assert_has_calls([
mock.call(test.MatchType(context.RequestContext), 20, before=None),
mock.call(test.MatchType(context.RequestContext), 20, before=None)
])
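Reduced to a pattern, the behavior under test is: a KeyboardInterrupt breaks out of the cell loop and the summary reads 'stopped' rather than 'complete' (hypothetical ``cells`` and ``archive`` names):

    interrupt = False
    for cell in cells:
        try:
            archive(cell)  # hypothetical per-cell archival call
        except KeyboardInterrupt:
            interrupt = True
            break  # remaining cells are not processed
    print('stopped' if interrupt else 'complete')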
def test_archive_deleted_rows_until_stopped_quiet(self):
self.test_archive_deleted_rows_until_stopped(verbose=False)
@@ -501,21 +710,23 @@ Rows were archived, running purge...
@mock.patch.object(objects.CellMappingList, 'get_all')
def test_archive_deleted_rows_before(self, mock_get_all, mock_db_archive):
mock_db_archive.side_effect = [
({'instances': 10, 'instance_extra': 5}, list()),
({'instances': 5, 'instance_faults': 1}, list()),
({'instances': 10, 'instance_extra': 5}, list(), 15),
({'instances': 5, 'instance_faults': 1}, list(), 6),
KeyboardInterrupt]
result = self.commands.archive_deleted_rows(20, before='2017-01-13')
mock_db_archive.assert_called_once_with(20,
mock_db_archive.assert_called_once_with(
test.MatchType(context.RequestContext), 20,
before=datetime.datetime(2017, 1, 13))
self.assertEqual(1, result)
@mock.patch.object(db, 'archive_deleted_rows', return_value=({}, []))
@mock.patch.object(db, 'archive_deleted_rows', return_value=({}, [], 0))
@mock.patch.object(objects.CellMappingList, 'get_all')
def test_archive_deleted_rows_verbose_no_results(self, mock_get_all,
mock_db_archive):
result = self.commands.archive_deleted_rows(20, verbose=True,
purge=True)
mock_db_archive.assert_called_once_with(20, before=None)
mock_db_archive.assert_called_once_with(
test.MatchType(context.RequestContext), 20, before=None)
output = self.output.getvalue()
# If nothing was archived, there should be no purge messages
self.assertIn('Nothing was archived.', output)
@@ -533,9 +744,10 @@ Rows were archived, running purge...
ctxt = context.RequestContext('fake-user', 'fake_project')
cell_uuid = uuidutils.generate_uuid()
cell_mapping = objects.CellMapping(context=ctxt,
uuid=cell_uuid,
database_connection='fake:///db',
transport_url='fake:///mq')
uuid=cell_uuid,
database_connection='fake:///db',
transport_url='fake:///mq',
name='cell1')
cell_mapping.create()
uuids = []
for i in range(2):
@@ -547,28 +759,32 @@ Rows were archived, running purge...
cell_mapping=cell_mapping, instance_uuid=uuid)\
.create()
mock_db_archive.return_value = (dict(instances=2, consoles=5), uuids)
mock_db_archive.return_value = (
dict(instances=2, consoles=5), uuids, 7)
mock_reqspec_destroy.return_value = 2
mock_members_destroy.return_value = 0
result = self.commands.archive_deleted_rows(20, verbose=verbose)
result = self.commands.archive_deleted_rows(20, verbose=verbose,
all_cells=True)
self.assertEqual(1, result)
mock_db_archive.assert_called_once_with(20, before=None)
mock_db_archive.assert_has_calls([
mock.call(test.MatchType(context.RequestContext), 20, before=None)
])
self.assertEqual(1, mock_reqspec_destroy.call_count)
mock_members_destroy.assert_called_once()
output = self.output.getvalue()
if verbose:
expected = '''\
+-----------------------+-------------------------+
| Table | Number of Rows Archived |
+-----------------------+-------------------------+
| consoles | 5 |
| instance_group_member | 0 |
| instance_mappings | 2 |
| instances | 2 |
| request_specs | 2 |
+-----------------------+-------------------------+
+------------------------------+-------------------------+
| Table | Number of Rows Archived |
+------------------------------+-------------------------+
| API_DB.instance_group_member | 0 |
| API_DB.instance_mappings | 2 |
| API_DB.request_specs | 2 |
| cell1.consoles | 5 |
| cell1.instances | 2 |
+------------------------------+-------------------------+
'''
self.assertEqual(expected, output)
else:


@@ -0,0 +1,8 @@
---
features:
- |
Support for archiving deleted rows from the database across
all cells has been added to the ``nova-manage db archive_deleted_rows``
command. Specify the ``--all-cells`` option to run the process across all
existing cells. Archiving across all cells is only possible from a node
where the ``[api_database]/connection`` option is configured, since the
cell mappings are read from the API database.