Raise hacking version to 2.0.0

We've kept hacking capped for a long time now. This raises the hacking
package version to the latest release and fixes the issues that it
found.

Change-Id: I933d541d9198f9742c95494bae6030cb3e4f2499
Signed-off-by: Sean McGinnis <sean.mcginnis@gmail.com>
Author: Sean McGinnis
Date: 2019-12-19 12:53:02 -06:00
Parent: b1ef924feb
Commit: d9ce598f0c

24 changed files with 43 additions and 42 deletions
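Most of the changes below fall into two patterns flagged by the newer pyflakes that hacking 2.0.0 pulls in: identity comparisons against string literals ('is' where '==' is meant) and exception variables that are bound but never used. A minimal, hypothetical before/after sketch of both patterns (the function and variable names are invented and are not taken from the Cinder tree):

    # Hypothetical illustration only; not part of this change.
    def parse_old(fmt, value):
        if fmt is 'raw':           # flagged: identity comparison with a str literal
            return value
        try:
            return int(value)
        except ValueError as err:  # flagged: 'err' is assigned but never used
            return None

    def parse_new(fmt, value):
        if fmt == 'raw':           # compare literals by value
            return value
        try:
            return int(value)
        except ValueError:         # drop the unused binding
            return None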


@@ -174,7 +174,7 @@ class VolumeAdminController(AdminController):
         try:
             self.volume_api.terminate_connection(context, volume, connector)
-        except exception.VolumeBackendAPIException as error:
+        except exception.VolumeBackendAPIException:
             msg = _("Unable to terminate volume connection from backend.")
             raise webob.exc.HTTPInternalServerError(explanation=msg)


@@ -68,7 +68,7 @@ class CgsnapshotsController(wsgi.Controller):
         except exception.InvalidGroupSnapshot as e:
             raise exc.HTTPBadRequest(explanation=six.text_type(e))
         except (exception.GroupSnapshotNotFound,
-                exception.PolicyNotAuthorized) as e:
+                exception.PolicyNotAuthorized):
             # Exceptions will be handled at the wsgi level
             raise
         except Exception:


@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
 class APIMapper(routes.Mapper):
     def routematch(self, url=None, environ=None):
-        if url is "":
+        if url == "":
             result = self._match("", environ)
             return result[0], result[1]
         return routes.Mapper.routematch(self, url, environ)


@@ -190,7 +190,7 @@ class AttachmentsController(wsgi.Controller):
             err_msg = _(
                 "Unable to create attachment for volume (%s).") % ex.msg
             LOG.exception(err_msg)
-        except Exception as ex:
+        except Exception:
             err_msg = _("Unable to create attachment for volume.")
             LOG.exception(err_msg)
         finally:


@@ -179,7 +179,10 @@ class LVM(executor.Executor):
             consumed_space = pool_size / 100 * data_percent
             free_space = pool_size - consumed_space
             free_space = round(free_space, 2)
-        except putils.ProcessExecutionError as err:
+        # Need noqa due to a false error about the 'err' variable being unused
+        # even though it is used in the logging. Possibly related to
+        # https://github.com/PyCQA/pyflakes/issues/378.
+        except putils.ProcessExecutionError as err:  # noqa
             LOG.exception('Error querying thin pool about data_percent')
             LOG.error('Cmd :%s', err.cmd)
             LOG.error('StdOut :%s', err.stdout)


@@ -280,7 +280,7 @@ class CinderPersistentObject(object):
         # registration.
         try:
             cls.model = db.get_model_for_versioned_object(cls)
-        except (ImportError, AttributeError) as e:
+        except (ImportError, AttributeError):
             msg = _("Couldn't find ORM model for Persistent Versioned "
                     "Object %s.") % cls.obj_name()
             LOG.exception("Failed to initialize object.")


@@ -44,7 +44,6 @@ def fake_get(self, context, *args, **kwargs):
         'host': 'fake-host',
         'status': 'available',
         'encryption_key_id': None,
-        'volume_type_id': None,
         'migration_status': None,
         'availability_zone': 'fake-zone',
         'attach_status': 'detached',


@@ -102,7 +102,6 @@ class _FakeImageService(object):
                   'updated_at': timestamp,
                   'deleted_at': None,
                   'deleted': False,
-                  'size': 1024,
                   'status': 'active',
                   'visibility': 'public',
                   'protected': True,


@@ -2017,7 +2017,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
         mock_client.getVolume.return_value = {
             'name': mock.ANY,
-            'snapCPG': mock.ANY,
             'comment': "{'display_name': 'Foo Volume'}",
             'provisioningType': mock.ANY,
             'userCPG': 'OpenStackCPG',
@@ -2124,7 +2123,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
         mock_client.getVolume.return_value = {
             'name': mock.ANY,
-            'snapCPG': mock.ANY,
             'comment': "{'display_name': 'Foo Volume'}",
             'provisioningType': mock.ANY,
             'userCPG': 'OpenStackCPG',
@@ -2224,7 +2222,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
         mock_client.getVolume.return_value = {
             'name': mock.ANY,
-            'snapCPG': mock.ANY,
             'comment': "{'display_name': 'Foo Volume'}",
             'provisioningType': mock.ANY,
             'userCPG': 'OpenStackCPG',


@@ -4069,8 +4069,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
                           volume_fc,
                           connector)
         # All the wwpns of this host are not configured.
-        host_site_2 = {'site1': 'ff00000000000000',
-                       'site1': 'ff00000000000001'}
+        # TODO: This needs to be fixed. See bug #1857043
+        host_site_2 = {'site1': 'ff00000000000000',  # noqa
+                       'site1': 'ff00000000000001'}  # noqa
         self.fc_driver.configuration.set_override(
             'storwize_preferred_host_site', host_site_2)
         self.assertRaises(exception.VolumeDriverException,
@@ -4078,8 +4079,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
                           volume_fc,
                           connector)
-        host_site_3 = {'site1': 'ffff000000000000',
-                       'site1': 'ffff000000000001'}
+        # TODO: This needs to be fixed. See bug #1857043
+        host_site_3 = {'site1': 'ffff000000000000',  # noqa
+                       'site1': 'ffff000000000001'}  # noqa
         self.fc_driver.configuration.set_override(
             'storwize_preferred_host_site', host_site_3)
         self.fc_driver.initialize_connection(volume_fc, connector)
@@ -4088,8 +4090,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
         host_info = self.fc_driver._helpers.ssh.lshost(host=host_name)
         self.assertEqual('site1', host_info[0]['site_name'])
-        host_site_4 = {'site2': 'ffff000000000000',
-                       'site2': 'ffff000000000001'}
+        # TODO: This needs to be fixed. See bug #1857043
+        host_site_4 = {'site2': 'ffff000000000000',  # noqa
+                       'site2': 'ffff000000000001'}  # noqa
         self.fc_driver.configuration.set_override(
             'storwize_preferred_host_site', host_site_4)
         self.assertRaises(exception.InvalidConfigurationValue,


@@ -1319,7 +1319,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
                                           image_id,
                                           image_meta,
                                           fake_image_service)
-        if format is 'raw' and not owner and location:
+        if format == 'raw' and not owner and location:
             fake_driver.create_cloned_volume.assert_called_once_with(
                 volume, image_volume)
             handle_bootable.assert_called_once_with(self.ctxt, volume,
@@ -1384,7 +1384,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
                                           image_id,
                                           image_meta,
                                           fake_image_service)
-        if format is 'raw' and not owner and location:
+        if format == 'raw' and not owner and location:
             fake_driver.create_cloned_volume.assert_called_once_with(
                 volume, image_volume)
             handle_bootable.assert_called_once_with(self.ctxt, volume,


@@ -88,7 +88,7 @@ class DiscardFlagTestCase(base.BaseVolumeTestCase):
         self.volume.driver.initialize_connection.return_value = conn_info
         def _safe_get(key):
-            if key is 'report_discard_supported':
+            if key == 'report_discard_supported':
                 return config_discard_flag
             else:
                 return None


@@ -209,7 +209,7 @@ class API(base.Base):
             donor_reservations = QUOTAS.reserve(context,
                                                 project_id=donor_id,
                                                 **reserve_opts)
-        except exception.OverQuota as e:
+        except exception.OverQuota:
             donor_reservations = None
             LOG.exception("Failed to update volume providing snapshots quota:"
                           " Over quota.")


@@ -505,7 +505,7 @@ class Client(object):
         """Adds the `lun` to `storage_group`."""
         try:
             return storage_group.attach_alu(lun, max_retries)
-        except storops_ex.VNXAluAlreadyAttachedError as ex:
+        except storops_ex.VNXAluAlreadyAttachedError:
             # Ignore the failure due to retry.
             return storage_group.get_hlu(lun)
         except storops_ex.VNXNoHluAvailableError as ex:


@@ -315,7 +315,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
                 replica_info = self.replica.create_replica(lun_info,
                                                            replica_model)
                 model_update.update(replica_info)
-            except Exception as err:
+            except Exception:
                 LOG.exception('Create replication volume error.')
                 self._delete_lun_with_check(lun_id)
                 raise
@@ -392,7 +392,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
         if replica_data:
             try:
                 self.replica.delete_replica(volume)
-            except exception.VolumeBackendAPIException as err:
+            except exception.VolumeBackendAPIException:
                 with excutils.save_and_reraise_exception():
                     LOG.exception("Delete replication error.")
                     self._delete_volume(volume)


@@ -199,7 +199,7 @@ class ReplicaCommonDriver(object):
         try:
             self.wait_expect_state(replica_id, running_status)
-        except Exception as err:
+        except Exception:
             msg = _('Split replication failed.')
             LOG.error(msg)
             raise exception.VolumeBackendAPIException(data=msg)


@@ -250,7 +250,7 @@ class GPFSDriver(driver.CloneableImageVD,
         try:
             fileset = fs_regex.match(out).group('fileset')
             return fileset
-        except AttributeError as exc:
+        except AttributeError:
             msg = (_('Failed to find fileset for path %(path)s, command '
                      'output: %(cmdout)s.') %
                    {'path': path,


@@ -1801,12 +1801,12 @@ class XIVProxy(proxy.IBMStorageProxy):
                 "cg_create", cg=cgname,
                 pool=self.storage_info[
                     storage.FLAG_KEYS['storage_pool']]).as_list
-        except errors.CgNameExistsError as e:
+        except errors.CgNameExistsError:
             error = (_("consistency group %s already exists on backend") %
                      cgname)
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.CgLimitReachedError as e:
+        except errors.CgLimitReachedError:
             error = _("Reached Maximum number of consistency groups")
             LOG.error(error)
             raise self._get_exception()(error)
@@ -2176,37 +2176,37 @@ class XIVProxy(proxy.IBMStorageProxy):
             self._call_xiv_xcli(
                 "cg_snapshots_create", cg=cgname,
                 snap_group=groupname).as_list
-        except errors.CgDoesNotExistError as e:
+        except errors.CgDoesNotExistError:
             error = (_("Consistency group %s does not exist on backend") %
                      cgname)
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.CgBadNameError as e:
+        except errors.CgBadNameError:
             error = (_("Consistency group %s has an illegal name") % cgname)
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.SnapshotGroupDoesNotExistError as e:
+        except errors.SnapshotGroupDoesNotExistError:
             error = (_("Snapshot group %s has an illegal name") % cgname)
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.PoolSnapshotLimitReachedError as e:
+        except errors.PoolSnapshotLimitReachedError:
             error = _("Reached maximum snapshots allocation size")
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.CgEmptyError as e:
+        except errors.CgEmptyError:
             error = (_("Consistency group %s is empty") % cgname)
             LOG.error(error)
             raise self._get_exception()(error)
         except (errors.MaxVolumesReachedError,
-                errors.DomainMaxVolumesReachedError) as e:
+                errors.DomainMaxVolumesReachedError):
             error = _("Reached Maximum number of volumes")
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.SnapshotGroupIsReservedError as e:
+        except errors.SnapshotGroupIsReservedError:
             error = (_("Consistency group %s name is reserved") % cgname)
             LOG.error(error)
             raise self._get_exception()(error)
-        except errors.SnapshotGroupAlreadyExistsError as e:
+        except errors.SnapshotGroupAlreadyExistsError:
             error = (_("Snapshot group %s already exists") % groupname)
             LOG.error(error)
             raise self._get_exception()(error)


@@ -176,7 +176,7 @@ class InStorageMCSReplication(object):
             # Reverse the role of the primary and secondary volumes
             self.target_assistant.switch_relationship(rel_info['name'])
             return {'replication_status': fields.ReplicationStatus.FAILED_OVER}
-        except Exception as e:
+        except Exception:
             LOG.exception('Unable to fail-over the volume %(id)s to the '
                           'secondary back-end by switchrcrelationship '
                           'command.', {"id": vref.id})


@@ -417,7 +417,7 @@ class RSDClient(object):
                     detail=(_("Volume %s already attached") % volume_url))
             node.attach_endpoint(volume.path)
-        except sushy_exceptions.InvalidParameterValueError as e:
+        except sushy_exceptions.InvalidParameterValueError:
             LOG.exception("Attach volume failed (not allowable)")
             raise RSDRetryableException(
                 reason=(_("Not allowed to attach from "


@@ -395,7 +395,7 @@ class STXClient(object):
                     LOG.debug("volume '{}' is already mapped to {} at lun {}".
                               format(volume_name, iid, lun))
                     return int(lun)
-        except Exception as e:
+        except Exception:
             LOG.exception("failed to look up mappings for volume '%s'",
                           volume_name)
             raise


@@ -223,7 +223,7 @@ class SCSTAdm(iscsi.ISCSITarget):
         # starts
         try:
             self.scst_execute('-write_config', '/etc/scst.conf')
-        except putils.ProcessExecutionError as e:
+        except putils.ProcessExecutionError:
             LOG.error("Failed to write in /etc/scst.conf.")
             raise exception.ISCSITargetHelperCommandFailed(
                 error_message="Failed to write in /etc/scst.conf.")


@@ -38,7 +38,7 @@ gitdb2==2.0.3
 GitPython==2.1.8
 google-api-python-client==1.4.2
 greenlet==0.4.10
-hacking==1.1.0
+hacking==2.0.0
 httplib2==0.9.1
 idna==2.6
 imagesize==1.0.0


@@ -3,7 +3,7 @@
 # process, which may cause wedges in the gate later.
 # Install bounded pep8/pyflakes first, then let flake8 install
-hacking>=1.1.0,<1.2.0 # Apache-2.0
+hacking>=2.0.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
 ddt>=1.2.1 # MIT
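Hacking pins its own flake8, pycodestyle, and pyflakes versions, so after bumping this requirement it is worth confirming which linter versions the tox environment actually installed. A small, hypothetical check (assumes Python 3.8+ for importlib.metadata; not part of this change):

    # Hypothetical helper: print the linter stack pulled in by hacking.
    from importlib import metadata

    for pkg in ('hacking', 'flake8', 'pycodestyle', 'pyflakes'):
        try:
            print(pkg, metadata.version(pkg))
        except metadata.PackageNotFoundError:
            print(pkg, 'not installed')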