Raise hacking version to 2.0.0
We've kept hacking capped for a long time now. This raises the hacking package version to the latest release and fixes the issues that it found.

Change-Id: I933d541d9198f9742c95494bae6030cb3e4f2499
Signed-off-by: Sean McGinnis <sean.mcginnis@gmail.com>
parent b1ef924feb
commit d9ce598f0c
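Almost all of the code changes below are mechanical fixes for checks the newer hacking (via flake8/pyflakes) now enforces. As a rough, hypothetical sketch of the two patterns that account for most of the diff -- identity comparison against a literal, and an exception bound to a name that is never used -- consider:

# Illustrative only; names and exceptions are made up, not taken from Cinder.

def is_empty(url):
    # Before: `if url is "":` -- `is` checks identity, which only works by
    # accident of string interning. Pyflakes flags this (F632); compare by value.
    return url == ""


def terminate(call):
    try:
        call()
    except ValueError:
        # Before: `except ValueError as error:` with `error` never referenced.
        # The unused binding is what the linter objects to, so it is dropped.
        raise RuntimeError("Unable to terminate volume connection from backend.")


if __name__ == "__main__":
    print(is_empty(""))         # True
    print(is_empty("volumes"))  # False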
@@ -174,7 +174,7 @@ class VolumeAdminController(AdminController):

 try:
     self.volume_api.terminate_connection(context, volume, connector)
-except exception.VolumeBackendAPIException as error:
+except exception.VolumeBackendAPIException:
     msg = _("Unable to terminate volume connection from backend.")
     raise webob.exc.HTTPInternalServerError(explanation=msg)
@@ -68,7 +68,7 @@ class CgsnapshotsController(wsgi.Controller):
 except exception.InvalidGroupSnapshot as e:
     raise exc.HTTPBadRequest(explanation=six.text_type(e))
 except (exception.GroupSnapshotNotFound,
-        exception.PolicyNotAuthorized) as e:
+        exception.PolicyNotAuthorized):
     # Exceptions will be handled at the wsgi level
     raise
 except Exception:
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)

 class APIMapper(routes.Mapper):
     def routematch(self, url=None, environ=None):
-        if url is "":
+        if url == "":
             result = self._match("", environ)
             return result[0], result[1]
         return routes.Mapper.routematch(self, url, environ)
@@ -190,7 +190,7 @@ class AttachmentsController(wsgi.Controller):
     err_msg = _(
         "Unable to create attachment for volume (%s).") % ex.msg
     LOG.exception(err_msg)
-except Exception as ex:
+except Exception:
     err_msg = _("Unable to create attachment for volume.")
     LOG.exception(err_msg)
 finally:
@@ -179,7 +179,10 @@ class LVM(executor.Executor):
     consumed_space = pool_size / 100 * data_percent
     free_space = pool_size - consumed_space
     free_space = round(free_space, 2)
-except putils.ProcessExecutionError as err:
+# Need noqa due to a false error about the 'err' variable being unused
+# even though it is used in the logging. Possibly related to
+# https://github.com/PyCQA/pyflakes/issues/378.
+except putils.ProcessExecutionError as err:  # noqa
     LOG.exception('Error querying thin pool about data_percent')
     LOG.error('Cmd :%s', err.cmd)
     LOG.error('StdOut :%s', err.stdout)
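The `# noqa` added above deserves a note: according to the in-tree comment, some pyflakes releases reported `err` as unused even though it feeds the subsequent log calls (see the linked PyCQA/pyflakes#378). A minimal, hypothetical reproduction of that code shape -- the suppression only exists to keep the affected linter versions quiet:

import logging
import subprocess

LOG = logging.getLogger(__name__)


def query_thin_pool(cmd):
    try:
        return subprocess.check_output(cmd)
    except subprocess.CalledProcessError as err:  # noqa
        # `err` is clearly used below; the affected pyflakes versions still
        # flagged it as unused, which is why the real change carries the noqa.
        LOG.exception('Error querying thin pool about data_percent')
        LOG.error('Cmd    :%s', err.cmd)
        LOG.error('Output :%s', err.output)
        raise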
@@ -280,7 +280,7 @@ class CinderPersistentObject(object):
 # registration.
 try:
     cls.model = db.get_model_for_versioned_object(cls)
-except (ImportError, AttributeError) as e:
+except (ImportError, AttributeError):
     msg = _("Couldn't find ORM model for Persistent Versioned "
             "Object %s.") % cls.obj_name()
     LOG.exception("Failed to initialize object.")
@@ -44,7 +44,6 @@ def fake_get(self, context, *args, **kwargs):
     'host': 'fake-host',
     'status': 'available',
     'encryption_key_id': None,
-    'volume_type_id': None,
     'migration_status': None,
     'availability_zone': 'fake-zone',
     'attach_status': 'detached',
@@ -102,7 +102,6 @@ class _FakeImageService(object):
     'updated_at': timestamp,
     'deleted_at': None,
     'deleted': False,
-    'size': 1024,
     'status': 'active',
     'visibility': 'public',
     'protected': True,
@@ -2017,7 +2017,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):

 mock_client.getVolume.return_value = {
     'name': mock.ANY,
-    'snapCPG': mock.ANY,
     'comment': "{'display_name': 'Foo Volume'}",
     'provisioningType': mock.ANY,
     'userCPG': 'OpenStackCPG',
@@ -2124,7 +2123,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):

 mock_client.getVolume.return_value = {
     'name': mock.ANY,
-    'snapCPG': mock.ANY,
     'comment': "{'display_name': 'Foo Volume'}",
     'provisioningType': mock.ANY,
     'userCPG': 'OpenStackCPG',
@@ -2224,7 +2222,6 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):

 mock_client.getVolume.return_value = {
     'name': mock.ANY,
-    'snapCPG': mock.ANY,
     'comment': "{'display_name': 'Foo Volume'}",
     'provisioningType': mock.ANY,
     'userCPG': 'OpenStackCPG',
@@ -4069,8 +4069,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
     volume_fc,
     connector)
 # All the wwpns of this host are not configured.
-host_site_2 = {'site1': 'ff00000000000000',
-               'site1': 'ff00000000000001'}
+# TODO: This needs to be fixed. See bug #1857043
+host_site_2 = {'site1': 'ff00000000000000', # noqa
+               'site1': 'ff00000000000001'} # noqa
 self.fc_driver.configuration.set_override(
     'storwize_preferred_host_site', host_site_2)
 self.assertRaises(exception.VolumeDriverException,
@@ -4078,8 +4079,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
     volume_fc,
     connector)

-host_site_3 = {'site1': 'ffff000000000000',
-               'site1': 'ffff000000000001'}
+# TODO: This needs to be fixed. See bug #1857043
+host_site_3 = {'site1': 'ffff000000000000', # noqa
+               'site1': 'ffff000000000001'} # noqa
 self.fc_driver.configuration.set_override(
     'storwize_preferred_host_site', host_site_3)
 self.fc_driver.initialize_connection(volume_fc, connector)
@@ -4088,8 +4090,9 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
 host_info = self.fc_driver._helpers.ssh.lshost(host=host_name)
 self.assertEqual('site1', host_info[0]['site_name'])

-host_site_4 = {'site2': 'ffff000000000000',
-               'site2': 'ffff000000000001'}
+# TODO: This needs to be fixed. See bug #1857043
+host_site_4 = {'site2': 'ffff000000000000', # noqa
+               'site2': 'ffff000000000001'} # noqa
 self.fc_driver.configuration.set_override(
     'storwize_preferred_host_site', host_site_4)
 self.assertRaises(exception.InvalidConfigurationValue,
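The TODO comments in the three hunks above point at a real defect rather than a style nit: a Python dict literal silently keeps only the last occurrence of a repeated key, so these host_site fixtures never actually hold two wwpns. A small, hypothetical demonstration of what the linter is warning about (and why bug #1857043 tracks a proper fix instead of leaving the noqa in place):

# Duplicate keys are legal Python, but only the last value survives.
host_site = {'site1': 'ff00000000000000',
             'site1': 'ff00000000000001'}
print(host_site)       # {'site1': 'ff00000000000001'}
print(len(host_site))  # 1 -- the first wwpn is silently dropped

# One plausible shape for the intended data (illustrative, not the actual fix):
host_site_fixed = {'site1': ['ff00000000000000', 'ff00000000000001']}
print(len(host_site_fixed['site1']))  # 2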
@@ -1319,7 +1319,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
     image_id,
     image_meta,
     fake_image_service)
-if format is 'raw' and not owner and location:
+if format == 'raw' and not owner and location:
     fake_driver.create_cloned_volume.assert_called_once_with(
         volume, image_volume)
 handle_bootable.assert_called_once_with(self.ctxt, volume,
@@ -1384,7 +1384,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
     image_id,
     image_meta,
     fake_image_service)
-if format is 'raw' and not owner and location:
+if format == 'raw' and not owner and location:
     fake_driver.create_cloned_volume.assert_called_once_with(
         volume, image_volume)
 handle_bootable.assert_called_once_with(self.ctxt, volume,
@@ -88,7 +88,7 @@ class DiscardFlagTestCase(base.BaseVolumeTestCase):
 self.volume.driver.initialize_connection.return_value = conn_info

 def _safe_get(key):
-    if key is 'report_discard_supported':
+    if key == 'report_discard_supported':
         return config_discard_flag
     else:
         return None
@@ -209,7 +209,7 @@ class API(base.Base):
     donor_reservations = QUOTAS.reserve(context,
                                         project_id=donor_id,
                                         **reserve_opts)
-except exception.OverQuota as e:
+except exception.OverQuota:
     donor_reservations = None
     LOG.exception("Failed to update volume providing snapshots quota:"
                   " Over quota.")
@@ -505,7 +505,7 @@ class Client(object):
 """Adds the `lun` to `storage_group`."""
 try:
     return storage_group.attach_alu(lun, max_retries)
-except storops_ex.VNXAluAlreadyAttachedError as ex:
+except storops_ex.VNXAluAlreadyAttachedError:
     # Ignore the failure due to retry.
     return storage_group.get_hlu(lun)
 except storops_ex.VNXNoHluAvailableError as ex:
@@ -315,7 +315,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
     replica_info = self.replica.create_replica(lun_info,
                                                replica_model)
     model_update.update(replica_info)
-except Exception as err:
+except Exception:
     LOG.exception('Create replication volume error.')
     self._delete_lun_with_check(lun_id)
     raise
@@ -392,7 +392,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
 if replica_data:
     try:
         self.replica.delete_replica(volume)
-    except exception.VolumeBackendAPIException as err:
+    except exception.VolumeBackendAPIException:
         with excutils.save_and_reraise_exception():
             LOG.exception("Delete replication error.")
             self._delete_volume(volume)
@@ -199,7 +199,7 @@ class ReplicaCommonDriver(object):

 try:
     self.wait_expect_state(replica_id, running_status)
-except Exception as err:
+except Exception:
     msg = _('Split replication failed.')
     LOG.error(msg)
     raise exception.VolumeBackendAPIException(data=msg)
@@ -250,7 +250,7 @@ class GPFSDriver(driver.CloneableImageVD,
 try:
     fileset = fs_regex.match(out).group('fileset')
     return fileset
-except AttributeError as exc:
+except AttributeError:
     msg = (_('Failed to find fileset for path %(path)s, command '
              'output: %(cmdout)s.') %
            {'path': path,
@@ -1801,12 +1801,12 @@ class XIVProxy(proxy.IBMStorageProxy):
         "cg_create", cg=cgname,
         pool=self.storage_info[
             storage.FLAG_KEYS['storage_pool']]).as_list
-except errors.CgNameExistsError as e:
+except errors.CgNameExistsError:
     error = (_("consistency group %s already exists on backend") %
              cgname)
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.CgLimitReachedError as e:
+except errors.CgLimitReachedError:
     error = _("Reached Maximum number of consistency groups")
     LOG.error(error)
     raise self._get_exception()(error)
@@ -2176,37 +2176,37 @@ class XIVProxy(proxy.IBMStorageProxy):
     self._call_xiv_xcli(
         "cg_snapshots_create", cg=cgname,
         snap_group=groupname).as_list
-except errors.CgDoesNotExistError as e:
+except errors.CgDoesNotExistError:
     error = (_("Consistency group %s does not exist on backend") %
              cgname)
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.CgBadNameError as e:
+except errors.CgBadNameError:
     error = (_("Consistency group %s has an illegal name") % cgname)
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.SnapshotGroupDoesNotExistError as e:
+except errors.SnapshotGroupDoesNotExistError:
     error = (_("Snapshot group %s has an illegal name") % cgname)
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.PoolSnapshotLimitReachedError as e:
+except errors.PoolSnapshotLimitReachedError:
     error = _("Reached maximum snapshots allocation size")
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.CgEmptyError as e:
+except errors.CgEmptyError:
     error = (_("Consistency group %s is empty") % cgname)
     LOG.error(error)
     raise self._get_exception()(error)
 except (errors.MaxVolumesReachedError,
-        errors.DomainMaxVolumesReachedError) as e:
+        errors.DomainMaxVolumesReachedError):
     error = _("Reached Maximum number of volumes")
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.SnapshotGroupIsReservedError as e:
+except errors.SnapshotGroupIsReservedError:
     error = (_("Consistency group %s name is reserved") % cgname)
     LOG.error(error)
     raise self._get_exception()(error)
-except errors.SnapshotGroupAlreadyExistsError as e:
+except errors.SnapshotGroupAlreadyExistsError:
     error = (_("Snapshot group %s already exists") % groupname)
     LOG.error(error)
     raise self._get_exception()(error)
@@ -176,7 +176,7 @@ class InStorageMCSReplication(object):
     # Reverse the role of the primary and secondary volumes
     self.target_assistant.switch_relationship(rel_info['name'])
     return {'replication_status': fields.ReplicationStatus.FAILED_OVER}
-except Exception as e:
+except Exception:
     LOG.exception('Unable to fail-over the volume %(id)s to the '
                   'secondary back-end by switchrcrelationship '
                   'command.', {"id": vref.id})
@@ -417,7 +417,7 @@ class RSDClient(object):
         detail=(_("Volume %s already attached") % volume_url))

     node.attach_endpoint(volume.path)
-except sushy_exceptions.InvalidParameterValueError as e:
+except sushy_exceptions.InvalidParameterValueError:
     LOG.exception("Attach volume failed (not allowable)")
     raise RSDRetryableException(
         reason=(_("Not allowed to attach from "
@@ -395,7 +395,7 @@ class STXClient(object):
     LOG.debug("volume '{}' is already mapped to {} at lun {}".
               format(volume_name, iid, lun))
     return int(lun)
-except Exception as e:
+except Exception:
     LOG.exception("failed to look up mappings for volume '%s'",
                   volume_name)
     raise
@@ -223,7 +223,7 @@ class SCSTAdm(iscsi.ISCSITarget):
 # starts
 try:
     self.scst_execute('-write_config', '/etc/scst.conf')
-except putils.ProcessExecutionError as e:
+except putils.ProcessExecutionError:
     LOG.error("Failed to write in /etc/scst.conf.")
     raise exception.ISCSITargetHelperCommandFailed(
         error_message="Failed to write in /etc/scst.conf.")
@@ -38,7 +38,7 @@ gitdb2==2.0.3
 GitPython==2.1.8
 google-api-python-client==1.4.2
 greenlet==0.4.10
-hacking==1.1.0
+hacking==2.0.0
 httplib2==0.9.1
 idna==2.6
 imagesize==1.0.0
@@ -3,7 +3,7 @@
 # process, which may cause wedges in the gate later.

 # Install bounded pep8/pyflakes first, then let flake8 install
-hacking>=1.1.0,<1.2.0 # Apache-2.0
+hacking>=2.0.0 # Apache-2.0

 coverage!=4.4,>=4.0 # Apache-2.0
 ddt>=1.2.1 # MIT
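With the minimum raised in both the pinned constraints list and the test requirements, a quick local sanity check (a hypothetical snippet, not part of this change) is to confirm the environment resolves a new enough hacking before running the style job:

# Hypothetical helper: fail fast if the installed hacking predates the new minimum.
import pkg_resources

try:
    dist = pkg_resources.require("hacking>=2.0.0")[0]
    print("OK: %s %s" % (dist.project_name, dist.version))
except (pkg_resources.DistributionNotFound, pkg_resources.VersionConflict) as exc:
    raise SystemExit("hacking requirement not satisfied: %s" % exc)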