Merge "vmware: Use oslo.vmware's get_moref_value()"

Zuul 2021-05-21 17:28:07 +00:00 committed by Gerrit Code Review
commit 6768312117
3 changed files with 36 additions and 22 deletions
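For context, a minimal sketch of the pattern this merge applies throughout the driver: the code previously read a managed object reference's ID with a direct `.value` attribute access, which assumes the moref is always the full SOAP object, and now routes every such access through oslo.vmware's `vim_util.get_moref_value()`. The helper below is an illustrative stand-in that assumes the oslo.vmware semantics (accepting either a moref object or its string form); it is not the library's implementation, and the `ManagedObjectReference` namedtuple is only a test double.

# Illustrative stand-in for oslo_vmware.vim_util.get_moref_value(); the real
# helper lives in oslo.vmware and its exact behavior may differ.
from collections import namedtuple

ManagedObjectReference = namedtuple('ManagedObjectReference',
                                    ['_type', 'value'])


def get_moref_value(moref):
    """Return the bare ID (e.g. 'datastore-15') of a managed object ref.

    Assumes the moref is either an object exposing a ``value`` attribute
    (the SOAP/suds representation) or a string such as
    'Datastore:datastore-15' or 'datastore-15'.
    """
    if hasattr(moref, 'value'):
        return moref.value
    return str(moref).split(':')[-1]


# Before this change, the driver did the equivalent of:
ds_ref = ManagedObjectReference('Datastore', 'datastore-15')
assert ds_ref.value == 'datastore-15'

# After this change, the same lookup goes through the helper, which also
# copes with string-typed morefs:
assert get_moref_value(ds_ref) == 'datastore-15'
assert get_moref_value('Datastore:datastore-15') == 'datastore-15'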

View File

@@ -94,7 +94,8 @@ class DatastoreSelector(object):
         hubs = pbm.convert_datastores_to_hubs(cf, datastores)
         hubs = pbm.filter_hubs_by_profile(self._session, hubs, profile_id)
         hub_ids = [hub.hubId for hub in hubs]
-        return {k: v for k, v in datastores.items() if k.value in hub_ids}
+        return {k: v for k, v in datastores.items()
+                if vim_util.get_moref_value(k) in hub_ids}
 
     def _filter_datastores(self,
                            datastores,
@@ -118,11 +119,12 @@ class DatastoreSelector(object):
                 summary)
 
         valid_host_refs = valid_host_refs or []
-        valid_hosts = [host_ref.value for host_ref in valid_host_refs]
+        valid_hosts = [vim_util.get_moref_value(host_ref)
+                       for host_ref in valid_host_refs]
 
         def _is_ds_accessible_to_valid_host(host_mounts):
             for host_mount in host_mounts:
-                if host_mount.key.value in valid_hosts:
+                if vim_util.get_moref_value(host_mount.key) in valid_hosts:
                     return True
 
         def _is_ds_valid(ds_ref, ds_props):
@@ -135,7 +137,7 @@ class DatastoreSelector(object):
                 return False
 
             if (hard_anti_affinity_ds and
-                    ds_ref.value in hard_anti_affinity_ds):
+                    vim_util.get_moref_value(ds_ref) in hard_anti_affinity_ds):
                 return False
 
             if summary.capacity == 0 or summary.freeSpace < size_bytes:
@@ -220,10 +222,10 @@ class DatastoreSelector(object):
         host_prop_map = {}
 
         def _is_host_usable(host_ref):
-            props = host_prop_map.get(host_ref.value)
+            props = host_prop_map.get(vim_util.get_moref_value(host_ref))
             if props is None:
                 props = self._get_host_properties(host_ref)
-                host_prop_map[host_ref.value] = props
+                host_prop_map[vim_util.get_moref_value(host_ref)] = props
 
             runtime = props.get('runtime')
             parent = props.get('parent')
@@ -234,12 +236,14 @@ class DatastoreSelector(object):
                 return False
 
         valid_host_refs = valid_host_refs or []
-        valid_hosts = [host_ref.value for host_ref in valid_host_refs]
+        valid_hosts = [vim_util.get_moref_value(host_ref)
+                       for host_ref in valid_host_refs]
 
         def _select_host(host_mounts):
             random.shuffle(host_mounts)
             for host_mount in host_mounts:
-                if valid_hosts and host_mount.key.value not in valid_hosts:
+                host_mount_key_value = vim_util.get_moref_value(host_mount.key)
+                if valid_hosts and host_mount_key_value not in valid_hosts:
                     continue
                 if (self._vops._is_usable(host_mount.mountInfo) and
                         _is_host_usable(host_mount.key)):
@@ -249,8 +253,9 @@ class DatastoreSelector(object):
         for ds_props in sorted_ds_props:
             host_ref = _select_host(ds_props['host'])
             if host_ref:
+                host_ref_value = vim_util.get_moref_value(host_ref)
                 rp = self._get_resource_pool(
-                    host_prop_map[host_ref.value]['parent'])
+                    host_prop_map[host_ref_value]['parent'])
                 return (host_ref, rp, ds_props['summary'])
 
     def select_datastore(self, req, hosts=None):

View File

@@ -474,7 +474,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
             cluster_access_to_ds = not available_hosts
             for host_mount in ds_host_mounts.DatastoreHostMount:
                 for avlbl_host in available_hosts:
-                    if avlbl_host.value == host_mount.key.value:
+                    avlbl_host_value = vim_util.get_moref_value(avlbl_host)
+                    host_mount_key_value = vim_util.get_moref_value(host_mount.key)
+                    if avlbl_host_value == host_mount_key_value:
                         cluster_access_to_ds = True
             return (ds_summary.accessible
                     and not self.volumeops._in_maintenance(ds_summary)
@@ -688,10 +690,10 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         return best_candidate
 
     def _get_dc(self, resource_pool):
-        dc = self._dc_cache.get(resource_pool.value)
+        dc = self._dc_cache.get(vim_util.get_moref_value(resource_pool))
         if not dc:
             dc = self.volumeops.get_dc(resource_pool)
-            self._dc_cache[resource_pool.value] = dc
+            self._dc_cache[vim_util.get_moref_value(resource_pool)] = dc
         return dc
 
     def _select_ds_for_volume(self, volume, host=None, create_params=None):
@@ -719,7 +721,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
     def _get_connection_info(self, volume, backing, connector):
         connection_info = {'driver_volume_type': 'vmdk'}
         connection_info['data'] = {
-            'volume': backing.value,
+            'volume': vim_util.get_moref_value(backing),
             'volume_id': volume.id,
             'name': volume.name,
             'profile_id': self._get_storage_profile_id(volume)
@@ -733,10 +735,12 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
             connection_info['data']['vmdk_path'] = vmdk_path
 
             datastore = self.volumeops.get_datastore(backing)
-            connection_info['data']['datastore'] = datastore.value
+            connection_info['data']['datastore'] = \
+                vim_util.get_moref_value(datastore)
 
             datacenter = self.volumeops.get_dc(backing)
-            connection_info['data']['datacenter'] = datacenter.value
+            connection_info['data']['datacenter'] = \
+                vim_util.get_moref_value(datacenter)
 
         config = self.configuration
         vmdk_connector_config = {
@@ -1144,7 +1148,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
          folder_path) = self._get_temp_image_folder(image_size_in_bytes)
 
         # pylint: disable=E1101
-        if ds_name == dest_ds_name and dc_ref.value == dest_dc_ref.value:
+        dc_ref_value = vim_util.get_moref_value(dc_ref)
+        dest_dc_ref_value = vim_util.get_moref_value(dest_dc_ref)
+        if ds_name == dest_ds_name and dc_ref_value == dest_dc_ref_value:
             # Temporary image folder and destination path are on the same
             # datastore. We can directly download the image to the destination
             # folder to save one virtual disk copy.
@@ -1661,7 +1667,7 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         if need_relocate:
             LOG.debug("Backing: %s should be relocated.", backing)
             req[hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = (
-                [datastore.value])
+                [vim_util.get_moref_value(datastore)])
 
         if new_profile:
             req[hub.DatastoreSelector.PROFILE_NAME] = new_profile
@@ -1680,7 +1686,9 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
         dc = self._get_dc(rp)
         folder = self._get_volume_group_folder(dc, volume['project_id'])
         new_datastore = summary.datastore
-        if datastore.value != new_datastore.value:
+        datastore_value = vim_util.get_moref_value(datastore)
+        new_datastore_value = vim_util.get_moref_value(new_datastore)
+        if datastore_value != new_datastore_value:
             # Datastore changed; relocate the backing.
             LOG.debug("Backing: %s needs to be relocated for retype.",
                       backing)

View File

@@ -455,7 +455,8 @@ class VMwareVolumeOps(object):
         connected_hosts = []
         for host_mount in host_mounts.DatastoreHostMount:
             if self._is_usable(host_mount.mountInfo):
-                connected_hosts.append(host_mount.key.value)
+                host_mount_key_value = vim_util.get_moref_value(host_mount.key)
+                connected_hosts.append(host_mount_key_value)
 
         return connected_hosts
@@ -466,7 +467,7 @@ class VMwareVolumeOps(object):
         :return: True if the datastore is accessible
         """
         hosts = self.get_connected_hosts(datastore)
-        return host.value in hosts
+        return vim_util.get_moref_value(host) in hosts
 
     # TODO(vbala): move this method to datastore module
     def _in_maintenance(self, summary):
@@ -574,7 +575,7 @@ class VMwareVolumeOps(object):
                   "of datacenter: %(datacenter)s.",
                   {'path_comp': path_comp,
                    'datacenter': datacenter})
-        path = "/" + datacenter.value
+        path = "/" + vim_util.get_moref_value(datacenter)
         parent = self._folder_cache.get(path)
         if not parent:
             parent = self.get_vmfolder(datacenter)
@@ -1992,7 +1993,7 @@ class FcdLocation(object):
 
     @classmethod
     def create(cls, fcd_id_obj, ds_ref):
-        return cls(fcd_id_obj.id, ds_ref.value)
+        return cls(fcd_id_obj.id, vim_util.get_moref_value(ds_ref))
 
     def provider_location(self):
         return "%s@%s" % (self.fcd_id, self.ds_ref_val)