NSX|V: Fix host groups for DRS HA for AZ
For the fire cell anti-affinity to work as designed, there is a need to
use different groups & rules per host group, since those host groups can
be different for different availability zones.

Change-Id: I092f5c228489a3a0d73f060380f1a1a6c526fb00
parent fb3baeac73
commit cda47aa304
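The core of the change: DRS group and rule names used to be derived from an index alone ('neutron-group-1', 'neutron-rule-1', ...), so two availability zones whose host groups differ would collide on the same names. A minimal standalone sketch of the naming change, with hypothetical host group names (the real helpers are added on ClusterManager in the diff below):

    # Pre-fix: index-only names, identical across availability zones.
    def old_group_name(index):
        return 'neutron-group-%s' % index

    # Post-fix: the host group name is embedded, keeping names unique
    # per availability zone whenever the host groups differ.
    def new_group_name(index, host_group_names):
        return 'neutron-group-%s-%s' % (index, host_group_names[index - 1])

    az1 = ['az1-edge-hg1', 'az1-edge-hg2']   # hypothetical host groups
    az2 = ['az2-edge-hg1', 'az2-edge-hg2']   # hypothetical host groups

    print(old_group_name(1))        # neutron-group-1 (same for both AZs)
    print(new_group_name(1, az1))   # neutron-group-1-az1-edge-hg1
    print(new_group_name(1, az2))   # neutron-group-1-az2-edge-hg1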
@@ -747,7 +747,14 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def get_configured_vms(self, resource_id, n_host_groups=2):
+    def _group_name(self, index, host_group_names):
+        return 'neutron-group-%s-%s' % (index, host_group_names[index - 1])
+
+    def _rule_name(self, index, host_group_names):
+        return 'neutron-rule-%s-%s' % (index, host_group_names[index - 1])
+
+    def get_configured_vms(self, resource_id, host_group_names):
+        n_host_groups = len(host_group_names)
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
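Note the indexing convention the helpers assume: callers use 1-based entry ids (entry_id = index + 1 in the loops below), and the helpers look up host_group_names[index - 1]. A quick standalone check with hypothetical names:

    host_group_names = ['compute-hg-a', 'compute-hg-b']   # hypothetical

    def _group_name(index, host_group_names):  # standalone copy of the helper above
        return 'neutron-group-%s-%s' % (index, host_group_names[index - 1])

    assert _group_name(1, host_group_names) == 'neutron-group-1-compute-hg-a'
    assert _group_name(2, host_group_names) == 'neutron-group-2-compute-hg-b'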
@@ -765,7 +772,7 @@ class ClusterManager(VCManagerBase):
             if hasattr(cluster_config, 'group'):
                 groups = cluster_config.group
                 for group in groups:
-                    if 'neutron-group-%s' % entry_id == group.name:
+                    if self._group_name(entry_id, host_group_names) == group.name:
                         vm_group = group
                         break
             if vm_group and hasattr(vm_group, 'vm'):
@@ -804,25 +811,25 @@ class ClusterManager(VCManagerBase):
             if hasattr(cluster_config, 'group'):
                 groups = cluster_config.group
                 for group in groups:
-                    if 'neutron-group-%s' % index == group.name:
+                    if self._group_name(index, host_group_names) == group.name:
                         vmGroup = group
                         break
             # Create/update the VM group
             groupSpec = self._create_vm_group_spec(
                 client_factory,
-                'neutron-group-%s' % index,
+                self._group_name(index, host_group_names),
                 [vm], vmGroup)
             config_spec.groupSpec.append(groupSpec)
             config_rule = None
             # Create the config rule if it does not exist
             for rule in rules:
-                if 'neutron-rule-%s' % index == rule.name:
+                if self._rule_name(index, host_group_names) == rule.name:
                     config_rule = rule
                     break
             if config_rule is None and index <= num_host_groups:
                 ruleSpec = self._create_cluster_rules_spec(
-                    client_factory, 'neutron-rule-%s' % index,
-                    'neutron-group-%s' % index,
+                    client_factory, self._rule_name(index, host_group_names),
+                    self._group_name(index, host_group_names),
                     host_group_names[index - 1])
                 config_spec.rulesSpec.append(ruleSpec)
         self._reconfigure_cluster(session, cluster, config_spec)
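Each iteration above pins one edge appliance's VM group to one host group through a VM-host rule, so with two host groups the HA pair lands on disjoint host sets. A toy sketch of that mapping, with hypothetical VM and host group names:

    edge_vms = ['edge-1-vm-0', 'edge-1-vm-1']        # hypothetical HA pair
    host_group_names = ['edge-hg-1', 'edge-hg-2']    # hypothetical

    for index, vm in enumerate(edge_vms, start=1):
        hg = host_group_names[index - 1]
        print('%s -> neutron-group-%s-%s -> %s' % (vm, index, hg, hg))
    # edge-1-vm-0 -> neutron-group-1-edge-hg-1 -> edge-hg-1
    # edge-1-vm-1 -> neutron-group-2-edge-hg-2 -> edge-hg-2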
@@ -861,13 +868,13 @@ class ClusterManager(VCManagerBase):
             entry_id = index + 1
             vmGroup = None
             for group in groups:
-                if 'neutron-group-%s' % entry_id == group.name:
+                if self._group_name(entry_id, host_group_names) == group.name:
                     vmGroup = group
                     break
             if vmGroup is None:
                 groupSpec = self._create_vm_group_spec(
                     client_factory,
-                    'neutron-group-%s' % entry_id,
+                    self._group_name(entry_id, host_group_names),
                     [], vmGroup)
                 config_spec.groupSpec.append(groupSpec)
                 update_cluster = True
@@ -875,13 +882,14 @@ class ClusterManager(VCManagerBase):
             config_rule = None
             # Create the config rule if it does not exist
             for rule in rules:
-                if 'neutron-rule-%s' % entry_id == rule.name:
+                if self._rule_name(entry_id, host_group_names) == rule.name:
                     config_rule = rule
                     break
             if config_rule is None and index < num_host_groups:
                 ruleSpec = self._create_cluster_rules_spec(
-                    client_factory, 'neutron-rule-%s' % entry_id,
-                    'neutron-group-%s' % entry_id,
+                    client_factory, self._rule_name(entry_id,
+                                                    host_group_names),
+                    self._group_name(entry_id, host_group_names),
                     host_group_names[index - 1])
                 config_spec.rulesSpec.append(ruleSpec)
                 update_cluster = True
@@ -912,7 +920,8 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def cluster_host_group_cleanup(self, resource_id, n_host_groups=2):
+    def cluster_host_group_cleanup(self, resource_id, host_group_names):
+        n_host_groups = len(host_group_names)
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
@@ -936,12 +945,12 @@ class ClusterManager(VCManagerBase):
         for index in range(n_host_groups):
             entry_id = index + 1
             for group in groups:
-                if 'neutron-group-%s' % entry_id == group.name:
+                if self._group_name(entry_id, host_group_names) == group.name:
                     groupSpec.append(self._delete_vm_group_spec(
                         client_factory, group.name))
             # Delete the config rule if it exists
             for rule in rules:
-                if 'neutron-rule-%s' % entry_id == rule.name:
+                if self._rule_name(entry_id, host_group_names) == rule.name:
                     ruleSpec.append(self._delete_cluster_rules_spec(
                         client_factory, rule))
@@ -2525,7 +2525,7 @@ def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
     if validate:
         configured_vms = dvs.get_configured_vms(
             availability_zone.resource_pool,
-            len(availability_zone.edge_host_groups))
+            availability_zone.edge_host_groups)
         for vm in vms:
             if vm in configured_vms:
                 LOG.info('Edge %s already configured', edge_id)
@@ -2558,7 +2558,7 @@ def clean_host_groups(dvs, availability_zone):
                       availability_zone.name)
             dvs.cluster_host_group_cleanup(
                 availability_zone.resource_pool,
-                len(availability_zone.edge_host_groups))
+                availability_zone.edge_host_groups)
         except Exception as e:
             LOG.error('Unable to cleanup. Error: %s', e)
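Both call sites now pass the list of host group names rather than its length: a bare count is no longer enough, because the manager needs the actual names to rebuild the per-host-group group and rule names. For illustration, a hedged sketch of a caller (the edge_host_groups values are hypothetical):

    # availability_zone.edge_host_groups holds the AZ's host group names,
    # e.g. ['edge-hg-1', 'edge-hg-2'] (hypothetical values).
    dvs.cluster_host_group_cleanup(
        availability_zone.resource_pool,
        availability_zone.edge_host_groups)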
@@ -508,8 +508,9 @@ def _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge):
                 cluster_mng, az,
                 validate=True)
         else:
-            LOG.error("%s does not have HA enabled or no host "
-                      "groups defined. Skipping %s.", az_name, edge_id)
+            LOG.error("Availability zone:%s does not have HA enabled or "
+                      "no host groups defined. Skipping %s.",
+                      az_name, edge_id)
     except Exception as e:
         LOG.error("Failed to update edge %(id)s - %(e)s",
                   {'id': edge['id'],