
This change adds the capability to rename the subcloud after bootstrap or during subcloud rehome operation. Added a field in the database to separate the region name from the subcloud name. The region name determines the subcloud reference in the OpenStack core, through which it is possible to access the endpoints of a given subcloud. Since the region name cannot be changed, this commit adds the ability to maintain a unique region name based on the UUID format, and allows subcloud renaming when necessary without any endpoint impact. The region is randomly generated to configure the subcloud when it is created and only applies to future subclouds. For those systems that have existing subclouds, the region will be the same as on day 0, that is, the region will keep the same name as the subcloud, but subclouds can be renamed. This topic involves changes to dcmanager, dcmanager-client and GUI. To ensure the region name reference needed by the cert-monitor, a mechanism to determine if the request is coming from the cert-monitor has been created. Usage for subcloud rename: dcmanager subcloud update <subcloud-name> --name <new-name> Usage for subcloud rehoming: dcmanager subcloud add --name <subcloud-name> --migrate ... Note: Upgrade test from StarlingX 8 -> 9 for this commit is deferred until upgrade functionality in master is restored. 
Any issue found during upgrade test will be addressed in a separate commit Test Plan: PASS: Run dcmanager subcloud passing subcommands: - add/delete/migrate/list/show/show --detail - errors/manage/unmanage/reinstall/reconfig - update/deploy PASS: Run dcmanager subcloud add supplying --name parameter and validate the operation is not allowed PASS: Run dcmanager supplying subcommands: - kube/patch/prestage strategies PASS: Run dcmanager to apply patch and remove it PASS: Run dcmanager subcloud-backup: - create/delete/restore/show/upload PASS: Run subcloud-group: - add/delete/list/list-subclouds/show/update PASS: Run dcmanager subcloud strategy for: - patch/kubernetes/firmware PASS: Run dcmanager subcloud update command passing --name parameter supplying the following values: - current subcloud name (not changed) - different existing subcloud name PASS: Run dcmanager to migrate a subcloud passing --name parameter supplying a new subcloud name PASS: Run dcmanager to migrate a subcloud without --name parameter PASS: Run dcmanager to migrate a subcloud passing --name parameter supplying a new subcloud name and different subcloud name in bootstrap file PASS: Test dcmanager API response using cURL command line to validate new region name field PASS: Run full DC sanity and regression Story: 2010788 Task: 48217 Signed-off-by: Cristian Mondo <cristian.mondo@windriver.com> Change-Id: Id04f42504b8e325d9ec3880c240fe4a06e3a20b7
166 lines
6.6 KiB
Python
166 lines
6.6 KiB
Python
# Copyright 2017 Ericsson AB.
|
|
# Copyright (c) 2017-2023 Wind River Systems, Inc.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
# implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
#
|
|
|
|
from keystoneauth1 import exceptions as keystone_exceptions
|
|
from oslo_log import log as logging
|
|
|
|
from dccommon import consts as dccommon_consts
|
|
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
|
|
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
|
|
|
|
|
|
# Module-level logger for this audit module (oslo_log wraps stdlib logging).
LOG = logging.getLogger(__name__)
|
|
|
|
|
|
class KubernetesAuditData(object):
    """Value object describing one kubernetes version entry in an audit.

    Instances round-trip through plain dicts (``to_dict``/``from_dict``)
    so they can be passed over RPC.
    """

    def __init__(self, target, version, state):
        # target: whether this version is the upgrade target
        # version: kubernetes version string (e.g. "v1.18.1")
        # state: lifecycle state (e.g. "active", "partial", "available")
        self.target = target
        self.version = version
        self.state = state

    def to_dict(self):
        """Serialize this entry into a plain dict."""
        return dict(
            target=self.target,
            version=self.version,
            state=self.state,
        )

    @classmethod
    def from_dict(cls, values):
        """Rebuild an instance from ``to_dict`` output; ``None`` passes through."""
        return None if values is None else cls(**values)
|
|
|
|
|
|
class KubernetesAudit(object):
    """Manages tasks related to kubernetes audits."""

    def __init__(self, context, dcmanager_state_rpc_client):
        """Initialize the kubernetes audit.

        :param context: request context carried into every state RPC call
        :param dcmanager_state_rpc_client: RPC client used to push subcloud
            endpoint sync-status updates to the dcmanager state service
        """
        LOG.debug('KubernetesAudit initialization...')
        self.context = context
        self.state_rpc_client = dcmanager_state_rpc_client
        # Audit counter kept for parity with the other audit classes;
        # not incremented within this class itself.
        self.audit_count = 0

    def _update_subcloud_sync_status(self, sc_name, sc_region, sc_endpoint_type,
                                     sc_status):
        """Report a subcloud endpoint sync status via the state RPC client."""
        self.state_rpc_client.update_subcloud_endpoint_status(
            self.context,
            subcloud_name=sc_name,
            subcloud_region=sc_region,
            endpoint_type=sc_endpoint_type,
            sync_status=sc_status)

    def get_regionone_audit_data(self):
        """Query RegionOne to determine kubernetes information

        :return: A list of kubernetes versions on the system controller,
            or None if the RegionOne clients could not be initialized.
        """
        try:
            m_os_ks_client = OpenStackDriver(
                region_name=dccommon_consts.DEFAULT_REGION_NAME,
                region_clients=None).keystone_client
            endpoint = m_os_ks_client.endpoint_cache.get_endpoint('sysinv')
            sysinv_client = SysinvClient(
                dccommon_consts.DEFAULT_REGION_NAME, m_os_ks_client.session,
                endpoint=endpoint)
        except Exception:
            # Best effort: failure to reach RegionOne skips the audit rather
            # than propagating into the audit worker.
            LOG.exception('Failed init OS Client, skip kubernetes audit.')
            return None

        region_one_data = [
            KubernetesAuditData(result.target, result.version, result.state)
            for result in sysinv_client.get_kube_versions()]
        # Lazy %-style logging args: no formatting cost when debug is off.
        LOG.debug("RegionOne kubernetes versions: %s", region_one_data)
        return region_one_data

    def subcloud_kubernetes_audit(self, subcloud_name, subcloud_region, audit_data):
        """Audit one subcloud's kubernetes version against RegionOne data.

        Compares the subcloud's active target kubernetes version with the
        RegionOne active target version and reports in-sync/out-of-sync to
        the state service. Any in-progress kubernetes upgrade in the
        subcloud immediately marks it out-of-sync.

        :param subcloud_name: name of the subcloud (used for reporting/logs)
        :param subcloud_region: region name used to reach the subcloud
        :param audit_data: list of dicts produced from
            :func:`get_regionone_audit_data` (dicts after RPC transport)
        """
        LOG.info('Triggered kubernetes audit for: %s', subcloud_name)
        if not audit_data:
            # No RegionOne data to compare against: report in-sync and exit.
            self._update_subcloud_sync_status(
                subcloud_name,
                subcloud_region, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
                dccommon_consts.SYNC_STATUS_IN_SYNC)
            LOG.debug('No region one audit data, exiting kubernetes audit')
            return
        try:
            sc_os_client = OpenStackDriver(region_name=subcloud_region,
                                           region_clients=None).keystone_client
            endpoint = sc_os_client.endpoint_cache.get_endpoint('sysinv')
            sysinv_client = SysinvClient(subcloud_region, sc_os_client.session,
                                         endpoint=endpoint)
        except (keystone_exceptions.EndpointNotFound,
                keystone_exceptions.ConnectFailure,
                keystone_exceptions.ConnectTimeout,
                IndexError):
            # Subcloud unreachable right now: skip this audit cycle without
            # changing the recorded sync status.
            LOG.exception("Endpoint for online subcloud:(%s) not found, skip "
                          "kubernetes audit.", subcloud_name)
            return

        # Retrieve kubernetes info for this subcloud
        # state - active, partial, available
        # active - true / false
        # version - any value ex: v1.18.1

        # Find the target=true state=active version on system controller
        # The audit_data for region one is a dictionary
        region_one_version = None
        for result in audit_data:
            # audit_data will be a dict from passing through RPC, so objectify
            result = KubernetesAuditData.from_dict(result)
            if result.target and result.state == 'active':
                region_one_version = result.version
                break
        if region_one_version is None:
            LOG.info("No active target version found in region one audit data")
            return

        out_of_sync = True

        # if there is a kubernetes upgrade operation in the subcloud,
        # the subcloud can immediately be flagged as out of sync
        subcloud_kube_upgrades = sysinv_client.get_kube_upgrades()
        if len(subcloud_kube_upgrades) > 0:
            # We are out of sync
            LOG.debug('Existing Kubernetes upgrade exists for:(%s)',
                      subcloud_name)
        else:
            # We will consider it out of sync even for 'partial' state
            # The audit data for subcloud_results is an object not a dictionary
            subcloud_results = sysinv_client.get_kube_versions()
            for result in subcloud_results:
                if result.target and result.state == 'active':
                    subcloud_version = result.version
                    if subcloud_version == region_one_version:
                        out_of_sync = False
                        break

        if out_of_sync:
            self._update_subcloud_sync_status(
                subcloud_name,
                subcloud_region, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
                dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
        else:
            self._update_subcloud_sync_status(
                subcloud_name,
                subcloud_region, dccommon_consts.ENDPOINT_TYPE_KUBERNETES,
                dccommon_consts.SYNC_STATUS_IN_SYNC)
        LOG.info('Kubernetes audit completed for: %s', subcloud_name)