
This change adds the capability to rename a subcloud after bootstrap or
during a subcloud rehome operation. A field was added to the database to
separate the region name from the subcloud name. The region name
identifies the subcloud in the OpenStack core and is the reference
through which the endpoints of a given subcloud are accessed. Since the
region name cannot be changed, this commit maintains a unique region
name in UUID format and allows the subcloud to be renamed when necessary
without any endpoint impact.

The region name is randomly generated when the subcloud is created and
only applies to future subclouds. For systems that already have
subclouds, the region keeps its day-0 value, that is, the region keeps
the same name as the subcloud, but those subclouds can still be renamed.
This topic involves changes to dcmanager, dcmanager-client and the GUI.
To preserve the region name reference needed by the cert-monitor, a
mechanism was created to determine whether a request comes from the
cert-monitor.

Usage for subcloud rename:
  dcmanager subcloud update <subcloud-name> --name <new-name>

Usage for subcloud rehoming:
  dcmanager subcloud add --name <subcloud-name> --migrate ...

Note: The upgrade test from StarlingX 8 -> 9 for this commit is deferred
until upgrade functionality in master is restored. Any issue found
during the upgrade test will be addressed in a separate commit.

Test Plan:
PASS: Run dcmanager subcloud passing subcommands:
      - add/delete/migrate/list/show/show --detail
      - errors/manage/unmanage/reinstall/reconfig
      - update/deploy
PASS: Run dcmanager subcloud add supplying the --name parameter and
      validate that the operation is not allowed
PASS: Run dcmanager supplying subcommands:
      - kube/patch/prestage strategies
PASS: Run dcmanager to apply a patch and remove it
PASS: Run dcmanager subcloud-backup:
      - create/delete/restore/show/upload
PASS: Run subcloud-group:
      - add/delete/list/list-subclouds/show/update
PASS: Run dcmanager subcloud strategy for:
      - patch/kubernetes/firmware
PASS: Run dcmanager subcloud update passing the --name parameter with
      the following values:
      - current subcloud name (not changed)
      - different existing subcloud name
PASS: Run dcmanager to migrate a subcloud passing the --name parameter
      with a new subcloud name
PASS: Run dcmanager to migrate a subcloud without the --name parameter
PASS: Run dcmanager to migrate a subcloud passing the --name parameter
      with a new subcloud name and a different subcloud name in the
      bootstrap file
PASS: Test the dcmanager API response using the cURL command line to
      validate the new region name field
PASS: Run full DC sanity and regression

Story: 2010788
Task: 48217

Signed-off-by: Cristian Mondo <cristian.mondo@windriver.com>
Change-Id: Id04f42504b8e325d9ec3880c240fe4a06e3a20b7
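The naming model described above can be illustrated with a minimal,
self-contained Python sketch. This is not the dcmanager implementation:
the stand-in dictionary and the helper names generate_region_name,
add_subcloud, rename_subcloud and find_subcloud are assumptions used
only for illustration, while the real controller below relies on
psd_common.subcloud_region_create and
db_api.subcloud_get_by_name_or_region_name.

import uuid

# Stand-in for the subcloud table: subcloud name -> immutable region name.
_SUBCLOUDS = {}

def generate_region_name():
    # New subclouds get a randomly generated, UUID-format region name
    # (hex form assumed here); pre-existing subclouds keep their day-0
    # region, which equals the original subcloud name.
    return uuid.uuid4().hex

def add_subcloud(name):
    _SUBCLOUDS[name] = generate_region_name()
    return _SUBCLOUDS[name]

def rename_subcloud(old_name, new_name):
    # Renaming moves only the subcloud name; the region name, and
    # therefore the endpoints referenced through it, stays the same.
    _SUBCLOUDS[new_name] = _SUBCLOUDS.pop(old_name)

def find_subcloud(reference):
    # Look up by subcloud name first, then fall back to the region name;
    # this mirrors how cert-monitor requests (keyed by region) keep working.
    if reference in _SUBCLOUDS:
        return reference, _SUBCLOUDS[reference]
    for name, region in _SUBCLOUDS.items():
        if region == reference:
            return name, region
    raise KeyError(reference)

For example, after add_subcloud('subcloud1') followed by
rename_subcloud('subcloud1', 'subcloud2'), find_subcloud returns the same
region whether it is given 'subcloud2' or the original region string.
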
# Copyright (c) 2017 Ericsson AB.
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
#


from requests_toolbelt.multipart import decoder

import base64
import json
import keyring
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_messaging import RemoteError
import re
import yaml

import pecan
from pecan import expose
from pecan import request

from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import exceptions as dccommon_exceptions

from keystoneauth1 import exceptions as keystone_exceptions

import tsconfig.tsconfig as tsc

from dcmanager.api.controllers import restcomm
from dcmanager.api.policies import subclouds as subclouds_policy
from dcmanager.api import policy
from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.common.i18n import _
from dcmanager.common import phased_subcloud_deploy as psd_common
from dcmanager.common import prestage
from dcmanager.common import utils
from dcmanager.db import api as db_api

from dcmanager.rpc import client as rpc_client
from fm_api.constants import FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

LOCK_NAME = 'SubcloudsController'

SUBCLOUD_ADD_GET_FILE_CONTENTS = [
    consts.BOOTSTRAP_VALUES,
    consts.INSTALL_VALUES,
]

SUBCLOUD_REDEPLOY_GET_FILE_CONTENTS = [
    consts.INSTALL_VALUES,
    consts.BOOTSTRAP_VALUES,
    consts.DEPLOY_CONFIG
]

SUBCLOUD_MANDATORY_NETWORK_PARAMS = [
    'management_subnet', 'management_gateway_ip',
    'management_start_ip', 'management_end_ip'
]


def _get_multipart_field_name(part):
    content = part.headers[b"Content-Disposition"].decode("utf8")
    regex = 'name="([^"]*)"'
    return re.search(regex, content).group(1)


class SubcloudsController(object):
    VERSION_ALIASES = {
        'Newton': '1.0',
    }

    def __init__(self):
        super(SubcloudsController, self).__init__()
        self.dcmanager_rpc_client = rpc_client.ManagerClient()
        self.dcmanager_state_rpc_client = rpc_client.SubcloudStateClient()

    # to do the version compatibility for future purpose
    def _determine_version_cap(self, target):
        version_cap = 1.0
        return version_cap

    @expose(generic=True, template='json')
    def index(self):
        # Route the request to specific methods with parameters
        pass

    @staticmethod
    def _get_patch_data(request):
        payload = dict()
        content_type = request.headers.get("Content-Type")
        multipart_data = decoder.MultipartDecoder(request.body, content_type)

        for part in multipart_data.parts:
            field_name = _get_multipart_field_name(part)
            field_content = part.text

            # only the install_values field is yaml, force should be bool
            if field_name in [consts.INSTALL_VALUES, 'force']:
                field_content = yaml.safe_load(field_content)

            payload[field_name] = field_content

        return payload

    @staticmethod
    def _get_prestage_payload(request):
        fields = ['sysadmin_password', 'force', consts.PRESTAGE_REQUEST_RELEASE]
        payload = {
            'force': False
        }
        try:
            body = json.loads(request.body)
        except Exception:
            pecan.abort(400, _('Request body is malformed.'))

        for field in fields:
            val = body.get(field)
            if val is None:
                if field == 'sysadmin_password':
                    pecan.abort(400, _("%s is required." % field))
            else:
                if field == 'sysadmin_password':
                    try:
                        base64.b64decode(val).decode('utf-8')
                        payload['sysadmin_password'] = val
                    except Exception:
                        pecan.abort(
                            400,
                            _('Failed to decode subcloud sysadmin_password, '
                              'verify the password is base64 encoded'))
                elif field == 'force':
                    if val.lower() in ('true', 'false', 't', 'f'):
                        payload['force'] = val.lower() in ('true', 't')
                    else:
                        pecan.abort(
                            400, _('Invalid value for force option: %s' % val))
                elif field == consts.PRESTAGE_REQUEST_RELEASE:
                    payload[consts.PRESTAGE_REQUEST_RELEASE] = val
        return payload

    @staticmethod
    def _get_updatestatus_payload(request):
        """retrieve payload of a patch request for update_status

        :param request: request from the http client
        :return: dict object submitted from the http client
        """

        payload = dict()
        payload.update(json.loads(request.body))
        return payload

    # TODO(nicodemos): Check if subcloud is online and network already exists in
    # the subcloud when the lock/unlock is not required for network reconfiguration
    def _validate_network_reconfiguration(self, payload, subcloud):
        if payload.get('management-state'):
            pecan.abort(422, _("Management state and network reconfiguration must "
                               "be updated separately"))
        if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED:
            pecan.abort(422, _("A subcloud must be unmanaged to perform network "
                               "reconfiguration"))
        if not payload.get('bootstrap_address'):
            pecan.abort(422, _("The bootstrap_address parameter is required for "
                               "network reconfiguration"))
        # Check if all parameters exist
        if not all(payload.get(value) is not None for value in (
                SUBCLOUD_MANDATORY_NETWORK_PARAMS)):
            mandatory_params = ', '.join('--{}'.format(param.replace(
                '_', '-')) for param in SUBCLOUD_MANDATORY_NETWORK_PARAMS)
            abort_msg = (
                "The following parameters are necessary for "
                "subcloud network reconfiguration: {}".format(mandatory_params)
            )
            pecan.abort(422, _(abort_msg))

        # Check if any network values are already in use
        for param in SUBCLOUD_MANDATORY_NETWORK_PARAMS:
            if payload.get(param) == getattr(subcloud, param):
                pecan.abort(422, _("%s already in use by the subcloud.") % param)

        # Check password and decode it
        sysadmin_password = payload.get('sysadmin_password')
        if not sysadmin_password:
            pecan.abort(400, _('subcloud sysadmin_password required'))
        try:
            payload['sysadmin_password'] = utils.decode_and_normalize_passwd(
                sysadmin_password)
        except Exception:
            msg = _('Failed to decode subcloud sysadmin_password, '
                    'verify the password is base64 encoded')
            LOG.exception(msg)
            pecan.abort(400, msg)

    def _get_subcloud_users(self):
        """Get the subcloud users and passwords from keyring"""
        DEFAULT_SERVICE_PROJECT_NAME = 'services'
        # First entry is openstack user name, second entry is the user stored
        # in keyring. Not sure why heat_admin uses a different keystone name.
        SUBCLOUD_USERS = [
            ('sysinv', 'sysinv'),
            ('patching', 'patching'),
            ('vim', 'vim'),
            ('mtce', 'mtce'),
            ('fm', 'fm'),
            ('barbican', 'barbican'),
            ('smapi', 'smapi'),
            ('dcdbsync', 'dcdbsync')
        ]

        user_list = list()
        for user in SUBCLOUD_USERS:
            password = keyring.get_password(user[1],
                                            DEFAULT_SERVICE_PROJECT_NAME)
            if password:
                user_dict = dict()
                user_dict['name'] = user[0]
                user_dict['password'] = password
                user_list.append(user_dict)
            else:
                LOG.error("User %s not found in keyring as %s" % (user[0],
                                                                  user[1]))
                pecan.abort(500, _('System configuration error'))

        return user_list

    # TODO(gsilvatr): refactor to use implementation from common/utils and test
    def _get_oam_addresses(self, context, subcloud_name, sc_ks_client):
        """Get the subcloud's OAM addresses"""

        # First need to retrieve the Subcloud's Keystone session
        try:
            endpoint = sc_ks_client.endpoint_cache.get_endpoint('sysinv')
            sysinv_client = SysinvClient(subcloud_name,
                                         sc_ks_client.session,
                                         endpoint=endpoint)
            return sysinv_client.get_oam_addresses()
        except (keystone_exceptions.EndpointNotFound, IndexError) as e:
            message = ("Identity endpoint for subcloud: %s not found. %s" %
                       (subcloud_name, e))
            LOG.error(message)
        except dccommon_exceptions.OAMAddressesNotFound:
            message = ("OAM addresses for subcloud: %s not found." %
                       (subcloud_name))
            LOG.error(message)
        return None

    def _get_deploy_config_sync_status(self, context, subcloud_name, keystone_client):
        """Get the deploy configuration in-sync status of the subcloud"""
        detected_alarms = None
        try:
            fm_client = FmClient(subcloud_name, keystone_client.session)
            detected_alarms = fm_client.get_alarms_by_id(
                FM_ALARM_ID_UNSYNCHRONIZED_RESOURCE)
        except Exception as ex:
            LOG.error(str(ex))
            return None

        out_of_date = False
        if detected_alarms:
            # Check if any alarm.entity_instance_id contains any of the values
            # in MONITORED_ALARM_ENTITIES.
            # We want to scope 260.002 alarms to the host entity only.
            out_of_date = any(
                any(entity_id in alarm.entity_instance_id
                    for entity_id in dccommon_consts.MONITORED_ALARM_ENTITIES)
                for alarm in detected_alarms
            )
        sync_status = dccommon_consts.DEPLOY_CONFIG_OUT_OF_DATE if out_of_date \
            else dccommon_consts.DEPLOY_CONFIG_UP_TO_DATE
        return sync_status

    def _validate_migrate(self, payload, subcloud):
        # Verify rehome data
        if not subcloud.rehome_data:
            LOG.exception("Unable to migrate subcloud %s, "
                          "required rehoming data is missing" % subcloud.name)
            pecan.abort(500, _("Unable to migrate subcloud %s, "
                               "required rehoming data is missing" % subcloud.name))
        rehome_data = json.loads(subcloud.rehome_data)
        if 'saved_payload' not in rehome_data:
            LOG.exception("Unable to migrate subcloud %s, "
                          "saved_payload is missing in rehoming data" % subcloud.name)
            pecan.abort(500, _("Unable to migrate subcloud %s, "
                               "saved_payload is missing in rehoming data" % subcloud.name))
        saved_payload = rehome_data['saved_payload']
        # Validate saved_payload
        if len(saved_payload) == 0:
            LOG.exception("Unable to migrate subcloud %s, "
                          "saved_payload is empty" % subcloud.name)
            pecan.abort(500, _("Unable to migrate subcloud %s, "
                               "saved_payload is empty" % subcloud.name))
        if 'bootstrap-address' not in saved_payload:
            LOG.exception("Unable to migrate subcloud %s, "
                          "bootstrap-address is missing in rehoming data" % subcloud.name)
            pecan.abort(500, _("Unable to migrate subcloud %s, "
                               "bootstrap-address is missing in rehoming data" % subcloud.name))
        # Validate sysadmin_password is in payload
        if 'sysadmin_password' not in payload:
            LOG.exception("Unable to migrate subcloud %s, "
                          "need sysadmin_password" % subcloud.name)
            pecan.abort(500, _("Unable to migrate subcloud %s, "
                               "need sysadmin_password" % subcloud.name))

    @staticmethod
    def _append_static_err_content(subcloud):
        err_dict = consts.ERR_MSG_DICT
        status = subcloud.get('deploy-status')
        err_msg = [subcloud.get('error-description')]
        err_code = \
            re.search(r"err_code\s*=\s*(\S*)", err_msg[0], re.IGNORECASE)
        if err_code and err_code.group(1) in err_dict:
            err_msg.append(err_dict.get(err_code.group(1)))
        if status == consts.DEPLOY_STATE_CONFIG_FAILED:
            err_msg.append(err_dict.get(consts.CONFIG_ERROR_MSG))
        elif status == consts.DEPLOY_STATE_BOOTSTRAP_FAILED:
            err_msg.append(err_dict.get(consts.BOOTSTRAP_ERROR_MSG))
        subcloud['error-description'] = '\n'.join(err_msg)
        return None

    @index.when(method='GET', template='json')
    def get(self, subcloud_ref=None, detail=None):
        """Get details about subcloud.

        :param subcloud_ref: ID or name of subcloud
        """
        policy.authorize(subclouds_policy.POLICY_ROOT % "get", {},
                         restcomm.extract_credentials_for_policy())
        context = restcomm.extract_context_from_environ()

        if subcloud_ref is None:
            # List of subclouds requested
            subclouds = db_api.subcloud_get_all_with_status(context)
            result = dict()
            result['subclouds'] = []
            first_time = True
            subcloud_list = []
            subcloud_status_list = []

            # We get back a subcloud, subcloud_status pair for every
            # subcloud_status entry corresponding to a subcloud. (Subcloud
            # info repeats)
            # Aggregate all the sync status for each of the
            # endpoints per subcloud into an overall sync status
            for subcloud, subcloud_status in subclouds:
                subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
                subcloud_status_dict = db_api.subcloud_status_db_model_to_dict(
                    subcloud_status)
                subcloud_dict.update(subcloud_status_dict)

                self._append_static_err_content(subcloud_dict)

                if not first_time:
                    if subcloud_list[-1]['id'] == subcloud_dict['id']:
                        # We have a match for this subcloud id already,
                        # check if we have a same sync_status
                        if subcloud_list[-1][consts.SYNC_STATUS] != \
                                subcloud_dict[consts.SYNC_STATUS]:
                            subcloud_list[-1][consts.SYNC_STATUS] = \
                                dccommon_consts.SYNC_STATUS_OUT_OF_SYNC

                        if subcloud_status:
                            subcloud_status_list.append(
                                db_api.subcloud_endpoint_status_db_model_to_dict(  # noqa
                                    subcloud_status))
                        subcloud_list[-1][
                            consts.ENDPOINT_SYNC_STATUS] = subcloud_status_list

                    else:
                        subcloud_status_list = []
                        if subcloud_status:
                            subcloud_status_list.append(
                                db_api.subcloud_endpoint_status_db_model_to_dict(  # noqa
                                    subcloud_status))

                        subcloud_list.append(subcloud_dict)
                else:
                    if subcloud_status:
                        subcloud_status_list.append(
                            db_api.subcloud_endpoint_status_db_model_to_dict(
                                subcloud_status))
                    subcloud_list.append(subcloud_dict)

                first_time = False

            for s in subcloud_list:
                # This is to reduce changes on cert-mon
                # Overwrites the name value with region
                if utils.is_req_from_cert_mon_agent(request):
                    s['name'] = s['region-name']
                result['subclouds'].append(s)

            return result
        else:
            # Single subcloud requested
            subcloud = None
            subcloud_dict = dict()
            subcloud_status_list = []
            endpoint_sync_dict = dict()

            if subcloud_ref.isdigit():
                # Look up subcloud as an ID
                try:
                    subcloud = db_api.subcloud_get(context, subcloud_ref)
                except exceptions.SubcloudNotFound:
                    pecan.abort(404, _('Subcloud not found'))
            else:
                try:
                    # This method replaces subcloud_get_by_name, since it
                    # allows looking up the subcloud either by region name
                    # or by subcloud name.
                    # When the request comes from the cert-monitor, it is
                    # based on the region name (which is in UUID format).
                    # Whereas, if the request comes from a client other
                    # than cert-monitor, the lookup is based on
                    # the subcloud name.
                    subcloud = db_api.subcloud_get_by_name_or_region_name(
                        context,
                        subcloud_ref)
                except exceptions.SubcloudNameOrRegionNameNotFound:
                    pecan.abort(404, _('Subcloud not found'))

            subcloud_id = subcloud.id

            # Data for this subcloud requested
            # Build up and append a dictionary of the endpoints
            # sync status to the result.
            for subcloud, subcloud_status in db_api. \
                    subcloud_get_with_status(context, subcloud_id):
                subcloud_dict = db_api.subcloud_db_model_to_dict(
                    subcloud)
                # may be empty subcloud_status entry, account for this
                if subcloud_status:
                    subcloud_status_list.append(
                        db_api.subcloud_endpoint_status_db_model_to_dict(
                            subcloud_status))
            endpoint_sync_dict = {consts.ENDPOINT_SYNC_STATUS:
                                  subcloud_status_list}
            subcloud_dict.update(endpoint_sync_dict)

            self._append_static_err_content(subcloud_dict)

            subcloud_region = subcloud.region_name
            subcloud_dict.pop('region-name')
            if detail is not None:
                oam_floating_ip = "unavailable"
                deploy_config_sync_status = "unknown"
                if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE:

                    # Get the keystone client that will be used
                    # for _get_deploy_config_sync_status and _get_oam_addresses
                    sc_ks_client = psd_common.get_ks_client(subcloud_region)
                    oam_addresses = self._get_oam_addresses(context,
                                                            subcloud_region,
                                                            sc_ks_client)
                    if oam_addresses is not None:
                        oam_floating_ip = oam_addresses.oam_floating_ip

                    deploy_config_state = self._get_deploy_config_sync_status(
                        context, subcloud_region, sc_ks_client)
                    if deploy_config_state is not None:
                        deploy_config_sync_status = deploy_config_state

                extra_details = {"oam_floating_ip": oam_floating_ip,
                                 "deploy_config_sync_status": deploy_config_sync_status,
                                 "region_name": subcloud_region}

                subcloud_dict.update(extra_details)
            return subcloud_dict

    @utils.synchronized(LOCK_NAME)
    @index.when(method='POST', template='json')
    def post(self):
        """Create and deploy a new subcloud."""

        policy.authorize(subclouds_policy.POLICY_ROOT % "create", {},
                         restcomm.extract_credentials_for_policy())
        context = restcomm.extract_context_from_environ()

        bootstrap_sc_name = psd_common.get_bootstrap_subcloud_name(request)

        payload = psd_common.get_request_data(request, None,
                                              SUBCLOUD_ADD_GET_FILE_CONTENTS)

        psd_common.validate_migrate_parameter(payload, request)

        psd_common.validate_secondary_parameter(payload, request)

        # When migrating, the supplied subcloud name parameter must match
        # the name defined in the bootstrap file
        if payload.get('migrate') == 'true' and bootstrap_sc_name is not None:
            if bootstrap_sc_name != payload.get('name'):
                pecan.abort(400, _('subcloud name does not match the '
                                   'name defined in bootstrap file'))

        # sysadmin_password is not needed when adding a secondary subcloud
        if 'secondary' not in payload:
            psd_common.validate_sysadmin_password(payload)

        psd_common.subcloud_region_create(payload, context)

        psd_common.pre_deploy_create(payload, context, request)

        try:
            # Add the subcloud details to the database
            subcloud = psd_common.add_subcloud_to_database(context, payload)

            # Ask dcmanager-manager to add the subcloud.
            # It will do all the real work...
            self.dcmanager_rpc_client.add_subcloud(
                context, subcloud.id, payload)

            return db_api.subcloud_db_model_to_dict(subcloud)
        except RemoteError as e:
            pecan.abort(422, e.value)
        except Exception:
            LOG.exception(
                "Unable to add subcloud %s" % payload.get('name'))
            pecan.abort(500, _('Unable to add subcloud'))

    @utils.synchronized(LOCK_NAME)
    @index.when(method='PATCH', template='json')
    def patch(self, subcloud_ref=None, verb=None):
        """Update a subcloud.

        :param subcloud_ref: ID or name of subcloud to update

        :param verb: Specifies the patch action to be taken
                     or subcloud update operation
        """

        policy.authorize(subclouds_policy.POLICY_ROOT % "modify", {},
                         restcomm.extract_credentials_for_policy())
        context = restcomm.extract_context_from_environ()
        subcloud = None

        if subcloud_ref is None:
            pecan.abort(400, _('Subcloud ID required'))

        if subcloud_ref.isdigit():
            # Look up subcloud as an ID
            try:
                subcloud = db_api.subcloud_get(context, subcloud_ref)
            except exceptions.SubcloudNotFound:
                pecan.abort(404, _('Subcloud not found'))
        else:
            try:
                # This method replaces subcloud_get_by_name, since it
                # allows looking up the subcloud either by region name
                # or by subcloud name.
                # When the request comes from the cert-monitor, it is
                # based on the region name (which is in UUID format).
                # Whereas, if the request comes from a client other
                # than cert-monitor, the lookup is based on
                # the subcloud name.
                subcloud = db_api.subcloud_get_by_name_or_region_name(
                    context,
                    subcloud_ref)
            except exceptions.SubcloudNameOrRegionNameNotFound:
                pecan.abort(404, _('Subcloud not found'))

        subcloud_id = subcloud.id

        if verb is None:
            # subcloud update
            payload = self._get_patch_data(request)
            if not payload:
                pecan.abort(400, _('Body required'))

            # Rename the subcloud
            new_subcloud_name = payload.get('name')
            if new_subcloud_name is not None:
                # To be renamed, the subcloud must be unmanaged and in a
                # valid deploy state
                if subcloud.management_state != dccommon_consts.MANAGEMENT_UNMANAGED \
                        or subcloud.deploy_status not in consts.STATES_FOR_SUBCLOUD_RENAME:
                    msg = ('Subcloud %s must be unmanaged and in a valid deploy state '
                           'for the subcloud rename operation.' % subcloud.name)
                    pecan.abort(400, msg)

                # Validates new name
                if not utils.is_subcloud_name_format_valid(new_subcloud_name):
                    pecan.abort(400, _("new name must contain alphabetic characters"))

                # Checks if new subcloud name is the same as the current subcloud
                if new_subcloud_name == subcloud.name:
                    pecan.abort(400, _('Provided subcloud name %s is the same as the '
                                       'current subcloud %s. A different name is '
                                       'required to rename the subcloud' %
                                       (new_subcloud_name, subcloud.name)))

                error_msg = ('Unable to rename subcloud %s (region %s) to %s' %
                             (subcloud.name, subcloud.region_name, new_subcloud_name))
                try:
                    LOG.info("Renaming subcloud %s to: %s\n" % (subcloud.name,
                                                                new_subcloud_name))
                    sc = self.dcmanager_rpc_client.rename_subcloud(context,
                                                                   subcloud_id,
                                                                   subcloud.name,
                                                                   new_subcloud_name)
                    subcloud.name = sc['name']
                except RemoteError as e:
                    LOG.error(error_msg)
                    pecan.abort(422, e.value)
                except Exception:
                    LOG.error(error_msg)
                    pecan.abort(500, _('Unable to rename subcloud'))

            # Check if any network reconfiguration parameters exist
            reconfigure_network = any(payload.get(value) is not None for value in (
                SUBCLOUD_MANDATORY_NETWORK_PARAMS))

            if reconfigure_network:
                if utils.subcloud_is_secondary_state(subcloud.deploy_status):
                    pecan.abort(500, _("Cannot perform on %s "
                                       "state subcloud" % subcloud.deploy_status))
                system_controller_mgmt_pool = psd_common.get_network_address_pool()
                # Required parameters
                payload['name'] = subcloud.name
                payload['region_name'] = subcloud.region_name
                payload['system_controller_network'] = (
                    system_controller_mgmt_pool.network)
                payload['system_controller_network_prefix'] = (
                    system_controller_mgmt_pool.prefix
                )
                # Needed for service endpoint reconfiguration
                payload['management_start_address'] = (
                    payload.get('management_start_ip', None)
                )
                # Validation
                self._validate_network_reconfiguration(payload, subcloud)

            management_state = payload.get('management-state')
            group_id = payload.get('group_id')
            description = payload.get('description')
            location = payload.get('location')
            bootstrap_values = payload.get('bootstrap_values')
            bootstrap_address = payload.get('bootstrap_address')

            # Syntax checking
            if management_state and \
                    management_state not in [dccommon_consts.MANAGEMENT_UNMANAGED,
                                             dccommon_consts.MANAGEMENT_MANAGED]:
                pecan.abort(400, _('Invalid management-state'))

            force_flag = payload.get('force')
            if force_flag is not None:
                if force_flag not in [True, False]:
                    pecan.abort(400, _('Invalid force value'))
                elif management_state != dccommon_consts.MANAGEMENT_MANAGED:
                    pecan.abort(400, _('Invalid option: force'))

            # Verify the group_id is valid
            if group_id is not None:
                try:
                    # group_id may be passed in the payload as an int or str
                    group_id = str(group_id)
                    if group_id.isdigit():
                        grp = db_api.subcloud_group_get(context, group_id)
                    else:
                        # replace the group_id (name) with the id
                        grp = db_api.subcloud_group_get_by_name(context,
                                                                group_id)
                    group_id = grp.id
                except (exceptions.SubcloudGroupNameNotFound,
                        exceptions.SubcloudGroupNotFound):
                    pecan.abort(400, _('Invalid group'))

            if consts.INSTALL_VALUES in payload:
                psd_common.validate_install_values(payload, subcloud)
                payload['data_install'] = json.dumps(payload[consts.INSTALL_VALUES])

            try:
                if reconfigure_network:
                    self.dcmanager_rpc_client.update_subcloud_with_network_reconfig(
                        context, subcloud_id, payload)
                    return db_api.subcloud_db_model_to_dict(subcloud)
                subcloud = self.dcmanager_rpc_client.update_subcloud(
                    context, subcloud_id, management_state=management_state,
                    description=description, location=location,
                    group_id=group_id, data_install=payload.get('data_install'),
                    force=force_flag,
                    bootstrap_values=bootstrap_values,
                    bootstrap_address=bootstrap_address)
                return subcloud
            except RemoteError as e:
                pecan.abort(422, e.value)
            except Exception as e:
                # additional exceptions.
                LOG.exception(e)
                pecan.abort(500, _('Unable to update subcloud'))

        elif verb == "redeploy":
            if utils.subcloud_is_secondary_state(subcloud.deploy_status):
                pecan.abort(500, _("Cannot perform on %s "
                                   "state subcloud" % subcloud.deploy_status))
            config_file = psd_common.get_config_file_path(subcloud.name,
                                                          consts.DEPLOY_CONFIG)
            has_bootstrap_values = consts.BOOTSTRAP_VALUES in request.POST
            has_original_config_values = os.path.exists(config_file)
            has_new_config_values = consts.DEPLOY_CONFIG in request.POST
            has_config_values = has_original_config_values or has_new_config_values
            payload = psd_common.get_request_data(
                request, subcloud, SUBCLOUD_REDEPLOY_GET_FILE_CONTENTS)

            if (subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE or
                    subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED):
                msg = _('Cannot re-deploy an online and/or managed subcloud')
                LOG.warning(msg)
                pecan.abort(400, msg)

            # If a subcloud release is not passed, use the current
            # system controller software_version
            payload['software_version'] = payload.get('release', tsc.SW_VERSION)

            # Don't load previously stored bootstrap_values if they are present in
            # the request, as this would override the already loaded values from it.
            # As config_values are optional, only attempt to load previously stored
            # values if this phase should be executed.
            files_for_redeploy = SUBCLOUD_REDEPLOY_GET_FILE_CONTENTS.copy()
            if has_bootstrap_values:
                files_for_redeploy.remove(consts.BOOTSTRAP_VALUES)
            if not has_config_values:
                files_for_redeploy.remove(consts.DEPLOY_CONFIG)

            psd_common.populate_payload_with_pre_existing_data(
                payload, subcloud, files_for_redeploy)

            psd_common.validate_sysadmin_password(payload)
            psd_common.pre_deploy_install(payload, validate_password=False)
            psd_common.pre_deploy_bootstrap(context, payload, subcloud,
                                            has_bootstrap_values,
                                            validate_password=False)
            payload['bootstrap-address'] = \
                payload['install_values']['bootstrap_address']

            try:
                # Align the software version of the subcloud with redeploy
                # version. Update description, location and group id if offered,
                # update the deploy status as pre-install.
                subcloud = db_api.subcloud_update(
                    context,
                    subcloud_id,
                    description=payload.get('description'),
                    location=payload.get('location'),
                    software_version=payload['software_version'],
                    deploy_status=consts.DEPLOY_STATE_PRE_INSTALL,
                    first_identity_sync_complete=False,
                    data_install=json.dumps(payload['install_values']))

                self.dcmanager_rpc_client.redeploy_subcloud(
                    context, subcloud_id, payload)

                return db_api.subcloud_db_model_to_dict(subcloud)
            except RemoteError as e:
                pecan.abort(422, e.value)
            except Exception:
                LOG.exception("Unable to redeploy subcloud %s" % subcloud.name)
                pecan.abort(500, _('Unable to redeploy subcloud'))

        elif verb == "restore":
            pecan.abort(410, _('This API is deprecated. '
                               'Please use /v1.0/subcloud-backup/restore'))

        elif verb == "reconfigure":
            pecan.abort(410, _('This API is deprecated. '
                               'Please use /v1.0/phased-subcloud-deploy/{subcloud}/configure'))

        elif verb == "reinstall":
            pecan.abort(410, _('This API is deprecated. '
                               'Please use /v1.0/subclouds/{subcloud}/redeploy'))

        elif verb == 'update_status':
            res = self.updatestatus(subcloud.name, subcloud.region_name)
            return res
        elif verb == 'prestage':
            if utils.subcloud_is_secondary_state(subcloud.deploy_status):
                pecan.abort(500, _("Cannot perform on %s "
                                   "state subcloud" % subcloud.deploy_status))
            payload = self._get_prestage_payload(request)
            payload['subcloud_name'] = subcloud.name
            try:
                prestage.global_prestage_validate(payload)
            except exceptions.PrestagePreCheckFailedException as exc:
                LOG.exception("global_prestage_validate failed")
                pecan.abort(400, _(str(exc)))

            try:
                payload['oam_floating_ip'] = \
                    prestage.validate_prestage(subcloud, payload)
            except exceptions.PrestagePreCheckFailedException as exc:
                LOG.exception("validate_prestage failed")
                pecan.abort(400, _(str(exc)))

            prestage_software_version = payload.get(
                consts.PRESTAGE_REQUEST_RELEASE, tsc.SW_VERSION)

            try:
                self.dcmanager_rpc_client.prestage_subcloud(context, payload)
                # local update to deploy_status - this is just for CLI response:
                subcloud.deploy_status = consts.PRESTAGE_STATE_PACKAGES

                subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
                subcloud_dict.update(
                    {consts.PRESTAGE_SOFTWARE_VERSION: prestage_software_version})
                return subcloud_dict
            except RemoteError as e:
                pecan.abort(422, e.value)
            except Exception:
                LOG.exception("Unable to prestage subcloud %s" % subcloud.name)
                pecan.abort(500, _('Unable to prestage subcloud'))
        elif verb == 'migrate':
            try:
                # Reject if not in secondary/rehome-failed/rehome-prep-failed state
                if subcloud.deploy_status not in [consts.DEPLOY_STATE_SECONDARY,
                                                  consts.DEPLOY_STATE_REHOME_FAILED,
                                                  consts.DEPLOY_STATE_REHOME_PREP_FAILED]:
                    LOG.exception("Unable to migrate subcloud %s, "
                                  "must be in secondary or rehome failure state" % subcloud.name)
                    pecan.abort(400, _("Unable to migrate subcloud %s, "
                                       "must be in secondary or rehome failure state" %
                                       subcloud.name))
                payload = json.loads(request.body)
                self._validate_migrate(payload, subcloud)

                # Call migrate
                self.dcmanager_rpc_client.migrate_subcloud(context, subcloud.id, payload)
                return db_api.subcloud_db_model_to_dict(subcloud)
            except RemoteError as e:
                pecan.abort(422, e.value)
            except Exception:
                LOG.exception(
                    "Unable to migrate subcloud %s" % subcloud.name)
                pecan.abort(500, _('Unable to migrate subcloud'))

    @utils.synchronized(LOCK_NAME)
    @index.when(method='delete', template='json')
    def delete(self, subcloud_ref):
        """Delete a subcloud.

        :param subcloud_ref: ID or name of subcloud to delete.
        """
        policy.authorize(subclouds_policy.POLICY_ROOT % "delete", {},
                         restcomm.extract_credentials_for_policy())
        context = restcomm.extract_context_from_environ()
        subcloud = None

        if subcloud_ref.isdigit():
            # Look up subcloud as an ID
            try:
                subcloud = db_api.subcloud_get(context, subcloud_ref)
            except exceptions.SubcloudNotFound:
                pecan.abort(404, _('Subcloud not found'))
        else:
            # Look up subcloud by name
            try:
                subcloud = db_api.subcloud_get_by_name(context,
                                                       subcloud_ref)
            except exceptions.SubcloudNameNotFound:
                pecan.abort(404, _('Subcloud not found'))

        subcloud_id = subcloud.id

        try:
            # Ask dcmanager-manager to delete the subcloud.
            # It will do all the real work...
            return self.dcmanager_rpc_client.delete_subcloud(context,
                                                             subcloud_id)
        except RemoteError as e:
            pecan.abort(422, e.value)
        except Exception as e:
            LOG.exception(e)
            pecan.abort(500, _('Unable to delete subcloud'))

    def updatestatus(self, subcloud_name, subcloud_region):
        """Update subcloud sync status

        :param subcloud_name: name of the subcloud
        :param subcloud_region: name of the subcloud region
        :return: json result object for the operation on success
        """

        payload = self._get_updatestatus_payload(request)
        if not payload:
            pecan.abort(400, _('Body required'))

        endpoint = payload.get('endpoint')
        if not endpoint:
            pecan.abort(400, _('endpoint required'))
        allowed_endpoints = [dccommon_consts.ENDPOINT_TYPE_DC_CERT]
        if endpoint not in allowed_endpoints:
            pecan.abort(400, _('updating endpoint %s status is not allowed'
                               % endpoint))

        status = payload.get('status')
        if not status:
            pecan.abort(400, _('status required'))

        allowed_status = [dccommon_consts.SYNC_STATUS_IN_SYNC,
                          dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
                          dccommon_consts.SYNC_STATUS_UNKNOWN]
        if status not in allowed_status:
            pecan.abort(400, _('status %s is invalid.' % status))

        LOG.info('update %s set %s=%s' % (subcloud_name, endpoint, status))
        context = restcomm.extract_context_from_environ()
        self.dcmanager_state_rpc_client.update_subcloud_endpoint_status(
            context, subcloud_name, subcloud_region, endpoint, status)

        result = {'result': 'OK'}
        return result