Remove load, host_upgrade and software_upgrade tables and their references

As USM is now the method used to perform upgrades, this commit
removes the load, host_upgrade and software_upgrade tables, along
with the references to them used by the legacy upgrade. It also
removes the methods that manipulate load data and the
upgrade-related code in controllerconfig. The unit tests covering
that controllerconfig upgrade code were removed as well.
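
The schema change this implies would look roughly like the sketch
below: a minimal illustration only, assuming a sqlalchemy-migrate
style sysinv migration script. The script shape, the drop order and
the table names (the DB table may be "loads" rather than "load") are
assumptions for illustration, not taken from this change.

from sqlalchemy import MetaData, Table

def upgrade(migrate_engine):
    """Drop the legacy upgrade tables superseded by USM."""
    meta = MetaData()
    meta.bind = migrate_engine
    # host_upgrade and software_upgrade hold foreign keys into the
    # load table, so drop them before the load table itself.
    for name in ('host_upgrade', 'software_upgrade', 'loads'):
        Table(name, meta, autoload=True).drop()

def downgrade(migrate_engine):
    # The legacy upgrade path is removed; rolling back is unsupported.
    raise NotImplementedError('Downgrade is not supported.')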

Test Plan:
DX:
PASS: Upgrade stx10 -> stx11
PASS: Host Swact
PASS: Rollback stx11 -> stx10
PASS: stx11 fresh install/bootstrap/unlock

SX:
PASS: Upgrade stx10 -> stx11
PASS: Backup and Restore
PASS: stx11 fresh install/bootstrap/unlock
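
As a supplement to the plan above, a post-upgrade sanity check could
confirm the legacy tables are really gone from the sysinv database.
This is a hypothetical sketch, not part of the recorded test plan; it
uses the same psycopg2 style as the removed code and checks both
"load" and "loads" since the exact table name is not shown here.

import psycopg2

conn = psycopg2.connect("dbname='sysinv' user='postgres'")
with conn:
    with conn.cursor() as cur:
        cur.execute(
            "SELECT table_name FROM information_schema.tables "
            "WHERE table_name IN "
            "('load', 'loads', 'host_upgrade', 'software_upgrade')")
        leftovers = cur.fetchall()
assert not leftovers, "legacy tables still present: %s" % leftovers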

Story: 2011357
Task: 51709

Change-Id: I9b926f9627b789908a31f3a405cc48b506148b49
Signed-off-by: Luis Eduardo Bonatti <luizeduardo.bonatti@windriver.com>
Luis Eduardo Bonatti 2025-02-17 16:01:40 -03:00
parent 3b9a060bb3
commit d7ecf46069
58 changed files with 26 additions and 5714 deletions

View File

@@ -13,7 +13,6 @@
- sysinv-tox-flake8
- sysinv-tox-pylint
- sysinv-tox-bandit
- controllerconfig-tox-py39
- controllerconfig-tox-flake8
- controllerconfig-tox-pylint
- tsconfig-tox-pylint
@@ -29,7 +28,6 @@
- sysinv-tox-flake8
- sysinv-tox-pylint
- sysinv-tox-bandit
- controllerconfig-tox-py39
- controllerconfig-tox-flake8
- controllerconfig-tox-pylint
- tsconfig-tox-pylint
@@ -100,19 +98,6 @@
tox_envlist: bandit
tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini
- job:
name: controllerconfig-tox-py39
parent: tox-py39
description: Run py39 tests for controllerconfig
nodeset: debian-bullseye
required-projects:
- starlingx/fault
files:
- controllerconfig/*
vars:
tox_envlist: py39
tox_extra_args: -c controllerconfig/controllerconfig/tox.ini
- job:
name: controllerconfig-tox-flake8
parent: tox

View File

@@ -1,5 +0,0 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

View File

@@ -1,144 +0,0 @@
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""Base test code to test migration scripts
First, focus on the migration script name validation
Second, the validation script sequence call
"""
from mockproc import mockprocess
from os import listdir
from os.path import isfile
from os.path import join
from tempfile import mkdtemp
import os
import unittest
from controllerconfig.upgrades import utils
# The way to assert is to pass a script execution that writes the script file
# name into a file
# The content of the file will contain the sequence of the called scripts
script_body = '''#! /usr/bin/env python
with open('%s', 'a+') as f:
f.write("%s")
'''
from_release = "20.06"
to_release = "20.12"
action = "migrate"
# Lists to add scripts to be called, use a ":" separator for
# parsing/asserting
validScripts1 = ["71-bla1-bla2-bla3.sh", "8-bla1-bla2-bla3.py:",
"21-bla1-bla2-bla3.sh:"]
validScripts2 = ["75-deployment-ns-upgrade.py:", "65-k8s-app-upgrade.sh:",
"10-sysinv-adjust-partitions.py:",
"60-helm-releases-data-migration.py:",
"55-armada-helm-upgrade.py:",
"95-apply-mandatory-psp-policies.py:",
"10-sysinv-adjust-partitions.py:",
"85-update-sc-admin-endpoint-cert.py:",
"70-active-secured-etcd-after-upgrade.sh:",
"50-dcmanager-subcloud-status-migration.py:",
"45-sysinv-remove-identity-shared-service.py:",
"25-coredns-configmap.sh:",
"20-exempt-admin-from-lockout.sh:",
"115-foo-bar-test-ok.sh:", "299-foo-bar-test-ok.sh:",
"2123-foo-bar-test-ok.sh"]
invalidScripts1 = ["70-bla1-bla2-bla3.sh", "7-bla1-bla2-bla3.py:",
"20-bla1-bla2-bla3.sh:", "-20-bla1-bla2-bla3.sh"]
invalidScripts2 = ["95-apply-mandatory-psp-policies.py",
"10-sysinv-adjust-partitions.py:",
"85-update-sc-admin-endpoint-cert.py:",
"70_active-secured-etcd-after-upgrade.sh:"]
# Append scripts to be executed according to the passed list
def addScripts(self, scripts, output_filename):
for script in scripts:
self.scripts.append(script, returncode=0, script=script_body %
(output_filename, script))
# Test with the files under "controllerconfig/upgrade-scripts"
def addRealMigrationScripts(self, output_filename):
path = os.getcwd() + "/upgrade-scripts"
for f in listdir(path):
if isfile(join(path, f)):
self.scripts.append(f, returncode=0, script=script_body %
(output_filename, f))
def assertProperSorted(scripts):
output = False
sequence = []
for script in scripts:
sequence.append(int(script.split("-")[0]))
if sorted(sequence) == sequence:
output = True
return output
class TestMigrationScripts(unittest.TestCase):
def setUp(self):
self.scripts_dir = mkdtemp()
self.output_filename = mkdtemp() + "/output.txt"
# Re-create the file for each run
open(self.output_filename, 'w+').close()
self.scripts = mockprocess.MockProc(self.scripts_dir)
def test_migration_scripts_success_1(self):
addScripts(self, validScripts1, self.output_filename)
with self.scripts:
utils.execute_migration_scripts(from_release, to_release, action,
self.scripts_dir)
with open(self.output_filename, 'r') as f:
output = str(f.read())
if(assertProperSorted(output.split(':'))):
pass
def test_migration_scripts_success_2(self):
addScripts(self, validScripts2, self.output_filename)
with self.scripts:
utils.execute_migration_scripts(from_release, to_release, action,
self.scripts_dir)
with open(self.output_filename, 'r') as f:
output = str(f.read())
if(assertProperSorted(output.split(':'))):
pass
def test_real_migration_scripts(self):
addRealMigrationScripts(self, self.output_filename)
with self.scripts:
utils.execute_migration_scripts(from_release, to_release, action,
self.scripts_dir)
with open(self.output_filename, 'r') as f:
output = str(f.read())
if(assertProperSorted(output.split(':'))):
pass
def test_migration_scripts_validation_fail_1(self):
addScripts(self, invalidScripts1, self.output_filename)
with self.assertRaises(ValueError):
with self.scripts:
utils.execute_migration_scripts(from_release, to_release,
action, self.scripts_dir)
def test_migration_scripts_validation_fail_2(self):
addScripts(self, invalidScripts2, self.output_filename)
with self.assertRaises(ValueError):
with self.scripts:
utils.execute_migration_scripts(from_release, to_release,
action, self.scripts_dir)
def tearDown(self):
os.remove(self.output_filename)

View File

@@ -1,5 +0,0 @@
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

View File

@@ -1,385 +0,0 @@
#
# Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# This file contains functions used by sysinv to manage upgrades.
#
import json
import glob
import os
import shutil
import subprocess
import yaml
import tsconfig.tsconfig as tsc
from sysinv.common import constants as sysinv_constants
from controllerconfig.upgrades import utils
from oslo_log import log
LOG = log.getLogger(__name__)
def get_upgrade_databases(system_role, shared_services):
UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv',
'barbican', 'fm')
UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (),
'sysinv': (),
'barbican': (),
'fm': ('alarm',)}
if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
UPGRADE_DATABASE_SKIP_TABLES.update({
'dcmanager': (),
'dcorch': ('service', 'orch_job', 'orch_request',)
})
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
UPGRADE_DATABASES += ('keystone',)
UPGRADE_DATABASE_SKIP_TABLES.update({'keystone': ('token',)})
return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES
def export_postgres(dest_dir, system_role, shared_services):
""" Export postgres databases """
devnull = open(os.devnull, 'w')
try:
upgrade_databases, upgrade_database_skip_tables = \
get_upgrade_databases(system_role, shared_services)
# Dump roles, table spaces and schemas for databases.
subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
'--schema-only > %s/%s' %
(dest_dir, 'postgres.postgreSql.config'))],
shell=True, stderr=devnull)
# Dump data for databases.
for _a, db_elem in enumerate(upgrade_databases):
db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '
db_cmd += '--disable-triggers --data-only %s ' % db_elem
for _b, table_elem in \
enumerate(upgrade_database_skip_tables[db_elem]):
db_cmd += '--exclude-table=%s ' % table_elem
db_cmd += '> %s/%s.postgreSql.data' % (dest_dir, db_elem)
subprocess.check_call([db_cmd], shell=True, stderr=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to export postgres databases for upgrade.")
raise
def export_vim(dest_dir):
""" Export VIM database """
devnull = open(os.devnull, 'w')
try:
vim_cmd = ("nfv-vim-manage db-dump-data -d %s -f %s" %
(os.path.join(tsc.PLATFORM_PATH, 'nfv/vim', tsc.SW_VERSION),
os.path.join(dest_dir, 'vim.data')))
subprocess.check_call([vim_cmd], shell=True, stderr=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to export VIM databases for upgrade.")
raise
def prepare_upgrade(from_load, to_load, i_system, mgmt_address):
""" Executed on the release N side to prepare for an upgrade. """
devnull = open(os.devnull, 'w')
LOG.info("Starting upgrade preparations - from: %s, to: %s" %
(from_load, to_load))
dest_dir = os.path.join(utils.POSTGRES_PATH, "upgrade")
try:
os.mkdir(dest_dir, 0o755)
except OSError:
LOG.exception("Failed to create upgrade export directory %s." %
dest_dir)
raise
# Export databases
shared_services = i_system.capabilities.get("shared_services", "")
export_postgres(dest_dir, i_system.distributed_cloud_role, shared_services)
export_vim(dest_dir)
# Export filesystems so controller-1 can access them
try:
subprocess.check_call(
["exportfs",
"%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.POSTGRES_PATH),
"-o",
"rw,no_root_squash"],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to export %s" % utils.POSTGRES_PATH)
raise
try:
subprocess.check_call(
["exportfs",
"%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.RABBIT_PATH),
"-o",
"rw,no_root_squash"],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to export %s" % utils.RABBIT_PATH)
raise
# Migrate /opt/platform/config so controller-1 can access when it
# runs controller_config
try:
subprocess.check_call(
["cp",
"-a",
os.path.join(tsc.PLATFORM_PATH, "config", from_load),
os.path.join(tsc.PLATFORM_PATH, "config", to_load)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % os.path.join(tsc.PLATFORM_PATH,
"config"))
raise
# Point N+1 etcd to N for now. We will migrate when both controllers are
# running N+1, during the swact back to controller-0. This solution will
# present some problems when we do upgrade etcd, so further development
# will be required at that time.
etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load)
etcd_from_dir = os.path.join(tsc.ETCD_PATH, from_load)
os.symlink(etcd_from_dir, etcd_to_dir)
# Copy /etc/kubernetes/admin.conf so controller-1 can access
# during its upgrade
try:
subprocess.check_call(
["cp",
os.path.join(utils.KUBERNETES_CONF_PATH,
utils.KUBERNETES_ADMIN_CONF_FILE),
os.path.join(tsc.PLATFORM_PATH, "config", to_load,
"kubernetes", utils.KUBERNETES_ADMIN_CONF_FILE)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to copy %s" %
os.path.join(utils.KUBERNETES_CONF_PATH,
utils.KUBERNETES_ADMIN_CONF_FILE))
raise
# Update admin.conf file to replace the cluster address with
# the floating management address
# This is a temporary change used in upgrade of N+1 node
admin_conf = os.path.join(tsc.PLATFORM_PATH, "config", to_load,
"kubernetes", utils.KUBERNETES_ADMIN_CONF_FILE)
with open(admin_conf, 'r') as yaml_file:
config = yaml.safe_load(yaml_file)
for item, values in config.items():
# update server address in cluster
if item == 'clusters':
if 'cluster' in values[0] and 'server' in values[0]['cluster']:
formatted_address = utils.format_url_address(mgmt_address)
# TODO use urlparse() to get url components and update
values[0]['cluster']['server'] = \
"https://" + formatted_address + ":6443"
break # no need to iterate further
with open(admin_conf, 'w') as yaml_file:
yaml.dump(config, yaml_file, default_flow_style=False)
# Remove branding tar files from the release N+1 directory as branding
# files are not compatible between releases.
branding_files = os.path.join(
tsc.PLATFORM_PATH, "config", to_load, "branding", "*.tgz")
try:
subprocess.check_call(["rm -f %s" % branding_files], shell=True,
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to remove branding files %s" % branding_files)
# Execute migration scripts
utils.execute_migration_scripts(
from_load, to_load, utils.ACTION_START)
LOG.info("Finished upgrade preparations")
def create_simplex_backup(software_upgrade):
"""Creates the upgrade metadata and creates the system backup"""
backup_data = {}
upgrade_data = software_upgrade.as_dict()
if upgrade_data['created_at']:
upgrade_data['created_at'] = \
upgrade_data['created_at'].replace(
microsecond=0).replace(tzinfo=None).isoformat()
if upgrade_data['updated_at']:
upgrade_data['updated_at'] = \
upgrade_data['updated_at'].replace(
microsecond=0).replace(tzinfo=None).isoformat()
backup_data['upgrade'] = upgrade_data
json_data = json.dumps(backup_data)
metadata_path = os.path.join(tsc.CONFIG_PATH, 'upgrades')
os.mkdir(metadata_path)
metadata_filename = os.path.join(metadata_path, 'metadata')
with open(metadata_filename, 'w') as metadata_file:
metadata_file.write(json_data)
upgrade_data, upgrade_images_data = get_upgrade_backup_filenames(
software_upgrade)
backup_vars = [
"platform_backup_file=%s.tgz" % upgrade_data,
"user_images_backup_file=%s.tgz" % upgrade_images_data,
"backup_user_images=true",
"backup_dir=%s" % tsc.PLATFORM_BACKUP_PATH]
args = [
'ansible-playbook',
'-e', ' '.join(backup_vars),
sysinv_constants.ANSIBLE_PLATFORM_BACKUP_PLAYBOOK]
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
universal_newlines=True)
out, _ = proc.communicate()
LOG.info(out)
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, args)
LOG.info("Create simplex backup complete")
def get_upgrade_backup_filenames(software_upgrade):
"""Generates the simplex upgrade backup filename"""
created_at_date = software_upgrade.created_at.replace(
microsecond=0).replace(tzinfo=None)
date_time = created_at_date.isoformat().replace(':', '')
suffix = date_time + '_' + software_upgrade.uuid
upgrade_data = 'upgrade_data_' + suffix
upgrade_images_data = 'upgrade_images_data_' + suffix
return upgrade_data, upgrade_images_data
def abort_upgrade(from_load, to_load, upgrade):
""" Executed on the release N side, cleans up data created for upgrade. """
devnull = open(os.devnull, 'w')
LOG.info("Starting aborting upgrade - from: %s, to: %s" %
(from_load, to_load))
# remove upgrade flags
upgrade_flags = [tsc.CONTROLLER_UPGRADE_FLAG,
tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG,
tsc.CONTROLLER_UPGRADE_FAIL_FLAG,
tsc.CONTROLLER_UPGRADE_STARTED_FLAG,
]
for flag in upgrade_flags:
try:
if os.path.isfile(flag):
os.remove(flag)
except OSError:
LOG.exception("Failed to remove upgrade flag %s" % flag)
# unexport filesystems
export_list = [utils.POSTGRES_PATH, utils.RABBIT_PATH]
export_path = None
try:
for export_path in export_list:
subprocess.check_call(
["exportfs",
"-u",
"%s:%s" % (utils.CONTROLLER_1_HOSTNAME, export_path)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to unexport %s" % export_path)
except Exception:
LOG.exception("Failed to unexport filesystems")
# Depending on where we are in the upgrade we may need to remove the
# symlink to the etcd directory
etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load)
if os.path.islink(etcd_to_dir):
LOG.info("Unlinking destination etcd directory: %s " % etcd_to_dir)
os.unlink(etcd_to_dir)
# Remove upgrade directories
upgrade_dirs = [
os.path.join(tsc.PLATFORM_PATH, "config", to_load),
os.path.join(tsc.PLATFORM_PATH, "armada", to_load),
os.path.join(tsc.PLATFORM_PATH, "helm", to_load),
os.path.join(tsc.ETCD_PATH, to_load),
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, to_load),
os.path.join(utils.RABBIT_PATH, to_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", to_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load),
os.path.join(tsc.CONFIG_PATH, 'upgrades')
]
for directory in upgrade_dirs:
try:
if os.path.isdir(directory):
shutil.rmtree(directory)
except OSError:
LOG.exception("Failed to remove upgrade directory %s" % directory)
remove_simplex_upgrade_data(upgrade)
LOG.info("Finished upgrade abort")
def activate_upgrade(from_load, to_load, i_system):
""" Executed on release N+1, activate the upgrade on all nodes. """
LOG.info("Starting upgrade activate - from: %s, to: %s" %
(from_load, to_load))
utils.execute_migration_scripts(from_load, to_load, utils.ACTION_ACTIVATE)
LOG.info("Finished upgrade activation")
def remove_simplex_upgrade_data(upgrade):
upgrade_data, upgrade_images_data = get_upgrade_backup_filenames(upgrade)
simplex_backup_files = glob.glob(
os.path.join(tsc.PLATFORM_BACKUP_PATH, upgrade_data + "*"))
simplex_backup_files += glob.glob(
os.path.join(tsc.PLATFORM_BACKUP_PATH, upgrade_images_data + "*"))
for file in simplex_backup_files:
try:
LOG.info("Removing simplex upgrade file %s" % file)
os.remove(file)
except OSError:
LOG.exception("Failed to remove %s" % file)
def complete_upgrade(from_load, to_load, upgrade):
""" Executed on release N+1, cleans up data created for upgrade. """
LOG.info("Starting upgrade complete - from: %s, to: %s" %
(from_load, to_load))
# Remove upgrade directories
upgrade_dirs = [
os.path.join(tsc.PLATFORM_PATH, "config", from_load),
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, from_load),
os.path.join(utils.RABBIT_PATH, from_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", from_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
os.path.join(tsc.PLATFORM_PATH, "armada", from_load),
os.path.join(tsc.PLATFORM_PATH, "helm", from_load),
os.path.join(tsc.ETCD_PATH, from_load)
]
for directory in upgrade_dirs:
try:
shutil.rmtree(directory)
except OSError:
LOG.exception("Failed to remove upgrade directory %s" % directory)
remove_simplex_upgrade_data(upgrade)
LOG.info("Finished upgrade complete")

View File

@@ -1,523 +0,0 @@
#
# Copyright (c) 2016-2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# This file contains common upgrades functions that can be used by both sysinv
# and during the upgrade of controller-1.
#
import keyring
import os
import psycopg2
from psycopg2.extras import RealDictCursor
import six
import subprocess
import tempfile
import yaml
import netaddr
# WARNING: The controller-1 upgrade is done before any puppet manifests
# have been applied, so only the static entries from tsconfig can be used.
# (the platform.conf file will not have been updated with dynamic values).
from tsconfig.tsconfig import SW_VERSION
from tsconfig.tsconfig import PLATFORM_PATH
from controllerconfig import utils as cutils
from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants
from sysinv.common import utils as sysinv_utils
from oslo_log import log
LOG = log.getLogger(__name__)
POSTGRES_PATH = '/var/lib/postgresql'
POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION)
RABBIT_PATH = '/var/lib/rabbitmq'
CONTROLLER_1_HOSTNAME = "controller-1"
DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n"
KUBERNETES_CONF_PATH = "/etc/kubernetes"
KUBERNETES_ADMIN_CONF_FILE = "admin.conf"
PLATFORM_LOG = '/var/log/platform.log'
ERROR_FILE = '/tmp/upgrade_fail_msg'
# well-known default domain name
DEFAULT_DOMAIN_NAME = 'Default'
# Migration script actions
ACTION_START = "start"
ACTION_MIGRATE = "migrate"
ACTION_ACTIVATE = "activate"
def execute_migration_scripts(from_release, to_release, action,
migration_script_dir="/etc/upgrade.d"):
""" Execute migration scripts with an action:
start: Prepare for upgrade on release N side. Called during
"system upgrade-start".
migrate: Perform data migration on release N+1 side. Called while
controller-1 is performing its upgrade.
"""
LOG.info("Executing migration scripts with from_release: %s, "
"to_release: %s, action: %s" % (from_release, to_release, action))
# Get a sorted list of all the migration scripts
# Exclude any files that can not be executed, including .pyc and .pyo files
files = [f for f in os.listdir(migration_script_dir)
if os.path.isfile(os.path.join(migration_script_dir, f)) and
os.access(os.path.join(migration_script_dir, f), os.X_OK)]
# From file name, get the number to sort the calling sequence,
# abort when the file name format does not follow the pattern
# "nnn-*.*", where "nnn" string shall contain only digits, corresponding
# to a valid unsigned integer (first sequence of characters before "-")
try:
files.sort(key=lambda x: int(x.split("-")[0]))
except Exception:
LOG.exception("Migration script sequence validation failed, invalid "
"file name format")
raise
MSG_SCRIPT_FAILURE = "Migration script %s failed with returncode %d. " \
"Script output:\n%s"
# Execute each migration script
for f in files:
migration_script = os.path.join(migration_script_dir, f)
try:
# needed to flag each execution in case of error
start_script_exec = "Executing migration script"
LOG.info("%s %s" % (start_script_exec, migration_script))
# TODO(heitormatsui): remove py2 code when
# CentOS and zuul py2.7 are deprecated
if six.PY2:
subprocess.check_output([migration_script,
from_release,
to_release,
action],
stderr=subprocess.STDOUT,
universal_newlines=True)
else:
ret = subprocess.run([migration_script,
from_release,
to_release,
action],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
text=True)
if ret.returncode != 0:
script_output = ret.stdout.splitlines()
output_list = []
for item in script_output:
if item not in output_list:
output_list.append(item)
output_script = "\n".join(output_list)
msg = MSG_SCRIPT_FAILURE % (migration_script,
ret.returncode,
output_script)
LOG.error(msg)
start_script_line = get_exec_start_line(
start_script_exec, PLATFORM_LOG)
error_message = search_script_output(
start_script_line, PLATFORM_LOG, f)
save_temp_file(msg, error_message)
raise Exception(msg)
except subprocess.CalledProcessError as e:
# log script output if script executed but failed.
LOG.error(MSG_SCRIPT_FAILURE %
(migration_script, e.returncode, e.output))
# Abort when a migration script fails
raise
except Exception as e:
# log exception if script not executed.
LOG.exception(e)
raise
def get_exec_start_line(start_script_exec, file_name):
""" Search the last ocurrence of the start of the script.
Get the line number and use it to find the last start
of script execution in logs.
Used to prevent reading an outdated error log.
"""
cmd = [
"awk",
'/{pattern_to_find}/ {{last_match = $0; start_line = NR}}'
'END {{if (last_match) print start_line, last_match}}'
.format(pattern_to_find=start_script_exec),
file_name
]
start_line = None
try:
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
last_match = output.decode().strip().splitlines()
start_line, last_match = last_match[0].split(' ', 1)
start_line = int(start_line)
except Exception:
LOG.error("Failed to exec cmd. \n %s" % error)
return None
return start_line
def search_script_output(start_script_line, file_name, script):
"""Search error lines for this script.
Then, compare the line number and just add the
lines after the start of the last execution.
"""
cmd = [
"awk",
'/{script}/ && /error|ERROR/ {{print NR, $0}}'.format(script=script),
file_name
]
error_list = []
error_string = ""
try:
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = process.communicate()
error_lines = output.decode().strip().splitlines()
# Compare the line numbers of each occurrence.
# If the line number is greater than 'start_script_line', then
# add this line to the output string
for i, current_line in enumerate(error_lines):
if i < (len(error_lines) - 1):
current_line, error_line = error_lines[i + 1].split(' ', 1)
current_line = int(current_line)
if current_line > start_script_line:
error_list.append(error_line)
error_string = '\n'.join(error_list)
except Exception:
LOG.error("Failed to exec cmd. \n %s" % error)
return None
return error_string
def save_temp_file(msg, error=None):
if os.path.isfile(ERROR_FILE):
os.remove(ERROR_FILE)
MSG_FAILURE = '%s \n\n'\
'%s \n\n'\
'Check specific service log or search for ' \
'this app in sysinv.log for details\n'
msg = MSG_FAILURE % (msg,
error)
try:
with open(ERROR_FILE, 'w+') as error_file:
error_file.write(msg)
except Exception:
LOG.warning("Error opening file %s" % ERROR_FILE)
return None
def get_db_connection(hiera_db_records, database):
username = hiera_db_records[database]['username']
password = hiera_db_records[database]['password']
return "postgresql://%s:%s@%s/%s" % (
username, password, 'localhost', database)
def get_password_from_keyring(service, username):
"""Retrieve password from keyring"""
password = ""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = keyring.get_password(service, username)
except Exception as e:
LOG.exception("Received exception when attempting to get password "
"for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def set_password_in_keyring(service, username):
"""Generate random password and store in keyring"""
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
try:
password = sysinv_utils.generate_random_password(length=16)
keyring.set_password(service, username, password)
except Exception as e:
LOG.exception("Received exception when attempting to generate "
"password for service %s, username %s: %s" %
(service, username, e))
raise
finally:
del os.environ["XDG_DATA_HOME"]
return password
def get_upgrade_token(from_release,
config,
secure_config):
# Get the system hiera data from the from release
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
system_file = os.path.join(from_hiera_path, "system.yaml")
with open(system_file, 'r') as s_file:
system_config = yaml.load(s_file, Loader=yaml.FullLoader)
# during a controller-1 upgrade, keystone is running
# on the controller UNIT IP, however the service catalog
# that was migrated from controller-0 still lists the
# floating controller IP. Keystone operations that use
# the AUTH URL will hit this service URL and fail,
# therefore we have to issue an Upgrade token for
# all Keystone operations during an Upgrade. This token
# will allow us to circumvent the service catalog entry, by
# providing a bypass endpoint.
keystone_upgrade_url = "http://{}:5000/{}".format(
'127.0.0.1',
system_config['openstack::keystone::params::api_version'])
admin_user_domain = system_config.get(
'platform::client::params::admin_user_domain')
if admin_user_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("platform::client::params::admin_user_domain key not found. "
"Using Default.")
admin_user_domain = DEFAULT_DOMAIN_NAME
admin_project_domain = system_config.get(
'platform::client::params::admin_project_domain')
if admin_project_domain is None:
# This value wasn't present in R2. So may be missing in upgrades from
# that release
LOG.info("platform::client::params::admin_project_domain key not "
"found. Using Default.")
admin_project_domain = DEFAULT_DOMAIN_NAME
admin_password = get_password_from_keyring("CGCS", "admin")
admin_username = system_config.get(
'platform::client::params::admin_username')
# the upgrade token command
keystone_upgrade_token = (
"openstack "
"--os-username {} "
"--os-password '{}' "
"--os-auth-url {} "
"--os-project-name admin "
"--os-user-domain-name {} "
"--os-project-domain-name {} "
"--os-interface internal "
"--os-identity-api-version 3 "
"token issue -c id -f value".format(
admin_username,
admin_password,
keystone_upgrade_url,
admin_user_domain,
admin_project_domain
))
config.update({
'openstack::keystone::upgrade::upgrade_token_file':
'/etc/keystone/upgrade_token',
'openstack::keystone::upgrade::url': keystone_upgrade_url
})
secure_config.update({
'openstack::keystone::upgrade::upgrade_token_cmd':
keystone_upgrade_token,
})
def get_upgrade_data(from_release,
system_config,
secure_config):
""" Retrieve required data from the from-release, update system_config
and secure_config with them.
This function is needed for adding new service account and endpoints
during upgrade.
"""
# Get the system hiera data from the from release
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
"hieradata")
system_file = os.path.join(from_hiera_path, "system.yaml")
with open(system_file, 'r') as s_file:
system_config_from_release = yaml.load(s_file, Loader=yaml.FullLoader)
# Get keystone region
keystone_region = system_config_from_release.get(
'keystone::endpoint::region')
system_config.update({
'platform::client::params::identity_region': keystone_region,
# Retrieve keystone::auth::region from the from-release for the new
# service.
# 'newservice::keystone::auth::region': keystone_region,
})
# Generate password for the new service
# password = sysinv_utils.generate_random_password(16)
secure_config.update({
# Generate and set the keystone::auth::password for the new service.
# 'newservice::keystone::auth::password': password,
})
def add_upgrade_entries_to_hiera_data(from_release):
""" Adds upgrade entries to the hiera data """
filename = 'static.yaml'
secure_filename = 'secure_static.yaml'
path = constants.HIERADATA_PERMDIR
# Get the hiera data for this release
filepath = os.path.join(path, filename)
with open(filepath, 'r') as c_file:
config = yaml.load(c_file, Loader=yaml.FullLoader)
secure_filepath = os.path.join(path, secure_filename)
with open(secure_filepath, 'r') as s_file:
secure_config = yaml.load(s_file, Loader=yaml.FullLoader)
# File for system.yaml
# This is needed for adding new service account and endpoints
# during upgrade.
system_filename = 'system.yaml'
system_filepath = os.path.join(path, system_filename)
# Get a token and update the config
get_upgrade_token(from_release, config, secure_config)
# Get required data from the from-release and add them in system.yaml.
# We don't carry system.yaml from the from-release.
# This is needed for adding new service account and endpoints
# during upgrade.
system_config = {}
get_upgrade_data(from_release, system_config, secure_config)
# Update the hiera data on disk
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, filepath)
except Exception:
LOG.exception("failed to write config file: %s" % filepath)
raise
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=secure_filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(secure_config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, secure_filepath)
except Exception:
LOG.exception("failed to write secure config: %s" % secure_filepath)
raise
# Add required hiera data into system.yaml.
# This is needed for adding new service account and endpoints
# during upgrade.
try:
fd, tmppath = tempfile.mkstemp(dir=path, prefix=system_filename,
text=True)
with open(tmppath, 'w') as f:
yaml.dump(system_config, f, default_flow_style=False)
os.close(fd)
os.rename(tmppath, system_filepath)
except Exception:
LOG.exception("failed to write system config: %s" % system_filepath)
raise
def create_simplex_runtime_config(filename):
""" Create any runtime parameters needed for simplex upgrades"""
config = {}
# Here is an example from a previous release...
# config.update({'nova::db::sync_api::cellv2_setup': False})
cutils.create_manifest_runtime_config(filename, config)
def apply_upgrade_manifest(controller_address):
"""Apply puppet upgrade manifest files."""
cmd = [
"/usr/local/bin/puppet-manifest-apply.sh",
constants.HIERADATA_PERMDIR,
str(controller_address),
sysinv_constants.CONTROLLER,
'upgrade'
]
logfile = "/tmp/apply_manifest.log"
try:
with open(logfile, "w") as flog:
subprocess.check_call(cmd, stdout=flog, stderr=flog)
except subprocess.CalledProcessError:
msg = "Failed to execute upgrade manifest"
print(msg)
raise Exception(msg)
def format_url_address(address):
"""Format the URL address according to RFC 2732"""
try:
addr = netaddr.IPAddress(address)
if addr.version == sysinv_constants.IPV6_FAMILY:
return "[%s]" % address
else:
return str(address)
except netaddr.AddrFormatError:
return address
def get_keystone_user_id(user_name):
""" Get the a keystone user id by name"""
conn = psycopg2.connect("dbname='keystone' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT user_id FROM local_user WHERE name='%s'" %
user_name)
user_id = cur.fetchone()
if user_id is not None:
return user_id['user_id']
else:
return user_id
def get_keystone_project_id(project_name):
""" Get the a keystone project id by name"""
conn = psycopg2.connect("dbname='keystone' user='postgres'")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("SELECT id FROM project WHERE name='%s'" %
project_name)
project_id = cur.fetchone()
if project_id is not None:
return project_id['id']
else:
return project_id
def get_postgres_bin():
""" Get the path to the postgres binaries"""
try:
return subprocess.check_output(
['pg_config', '--bindir']).decode().rstrip('\n')
except subprocess.CalledProcessError:
LOG.exception("Failed to get postgres bin directory.")
raise

View File

@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
envlist = flake8, pylint, py39
envlist = flake8, pylint
# Tox does not work if the path to the workdir is too long, so move it to /tmp
toxworkdir = /tmp/{env:USER}_cctox
stxdir = {toxinidir}/../../..

View File

@@ -110,7 +110,6 @@ function cleanup_sysinv {
sudo rm -f $SYSINV_ETC_GOENABLEDD/sysinv_goenabled_check.sh
sudo rm -f $SYSINV_CONF_DIR/policy.yaml
sudo rm -f $SYSINV_ETC_MOTDD/10-system
sudo rm -f $SYSINV_CONF_DIR/upgrades/delete_load.sh
sudo rm -f $STX_OCF_ROOT/resource.d/platform/sysinv-api
sudo rm -f $STX_OCF_ROOT/resource.d/platform/sysinv-conductor
sudo rm -f $STX_SYSCONFDIR/systemd/system/sysinv-api.service
@@ -251,7 +250,6 @@ function install_sysinv {
sudo install -d -m 755 $SYSINV_ETC_MOTDD
sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/motd-system $SYSINV_ETC_MOTDD/10-system
sudo install -d -m 755 $SYSINV_CONF_DIR/upgrades
sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/delete_load.sh $SYSINV_CONF_DIR/upgrades/delete_load.sh
sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-api $STX_OCF_ROOT/resource.d/platform/sysinv-api
sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-conductor $STX_OCF_ROOT/resource.d/platform/sysinv-conductor
sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-api.service $STX_SYSCONFDIR/systemd/system/sysinv-api.service

View File

@@ -65,7 +65,6 @@ from cgtsclient.v1 import label
from cgtsclient.v1 import license
from cgtsclient.v1 import lldp_agent
from cgtsclient.v1 import lldp_neighbour
from cgtsclient.v1 import load
from cgtsclient.v1 import network
from cgtsclient.v1 import network_addrpool
from cgtsclient.v1 import partition
@@ -145,7 +144,6 @@ class Client(object):
self.isensor = isensor.isensorManager(self.http_client)
self.isensorgroup = isensorgroup.isensorgroupManager(self.http_client)
self.pci_device = pci_device.PciDeviceManager(self.http_client)
self.load = load.LoadManager(self.http_client)
self.upgrade = upgrade.UpgradeManager(self.http_client)
self.network = network.NetworkManager(self.http_client)
self.network_addrpool = network_addrpool.NetworkAddrPoolManager(self.http_client)

View File

@@ -1,87 +0,0 @@
# Copyright (c) 2015-2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['software_version', 'compatible_version',
'required_patches']
IMPORT_ATTRIBUTES = ['path_to_iso', 'path_to_sig', 'active', 'local',
'inactive']
class Load(base.Resource):
def __repr__(self):
return "<loads %s>" % self._info
class LoadManager(base.Manager):
resource_class = Load
def list(self):
return self._list('/v1/loads/', "loads")
def get(self, load_id):
path = '/v1/loads/%s' % load_id
try:
return self._list(path)[0]
except IndexError:
return None
def _create_load(self, load, path):
if set(list(load.keys())) != set(CREATION_ATTRIBUTES):
raise exc.InvalidAttribute()
return self._create(path, load)
def create(self, load):
path = '/v1/loads/'
self._create_load(load, path)
def import_load_metadata(self, load):
path = '/v1/loads/import_load_metadata'
return self._create_load(load, path)
def import_load(self, **kwargs):
path = '/v1/loads/import_load'
local = kwargs.pop('local')
load_info = {}
for key, value in kwargs.items():
if key in IMPORT_ATTRIBUTES:
if isinstance(value, bool):
load_info[key] = str(value).lower()
else:
load_info[key] = value
else:
raise exc.InvalidAttribute(key)
if local:
return self._create(path, body=load_info)
data = {
'active': load_info.pop('active', 'false'),
'inactive': load_info.pop('inactive', 'false'),
}
json_data = self._upload_multipart(
path,
body=load_info,
data=data,
check_exceptions=True,
)
return self.resource_class(self, json_data)
def delete(self, load_id):
path = '/v1/loads/%s' % load_id
return self._delete(path)
def update(self, load_id, patch):
path = '/v1/loads/%s' % load_id
return self._update(path, patch)

View File

@@ -22,7 +22,6 @@ override_dh_install:
-d $(CURDIR)/debian/sysinv-wheels/usr/share/python-wheel
install -p -D -m 755 $(CURDIR)/etc/sysinv/motd-system $(CURDIR)/debian/tmp/etc/update-motd.d/10-system
install -p -D -m 755 $(CURDIR)/etc/sysinv/sysinv_goenabled_check.sh $(CURDIR)/debian/tmp/etc/goenabled.d/sysinv_goenabled_check.sh
install -p -D -m 700 $(CURDIR)/etc/sysinv/delete_load.sh $(CURDIR)/debian/tmp/etc/sysinv/upgrades/delete_load.sh
install -p -D -m 644 debian/tmpfiles.conf $(CURDIR)/debian/tmp/usr/lib/tmpfiles.d/sysinv.conf
install -p -D -m 700 $(CURDIR)/scripts/kube-cert-rotation.sh $(CURDIR)/debian/tmp/usr/bin/kube-cert-rotation.sh
install -p -D -m 700 $(CURDIR)/scripts/ipsec-cert-renew.sh $(CURDIR)/debian/tmp/usr/bin/ipsec-cert-renew.sh

View File

@@ -11,7 +11,6 @@ scripts/query_pci_id usr/bin
scripts/ceph_k8s_update_monitors.sh usr/bin
usr/lib/python*/dist-packages/*
etc/goenabled.d/sysinv_goenabled_check.sh
etc/sysinv/upgrades/delete_load.sh
etc/update-motd.d/10-system
usr/bin/cert-alarm
usr/bin/cert-mon

View File

@@ -1,30 +0,0 @@
#!/bin/bash
# Copyright (c) 2015-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script removes a load from a controller.
# The load version is passed in as the first variable.
: ${1?"Usage $0 VERSION"}
VERSION=$1
FEED_DIR=/var/www/pages/feed/rel-$VERSION
PRESTAGE_DIR=/opt/platform/deploy/$VERSION
PLAYBOOKS_DIR=/opt/dc-vault/playbooks/$VERSION
rm -f /var/pxeboot/pxelinux.cfg.files/*-$VERSION
rm -rf /var/pxeboot/rel-$VERSION
rm -f /etc/pxeboot-update-$VERSION.sh
rm -rf $FEED_DIR
if [ -d $PRESTAGE_DIR ]; then
rm -rf $PRESTAGE_DIR
fi
if [ -d $PLAYBOOKS_DIR ]; then
rm -rf $PLAYBOOKS_DIR
fi

View File

@@ -2136,71 +2136,6 @@ class AgentManager(service.PeriodicService):
# Update local puppet cache anyway to be consistent.
self._update_local_puppet_cache(hieradata_path)
def delete_load(self, context, host_uuid, software_version):
"""Remove the specified load
:param context: request context
:param host_uuid: the host uuid
:param software_version: the version of the load to remove
"""
LOG.debug("AgentManager.delete_load: %s" % (software_version))
if self._ihost_uuid and self._ihost_uuid == host_uuid:
LOG.info("AgentManager removing load %s" % software_version)
cleanup_script = constants.DELETE_LOAD_SCRIPT
if os.path.isfile(cleanup_script):
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call( # pylint: disable=not-callable
[cleanup_script, software_version],
stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
LOG.error("Failure during cleanup script")
else:
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.finalize_delete_load(context, software_version)
else:
LOG.error("Cleanup script %s does not exist." % cleanup_script)
return
def create_simplex_backup(self, context, software_upgrade):
"""Creates the upgrade metadata and creates the system backup
:param context: request context.
:param software_upgrade: software_upgrade object
:returns: none
"""
try:
from controllerconfig.upgrades import \
management as upgrades_management
except ImportError:
LOG.error("Attempt to import during create_simplex_backup failed")
return
if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX:
LOG.error("create_simplex_backup called for non-simplex system")
return
LOG.info("Starting simplex upgrade data collection")
success = True
try:
upgrades_management.create_simplex_backup(software_upgrade)
except Exception as ex:
LOG.info("Exception during simplex upgrade data collection")
LOG.exception(ex)
success = False
else:
LOG.info("Simplex upgrade data collection complete")
rpcapi = conductor_rpcapi.ConductorAPI(
topic=conductor_rpcapi.MANAGER_TOPIC)
rpcapi.complete_simplex_backup(context, success=success)
return
def device_update_image(self, context, host_uuid, pci_addr, filename, transaction_id,
retimer_included):
"""Write the device image to the device at the specified address.

View File

@@ -129,40 +129,6 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
config_dict=config_dict))
return retval
def delete_load(self, context, host_uuid, software_version):
"""Asynchronously, have the agent remove the specified load
:param context: request context.
:param host_uuid: the host uuid
:param software_version: the version of the load to remove
:returns: none ... uses asynchronous cast().
"""
# fanout / broadcast message to all inventory agents
LOG.debug("AgentApi.delete_load: fanout_cast: sending "
"delete load to agent: (%s) (%s) " %
(host_uuid, software_version))
retval = self.fanout_cast(
context, self.make_msg(
'delete_load',
host_uuid=host_uuid,
software_version=software_version))
return retval
def create_simplex_backup(self, context, software_upgrade):
"""Asynchronously, have the agent create the simplex backup data
:param context: request context.
:param software_upgrade: software_upgrade object
:returns: none
"""
retval = self.fanout_cast(context,
self.make_msg(
'create_simplex_backup',
software_upgrade=software_upgrade))
return retval
def apply_tpm_config(self, context, tpm_context):
"""Asynchronously, have the agent apply the tpm config

View File

@@ -55,7 +55,6 @@ from sysinv.api.controllers.v1 import interface_network
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import lldp_agent
from sysinv.api.controllers.v1 import lldp_neighbour
from sysinv.api.controllers.v1 import load
from sysinv.api.controllers.v1 import lvg
from sysinv.api.controllers.v1 import license
from sysinv.api.controllers.v1 import memory
@@ -656,12 +655,6 @@ class V1(base.APIBase):
bookmark=True)
]
v1.loads = [link.Link.make_link('self', pecan.request.host_url,
'loads', ''),
link.Link.make_link('bookmark', pecan.request.host_url,
'loads', '', bookmark=True)
]
v1.pci_devices = [link.Link.make_link('self',
pecan.request.host_url,
'pci_devices', ''),
@@ -1003,7 +996,6 @@ class Controller(rest.RestController):
certificate = certificate.CertificateController()
isensors = sensor.SensorController()
isensorgroups = sensorgroup.SensorGroupController()
loads = load.LoadController()
pci_devices = pci_device.PCIDeviceController()
upgrade = upgrade.UpgradeController()
networks = network.NetworkController()

View File

@@ -41,7 +41,6 @@ import wsmeext.pecan as wsme_pecan
from wsme import types as wtypes
from fm_api import constants as fm_constants
from fm_api import fm_api
from pecan import expose
from pecan import rest
@@ -551,12 +550,6 @@ class Host(base.APIBase):
apparmor = wtypes.text
"Enable/Disable apparmor state"
software_load = wtypes.text
"The current load software version"
target_load = wtypes.text
"The target load software version"
install_state = wtypes.text
"Represent the install state"
@@ -615,7 +608,7 @@
'created_at', 'updated_at', 'boot_device',
'rootfs_device', 'hw_settle', 'install_output',
'console', 'tboot', 'vsc_controllers', 'ttys_dcd',
'software_load', 'target_load', 'peers', 'peer_id',
'peers', 'peer_id',
'install_state', 'install_state_info',
'iscsi_initiator_name', 'device_image_update',
'reboot_needed', 'inv_state', 'clock_synchronization',
@@ -2829,16 +2822,11 @@ class HostController(rest.RestController):
except exception.NotFound:
return
loads = pecan.request.dbapi.load_get_list()
target_load = cutils.get_imported_load(loads)
if personality == constants.STORAGE:
if hostname == constants.STORAGE_0_HOSTNAME:
LOG.warn("Allow storage-0 add during upgrade")
else:
LOG.info("Adding storage, ensure controllers upgraded")
self._check_personality_load(constants.CONTROLLER,
target_load)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Host, six.text_type, body=six.text_type)
@@ -2868,19 +2856,6 @@
_("All worker and storage hosts not running a Ceph monitor "
"must be locked and offline before this operation can proceed"))
# TODO(heitormatsui): used only by legacy upgrade endpoint, remove
def _check_personality_load(self, personality, load):
hosts = pecan.request.dbapi.ihost_get_by_personality(personality)
for host in hosts:
host_upgrade = objects.host_upgrade.get_by_host_id(
pecan.request.context, host.id)
if host_upgrade.target_load != load.id or \
host_upgrade.software_load != load.id:
raise wsme.exc.ClientSideError(
_("All %s hosts must be using load %s before this "
"operation can proceed")
% (personality, load.software_version))
def _check_max_cpu_mhz_configured(self, host):
cpu_utils.check_power_manager(host.ihost_patch.get('uuid'))
@@ -2923,105 +2898,6 @@
raise wsme.exc.ClientSideError(
_("Host does not support configuration of Max CPU Frequency."))
# TODO(heitormatsui): used only by legacy upgrade endpoint, remove
def _check_host_load(self, hostname, load):
host = pecan.request.dbapi.ihost_get_by_hostname(hostname)
host_upgrade = objects.host_upgrade.get_by_host_id(
pecan.request.context, host.id)
if host_upgrade.target_load != load.id or \
host_upgrade.software_load != load.id:
raise wsme.exc.ClientSideError(
_("%s must be using load %s before this operation can proceed")
% (hostname, load.software_version))
# TODO(heitormatsui): used only by legacy upgrade endpoint, remove
def _check_storage_downgrade(self, load):
hosts = pecan.request.dbapi.ihost_get_by_personality(constants.STORAGE)
# Ensure all storage nodes are downgraded before storage-0
for host in hosts:
if host.hostname != constants.STORAGE_0_HOSTNAME:
host_upgrade = objects.host_upgrade.get_by_host_id(
pecan.request.context, host.id)
if host_upgrade.target_load != load.id or \
host_upgrade.software_load != load.id:
raise wsme.exc.ClientSideError(
_("All other %s hosts must be using load %s before "
"this operation can proceed")
% (constants.STORAGE, load.software_version))
# TODO(heitormatsui): used only by legacy upgrade endpoint, remove
def _update_load(self, uuid, body, new_target_load):
force = body.get('force', False) is True
rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid)
host_upgrade = objects.host_upgrade.get_by_host_id(
pecan.request.context, rpc_ihost.id)
if host_upgrade.target_load == new_target_load.id:
raise wsme.exc.ClientSideError(
_("%s already targeted to install load %s") %
(rpc_ihost.hostname, new_target_load.software_version))
if rpc_ihost.administrative != constants.ADMIN_LOCKED:
raise wsme.exc.ClientSideError(
_("The host must be locked before performing this operation"))
elif rpc_ihost.invprovision not in [constants.UPGRADING, constants.PROVISIONED]:
raise wsme.exc.ClientSideError(_("The host must be provisioned "
"before performing this operation"))
elif not force and rpc_ihost.availability != "online":
raise wsme.exc.ClientSideError(
_("The host must be online to perform this operation"))
if rpc_ihost.personality == constants.STORAGE:
istors = pecan.request.dbapi.istor_get_by_ihost(rpc_ihost.id)
for stor in istors:
istor_obj = objects.storage.get_by_uuid(pecan.request.context,
stor.uuid)
self._ceph.remove_osd_key(istor_obj['osdid'])
if utils.get_system_mode() != constants.SYSTEM_MODE_SIMPLEX:
pecan.request.rpcapi.upgrade_ihost(pecan.request.context,
rpc_ihost,
new_target_load)
host_upgrade.target_load = new_target_load.id
host_upgrade.save()
# There may be alarms, clear them
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST,
rpc_ihost.hostname)
fm_api_obj = fm_api.FaultAPIs()
fm_api_obj.clear_fault(
fm_constants.FM_ALARM_ID_HOST_VERSION_MISMATCH,
entity_instance_id)
pecan.request.dbapi.ihost_update(
rpc_ihost.uuid, {'inv_state': constants.INV_STATE_REINSTALLING})
if rpc_ihost.availability == "online":
new_ihost_mtc = rpc_ihost.as_dict()
new_ihost_mtc.update({'operation': 'modify'})
new_ihost_mtc.update({'action': constants.REINSTALL_ACTION})
new_ihost_mtc = cutils.removekeys_nonmtce(new_ihost_mtc)
new_ihost_mtc['mgmt_ip'] = utils.get_mgmt_ip(rpc_ihost.hostname)
mtc_response = mtce_api.host_modify(
self._api_token, self._mtc_address, self._mtc_port,
new_ihost_mtc, constants.MTC_ADD_TIMEOUT_IN_SECS)
if mtc_response is None:
mtc_response = {'status': 'fail',
'reason': 'no response',
'action': 'retry'}
if mtc_response['status'] != 'pass':
# Report mtc error
raise wsme.exc.ClientSideError(_("Maintenance has returned with "
"a status of %s, reason: %s, recommended action: %s") % (
mtc_response.get('status'),
mtc_response.get('reason'),
mtc_response.get('action')))
@staticmethod
def _validate_ip_in_mgmt_network(ip):
network = pecan.request.dbapi.network_get_by_type(

View File

@@ -18,7 +18,6 @@
# Copyright (c) 2015-2021 Wind River Systems, Inc.
#
import json
import os
import pecan
@@ -26,7 +25,6 @@ from pecan import rest
import psutil
import six
import shutil
import socket
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
@@ -34,19 +32,14 @@ import wsmeext.pecan as wsme_pecan
from eventlet.green import subprocess
from oslo_log import log
from pecan import expose
from pecan import request
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc import common
LOG = log.getLogger(__name__)
@@ -154,29 +147,6 @@ class LoadController(rest.RestController):
def __init__(self):
self._api_token = None
def _get_loads_collection(self, marker, limit, sort_key, sort_dir,
expand=False, resource_url=None):
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.load.get_by_uuid(
pecan.request.context,
marker)
loads = pecan.request.dbapi.load_get_list(
limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return LoadCollection.convert_with_links(loads, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(LoadCollection, types.uuid, int, wtypes.text,
wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
@@ -205,45 +175,6 @@ class LoadController(rest.RestController):
raise wsme.exc.ClientSideError(
_("Can not set state during create"))
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Load, body=Load)
def post(self, load):
"""Create a new Load."""
# This method is only used to populate the inital load for the system
# This is invoked during config_controller
# Loads after the first are added via import
# TODO(ShawnLi): This will be removed when we remove the Load table
loads = pecan.request.dbapi.load_get_list()
if loads:
raise wsme.exc.ClientSideError(_("Aborting. Active load exits."))
patch = load.as_dict()
self._new_load_semantic_checks(patch)
patch['state'] = constants.ACTIVE_LOAD_STATE
try:
new_load = pecan.request.dbapi.load_create(patch)
# Controller-0 is added to the database before we add this load
# so we must add a host_upgrade entry for (at least) controller-0
hosts = pecan.request.dbapi.ihost_get_list()
for host in hosts:
values = dict()
values['forihostid'] = host.id
values['software_load'] = new_load.id
values['target_load'] = new_load.id
pecan.request.dbapi.host_upgrade_create(host.id,
new_load.software_version,
values)
except exception.SysinvException as e:
LOG.exception(e)
raise wsme.exc.ClientSideError(_("Invalid data"))
return load.convert_with_links(new_load)
@staticmethod
def _upload_file(file_item):
try:
@@ -304,121 +235,7 @@
raise NotImplementedError("This API is deprecated.")
def _import_load(self):
"""Create a new load from iso/sig files"""
LOG.info("Load import request received.")
# Only import loads on controller-0. This is required because the load
# is only installed locally and we will be booting controller-1 from
# this load during the upgrade.
if socket.gethostname() != constants.CONTROLLER_0_HOSTNAME:
raise wsme.exc.ClientSideError(_("A load can only be imported when"
" %s is active.")
% constants.CONTROLLER_0_HOSTNAME)
req_content = dict()
load_files = dict()
is_multiform_req = True
import_type = None
# Request coming from dc-api-proxy is not multiform, file transfer is handled
# by dc-api-proxy, the request contains only the vault file location
if request.content_type == "application/json":
req_content = dict(json.loads(request.body))
is_multiform_req = False
else:
req_content = dict(request.POST.items())
if not req_content:
raise wsme.exc.ClientSideError(_("Empty request."))
active = req_content.get('active')
inactive = req_content.get('inactive')
if active == 'true' and inactive == 'true':
raise wsme.exc.ClientSideError(_("Invalid use of --active and"
" --inactive arguments at"
" the same time."))
if active == 'true' or inactive == 'true':
isystem = pecan.request.dbapi.isystem_get_one()
if isystem.distributed_cloud_role == \
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
LOG.info("System Controller allow start import_load")
if active == 'true':
import_type = constants.ACTIVE_LOAD_IMPORT
elif inactive == 'true':
import_type = constants.INACTIVE_LOAD_IMPORT
self._check_existing_loads(import_type=import_type)
try:
for file in constants.IMPORT_LOAD_FILES:
if file not in req_content:
raise wsme.exc.ClientSideError(_("Missing required file for %s")
% file)
if not is_multiform_req:
load_files.update({file: req_content[file]})
else:
if file not in request.POST:
raise wsme.exc.ClientSideError(_("Missing required file for %s")
% file)
file_item = request.POST[file]
if not file_item.filename:
raise wsme.exc.ClientSideError(_("No %s file uploaded") % file)
file_location = self._upload_file(file_item)
if file_location:
load_files.update({file: file_location})
except subprocess.CalledProcessError as ex:
raise wsme.exc.ClientSideError(str(ex))
except Exception as ex:
raise wsme.exc.ClientSideError(_("Failed to save file %s to disk. Error: %s"
" Please check sysinv logs for"
" details." % (file_item.filename, str(ex))))
LOG.info("Load files: %s saved to disk." % load_files)
exception_occured = False
try:
new_load = pecan.request.rpcapi.start_import_load(
pecan.request.context,
load_files[constants.LOAD_ISO],
load_files[constants.LOAD_SIGNATURE],
import_type,
)
if new_load is None:
raise wsme.exc.ClientSideError(_("Error importing load. Load not found"))
if import_type != constants.ACTIVE_LOAD_IMPORT:
# Signature and upgrade path checks have passed, make rpc call
# to the conductor to run import script in the background.
pecan.request.rpcapi.import_load(
pecan.request.context,
load_files[constants.LOAD_ISO],
new_load,
import_type,
)
except (rpc.common.Timeout, common.RemoteError) as e:
exception_occured = True
error = e.value if hasattr(e, 'value') else str(e)
raise wsme.exc.ClientSideError(error)
except Exception:
exception_occured = True
raise
finally:
if exception_occured and os.path.isdir(constants.LOAD_FILES_STAGING_DIR):
shutil.rmtree(constants.LOAD_FILES_STAGING_DIR)
load_data = new_load.as_dict()
LOG.info("Load import request validated, returning new load data: %s"
% load_data)
return load_data
raise NotImplementedError("This API is deprecated.")
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Load, body=Load)
@ -427,51 +244,6 @@ class LoadController(rest.RestController):
raise NotImplementedError("This API is deprecated.")
def _check_existing_loads(self, import_type=None):
# Only the following are allowed at one time:
# - the active load
# - an imported load regardless of its current state
# - an inactive load.
loads = pecan.request.dbapi.load_get_list()
if len(loads) <= constants.IMPORTED_LOAD_MAX_COUNT:
return
for load in loads:
if load.state == constants.ACTIVE_LOAD_STATE:
continue
load_state = load.state
if load_state == constants.ERROR_LOAD_STATE:
err_msg = _("Please remove the load in error state "
"before importing a new one.")
elif load_state == constants.DELETING_LOAD_STATE:
err_msg = _("Please wait for the current load delete "
"to complete before importing a new one.")
elif load_state == constants.INACTIVE_LOAD_STATE:
if import_type != constants.INACTIVE_LOAD_IMPORT:
continue
err_msg = _("An inactived load already exists. "
"Please, remove the inactive load "
"before trying to import a new one.")
elif import_type == constants.ACTIVE_LOAD_IMPORT or \
import_type == constants.INACTIVE_LOAD_IMPORT:
continue
else:
# Already imported or being imported
err_msg = _("Max number of loads (2) reached. Please "
"remove the old or unused load before "
"importing a new one.")
raise wsme.exc.ClientSideError(err_msg)
@cutils.synchronized(LOCK_NAME)
@wsme.validate(six.text_type, [LoadPatchType])
@wsme_pecan.wsexpose(Load, six.text_type,

View File

@ -21,11 +21,9 @@ from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv.common import constants
from sysinv import objects
LOG = log.getLogger(__name__)
@ -69,7 +67,7 @@ class Upgrade(base.APIBase):
"The load version that software upgrading to"
def __init__(self, **kwargs):
self.fields = list(objects.software_upgrade.fields.keys())
self.fields = list()
for k in self.fields:
if not hasattr(self, k):
continue
@ -128,24 +126,6 @@ class UpgradeController(rest.RestController):
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _get_upgrade_collection(self, marker=None, limit=None,
sort_key=None, sort_dir=None,
expand=False, resource_url=None):
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.software_upgrade.get_by_uuid(
pecan.request.context, marker)
upgrades = pecan.request.dbapi.software_upgrade_get_list(
limit=limit, marker=marker_obj,
sort_key=sort_key, sort_dir=sort_dir)
return UpgradeCollection.convert_with_links(
upgrades, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
@staticmethod
def check_restore_in_progress():
try:

View File

@ -1,93 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Sysinv upgrade utilities.
"""
import sys
from oslo_config import cfg
from oslo_log import log
from sysinv._i18n import _
from sysinv.common import constants
from sysinv.common import service
from sysinv.common import utils
from sysinv.db import api as dbapi
from tsconfig.tsconfig import system_mode
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# TODO(bqian): remove the code below updating the host_upgrade and
# software_upgrade tables after the USM transition completes.
def update_controller_state(skip_load_update):
mydbapi = dbapi.get_instance()
LOG.info("Updating upgrades data in sysinv database")
hostname = constants.CONTROLLER_1_HOSTNAME
if system_mode == constants.SYSTEM_MODE_SIMPLEX:
hostname = constants.CONTROLLER_0_HOSTNAME
host = mydbapi.ihost_get_by_hostname(hostname)
# Update the states for the controller being upgraded
update_values = {'administrative': constants.ADMIN_UNLOCKED,
'operational': constants.OPERATIONAL_ENABLED,
'availability': constants.AVAILABILITY_AVAILABLE}
mydbapi.ihost_update(host.uuid, update_values)
if skip_load_update:
return
# Update the from and to load for the controller being upgraded
loads = mydbapi.load_get_list()
target_load = utils.get_imported_load(loads)
host_upgrade = mydbapi.host_upgrade_get_by_host(host.id)
update_values = {'software_load': target_load.id,
'target_load': target_load.id}
mydbapi.host_upgrade_update(host_upgrade.id, update_values)
# Update the upgrade state
upgrade = mydbapi.software_upgrade_get_one()
upgrade_update = {'state': constants.UPGRADE_UPGRADING_CONTROLLERS}
mydbapi.software_upgrade_update(upgrade.uuid, upgrade_update)
def add_action_parsers(subparsers):
for action in ['update_controller_state']:
parser = subparsers.add_parser(action)
parser.set_defaults(func=globals()[action])
CONF.register_cli_opt(
cfg.SubCommandOpt('action',
title='Action options',
help='Available upgrade options',
handler=add_action_parsers))
def main():
argv = sys.argv[:]
skip_load_update = False
for arg in sys.argv:
if arg == '--skip_load_update':
argv.remove(arg)
skip_load_update = True
# Parse config file and command line options, then start logging
service.prepare_service(argv)
if CONF.action.name in ['update_controller_state']:
msg = (_("Called '%(action)s'") %
{"action": CONF.action.name})
LOG.info(msg)
CONF.action.func(skip_load_update)
else:
LOG.error(_("Unknown action: %(action)") % {"action":
CONF.action.name})
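The removed script registers its action through oslo.config's SubCommandOpt: each action name maps to a subparser whose default func is the module-level function of the same name, and dispatch happens via CONF.action.func. A standalone sketch of the same pattern with a hypothetical action name:
import sys

from oslo_config import cfg

CONF = cfg.CONF


def do_migrate():
    print("migrating")


def add_action_parsers(subparsers):
    # One subparser per action; the handler function is looked up by name.
    for action in ['do_migrate']:
        parser = subparsers.add_parser(action)
        parser.set_defaults(func=globals()[action])


CONF.register_cli_opt(
    cfg.SubCommandOpt('action',
                      title='Action options',
                      handler=add_action_parsers))


if __name__ == '__main__':
    CONF(sys.argv[1:])   # e.g. python script.py do_migrate
    CONF.action.func()   # dispatch to the selected action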

View File

@ -960,7 +960,6 @@ IMPORTED_LOAD_STATES = [
INACTIVE_LOAD_STATE,
]
DELETE_LOAD_SCRIPT = '/etc/sysinv/upgrades/delete_load.sh'
IMPORTED_LOAD_MAX_COUNT = 1
LOAD_ISO = 'path_to_iso'
LOAD_SIGNATURE = 'path_to_sig'

View File

@ -23,8 +23,6 @@ from sysinv.cert_alarm.audit import CertAlarmAudit
from sysinv.api.controllers.v1 import patch_api
from sysinv.api.controllers.v1 import vim_api
import tsconfig.tsconfig as tsc
import cgcs_patch.constants as patch_constants
LOG = log.getLogger(__name__)
@ -774,36 +772,6 @@ class Health(object):
output += msg
health_ok = health_ok and success
loads = self._dbapi.load_get_list()
try:
imported_load = utils.get_imported_load(loads)
except Exception as e:
LOG.exception(e)
output += _('No imported load found. Unable to test further\n')
return health_ok, output
upgrade_version = imported_load.software_version
if imported_load.required_patches:
patches = imported_load.required_patches.split('\n')
else:
patches = []
success, missing_patches = \
self._check_required_patches_are_applied(patches)
output += _('Required patches are applied: [%s]\n') \
% (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
if not success:
output += _('Patches not applied: %s\n') \
% ', '.join(missing_patches)
health_ok = health_ok and success
success = self._check_license(upgrade_version)
output += _('License valid for upgrade: [%s]\n') \
% (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
health_ok = health_ok and success
success, message = self._check_bootdevice()
if not success:
# Make this an invisible check for the bootdevice and rootfs device.
@ -856,15 +824,6 @@ class Health(object):
% (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
health_ok = health_ok and success
# TODO (luisbonatti): remove when CentOS to Debian upgrade is deprecated
if upgrade_version == tsc.SW_VERSION_22_12:
msg, success = self._check_free_space_for_upgrade()
output += \
_('Disk space requirement: [%s]\n') \
% (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
if not success:
output += msg
health_ok = health_ok and success
return health_ok, output
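Each of the removed probes follows the accumulator idiom used throughout Health: append a pass/fail line to output, AND the result into health_ok, and never short-circuit, so the report always lists every check. A minimal generic sketch of that idiom:
def run_checks(checks):
    # checks: iterable of (name, callable) pairs where the callable
    # returns (success, detail); failures are reported, not fatal.
    health_ok, output = True, ''
    for name, check in checks:
        success, detail = check()
        output += '%s: [%s]\n' % (name, 'OK' if success else 'Fail')
        if not success and detail:
            output += detail + '\n'
        health_ok = health_ok and success
    return health_ok, output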

File diff suppressed because it is too large

View File

@ -1285,65 +1285,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
self.make_msg('update_apparmor_config',
ihost_uuid=ihost_uuid))
def start_import_load(self, context, path_to_iso, path_to_sig,
import_type=None, timeout=180):
"""Synchronously, mount the ISO and validate the load for import
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param path_to_sig: the file path of the iso's detached signature on
this host
:param import_type: the type of the import, the possible values are
constants.ACTIVE_LOAD_IMPORT for active load or
constants.INACTIVE_LOAD_IMPORT for inactive load.
:param timeout: rpc call timeout in seconds
:returns: the newly created load object.
"""
return self.call(context,
self.make_msg('start_import_load',
path_to_iso=path_to_iso,
path_to_sig=path_to_sig,
import_type=import_type),
timeout=timeout)
def import_load(self, context, path_to_iso, new_load,
import_type=None):
"""Asynchronously, import a load and add it to the database
:param context: request context.
:param path_to_iso: the file path of the iso on this host
:param new_load: the load object
:param import_type: the type of the import (active or inactive)
:returns: none.
"""
return self.cast(context,
self.make_msg('import_load',
path_to_iso=path_to_iso,
new_load=new_load,
import_type=import_type))
def delete_load(self, context, load_id):
"""Asynchronously, cleanup a load from both controllers
:param context: request context.
:param load_id: id of load to be deleted
:returns: none.
"""
return self.cast(context,
self.make_msg('delete_load',
load_id=load_id))
def finalize_delete_load(self, context, sw_version):
"""Asynchronously, delete the load from the database
:param context: request context.
:param sw_version: software version of load to be deleted
:returns: none.
"""
return self.cast(context,
self.make_msg('finalize_delete_load',
sw_version=sw_version))
def load_update_by_host(self, context, ihost_id, version):
"""Update the host_upgrade table with the running SW_VERSION
@ -1407,15 +1348,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
return self.call(context, self.make_msg('abort_upgrade',
upgrade=upgrade))
def complete_simplex_backup(self, context, success):
"""Asynchronously, complete the simplex upgrade start process
:param context: request context.
:param success: If the create_simplex_backup call completed
"""
return self.cast(context, self.make_msg('complete_simplex_backup',
success=success))
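The removed proxy methods split cleanly between the two oslo RPC invocation styles visible above: call() blocks for the conductor's reply (with an optional timeout), while cast() enqueues the message and returns immediately. A sketch with hypothetical method names:
from sysinv.openstack.common.rpc import proxy


class ExampleAPI(proxy.RpcProxy):
    def validate_thing(self, context, thing, timeout=180):
        # call(): synchronous; blocks until the conductor replies or
        # rpc.common.Timeout is raised.
        return self.call(context,
                         self.make_msg('validate_thing', thing=thing),
                         timeout=timeout)

    def process_thing(self, context, thing):
        # cast(): asynchronous; returns None immediately, so failures
        # surface only on the conductor side.
        return self.cast(context,
                         self.make_msg('process_thing', thing=thing))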
def get_system_health(self, context, force=False, upgrade=False,
kube_upgrade=False, kube_rootca_update=False,
alarm_ignore_list=None):

View File

@ -127,7 +127,7 @@ class Connection(object):
"""
@abc.abstractmethod
def ihost_create(self, values, software_load=None):
def ihost_create(self, values):
"""Create a new ihost.
:param values: A dict containing several items used to identify
@ -144,7 +144,6 @@ class Connection(object):
'availability': 'offduty',
'extra': { ... },
}
:param: software_load. The load software_version.
:returns: A ihost.
"""
@ -3680,75 +3679,6 @@ class Connection(object):
:param sensorgroup_id: id (PK) of the sensorgroup.
"""
@abc.abstractmethod
def load_create(self, values):
"""Create a new Load.
:param values: A dict containing several items used to identify
and track the load
{
'software_version': '16.10',
'compatible_version': '15.10',
'required_patches': '001,002,003',
}
:returns: A load.
"""
@abc.abstractmethod
def load_get(self, load):
"""Returns a load.
:param load: The id or uuid of a load.
:returns: A load.
"""
@abc.abstractmethod
def load_get_by_version(self, version):
"""Returns the load with the specified version.
:param version: The software version of a load.
:returns: A load.
"""
@abc.abstractmethod
def load_get_list(self, limit=None, marker=None, sort_key=None,
sort_dir=None):
"""Return a list of loads.
:param limit: Maximum number of loads to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def load_update(self, load, values):
"""Update properties of a load.
:param load: The id or uuid of a load.
:param values: Dict of values to update.
May be a partial list.
:returns: A load.
"""
@abc.abstractmethod
def load_destroy(self, load):
"""Destroy a load.
:param load: The id or uuid of a load.
"""
@abc.abstractmethod
def set_upgrade_loads_state(self, upgrade, to_state, from_state):
"""Change the states of the loads in an upgrade.
:param upgrade: An upgrade object.
:param to_state: The state of the 'to' load.
:param from_state: The state of the 'from' load.
"""
@abc.abstractmethod
def fpga_device_create(self, hostid, values):
"""Create a new FPGA device for a host.
@ -3883,123 +3813,6 @@ class Connection(object):
:param deviceid: The id or uuid of a pci device.
"""
@abc.abstractmethod
def software_upgrade_create(self, values):
"""Create a new software_upgrade entry
:param values: A dict containing several items used to identify
and track the entry, and several dicts which are passed
into the Drivers when managing this node. For example:
{
'uuid': uuidutils.generate_uuid(),
'state': one of 'start', 'migration_complete',
'activated', 'complete',
'from_load': '15.10',
'to_load' : '16.10',
}
:returns: A software_upgrade record.
"""
@abc.abstractmethod
def software_upgrade_get(self, id):
"""Return a software_upgrade entry for a given id
:param id: The id or uuid of a software_upgrade entry
:returns: a software_upgrade entry
"""
@abc.abstractmethod
def software_upgrade_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of software_upgrade entries.
:param limit: Maximum number of software_upgrade entries to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def software_upgrade_get_one(self):
"""Return exactly one software_upgrade.
:returns: A software_upgrade.
"""
@abc.abstractmethod
def software_upgrade_update(self, uuid, values):
"""Update properties of a software_upgrade.
:param uuid: The uuid of a software_upgrade entry.
:param values: Dict of values to update.
{
'state': 'complete',
}
:returns: A software_upgrade entry.
"""
@abc.abstractmethod
def software_upgrade_destroy(self, id):
"""Destroy a software_upgrade entry.
:param id: The id or uuid of a software_upgrade entry.
"""
@abc.abstractmethod
def host_upgrade_create(self, host_id, values):
"""Create host_upgrade entry.
:param host_id: id of the host.
:param values: Dict of values to update.
{
'software_load': 'load.id',
}
:returns: a host_upgrade
"""
@abc.abstractmethod
def host_upgrade_get(self, id):
"""Return a host_upgrade entry for a given host
:param id: id or uuid of the host_upgrade entry.
:returns: a host_upgrade
"""
@abc.abstractmethod
def host_upgrade_get_by_host(self, host_id):
"""Return a host_upgrade entry for a given host
:param host_id: id of the host entry.
:returns: a host_upgrade
"""
@abc.abstractmethod
def host_upgrade_get_list(self, limit=None, marker=None, sort_key=None,
sort_dir=None):
"""Return a list of host_upgrade entries.
:param limit: Maximum number of host_upgrade to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def host_upgrade_update(self, host_id, values):
"""Update properties of a host_upgrade entry.
:param host_id: The id of a host entry.
:param values: Dict of values to update.
{
'software_load': 'load.id'
}
:returns: A host_upgrade entry.
"""
@abc.abstractmethod
def service_parameter_create(self, values):
"""Create a new service_parameter entry

View File

@ -282,12 +282,7 @@ def add_filter_by_many_identities(query, model, values):
def add_host_options(query):
return query. \
options(joinedload(models.ihost.system)). \
options(joinedload(models.ihost.host_upgrade).
joinedload(models.HostUpgrade.load_software)). \
options(joinedload(models.ihost.host_upgrade).
joinedload(models.HostUpgrade.load_target))
return query.options(joinedload(models.ihost.system))
def add_inode_filter_by_ihost(query, value):
@ -1351,7 +1346,7 @@ class Connection(api.Connection):
raise exception.ServerNotFound(server=server)
@db_objects.objectify(objects.host)
def ihost_create(self, values, software_load=None):
def ihost_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
host = models.ihost()
@ -1362,7 +1357,6 @@ class Connection(api.Connection):
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.NodeAlreadyExists(uuid=values['uuid'])
self._host_upgrade_create(host.id, software_load)
self._kube_host_upgrade_create(host.id)
return self._host_get(values['uuid'])
@ -6986,242 +6980,6 @@ class Connection(api.Connection):
def isensorgroup_discrete_destroy(self, sensorgroup_id):
return self._isensorgroup_destroy(models.SensorGroupsDiscrete, sensorgroup_id)
@db_objects.objectify(objects.load)
def load_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
load = models.Load()
load.update(values)
with _session_for_write() as session:
try:
session.add(load)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.LoadAlreadyExists(uuid=values['uuid'])
return load
@db_objects.objectify(objects.load)
def load_get(self, load):
# load may be passed as a string; it may be a uuid or an int.
query = model_query(models.Load)
query = add_identity_filter(query, load)
try:
result = query.one()
except NoResultFound:
raise exception.LoadNotFound(load=load)
return result
@db_objects.objectify(objects.load)
def load_get_by_version(self, version):
query = model_query(models.Load)
query = query.filter_by(software_version=version)
try:
result = query.one()
except NoResultFound:
raise exception.LoadNotFound(load=version)
return result
@db_objects.objectify(objects.load)
def load_get_list(self, limit=None, marker=None, sort_key=None,
sort_dir=None):
query = model_query(models.Load)
return _paginate_query(models.Load, limit, marker,
sort_key, sort_dir, query)
@db_objects.objectify(objects.load)
def load_update(self, load, values):
with _session_for_write() as session:
query = model_query(models.Load, session=session)
query = add_identity_filter(query, load)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.LoadNotFound(load=load)
return query.one()
def load_destroy(self, load):
with _session_for_write() as session:
query = model_query(models.Load, session=session)
query = add_identity_filter(query, load)
try:
query.one()
except NoResultFound:
raise exception.LoadNotFound(load=load)
query.delete()
def set_upgrade_loads_state(self, upgrade, to_state, from_state):
self.load_update(upgrade.from_load, {'state': from_state})
self.load_update(upgrade.to_load, {'state': to_state})
def _software_upgrade_get(self, id):
query = model_query(models.SoftwareUpgrade)
if utils.is_uuid_like(id):
query = query.filter_by(uuid=id)
else:
query = query.filter_by(id=id)
try:
result = query.one()
except NoResultFound:
raise exception.InvalidParameterValue(
err="No software upgrade entry found for %s" % id)
return result
@db_objects.objectify(objects.software_upgrade)
def software_upgrade_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
upgrade = models.SoftwareUpgrade()
upgrade.update(values)
with _session_for_write() as session:
try:
session.add(upgrade)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.UpgradeAlreadyExists(uuid=values['uuid'])
return self._software_upgrade_get(values['uuid'])
@db_objects.objectify(objects.software_upgrade)
def software_upgrade_get(self, id):
return self._software_upgrade_get(id)
@db_objects.objectify(objects.software_upgrade)
def software_upgrade_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.SoftwareUpgrade)
return _paginate_query(models.SoftwareUpgrade, limit, marker,
sort_key, sort_dir, query)
@db_objects.objectify(objects.software_upgrade)
def software_upgrade_get_one(self):
query = model_query(models.SoftwareUpgrade)
try:
return query.one()
except NoResultFound:
raise exception.NotFound()
@db_objects.objectify(objects.software_upgrade)
def software_upgrade_update(self, uuid, values):
with _session_for_write() as session:
query = model_query(models.SoftwareUpgrade, session=session)
query = query.filter_by(uuid=uuid)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.NotFound(uuid)
return query.one()
def software_upgrade_destroy(self, id):
with _session_for_write() as session:
query = model_query(models.SoftwareUpgrade, session=session)
query = query.filter_by(uuid=id)
try:
query.one()
except NoResultFound:
raise exception.NotFound(id)
query.delete()
def _host_upgrade_create(self, host_id, version, values=None):
if values is None:
values = dict()
if not version:
systems = self.isystem_get_list()
if systems is not None:
version = systems[0].software_version
LOG.info("_host_upgrade_create system version=%s" % version)
if version:
# get the load_id from the loads table
query = model_query(models.Load)
query = query.filter_by(software_version=version)
try:
result = query.one()
except NoResultFound:
LOG.info("Fail to get load id from load table %s" %
version)
return None
values['software_load'] = result.id
values['target_load'] = result.id
values['forihostid'] = host_id
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
upgrade = models.HostUpgrade()
upgrade.update(values)
with _session_for_write() as session:
try:
session.add(upgrade)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.UpgradeAlreadyExists(uuid=values['uuid'])
return upgrade
@db_objects.objectify(objects.host_upgrade)
def host_upgrade_create(self, host_id, version, values):
return self._host_upgrade_create(host_id, version, values)
@db_objects.objectify(objects.host_upgrade)
def host_upgrade_get(self, id):
query = model_query(models.HostUpgrade)
if utils.is_uuid_like(id):
query = query.filter_by(uuid=id)
else:
query = query.filter_by(id=id)
try:
result = query.one()
except NoResultFound:
raise exception.InvalidParameterValue(
err="No host upgrade entry found for %s" % id)
return result
@db_objects.objectify(objects.host_upgrade)
def host_upgrade_get_by_host(self, host_id):
query = model_query(models.HostUpgrade)
query = query.filter_by(forihostid=host_id)
try:
result = query.one()
except NoResultFound:
raise exception.NotFound(host_id)
return result
@db_objects.objectify(objects.host_upgrade)
def host_upgrade_get_list(self, limit=None, marker=None, sort_key=None,
sort_dir=None):
query = model_query(models.HostUpgrade)
return _paginate_query(models.HostUpgrade, limit, marker,
sort_key, sort_dir, query)
@db_objects.objectify(objects.host_upgrade)
def host_upgrade_update(self, object_id, values):
with _session_for_write() as session:
query = model_query(models.HostUpgrade, session=session)
query = query.filter_by(id=object_id)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.NotFound(object_id)
session.flush()
return query.one()
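The update implementations above share one idiom: filter the row, issue a bulk UPDATE with synchronize_session='fetch', and treat any row count other than one as not-found. A generic sketch (the session and model are assumed):
def update_one_by_uuid(session, model, uuid, values):
    query = session.query(model).filter_by(uuid=uuid)
    # 'fetch' selects the affected rows first, so objects already in
    # the session are refreshed with the new values.
    count = query.update(values, synchronize_session='fetch')
    if count != 1:
        raise LookupError(uuid)  # the real code raises exception.NotFound
    return query.one()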
@db_objects.objectify(objects.service_parameter)
def service_parameter_create(self, values):
if not values.get('uuid'):

View File

@ -265,7 +265,6 @@ class ihost(Base):
system = relationship("isystem", lazy="joined", join_depth=1)
host_upgrade = relationship("HostUpgrade", uselist=False)
kube_host_upgrade = relationship("KubeHostUpgrade", uselist=False)
ptp_instances = relationship(
@ -1639,22 +1638,6 @@ class SensorsAnalog(Sensors):
}
class Load(Base):
__tablename__ = 'loads'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
state = Column(String(255))
software_version = Column(String(255))
compatible_version = Column(String(255))
required_patches = Column(String(2047))
UniqueConstraint('software_version')
class PciDevice(Base):
__tablename__ = 'pci_devices'
@ -1842,28 +1825,6 @@ class DeviceImageState(Base):
"DeviceImage", lazy="joined", backref="device_image_state")
class SoftwareUpgrade(Base):
__tablename__ = 'software_upgrade'
id = Column('id', Integer, primary_key=True, nullable=False)
uuid = Column('uuid', String(36), unique=True)
state = Column('state', String(128), nullable=False)
from_load = Column('from_load', Integer, ForeignKey('loads.id',
ondelete="CASCADE"),
nullable=False)
to_load = Column('to_load', Integer, ForeignKey('loads.id',
ondelete="CASCADE"),
nullable=False)
# the from_load and to_load should have been named with an _id, but since
# they weren't we will just reverse the naming to not clash with the
# foreign key column
load_from = relationship("Load", lazy="joined", join_depth=1,
foreign_keys=[from_load])
load_to = relationship("Load", lazy="joined", join_depth=1,
foreign_keys=[to_load])
class Restore(Base):
__tablename__ = 'backup_restore'
@ -1873,27 +1834,6 @@ class Restore(Base):
capabilities = Column(JSONEncodedDict)
class HostUpgrade(Base):
__tablename__ = 'host_upgrade'
id = Column('id', Integer, primary_key=True, nullable=False)
uuid = Column('uuid', String(36), unique=True)
forihostid = Column('forihostid', Integer, ForeignKey('i_host.id',
ondelete="CASCADE"))
software_load = Column('software_load', Integer, ForeignKey('loads.id'),
nullable=False)
target_load = Column('target_load', Integer, ForeignKey('loads.id'),
nullable=False)
# the software_load and target_load should have been named with an _id,
# but since they weren't we will just reverse the naming to not clash with
# the foreign key column
load_software = relationship("Load", lazy="joined", join_depth=1,
foreign_keys=[software_load])
load_target = relationship("Load", lazy="joined", join_depth=1,
foreign_keys=[target_load])
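The naming comments in SoftwareUpgrade and HostUpgrade point at a general SQLAlchemy rule: when two columns reference the same table, each relationship() must name its foreign_keys explicitly. A self-contained sketch with hypothetical tables:
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()


class Release(Base):
    __tablename__ = 'releases'
    id = Column(Integer, primary_key=True)
    version = Column(String(255))


class Transition(Base):
    __tablename__ = 'transitions'
    id = Column(Integer, primary_key=True)
    # Two FKs to the same table: SQLAlchemy cannot infer which column
    # backs which relationship, so each one names its foreign_keys.
    from_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
    to_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
    release_from = relationship('Release', foreign_keys=[from_id])
    release_to = relationship('Release', foreign_keys=[to_id])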
class ServiceParameter(Base):
__tablename__ = 'service_parameter'

View File

@ -36,7 +36,6 @@ from sysinv.objects import drbdconfig
from sysinv.objects import port_ethernet
from sysinv.objects import helm_overrides
from sysinv.objects import host
from sysinv.objects import host_upgrade
from sysinv.objects import kube_app
from sysinv.objects import kube_app_bundle
from sysinv.objects import kube_app_releases
@ -57,7 +56,6 @@ from sysinv.objects import label
from sysinv.objects import lldp_agent
from sysinv.objects import lldp_neighbour
from sysinv.objects import lldp_tlv
from sysinv.objects import load
from sysinv.objects import lvg
from sysinv.objects import memory
from sysinv.objects import network
@ -87,7 +85,6 @@ from sysinv.objects import sensorgroup
from sysinv.objects import sensorgroup_analog
from sysinv.objects import sensorgroup_discrete
from sysinv.objects import service_parameter
from sysinv.objects import software_upgrade
from sysinv.objects import storage
from sysinv.objects import storage_backend
from sysinv.objects import storage_ceph
@ -165,10 +162,7 @@ sensor_discrete = sensor_discrete.SensorDiscrete
sensorgroup = sensorgroup.SensorGroup
sensorgroup_analog = sensorgroup_analog.SensorGroupAnalog
sensorgroup_discrete = sensorgroup_discrete.SensorGroupDiscrete
load = load.Load
pci_device = pci_device.PCIDevice
software_upgrade = software_upgrade.SoftwareUpgrade
host_upgrade = host_upgrade.HostUpgrade
service_parameter = service_parameter.ServiceParameter
lldp_agent = lldp_agent.LLDPAgent
lldp_neighbour = lldp_neighbour.LLDPNeighbour
@ -251,10 +245,7 @@ __all__ = ("system",
"sensorgroup",
"sensorgroup_analog",
"sensorgroup_discrete",
"load",
"pci_device",
"software_upgrade",
"host_upgrade",
"network",
"interface_network",
"service_parameter",

View File

@ -13,16 +13,6 @@ from sysinv.objects import base
from sysinv.objects import utils
def _get_software_load(field, db_object):
if db_object.host_upgrade:
return db_object.host_upgrade.load_software.software_version
def _get_target_load(field, db_object):
if db_object.host_upgrade:
return db_object.host_upgrade.load_target.software_version
def _get_ptp_instance_names(field, db_object):
instances = db_object['ptp_instances']
names = []
@ -103,8 +93,6 @@ class Host(base.SysinvObject):
'tboot': utils.str_or_none,
'vsc_controllers': utils.str_or_none,
'ttys_dcd': utils.bool_or_none,
'software_load': utils.str_or_none,
'target_load': utils.str_or_none,
'install_state': utils.str_or_none,
'install_state_info': utils.str_or_none,
'iscsi_initiator_name': utils.str_or_none,
@ -119,9 +107,7 @@ class Host(base.SysinvObject):
}
_foreign_fields = {
'isystem_uuid': 'system:uuid',
'software_load': _get_software_load,
'target_load': _get_target_load
'isystem_uuid': 'system:uuid'
}
_optional_fields = {

View File

@ -1,38 +0,0 @@
# Copyright (c) 2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class HostUpgrade(base.SysinvObject):
# VERSION 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'id': int,
'uuid': utils.uuid_or_none,
'forihostid': utils.int_or_none,
'software_load': utils.int_or_none,
'target_load': utils.int_or_none,
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.host_upgrade_get(uuid)
@base.remotable_classmethod
def get_by_host_id(cls, context, host_id):
return cls.dbapi.host_upgrade_get_by_host(host_id)
def save_changes(self, context, updates):
self.dbapi.host_upgrade_update(self.id, # pylint: disable=no-member
updates)

View File

@ -1,37 +0,0 @@
#
# Copyright (c) 2015 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class Load(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'state': utils.str_or_none,
'software_version': utils.str_or_none,
'compatible_version': utils.str_or_none,
'required_patches': utils.str_or_none,
}
@base.remotable_classmethod
def get_by_uuid(self, context, uuid):
return self.dbapi.load_get(uuid)
def save_changes(self, context, updates):
self.dbapi.load_update(self.uuid, # pylint: disable=no-member
updates)

View File

@ -1,41 +0,0 @@
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class SoftwareUpgrade(base.SysinvObject):
# VERSION 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'id': int,
'uuid': utils.uuid_or_none,
'state': utils.str_or_none,
'from_load': utils.int_or_none,
'to_load': utils.int_or_none,
'from_release': utils.str_or_none,
'to_release': utils.str_or_none,
}
_foreign_fields = {
'from_release': 'load_from:software_version',
'to_release': 'load_to:software_version'
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.software_upgrade_get(uuid)
def save_changes(self, context, updates):
self.dbapi.software_upgrade_update(self.uuid, # pylint: disable=no-member
updates)

View File

@ -313,7 +313,7 @@ class NfvPuppet(openstack.OpenstackBasePuppet):
def get_host_config(self, host):
if (constants.CONTROLLER not in utils.get_personalities(host)):
return {}
database_dir = "/opt/platform/nfv/vim/%s" % host.software_load
database_dir = "/opt/platform/nfv/vim/%s" % host.sw_version
return {
'nfv::vim::database_dir': database_dir,
}

View File

@ -397,7 +397,7 @@ class PlatformPuppet(base.BasePuppet):
# required parameters
config = {
'platform::params::hostname': host.hostname,
'platform::params::software_version': self.quoted_str(host.software_load),
'platform::params::software_version': self.quoted_str(host.sw_version),
'platform::params::apparmor': host.apparmor,
}

View File

@ -15,14 +15,12 @@ from oslo_context import context
from sysinv.agent.manager import AgentManager
from sysinv.common import constants
from sysinv.common import exception
from sysinv.tests import base
class FakeConductorAPI(object):
def __init__(self, isystem=None):
self.finalize_delete_load = mock.MagicMock()
self.create_host_filesystems = mock.MagicMock()
self.update_host_max_cpu_mhz_configured = mock.MagicMock()
self.is_virtual_system_config_result = False
@ -473,60 +471,3 @@ class TestHostFileSystems(base.TestCase):
self.agent_manager._ihost_uuid,
expected_filesystems)
self.assertEqual(self.agent_manager._prev_fs, expected_filesystems)
@mock.patch('sysinv.agent.manager.os.path.isfile', mock.MagicMock())
@mock.patch('sysinv.agent.manager.subprocess.check_call', mock.MagicMock())
class TestLoad(base.TestCase):
def setUp(self):
super(TestLoad, self).setUp()
self.version = '1.0'
self.fake_uuid = 'FAKEUUID'
self.agent_manager = AgentManager('test-host', 'test-topic')
self.agent_manager._ihost_uuid = self.fake_uuid
self.context = context.get_admin_context()
conductor = mock.patch('sysinv.agent.manager.conductor_rpcapi.ConductorAPI')
self.mock_conductor_api = conductor.start()
self.fake_conductor = FakeConductorAPI()
self.mock_conductor_api.return_value = self.fake_conductor
self.addCleanup(conductor.stop)
def tearDown(self):
super(TestLoad, self).tearDown()
def test_delete_load(self):
self.agent_manager.delete_load(
self.context,
self.fake_uuid,
self.version,
)
self.fake_conductor.finalize_delete_load.assert_called_once()
def test_delete_load_without_delete_script(self):
with mock.patch('sysinv.agent.manager.os.path.isfile') as isfile:
isfile.return_value = False
self.agent_manager.delete_load(
self.context,
self.fake_uuid,
self.version,
)
self.fake_conductor.finalize_delete_load.assert_not_called()
def test_delete_load_script_exception(self):
with mock.patch('sysinv.agent.manager.subprocess.check_call') as check_call:
check_call.side_effect = exception.SysinvException()
self.assertRaises(
exception.SysinvException,
self.agent_manager.delete_load,
self.context,
self.fake_uuid,
self.version,
)
self.fake_conductor.finalize_delete_load.assert_not_called()

View File

@ -588,7 +588,6 @@ class TestPatchMixin(object):
"0.0",
"0.0")
mock_get_platform_upgrade.return_value = usm_deploy
dbutils.create_test_upgrade(state=constants.UPGRADE_STARTING)
addrpool = self.find_addrpool_by_networktype(constants.NETWORK_TYPE_OAM)
response = self.patch_oam_fail(addrpool, http_client.BAD_REQUEST,
controller1_address=str(self.oam_subnet[10]))

View File

@ -2615,9 +2615,6 @@ class TestPatch(TestHost):
operational=constants.OPERATIONAL_ENABLED,
availability=constants.AVAILABILITY_ONLINE)
upgrade = dbutils.create_test_upgrade(
state=constants.DEPLOY_STATE_START
)
# Verify the error response on lock controller attempt
response = self._patch_host_action(c1_host['hostname'],
constants.LOCK_ACTION,
@ -2626,15 +2623,6 @@ class TestPatch(TestHost):
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertTrue(response.json['error_message'])
self.assertIn("host-lock %s is not allowed during upgrade state '%s'" %
(c1_host['hostname'], upgrade.state),
response.json['error_message'])
def test_lock_action_controller_during_upgrade_started(self):
dbutils.create_test_upgrade(
state=constants.UPGRADE_STARTED
)
self._test_lock_action_controller()
@mock.patch('os.path.isfile')
def test_lock_action_controller_during_backup_in_progress(self, mock_os_is_file):

View File

@ -38,7 +38,6 @@ class InterfaceDataNetworkTestCase(base.FunctionalTest):
self.addCleanup(p.stop)
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.controller = dbutils.create_test_ihost(
id='1',
uuid=None,

View File

@ -39,7 +39,6 @@ class InterfaceNetworkTestCase(base.FunctionalTest):
self.addCleanup(p.stop)
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.controller = dbutils.create_test_ihost(
id='1',
uuid=None,

View File

@ -305,10 +305,6 @@ class TestPostKubeRootCAUpdate(TestKubeRootCAUpdate,
"0.0",
"0.0")
mock_get_platform_upgrade.return_value = usm_deploy
dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW,
compatible_version=dbutils.SW_VERSION,
state=constants.IMPORTED_LOAD_STATE)
dbutils.create_test_upgrade()
create_dict = dbutils.post_get_test_kube_rootca_update()
result = self.post_json('/kube_rootca_update?force=False', create_dict,

View File

@ -378,10 +378,6 @@ class TestPostKubeUpgrade(TestKubeUpgrade,
"0.0",
"0.0")
mock_get_platform_upgrade.return_value = usm_deploy
dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW,
compatible_version=dbutils.SW_VERSION,
state=constants.IMPORTED_LOAD_STATE)
dbutils.create_test_upgrade()
create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
result = self.post_json('/kube_upgrade', create_dict,

View File

@ -43,7 +43,6 @@ class LabelTestCase(base.FunctionalTest):
super(LabelTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
def _get_path(self, host=None, params=None):
if host:

View File

@ -369,9 +369,6 @@ class TestPatchMixin(OAMNetworkTestCase):
'oam_c0_ip': str(oam_c0_ip),
'oam_c1_ip': str(oam_c1_ip),
}
dbutils.create_test_upgrade(
state=constants.UPGRADE_STARTING
)
error_message = "Action rejected while a " \
"platform upgrade is in progress"
self._test_patch_fail(patch_obj, http_client.BAD_REQUEST,

View File

@ -82,7 +82,6 @@ class TestPartition(base.FunctionalTest):
"sdn_enabled": False,
"shared_services": "[]"}
)
self.load = dbutils.create_test_load()
# Create controller-0
self.ihost = self._create_controller_0()
self.disk = self._create_disk(self.ihost.id)

View File

@ -21,7 +21,6 @@ class PTPTestCase(base.FunctionalTest):
super(PTPTestCase, self).setUp()
self.dbapi = dbapi.get_instance()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.controller = dbutils.create_test_ihost(
id='1',
uuid=None,

View File

@ -170,11 +170,6 @@ class RouteTestCase(base.FunctionalTest, dbbase.BaseHostTestCase):
self.assertEqual(response.status_code, status_code)
self.assertIn(error_message, response.json['error_message'])
def _create_platform_upgrade(self):
self.upgrade = dbutils.create_test_upgrade(
state=constants.UPGRADE_STARTING
)
class TestPost(RouteTestCase):
def test_create_route(self):
@ -204,7 +199,6 @@ class TestPostUpgrade(RouteTestCase):
def setUp(self):
super(TestPostUpgrade, self).setUp()
self.dbapi = db_api.get_instance()
self._create_platform_upgrade()
@mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True)
def test_create_route_during_disallowed_upgrade_state(self):
@ -276,7 +270,6 @@ class TestDeleteUpgrade(RouteTestCase):
def setUp(self):
super(TestDeleteUpgrade, self).setUp()
self.dbapi = db_api.get_instance()
self._create_platform_upgrade()
@mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True)
def test_delete_route_during_disallowed_upgrade_state(self):

View File

@ -16,7 +16,6 @@ class sensorgroupTestCase(base.FunctionalTest):
def setUp(self):
super(sensorgroupTestCase, self).setUp()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
def assertDeleted(self, fullPath):

View File

@ -83,7 +83,6 @@ class StorageBackendTestCases(base.FunctionalTest):
self.system = dbutils.create_test_isystem()
self.cluster = dbutils.create_test_cluster(system_id=self.system.id)
self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id)
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
self.lvg = dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
forihostid=self.host.id)
@ -840,7 +839,6 @@ class StorageFileTestCases(base.FunctionalTest):
def setUp(self):
super(StorageFileTestCases, self).setUp()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
def assertDeleted(self, fullPath):
@ -1086,7 +1084,6 @@ class StorageLvmTestCases(base.FunctionalTest):
def setUp(self):
super(StorageLvmTestCases, self).setUp()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
def assertDeleted(self, fullPath):
@ -1248,7 +1245,6 @@ class StorageCephTestCases(base.FunctionalTest):
self.system = dbutils.create_test_isystem()
self.cluster = dbutils.create_test_cluster(system_id=self.system.id)
self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id)
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
self.lvg = dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
forihostid=self.host.id)
@ -1403,7 +1399,6 @@ class StorageCephRookTestCases(base.FunctionalTest):
self.system = dbutils.create_test_isystem()
self.cluster = dbutils.create_test_cluster(system_id=self.system.id)
self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id)
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
# Patch management network for ceph

View File

@ -57,7 +57,6 @@ class StorageTierIndependentTCs(base.FunctionalTest):
self.set_is_initial_config_patcher.return_value = True
self.system = dbutils.create_test_isystem()
self.cluster = dbutils.create_test_cluster(system_id=self.system.id, name='ceph_cluster')
self.load = dbutils.create_test_load()
self.host = dbutils.create_test_ihost(forisystemid=self.system.id)
def tearDown(self):
@ -560,7 +559,6 @@ class StorageTierDependentTCs(base.FunctionalTest):
self.context = context.get_admin_context()
self.dbapi = dbapi.get_instance()
self.system = dbutils.create_test_isystem()
self.load = dbutils.create_test_load()
self.host_index = -1
self.mon_index = -1

View File

@ -26,7 +26,6 @@ class TestUSMService(TestCase):
"2.0")
mock_get_software_upgrade.return_value = usm_deploy
mock_dbapi = mock.Mock()
mock_dbapi.software_upgrade_get_one.return_value = None
result = get_platform_upgrade(mock_dbapi)

View File

@ -52,7 +52,6 @@ class UpdateCephCluster(base.DbTestCase):
self.context = context.get_admin_context()
self.dbapi = dbapi.get_instance()
self.system = utils.create_test_isystem()
self.load = utils.create_test_load()
self.host_index = -1
self.mock_fix_crushmap = self.fix_crushmap_patcher.start()

View File

@ -27,15 +27,12 @@ import json
import mock
import os.path
import netaddr
import tempfile
import uuid
import threading
from time import sleep
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from shutil import copy as shutil_copy
from shutil import rmtree
from fm_api import constants as fm_constants
from oslo_context import context
@ -48,10 +45,7 @@ from sysinv.common import kubernetes
from sysinv.common import utils as cutils
from sysinv.common import usm_service
from sysinv.conductor import manager
from sysinv.db.sqlalchemy.api import Connection
from sysinv.db import api as dbapi
from sysinv.loads.loads import LoadImport
from sysinv.objects.load import Load
from sysinv.puppet import common as puppet_common
from sysinv.tests.db import utils as dbutils
from sysinv import objects
@ -377,7 +371,6 @@ class ManagerTestCase(base.DbTestCase):
self.context = context.get_admin_context()
self.dbapi = dbapi.get_instance()
self.system = utils.create_test_isystem()
self.load = utils.create_test_load()
self.dnsmasq_hosts_file = '/tmp/dnsmasq.hosts'
# Mock the ceph operator
@ -5921,504 +5914,6 @@ class ManagerTestCase(base.DbTestCase):
mock_update_cached_app_bundles_set.assert_called_once()
@mock.patch('sysinv.conductor.manager.verify_files', lambda x, y: True)
@mock.patch('sysinv.conductor.manager.cutils.ISO', mock.MagicMock())
class ManagerStartLoadImportTest(base.BaseHostTestCase):
def setUp(self):
super(ManagerStartLoadImportTest, self).setUp()
# Set up objects for testing
self.service = manager.ConductorManager('test-host', 'test-topic')
self.service.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
self.tmp_dir = tempfile.mkdtemp(dir='/tmp')
patch_mkdtemp = mock.patch('tempfile.mkdtemp')
mock_mkdtemp = patch_mkdtemp.start()
mock_mkdtemp.return_value = self.tmp_dir
self.addCleanup(patch_mkdtemp.stop)
self.upgrades_path = '%s/upgrades' % self.tmp_dir
os.makedirs(self.upgrades_path, exist_ok=True)
self.metadata = os.path.join(
os.path.dirname(__file__), "data", "metadata.xml"
)
shutil_copy(self.metadata, self.upgrades_path)
self.iso = os.path.join(
os.path.dirname(__file__), "data", "bootimage.iso"
)
self.sig = os.path.join(
os.path.dirname(__file__), "data", "bootimage.sig"
)
load_update = mock.patch.object(Connection, 'load_update')
self.mock_load_update = load_update.start()
self.mock_load_update.return_value = mock.MagicMock()
self.addCleanup(load_update.stop)
def test_start_import_load(self):
result = self.service.start_import_load(
self.context,
path_to_iso=self.iso,
path_to_sig=self.sig,
)
self.assertIsInstance(result, Load)
self.assertEqual(result.state, constants.IMPORTING_LOAD_STATE)
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
def test_start_import_load_same_version(self, mock_get_active_load):
mock_get_active_load.return_value.software_version = '0.1'
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
)
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
def test_start_import_load_invalid_from_version(self, mock_get_active_load):
mock_get_active_load.return_value.software_version = '0.2'
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
)
@mock.patch.object(Connection, 'load_get_list')
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
def test_start_import_load_active(self, mock_get_active_load, mock_load_get_list):
mock_get_active_load.return_value.software_version = '0.1'
load = utils.create_test_load(**{"software_version": "0.1"})
mock_load_get_list.return_value = [load]
result = self.service.start_import_load(
self.context,
path_to_iso=self.iso,
path_to_sig=self.sig,
import_type=constants.ACTIVE_LOAD_IMPORT,
)
self.assertIsInstance(result, Load)
self.assertEqual(result.state, constants.ACTIVE_LOAD_STATE)
def test_start_import_load_active_invalid_version(self):
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
import_type=constants.ACTIVE_LOAD_IMPORT,
)
@mock.patch.object(Connection, 'load_get_list')
def test_start_import_load_active_load_not_found(self, mock_load_get_list):
load = utils.create_test_load(**{"software_version": "0.1"})
mock_load_get_list.side_effect = [[load], []]
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
import_type=constants.ACTIVE_LOAD_IMPORT,
)
@mock.patch('os.path.exists', mock.MagicMock())
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
@mock.patch('sysinv.conductor.manager.ConductorManager._get_committed_patches_from_iso')
def test_start_import_load_inactive(self, mock__get_committed_patches_from_iso, mock_get_active_load):
mock_get_active_load.return_value.software_version = '0.2'
mock_get_active_load.return_value.uuid = "11111111-1111-1111-1111-111111111111"
mock_get_active_load.return_value.id = '1'
mock_get_active_load.return_value.compatible_version = ""
mock_get_active_load.return_value.required_patches = ""
mock__get_committed_patches_from_iso.return_value = ["PATCH_0001"]
loading_metadata = open(self.metadata, 'r').read()
current_metadata = '''
<build>\n<version>0.2</version>\n<supported_upgrades>
\n<upgrade>\n<version>0.1</version>\n<required_patch>PATCH_0001</required_patch>
\n</upgrade>\n</supported_upgrades>\n</build>
'''
mock_files = [
mock.mock_open(read_data=loading_metadata).return_value,
mock.mock_open(read_data=current_metadata).return_value,
]
mock_open = mock.mock_open()
mock_open.side_effect = mock_files
with mock.patch('builtins.open', mock_open):
result = self.service.start_import_load(
self.context,
path_to_iso=self.iso,
path_to_sig=self.sig,
import_type=constants.INACTIVE_LOAD_IMPORT,
)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'compatible_version': '0.1', 'required_patches': 'PATCH_0001'},
)
self.assertIsInstance(result, Load)
self.assertEqual(result.state, constants.IMPORTING_LOAD_STATE)
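The inactive-import tests above lean on a mock idiom worth noting: mock_open handles queued on side_effect, so consecutive open() calls return different file contents. A self-contained sketch:
import mock

handles = [
    mock.mock_open(read_data='first file').return_value,
    mock.mock_open(read_data='second file').return_value,
]
opener = mock.mock_open()
opener.side_effect = handles

with mock.patch('builtins.open', opener):
    assert open('a.txt').read() == 'first file'
    assert open('b.txt').read() == 'second file'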
@mock.patch('sysinv.conductor.manager.open')
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
def test_start_import_load_inactive_incompatible_version(self, mock_get_active_load, mock_open):
mock_get_active_load.return_value.software_version = '0.3'
mock_get_active_load.return_value.uuid = "22222222-2222-2222-2222-222222222222"
mock_get_active_load.return_value.id = '1'
mock_get_active_load.return_value.compatible_version = ""
mock_get_active_load.return_value.required_patches = ""
current_metadata = b'''
<build>\n<version>0.3</version>\n<supported_upgrades>
\n<upgrade>\n<version>0.2</version>\n<required_patch>PATCH_0001</required_patch>
\n</upgrade>\n</supported_upgrades>\n</build>
'''
mock_files = [
mock.mock_open(read_data=current_metadata).return_value,
]
mock_open.side_effect = mock_files
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
path_to_iso=self.iso,
path_to_sig=self.sig,
import_type=constants.INACTIVE_LOAD_IMPORT,
)
def test_get_patch_id(self):
import tempfile
patches = {"PATCH_0001-metadata.xml": "<?xml version=\"1.0\" ?><patch><id>PATCH_0001</id></patch>",
"PATCH_0002-metadata.xml": "<?xml version=\"1.0\" ?><patch><id>PATCH_0002</id></patch>", }
patch_ids = []
with tempfile.TemporaryDirectory() as tempdir:
for fn, content in patches.items():
filename = os.path.join(tempdir, fn)
with open(filename, 'w') as f:
f.write(content)
patch_id = self.service._get_patch_id(filename)
if patch_id:
patch_ids.append(patch_id)
self.assertEqual(patch_ids, ["PATCH_0001", "PATCH_0002"])
@mock.patch('os.path.exists', mock.MagicMock())
# @mock.patch('sysinv.conductor.manager.open')
@mock.patch('sysinv.conductor.manager.cutils.get_active_load')
@mock.patch('sysinv.conductor.manager.ConductorManager._get_committed_patches_from_iso')
def test_start_import_load_inactive_invalid_patch(self, mock__get_committed_patches_from_iso, mock_get_active_load):
mock_get_active_load.return_value.software_version = '0.3'
mock_get_active_load.return_value.uuid = "f0905590-9c02-445a-87c7-568cba08c997"
mock_get_active_load.return_value.id = 1
mock_get_active_load.return_value.compatible_version = ""
mock_get_active_load.return_value.required_patches = ""
mock__get_committed_patches_from_iso.return_value = ["PATCH_0001"]
loading_metadata = open(self.metadata, 'r').read()
current_metadata = b'''
<build>\n<version>0.2</version>\n<supported_upgrades>
\n<upgrade>\n<version>0.1</version>\n<required_patch>PATCH_0002</required_patch>
\n</upgrade>\n</supported_upgrades>\n</build>
'''
mock_files = [
mock.mock_open(read_data=loading_metadata).return_value,
mock.mock_open(read_data=current_metadata).return_value,
]
mock_open = mock.mock_open()
mock_open.side_effect = mock_files
# the load can be imported; the restriction on required_patches only
# applies when the upgrade starts
with mock.patch('builtins.open', mock_open):
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
path_to_iso=self.iso,
path_to_sig=self.sig,
import_type=constants.INACTIVE_LOAD_IMPORT,
)
def test_start_import_load_invalid_path(self):
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
'invalid/path/bootimage.iso',
'invalid/path/bootimage.sig',
)
def test_start_import_load_invalid_files(self):
with mock.patch('sysinv.conductor.manager.verify_files', lambda x, y: False):
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
)
def test_start_import_load_without_metadata(self):
rmtree(self.upgrades_path, ignore_errors=True)
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
)
def test_start_import_load_invalid_metadata(self):
iso = os.path.join(
os.path.dirname(__file__), "data", "bootimage.iso"
)
shutil_copy(iso, self.upgrades_path)
os.rename(
'%s/bootimage.iso' % self.upgrades_path,
'%s/metadata.xml' % self.upgrades_path,
)
self.assertRaises(
exception.SysinvException,
self.service.start_import_load,
self.context,
self.iso,
self.sig,
)
@mock.patch('sysinv.conductor.manager.subprocess', mock.MagicMock())
@mock.patch('sysinv.conductor.manager.cutils.ISO', mock.MagicMock())
class ManagerLoadImportTest(base.BaseHostTestCase):
def setUp(self):
super(ManagerLoadImportTest, self).setUp()
# Set up objects for testing
self.service = manager.ConductorManager('test-host', 'test-topic')
self.service.dbapi = dbapi.get_instance()
self.context = context.get_admin_context()
self.iso = os.path.join(
os.path.dirname(__file__), "data", "bootimage.iso"
)
self.load = utils.create_test_load(
**{"software_version": "0.1"}
)
load_update = mock.patch.object(Connection, 'load_update')
self.mock_load_update = load_update.start()
self.mock_load_update.return_value = mock.MagicMock()
self.addCleanup(load_update.stop)
extract_files = mock.patch.object(LoadImport, 'extract_files')
self.mock_extract_files = extract_files.start()
self.mock_extract_files.return_value = mock.MagicMock()
self.addCleanup(extract_files.stop)
def test_import_load(self):
result = self.service.import_load(
self.context,
path_to_iso=self.iso,
new_load=self.load,
)
self.assertTrue(result)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'state': constants.IMPORTED_LOAD_STATE},
)
@mock.patch('sysinv.conductor.manager.os.chmod', mock.Mock())
@mock.patch('sysinv.conductor.manager.os.makedirs', mock.Mock())
def test_import_load_inactive(self):
with mock.patch('builtins.open', mock.mock_open()):
result = self.service.import_load(
self.context,
path_to_iso=self.iso,
new_load=self.load,
import_type=constants.INACTIVE_LOAD_IMPORT,
)
self.assertTrue(result)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'state': constants.INACTIVE_LOAD_STATE},
)
@mock.patch('sysinv.conductor.manager.os.chmod', mock.Mock())
@mock.patch('sysinv.conductor.manager.os.makedirs', mock.Mock())
def test_import_load_inactive_failed_extract_files(self):
self.mock_extract_files.side_effect = exception.SysinvException()
with mock.patch('builtins.open', mock.mock_open()):
self.assertRaises(
exception.SysinvException,
self.service.import_load,
self.context,
path_to_iso=self.iso,
new_load=self.load,
import_type=constants.INACTIVE_LOAD_IMPORT,
)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'state': constants.ERROR_LOAD_STATE},
)
def test_import_load_empty_new_load(self):
self.assertRaises(
exception.SysinvException,
self.service.import_load,
self.context,
path_to_iso=self.iso,
new_load=None,
)
self.mock_load_update.assert_not_called()
def test_import_load_invalid_iso_path(self):
self.assertRaises(
exception.SysinvException,
self.service.import_load,
self.context,
path_to_iso='invalid',
new_load=self.load,
)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'state': constants.ERROR_LOAD_STATE},
)
def test_import_load_load_update_failed(self):
self.mock_load_update.side_effect = exception.SysinvException()
self.assertRaises(
exception.SysinvException,
self.service.import_load,
self.context,
path_to_iso=self.iso,
new_load=self.load,
)
self.mock_load_update.assert_called_once_with(
mock.ANY,
{'state': constants.IMPORTED_LOAD_STATE},
)
@mock.patch('sysinv.conductor.manager.os.path.isfile', mock.MagicMock())
@mock.patch('sysinv.conductor.manager.subprocess.check_call', mock.MagicMock())
class ManagerLoadDeleteTest(base.BaseHostTestCase):
def setUp(self):
super(ManagerLoadDeleteTest, self).setUp()
self.context = context.get_admin_context()
self.service = manager.ConductorManager('test-host', 'test-topic')
self.service.dbapi = dbapi.get_instance()
self.load = utils.create_test_load(
**{
'software_version': '0.1',
'state': constants.INACTIVE_LOAD_STATE,
}
)
ihost = utils.create_test_ihost()
controller_hostname = mock.patch.object(
cutils,
'get_mate_controller_hostname',
lambda: ihost.hostname,
)
self.mock_controller_hostname = controller_hostname.start()
self.addCleanup(controller_hostname.stop)
rpcapi_delete_load = mock.patch.object(
agent_rpcapi.AgentAPI,
'delete_load',
mock.MagicMock(),
)
self.mocked_rpcapi_delete_load = rpcapi_delete_load.start()
self.addCleanup(rpcapi_delete_load.stop)
def tearDown(self):
super(ManagerLoadDeleteTest, self).tearDown()
def test_load_delete(self):
self.service.delete_load(
self.context,
self.load.id,
)
self.mocked_rpcapi_delete_load.assert_called_once()
def test_load_delete_run_again(self):
utils.update_test_load(
self.load.id,
**{'state': constants.DELETING_LOAD_STATE},
)
self.service.delete_load(
self.context,
self.load.id,
)
self.mocked_rpcapi_delete_load.assert_called_once()
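
    # With no mate controller configured, the load is left in place and no
    # agent RPC is issued.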
@mock.patch.object(cutils, 'get_mate_controller_hostname', lambda: '')
def test_load_delete_meta_controller_not_configured(self):
self.service.delete_load(
self.context,
self.load.id,
)
loads = self.dbapi.load_get_list()
self.assertEqual(1, len(loads))
self.mocked_rpcapi_delete_load.assert_not_called()
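
    # A load still in IMPORTING_LOAD_STATE cannot be deleted.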
def test_load_delete_invalid_state(self):
utils.update_test_load(
self.load.id,
**{'state': constants.IMPORTING_LOAD_STATE},
)
self.assertRaises(
exception.SysinvException,
self.service.delete_load,
self.context,
self.load.id,
)
self.mocked_rpcapi_delete_load.assert_not_called()
class ManagerTestCaseInternal(base.BaseHostTestCase):
def setUp(self):
super(ManagerTestCaseInternal, self).setUp()


@@ -259,7 +259,6 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):

     def _create_test_common(self):
         self._create_test_system()
-        self._create_test_load()
         self._create_test_drbd()
         self._create_test_remotelogging()
         self._create_test_user()
@@ -277,9 +276,6 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):
             system_type=self.system_type,
             system_mode=self.system_mode)

-    def _create_test_load(self):
-        self.load = dbutils.create_test_load()
-
     def _create_test_drbd(self):
         self.drbd = dbutils.create_test_drbd(
             forisystemid=self.system.id)
@@ -889,18 +885,3 @@ class OpenstackTestCase(AppTestCase):
         with open(os.path.join(os.getcwd(), "sysinv", "tests",
                                "puppet", "fake_hieradata.yaml")) as fake_data:
             self.fake_hieradata = fake_data.read()
-
-
-class PlatformUpgradeTestCase(OpenstackTestCase):
-
-    def _create_platform_upgrade(self):
-        self.upgrade = dbutils.create_test_upgrade(
-            state=constants.UPGRADE_STARTING
-        )
-
-    def setUp(self):
-        super(PlatformUpgradeTestCase, self).setUp()
-        self._create_platform_upgrade()
-
-    def tearDown(self):
-        super(PlatformUpgradeTestCase, self).tearDown()


@@ -1253,8 +1253,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin):
         host_upgrades = db_utils.get_table(engine, 'host_upgrade')
         host_upgrades_col = {
             'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime',
-            'created_at': 'DateTime', 'updated_at': 'DateTime', 'forihostid': 'Integer',
-            'software_load': 'Integer', 'target_load': 'Integer',
+            'created_at': 'DateTime', 'updated_at': 'DateTime', 'forihostid': 'Integer'
         }
         for col, coltype in host_upgrades_col.items():
             self.assertTrue(isinstance(host_upgrades.c[col].type,


@@ -23,7 +23,6 @@ class DbNodeTestCase(base.DbTestCase):
         super(DbNodeTestCase, self).setUp()
         self.dbapi = dbapi.get_instance()
         self.system = utils.create_test_isystem()
-        self.load = utils.create_test_load()

     def _create_test_ihost(self, **kwargs):
         # ensure the system ID for proper association


@@ -245,50 +245,6 @@ def update_test_isystem(system_dict):
     return dbapi.isystem_update(system_dict['uuid'], system_dict)


-def get_test_load(**kw):
-    load = {
-        "software_version": kw.get("software_version", SW_VERSION),
-        "compatible_version": kw.get("compatible_version", "N/A"),
-        "required_patches": "N/A",
-        "state": kw.get("state", constants.ACTIVE_LOAD_STATE),
-    }
-    return load
-
-
-def create_test_load(**kw):
-    load = get_test_load(**kw)
-    dbapi = db_api.get_instance()
-    return dbapi.load_create(load)
-
-
-def get_test_load_values(**kw):
-    values = {}
-    for key, value in kw.items():
-        values[key] = value
-    return values
-
-
-def update_test_load(load_id, **kw):
-    values = get_test_load_values(**kw)
-    dbapi = db_api.get_instance()
-    return dbapi.load_update(load_id, values)
-
-
-def get_test_upgrade(**kw):
-    upgrade = {'from_load': kw.get('from_load', 1),
-               'to_load': kw.get('to_load', 2),
-               'state': kw.get('state', constants.UPGRADE_STARTING)}
-    return upgrade
-
-
-def create_test_upgrade(**kw):
-    upgrade = get_test_upgrade(**kw)
-    dbapi = db_api.get_instance()
-    return dbapi.software_upgrade_create(upgrade)
-
-
 def post_get_test_kube_upgrade(**kw):
     upgrade = get_test_kube_upgrade(**kw)
     del upgrade['id']


@@ -140,8 +140,7 @@ class PlatformCephBackendAIODuplexHostTestCase(PuppetOperatorTestSuiteMixin,

 # ============= Openstack environment tests ==============
 class PlatformUpgradeOpenstackAIODuplexHostTestCase(PuppetOperatorTestSuiteMixin,
-                                                    dbbase.BaseCephStorageBackendMixin,
-                                                    dbbase.PlatformUpgradeTestCase):
+                                                    dbbase.BaseCephStorageBackendMixin):

     @mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True)
     @mock.patch('sysinv.common.usm_service.get_platform_upgrade')