From d7ecf460699c159baf41c4a152300fb06bacee8a Mon Sep 17 00:00:00 2001
From: Luis Eduardo Bonatti
Date: Mon, 17 Feb 2025 16:01:40 -0300
Subject: [PATCH] Remove load, host upgrade, software upgrade tables and
 their references.

As USM is the new method to perform an upgrade, this commit removes the
load, host_upgrade and software_upgrade tables, along with the
references to them used by the legacy upgrade. This commit also removes
the methods that manipulate load data, as well as upgrade-related code
in controllerconfig. Unit tests related to the upgrade code in
controllerconfig were also removed.

Test Plan:

DX:
PASS: Upgrade stx10 -> stx11.
PASS: Host Swact.
PASS: Rollback stx11 -> stx10.
PASS: stx-11 fresh install/bootstrap/unlock

SX:
PASS: Upgrade stx10 -> stx11.
PASS: Backup and Restore.
PASS: stx-11 fresh install/bootstrap/unlock

Story: 2011357
Task: 51709

Change-Id: I9b926f9627b789908a31f3a405cc48b506148b49
Signed-off-by: Luis Eduardo Bonatti
---
 .zuul.yaml | 15 -
 .../tests/upgrades/__init__.py | 5 -
 .../tests/upgrades/test_migration_scripts.py | 144 --
 .../controllerconfig/upgrades/__init__.py | 5 -
 .../controllerconfig/upgrades/controller.py | 1588 -----------------
 .../controllerconfig/upgrades/management.py | 385 ----
 .../controllerconfig/upgrades/utils.py | 523 ------
 controllerconfig/controllerconfig/tox.ini | 2 +-
 devstack/lib/config | 2 -
 .../cgts-client/cgtsclient/v1/client.py | 2 -
 .../cgts-client/cgtsclient/v1/load.py | 87 -
 sysinv/sysinv/debian/deb_folder/rules | 1 -
 .../sysinv/debian/deb_folder/sysinv.install | 1 -
 .../sysinv/sysinv/etc/sysinv/delete_load.sh | 30 -
 sysinv/sysinv/sysinv/sysinv/agent/manager.py | 65 -
 sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py | 34 -
 .../sysinv/api/controllers/v1/__init__.py | 8 -
 .../sysinv/sysinv/api/controllers/v1/host.py | 126 +-
 .../sysinv/sysinv/api/controllers/v1/load.py | 230 +--
 .../sysinv/api/controllers/v1/upgrade.py | 22 +-
 sysinv/sysinv/sysinv/sysinv/cmd/upgrade.py | 93 -
 .../sysinv/sysinv/sysinv/common/constants.py | 1 -
 sysinv/sysinv/sysinv/sysinv/common/health.py | 41 -
 .../sysinv/sysinv/sysinv/conductor/manager.py | 942 +---------
 .../sysinv/sysinv/sysinv/conductor/rpcapi.py | 68 -
 sysinv/sysinv/sysinv/sysinv/db/api.py | 189 +-
 .../sysinv/sysinv/sysinv/db/sqlalchemy/api.py | 246 +--
 .../sysinv/sysinv/db/sqlalchemy/models.py | 60 -
 .../sysinv/sysinv/sysinv/objects/__init__.py | 9 -
 sysinv/sysinv/sysinv/sysinv/objects/host.py | 16 +-
 .../sysinv/sysinv/objects/host_upgrade.py | 38 -
 sysinv/sysinv/sysinv/sysinv/objects/load.py | 37 -
 .../sysinv/sysinv/objects/software_upgrade.py | 41 -
 sysinv/sysinv/sysinv/sysinv/puppet/nfv.py | 2 +-
 .../sysinv/sysinv/sysinv/puppet/platform.py | 2 +-
 .../sysinv/sysinv/tests/agent/test_manager.py | 59 -
 .../sysinv/tests/api/test_address_pool.py | 1 -
 .../sysinv/sysinv/tests/api/test_host.py | 12 -
 .../tests/api/test_interface_datanetwork.py | 1 -
 .../tests/api/test_interface_network.py | 1 -
 .../tests/api/test_kube_rootca_update.py | 4 -
 .../sysinv/tests/api/test_kube_upgrade.py | 4 -
 .../sysinv/sysinv/tests/api/test_label.py | 1 -
 .../sysinv/tests/api/test_oamnetwork.py | 3 -
 .../sysinv/sysinv/tests/api/test_partition.py | 1 -
 .../sysinv/sysinv/tests/api/test_ptp.py | 1 -
 .../sysinv/sysinv/tests/api/test_route.py | 7 -
 .../sysinv/tests/api/test_sensorgroup.py | 1 -
 .../sysinv/tests/api/test_storage_backends.py | 5 -
 .../sysinv/tests/api/test_storage_tier.py | 2 -
 .../sysinv/tests/common/test_usm_service.py | 1 -
 .../sysinv/tests/conductor/test_ceph.py | 1 -
 .../sysinv/tests/conductor/test_manager.py | 505 ------
sysinv/sysinv/sysinv/sysinv/tests/db/base.py | 19 - .../tests/db/sqlalchemy/test_migrations.py | 3 +- .../sysinv/sysinv/tests/db/test_sysinv.py | 1 - sysinv/sysinv/sysinv/sysinv/tests/db/utils.py | 44 - .../sysinv/sysinv/tests/puppet/test_puppet.py | 3 +- 58 files changed, 26 insertions(+), 5714 deletions(-) delete mode 100644 controllerconfig/controllerconfig/controllerconfig/tests/upgrades/__init__.py delete mode 100644 controllerconfig/controllerconfig/controllerconfig/tests/upgrades/test_migration_scripts.py delete mode 100644 controllerconfig/controllerconfig/controllerconfig/upgrades/__init__.py delete mode 100644 controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py delete mode 100644 controllerconfig/controllerconfig/controllerconfig/upgrades/management.py delete mode 100644 controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py delete mode 100644 sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py delete mode 100644 sysinv/sysinv/sysinv/etc/sysinv/delete_load.sh delete mode 100644 sysinv/sysinv/sysinv/sysinv/cmd/upgrade.py delete mode 100644 sysinv/sysinv/sysinv/sysinv/objects/host_upgrade.py delete mode 100644 sysinv/sysinv/sysinv/sysinv/objects/load.py delete mode 100644 sysinv/sysinv/sysinv/sysinv/objects/software_upgrade.py diff --git a/.zuul.yaml b/.zuul.yaml index 26d545abbb..2e73f06226 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -13,7 +13,6 @@ - sysinv-tox-flake8 - sysinv-tox-pylint - sysinv-tox-bandit - - controllerconfig-tox-py39 - controllerconfig-tox-flake8 - controllerconfig-tox-pylint - tsconfig__tox-pylint @@ -29,7 +28,6 @@ - sysinv-tox-flake8 - sysinv-tox-pylint - sysinv-tox-bandit - - controllerconfig-tox-py39 - controllerconfig-tox-flake8 - controllerconfig-tox-pylint - tsconfig__tox-pylint @@ -100,19 +98,6 @@ tox_envlist: bandit tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini -- job: - name: controllerconfig-tox-py39 - parent: tox-py39 - description: Run py39 tests for controllerconfig - nodeset: debian-bullseye - required-projects: - - starlingx/fault - files: - - controllerconfig/* - vars: - tox_envlist: py39 - tox_extra_args: -c controllerconfig/controllerconfig/tox.ini - - job: name: controllerconfig-tox-flake8 parent: tox diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/__init__.py b/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/__init__.py deleted file mode 100644 index 6be15e8026..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2021 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/test_migration_scripts.py b/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/test_migration_scripts.py deleted file mode 100644 index 92f02219ff..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/tests/upgrades/test_migration_scripts.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) 2021 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -"""Base test code to test migration scripts -First, focus on the migration script name validation -Second, the validation script sequence call -""" - -from mockproc import mockprocess -from os import listdir -from os.path import isfile -from os.path import join -from tempfile import mkdtemp - -import os -import unittest - -from controllerconfig.upgrades import utils - - -# The way to assert is to pass a script execution that writes the script file -# name into a file -# The content of the file will contain the sequence of the called scripts -script_body = '''#! /usr/bin/env python -with open('%s', 'a+') as f: - f.write("%s") -''' - -from_release = "20.06" -to_release = "20.12" -action = "migrate" - -# Lists to add scripts to be called, use a ":" separator for -# parsing/asserting -validScripts1 = ["71-bla1-bla2-bla3.sh", "8-bla1-bla2-bla3.py:", - "21-bla1-bla2-bla3.sh:"] - -validScripts2 = ["75-deployment-ns-upgrade.py:", "65-k8s-app-upgrade.sh:", - "10-sysinv-adjust-partitions.py:", - "60-helm-releases-data-migration.py:", - "55-armada-helm-upgrade.py:", - "95-apply-mandatory-psp-policies.py:", - "10-sysinv-adjust-partitions.py:", - "85-update-sc-admin-endpoint-cert.py:", - "70-active-secured-etcd-after-upgrade.sh:", - "50-dcmanager-subcloud-status-migration.py:", - "45-sysinv-remove-identity-shared-service.py:", - "25-coredns-configmap.sh:", - "20-exempt-admin-from-lockout.sh:", - "115-foo-bar-test-ok.sh:", "299-foo-bar-test-ok.sh:", - "2123-foo-bar-test-ok.sh"] - -invalidScripts1 = ["70-bla1-bla2-bla3.sh", "7-bla1-bla2-bla3.py:", - "20-bla1-bla2-bla3.sh:", "-20-bla1-bla2-bla3.sh"] - -invalidScripts2 = ["95-apply-mandatory-psp-policies.py", - "10-sysinv-adjust-partitions.py:", - "85-update-sc-admin-endpoint-cert.py:", - "70_active-secured-etcd-after-upgrade.sh:"] - - -# Append scripts to be executed according to the passed list -def addScripts(self, scripts, output_filename): - for script in scripts: - self.scripts.append(script, returncode=0, script=script_body % - (output_filename, script)) - - -# Test with the files under "controllerconfig/upgrade-scripts" -def addRealMigrationScripts(self, output_filename): - path = os.getcwd() + "/upgrade-scripts" - for f in listdir(path): - if isfile(join(path, f)): - self.scripts.append(f, returncode=0, script=script_body % - (output_filename, f)) - - -def assertProperSorted(scripts): - output = False - sequence = [] - for script in scripts: - sequence.append(int(script.split("-")[0])) - if sorted(sequence) == sequence: - output = True - return output - - -class TestMigrationScripts(unittest.TestCase): - - def setUp(self): - self.scripts_dir = mkdtemp() - self.output_filename = mkdtemp() + "/output.txt" - # Re-create the file for each run - open(self.output_filename, 'w+').close() - self.scripts = mockprocess.MockProc(self.scripts_dir) - - def test_migration_scripts_success_1(self): - addScripts(self, validScripts1, self.output_filename) - with self.scripts: - utils.execute_migration_scripts(from_release, to_release, action, - self.scripts_dir) - with open(self.output_filename, 'r') as f: - output = str(f.read()) - if(assertProperSorted(output.split(':'))): - pass - - def test_migration_scripts_success_2(self): - addScripts(self, validScripts2, self.output_filename) - with self.scripts: - utils.execute_migration_scripts(from_release, to_release, action, - self.scripts_dir) - with open(self.output_filename, 'r') as f: - output = str(f.read()) - if(assertProperSorted(output.split(':'))): - pass - - def 
test_real_migration_scripts(self): - addRealMigrationScripts(self, self.output_filename) - with self.scripts: - utils.execute_migration_scripts(from_release, to_release, action, - self.scripts_dir) - with open(self.output_filename, 'r') as f: - output = str(f.read()) - if(assertProperSorted(output.split(':'))): - pass - - def test_migration_scripts_validation_fail_1(self): - addScripts(self, invalidScripts1, self.output_filename) - with self.assertRaises(ValueError): - with self.scripts: - utils.execute_migration_scripts(from_release, to_release, - action, self.scripts_dir) - - def test_migration_scripts_validation_fail_2(self): - addScripts(self, invalidScripts2, self.output_filename) - with self.assertRaises(ValueError): - with self.scripts: - utils.execute_migration_scripts(from_release, to_release, - action, self.scripts_dir) - - def tearDown(self): - os.remove(self.output_filename) diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/__init__.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/__init__.py deleted file mode 100644 index 754a8f4ef5..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright (c) 2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py deleted file mode 100644 index d1fe79a645..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py +++ /dev/null @@ -1,1588 +0,0 @@ -# -# Copyright (c) 2016-2025 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# This file contains functions used to upgrade controller-1 -# - -import copy -import fcntl -import glob -import json -import psycopg2 -import os -import shutil -import socket -import stat -import subprocess -import sys -import tarfile -import tempfile -import time -import yaml - -from sysinv.common import constants as sysinv_constants -from sysinv.puppet import common as puppet_common - - -# WARNING: The controller-1 upgrade is done before any puppet manifests -# have been applied, so only the static entries from tsconfig can be used -# (the platform.conf file will not have been updated with dynamic values). 
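The test module removed above validates that migration scripts are named with a numeric prefix and execute in ascending numeric order. A minimal sketch of that ordering check, with the helper name validate_order chosen here for illustration (it is not part of the removed code):

import re

def validate_order(script_names):
    """Return True if the scripts would run in ascending numeric order."""
    # Names must begin with a sequence number followed by a dash,
    # e.g. "10-sysinv-adjust-partitions.py"; anything else is rejected,
    # mirroring the ValueError the removed tests expect.
    pattern = re.compile(r'^(\d+)-')
    sequence = []
    for name in script_names:
        match = pattern.match(name)
        if match is None:
            raise ValueError("Invalid migration script name: %s" % name)
        sequence.append(int(match.group(1)))
    return sequence == sorted(sequence)

print(validate_order(["8-a.py", "21-b.sh", "71-c.sh"]))  # True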
-from tsconfig.tsconfig import SW_VERSION -from tsconfig.tsconfig import PLATFORM_PATH -from tsconfig.tsconfig import VOLATILE_PXEBOOT_PATH -from tsconfig.tsconfig import KEYRING_PATH -from tsconfig.tsconfig import PLATFORM_CONF_FILE -from tsconfig.tsconfig import CONTROLLER_UPGRADE_FLAG -from tsconfig.tsconfig import CONTROLLER_UPGRADE_COMPLETE_FLAG -from tsconfig.tsconfig import CONTROLLER_UPGRADE_FAIL_FLAG -from tsconfig.tsconfig import CONTROLLER_UPGRADE_STARTED_FLAG - -from controllerconfig.common import constants -from controllerconfig import utils as cutils -from controllerconfig.upgrades import utils - -from controllerconfig.common import oslolog as log -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -POSTGRES_BIN = utils.get_postgres_bin() -POSTGRES_MOUNT_PATH = '/mnt/postgresql' -POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump' -DB_CONNECTION_CONF_FORMAT = "connection=postgresql://%s:%s@127.0.0.1/%s\n" -DB_CONNECTION_EXEC_FORMAT = "postgresql://%s:%s@127.0.0.1/%s" - -restore_patching_complete = '/etc/platform/.restore_patching_complete' -restore_compute_ready = '/var/run/.restore_compute_ready' -node_is_patched = '/var/run/node_is_patched' -patching_permdir = '/opt/patching' -patching_repo_permdir = '/var/www/pages/updates' - - -def gethostaddress(hostname): - """ Get the IP address for a hostname, supporting IPv4 and IPv6. """ - return socket.getaddrinfo(hostname, None)[0][4][0] - - -def get_db_credentials(shared_services, from_release, role=None): - """ - Returns the database credentials using the provided shared services, - from_release and role. - """ - db_credential_keys = \ - {'barbican': {'hiera_user_key': 'barbican::db::postgresql::user', - 'keyring_password_key': 'barbican', - }, - 'sysinv': {'hiera_user_key': 'sysinv::db::postgresql::user', - 'keyring_password_key': 'sysinv', - }, - 'fm': {'hiera_user_key': 'fm::db::postgresql::user', - 'keyring_password_key': 'fm', - }, - } - - if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services: - db_credential_keys.update( - {'keystone': {'hiera_user_key': - 'keystone::db::postgresql::user', - 'keyring_password_key': 'keystone', - }}) - - if role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - db_credential_keys.update( - {'dcmanager': {'hiera_user_key': 'dcmanager::db::postgresql::user', - 'keyring_password_key': 'dcmanager', - }, - 'dcorch': {'hiera_user_key': 'dcorch::db::postgresql::user', - 'keyring_password_key': 'dcorch', - }, - }) - - # Get the hiera data for the from release - hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release, - "hieradata") - static_file = os.path.join(hiera_path, "static.yaml") - with open(static_file, 'r') as s_file: - static_config = yaml.safe_load(s_file) - - db_credentials = dict() - for database, values in db_credential_keys.items(): - username = static_config[values['hiera_user_key']] - password = utils.get_password_from_keyring( - values['keyring_password_key'], "database") - db_credentials[database] = {'username': username, 'password': password} - - return db_credentials - - -def get_system_role(): - """ Get the system role from the sysinv database""" - - conn = psycopg2.connect("dbname=sysinv user=postgres") - cur = conn.cursor() - cur.execute("select distributed_cloud_role from i_system;") - row = cur.fetchone() - if row is None: - LOG.error("Failed to fetch i_system data") - raise psycopg2.ProgrammingError("Failed to fetch i_system data") - - role = row[0] - - return role - - -def get_shared_services(): - """ Get the list of shared 
services from the sysinv database""" - - shared_services = [] - DEFAULT_SHARED_SERVICES = [] - - conn = psycopg2.connect("dbname=sysinv user=postgres") - cur = conn.cursor() - cur.execute("select capabilities from i_system;") - row = cur.fetchone() - if row is None: - LOG.error("Failed to fetch i_system data") - raise psycopg2.ProgrammingError("Failed to fetch i_system data") - - cap_obj = json.loads(row[0]) - region_config = cap_obj.get('region_config', None) - if region_config: - shared_services = cap_obj.get('shared_services', - DEFAULT_SHARED_SERVICES) - - return shared_services - - -def get_connection_string(db_credentials, database, exec_format=False): - """ Generates a connection string for a given database - exec_format - True: the connection string can be used in line command - ( ex: barbican ) or in psycopg2.connect - False: the connection string is to be used in .conf files - """ - username = db_credentials[database]['username'] - password = db_credentials[database]['password'] - - if exec_format: - return DB_CONNECTION_EXEC_FORMAT % (username, password, database) - else: - # use format to be used in .conf files - return DB_CONNECTION_CONF_FORMAT % (username, password, database) - - -def create_temp_filesystem(vgname, lvname, mountpoint, size): - """ Creates and mounts a logical volume for temporary use. """ - devnull = open(os.devnull, 'w') - - try: - # lvcreate -n --size -y -Wy -Zy - subprocess.check_call( - ["lvcreate", - "--size", - size, - "-n", - lvname, - vgname, - "-y", - "-Wy", - "-Zy"], - close_fds=True, - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to create %s" % lvname) - raise - - devname = '/dev/%s/%s' % (vgname, lvname) - try: - subprocess.check_call( - ["mkfs.ext4", - devname], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to format %s" % devname) - raise - - try: - subprocess.check_call( - ["mount", - devname, - mountpoint, - "-t", - "ext4"], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to mount %s at %s" % (devname, mountpoint)) - raise - - -def remove_temp_filesystem(vgname, lvname, mountpoint): - """ Unmounts and removes a logical volume. """ - devnull = open(os.devnull, 'w') - - try: - subprocess.check_call( - ["umount", - mountpoint], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to umount %s" % mountpoint) - - try: - subprocess.check_call( - ["lvremove", - "-f", - "%s/%s" % (vgname, lvname)], - close_fds=True, - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to remove %s" % lvname) - - -def nfs_mount_filesystem(filesystem, mountdir=None): - """ Mounts a remote nfs filesystem. """ - devnull = open(os.devnull, 'w') - if not mountdir: - mountdir = filesystem - try: - subprocess.check_call( - ["nfs-mount", - "controller-platform-nfs:%s" % filesystem, - mountdir], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to nfs-mount %s at %s" % (filesystem, mountdir)) - raise - - -def unmount_filesystem(filesystem): - """ Unmounts a remote nfs filesystem. """ - devnull = open(os.devnull, 'w') - try: - subprocess.check_call( - ["umount", - filesystem], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to umount %s" % filesystem) - - -def migrate_keyring_data(from_release, to_release): - """ Migrates keyring data. 
""" - - LOG.info("Migrating keyring data") - # First delete any keyring files for the to_release - they can be created - # if release N+1 nodes are incorrectly left powered up when the release N - # load is installed. - shutil.rmtree(os.path.join(PLATFORM_PATH, ".keyring", to_release), - ignore_errors=True) - shutil.copytree(os.path.join(PLATFORM_PATH, ".keyring", from_release), - os.path.join(PLATFORM_PATH, ".keyring", to_release)) - - -def migrate_pxeboot_config(from_release, to_release): - """ Migrates pxeboot configuration. """ - devnull = open(os.devnull, 'w') - - LOG.info("Migrating pxeboot config") - - # Copy the entire pxelinux.cfg directory to pick up any changes made - # after the data was migrated (i.e. updates to the controller-1 load). - source_pxelinux = os.path.join(PLATFORM_PATH, "config", from_release, - "pxelinux.cfg", "") - dest_pxelinux = os.path.join(PLATFORM_PATH, "config", to_release, - "pxelinux.cfg") - - try: - subprocess.check_call( - ["rsync", - "-a", - os.path.join(source_pxelinux), - os.path.join(dest_pxelinux)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to migrate %s" % source_pxelinux) - raise - - to_release_symlink_target = os.path.join(VOLATILE_PXEBOOT_PATH, - "pxelinux.cfg.files", "grub.cfg") - - dest_symlink_exists = os.path.islink(dest_pxelinux + "/grub.cfg") - if dest_symlink_exists: - os.unlink(dest_pxelinux + "/grub.cfg") - os.symlink(to_release_symlink_target, dest_pxelinux + "/grub.cfg") - - -def migrate_armada_config(from_release, to_release): - """ Migrates armada configuration. """ - - # Check if the folder exist before migration - if not os.path.exists(os.path.join(PLATFORM_PATH, "armada")): - LOG.info("Skipping armada migration, the directory doesn't exist") - return - - LOG.info("Migrating armada config") - devnull = open(os.devnull, 'w') - - # Copy the entire armada.cfg directory to pick up any changes made - # after the data was migrated (i.e. updates to the controller-1 load). - source_armada = os.path.join(PLATFORM_PATH, "armada", from_release, "") - dest_armada = os.path.join(PLATFORM_PATH, "armada", to_release) - try: - subprocess.check_call( - ["rsync", - "-a", - os.path.join(source_armada), - os.path.join(dest_armada)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to migrate %s" % source_armada) - raise - - -def migrate_fluxcd_config(from_release, to_release): - """ Migrates fluxcd configuration. """ - - # Check if the folder exists before migration - if not os.path.exists(os.path.join(PLATFORM_PATH, "fluxcd")): - LOG.info("Skipping fluxcd migration, the directory doesn't exist") - return - - LOG.info("Migrating fluxcd config") - devnull = open(os.devnull, 'w') - - # Copy the entire fluxcd.cfg directory to pick up any changes made - # after the data was migrated. - source_fluxcd = os.path.join(PLATFORM_PATH, "fluxcd", from_release, "") - dest_fluxcd = os.path.join(PLATFORM_PATH, "fluxcd", to_release) - try: - subprocess.check_call( - ["rsync", - "-a", - os.path.join(source_fluxcd), - os.path.join(dest_fluxcd)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to migrate %s" % source_fluxcd) - raise - - -def migrate_helm_config(from_release, to_release): - """ Migrates helm configuration. """ - - LOG.info("Migrating helm config") - devnull = open(os.devnull, 'w') - - # Copy the entire helm.cfg directory to pick up any changes made - # after the data was migrated (i.e. updates to the controller-1 load). 
- source_helm = os.path.join(PLATFORM_PATH, "helm", from_release, "") - dest_helm = os.path.join(PLATFORM_PATH, "helm", to_release) - try: - subprocess.check_call( - ["rsync", - "-a", - os.path.join(source_helm), - os.path.join(dest_helm)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to migrate %s" % source_helm) - raise - - -def migrate_sysinv_data(from_release, to_release): - """ Migrates sysinv data. """ - devnull = open(os.devnull, 'w') - - LOG.info("Migrating sysinv data") - - # If the /opt/platform/sysinv//sysinv.conf.default file has - # changed between releases it must be modified at this point. - try: - subprocess.check_call( - ["rsync", - "-a", - os.path.join(PLATFORM_PATH, "sysinv", from_release, ""), - os.path.join(PLATFORM_PATH, "sysinv", to_release)], - stdout=devnull) - - except subprocess.CalledProcessError: - LOG.exception("Failed to copy sysinv platform dir to new version") - raise - - # Get the hiera data for the from release - hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release, - "hieradata") - static_file = os.path.join(hiera_path, "static.yaml") - with open(static_file, 'r') as s_file: - static_config = yaml.safe_load(s_file) - - username = static_config["sysinv::db::postgresql::user"] - password = utils.get_password_from_keyring("sysinv", "database") - - # We need a bare bones /etc/sysinv/sysinv.conf file in order to do the - # sysinv database migration and then generate the upgrades manifests. - with open("/etc/sysinv/sysinv.conf", "w") as f: - f.write("[DEFAULT]\n") - f.write("logging_context_format_string=sysinv %(asctime)s.%" - "(msecs)03d %(process)d %(levelname)s %" - "(name)s [%(request_id)s %(user)s %" - "(tenant)s] %(instance)s%(message)s\n") - f.write("verbose=True\n") - f.write("syslog_log_facility=local6\n") - f.write("use_syslog=True\n") - f.write("logging_default_format_string=sysinv %(asctime)s.%" - "(msecs)03d %(process)d %(levelname)s %(name)s [-] %" - "(instance)s%(message)s\n") - f.write("debug=False\n") - f.write('sql_connection=postgresql://%s:%s@127.0.0.1/%s\n' % - (username, password, 'sysinv')) - - -def prepare_postgres_filesystems(): - """ Prepares postgres filesystems for migration. """ - devnull = open(os.devnull, 'w') - - LOG.info("Preparing postgres filesystems") - - # In order to avoid the speed penalty for doing database operations on an - # nfs mounted filesystem, we create the databases locally and then copy - # them to the nfs mounted filesystem after data migration. - - # Create a temporary filesystem for the dumped database - from_dir = os.path.join(POSTGRES_MOUNT_PATH, "upgrade") - statvfs = os.statvfs(from_dir) - db_dump_filesystem_size = str(statvfs.f_frsize * statvfs.f_blocks) + "B" - - # Move the dumped files to a temporary filesystem. - os.mkdir(POSTGRES_DUMP_MOUNT_PATH) - create_temp_filesystem("cgts-vg", "dbdump-temp-lv", - POSTGRES_DUMP_MOUNT_PATH, - db_dump_filesystem_size) - shutil.move(from_dir, POSTGRES_DUMP_MOUNT_PATH) - - # Create a temporary filesystem for the migrated database - statvfs = os.statvfs(POSTGRES_MOUNT_PATH) - db_filesystem_size = str(statvfs.f_frsize * statvfs.f_blocks) + "B" - if not os.path.isdir(utils.POSTGRES_PATH): - os.mkdir(utils.POSTGRES_PATH) - create_temp_filesystem("cgts-vg", "postgres-temp-lv", utils.POSTGRES_PATH, - db_filesystem_size) - subprocess.check_call(['chown', 'postgres:postgres', utils.POSTGRES_PATH], - stdout=devnull) - - -def create_database(): - """ Creates empty postgres database. 
""" - - devnull = open(os.devnull, 'w') - - LOG.info("Creating postgres database") - - db_create_commands = [ - # Configure new data directory for postgres - 'sudo -u postgres {} -D {}'.format( - os.path.join(POSTGRES_BIN, 'initdb'), - utils.POSTGRES_DATA_DIR), - 'chmod -R 700 ' + utils.POSTGRES_DATA_DIR, - 'chown -R postgres ' + utils.POSTGRES_DATA_DIR, - ] - - # Execute db creation commands - for cmd in db_create_commands: - try: - LOG.info("Executing db create command: %s" % cmd) - subprocess.check_call([cmd], - shell=True, stdout=devnull, stderr=devnull) - except subprocess.CalledProcessError as ex: - LOG.exception("Failed to execute command: '%s' during upgrade " - "processing, return code: %d" % (cmd, ex.returncode)) - raise - - -def import_databases(from_release, to_release, from_path=None, simplex=False): - """ Imports databases. """ - - devnull = open(os.devnull, 'w') - if not from_path: - from_path = POSTGRES_DUMP_MOUNT_PATH - from_dir = os.path.join(from_path, "upgrade") - - LOG.info("Importing databases") - try: - postgres_config_path = os.path.join( - from_dir, 'postgres.postgreSql.config') - # Do postgres schema import (suppress stderr due to noise) - subprocess.check_call(['sudo -u postgres psql -f ' + - postgres_config_path + ' postgres'], - shell=True, - stdout=devnull, - stderr=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to import schemas.") - raise - - import_commands = [] - - # Do postgres data import - for data in glob.glob(from_dir + '/*.*Sql.data'): - db_elem = data.split('/')[-1].split('.')[0] - import_commands.append((db_elem, - "sudo -u postgres psql -f " + data + - " " + db_elem)) - - # Import VIM data - if not simplex: - # in some cases, file fd_lock cannot be acquired on nfs device. - # create the database on local device and copy it over to nfs is - # a safe solution. - VIM_DB_NAME = 'vim_db_v1' - temp_db_path = '/tmp/' - db_dir = os.path.join(PLATFORM_PATH, 'nfv/vim', SW_VERSION) - os.mkdir(db_dir) - db_path = os.path.join(db_dir, VIM_DB_NAME) - import_commands.append( - ("nfv-vim", - "nfv-vim-manage db-load-data -d %s -f %s" % - (temp_db_path, os.path.join(from_dir, 'vim.data')))) - - # copy the vim db - import_commands.append( - ('move database to %s' % db_path, - ("mv %s %s" % (os.path.join(temp_db_path, VIM_DB_NAME), - db_path)))) - - # Execute import commands - for cmd in import_commands: - try: - print("Importing %s" % cmd[0]) - LOG.info("Executing import command: %s" % cmd[1]) - subprocess.check_call([cmd[1]], - shell=True, stdout=devnull) - - except subprocess.CalledProcessError as ex: - LOG.exception("Failed to execute command: '%s' during upgrade " - "processing, return code: %d" % - (cmd[1], ex.returncode)) - raise - - if not simplex: - # examine if flock works in nfs mount device (DX only), report if not - # being able to - with open(os.path.join(temp_db_path, VIM_DB_NAME), "w") as f: - try: - fcntl.flock(f, fcntl.LOCK_SH) - except Exception: - bug_url = 'https://bugs.launchpad.net/starlingx/+bug/1990544' - LOG.exception('Experiencing bug %s' % bug_url) - finally: - try: - fcntl.flock(f, fcntl.LOCK_UN) - except Exception: - # this is not going to be a problem, file is closed - # by with statement. Just log and move on. - LOG.exception('Error when unlock file') - - -def create_databases(from_release, to_release, db_credentials): - """ Creates databases. 
""" - LOG.info("Creating new databases") - - # Create databases that are new in this release - - conn = psycopg2.connect('dbname=postgres user=postgres') - - # Postgres won't allow transactions around database create operations - # so we set the connection to autocommit - conn.set_isolation_level( - psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) - - databases_to_create = [] - if not databases_to_create: - return - - with conn: - with conn.cursor() as cur: - for database in databases_to_create: - print("Creating %s database" % database) - username = psycopg2.extensions.AsIs( - '\"%s\"' % db_credentials[database]['username']) - db_name = psycopg2.extensions.AsIs('\"%s\"' % database) - password = db_credentials[database]['password'] - - try: - # Here we create the new database and the role for it - # The role will be used by the dbsync command to - # connect to the database. This ensures any new tables - # are added with the correct owner - cur.execute('CREATE DATABASE %s', (db_name,)) - cur.execute('CREATE ROLE %s', (username,)) - cur.execute('ALTER ROLE %s LOGIN PASSWORD %s', - (username, password)) - cur.execute('GRANT ALL ON DATABASE %s TO %s', - (db_name, username)) - except Exception as ex: - LOG.exception("Failed to create database and role. " + - "(%s : %s) Exception: %s" % - (database, username, ex)) - raise - - -def migrate_sysinv_database(): - """ Migrates the sysinv database. """ - devnull = open(os.devnull, 'w') - - sysinv_cmd = 'sysinv-dbsync' - try: - print("Migrating sysinv") - LOG.info("Executing migrate command: %s" % sysinv_cmd) - subprocess.check_call(sysinv_cmd, - shell=True, stdout=devnull, stderr=devnull) - - except subprocess.CalledProcessError as ex: - LOG.exception("Failed to execute command: '%s' during upgrade " - "processing, return code: %d" - % (sysinv_cmd, ex.returncode)) - raise - - -def migrate_databases(from_release, shared_services, db_credentials, - simplex=False, role=None): - """ Migrates databases. """ - - devnull = open(os.devnull, 'w') - - # Create minimal config files for each OpenStack service so they can - # run their database migration. - if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services: - with open("/etc/keystone/keystone-dbsync.conf", "w") as f: - f.write("[database]\n") - f.write(get_connection_string(db_credentials, 'keystone')) - - migrate_commands = [ - # Migrate barbican - ('barbican', - 'barbican-manage db upgrade ' + - '--db-url %s' % get_connection_string(db_credentials, 'barbican', - True)), - ] - - # Migrate fm - # append the migrate command for dcmanager db - with open("/etc/fm/fm.conf", "w") as f: - f.write("[database]\n") - f.write(get_connection_string(db_credentials, 'fm')) - - migrate_commands += [ - ('fm', - 'fm-dbsync') - ] - - if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services: - # To avoid a deadlock during keystone contract we will use offline - # migration for simplex upgrades. Other upgrades will have to use - # another method to resolve the deadlock - if not simplex: - migrate_commands += [ - # Migrate keystone - # - # EXPAND - we will first expand the database scheme to a - # superset of what both the previous and next release can - # utilize, and create triggers to facilitate the live - # migration process. 
- # - # MIGRATE - will perform the data migration, while still] - # preserving the old schema - ('keystone', - 'keystone-manage --config-file ' + - '/etc/keystone/keystone-dbsync.conf db_sync --expand'), - ('keystone', - 'keystone-manage --config-file ' + - '/etc/keystone/keystone-dbsync.conf db_sync --migrate'), - ] - else: - migrate_commands += [ - # In simplex we're the only node so we can do an offline - # migration - ('keystone', - 'keystone-manage --config-file ' + - '/etc/keystone/keystone-dbsync.conf db_sync') - ] - - if role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - # append the migrate command for dcmanager db - with open("/etc/dcmanager/dcmanager.conf", "w") as f: - f.write("[database]\n") - f.write(get_connection_string(db_credentials, 'dcmanager')) - - migrate_commands += [ - ('dcmanager', - 'dcmanager-manage db_sync') - ] - - # append the migrate command for dcorch db - with open("/etc/dcorch/dcorch.conf", "w") as f: - f.write("[database]\n") - f.write(get_connection_string(db_credentials, 'dcorch')) - - migrate_commands += [ - ('dcorch', - 'dcorch-manage db_sync') - ] - - # Execute migrate commands - for cmd in migrate_commands: - try: - print("Migrating %s" % cmd[0]) - LOG.info("Executing migrate command: %s" % cmd[1]) - subprocess.check_call([cmd[1]], - shell=True, stdout=devnull, stderr=devnull) - - except subprocess.CalledProcessError as ex: - LOG.exception("Failed to execute command: '%s' during upgrade " - "processing, return code: %d" % - (cmd[1], ex.returncode)) - raise - - # The database entry for controller-1 will be set to whatever it was when - # the sysinv database was dumped on controller-0. Update the state and - # from/to load to what it should be when it becomes active. - try: - subprocess.check_call( - ["/usr/bin/sysinv-upgrade", - "update_controller_state"], - stdout=devnull, stderr=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to update state of %s" % - utils.CONTROLLER_1_HOSTNAME) - raise - - -def get_controller_1_uuid(): - """ Read in the uuid from the sysinv db""" - conn = psycopg2.connect("dbname=sysinv user=postgres") - with conn: - with conn.cursor() as cur: - cur.execute("select uuid from i_host where hostname=" - "'controller-1';") - row = cur.fetchone() - if row is None: - LOG.error("Failed to fetch controller-1 uuid") - raise Exception("Error reading controller UUID") - - return row[0] - - -def update_platform_conf_file(uuid): - """ Update the platform conf file with the uuid - This is needed for the compute_huge script to update the CPU - allocations - """ - if os.path.isfile(PLATFORM_CONF_FILE): - # read the platform config file and check for UUID - with open(PLATFORM_CONF_FILE, "r") as fd: - for line in fd: - if line.find("UUID=") == 0: - LOG.info("Found UUID in platform.conf: %s" % line) - return - - # the UUID is not found, append it - LOG.info("Appending UUID to platform.conf. UUID: %s" % uuid) - with open(PLATFORM_CONF_FILE, "a") as fd: - fd.write("UUID=" + uuid + "\n") - - -def migrate_hiera_data(from_release, to_release, role=None): - """ Migrate hiera data. """ - - LOG.info("Migrating hiera data") - from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release, - "hieradata") - to_hiera_path = constants.HIERADATA_PERMDIR - - shutil.rmtree(to_hiera_path, ignore_errors=True) - os.makedirs(to_hiera_path) - - # Copy only the static yaml files. The other yaml files will be generated - # when required. 
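The removed migrate_hiera_data loads static.yaml, overlays the release-specific keys, and writes it back in block style. A condensed sketch of that read-update-write round trip (path and key set shortened for illustration):

import yaml

def update_static_hieradata(path, software_version):
    """Overlay release-specific values onto an existing hieradata file."""
    with open(path) as f:
        config = yaml.safe_load(f)
    config['platform::params::software_version'] = software_version
    with open(path, 'w') as f:
        # default_flow_style=False keeps block-style YAML, as the
        # removed code did
        yaml.dump(config, f, default_flow_style=False)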
- for f in ['secure_static.yaml', 'static.yaml']: - shutil.copy(os.path.join(from_hiera_path, f), to_hiera_path) - - # Make any necessary updates to the static yaml files. - # Update the static.yaml file - static_file = os.path.join(constants.HIERADATA_PERMDIR, "static.yaml") - with open(static_file, 'r') as yaml_file: - static_config = yaml.safe_load(yaml_file) - static_config.update({ - 'platform::params::software_version': SW_VERSION, - 'platform::client::credentials::params::keyring_directory': - KEYRING_PATH, - 'platform::client::credentials::params::keyring_file': - os.path.join(KEYRING_PATH, '.CREDENTIAL'), - }) - - with open(static_file, 'w') as yaml_file: - yaml.dump(static_config, yaml_file, default_flow_style=False) - - secure_static_file = os.path.join( - constants.HIERADATA_PERMDIR, "secure_static.yaml") - with open(secure_static_file, 'r') as yaml_file: - secure_static_config = yaml.safe_load(yaml_file) - - with open(secure_static_file, 'w') as yaml_file: - yaml.dump(secure_static_config, yaml_file, default_flow_style=False) - - -def apply_sriov_config(db_credentials, hostname): - # If controller-1 has any FEC devices or sriov vfs configured, apply the - # sriov runtime manifest. We can't apply it from controller-0 during the - # host-unlock process as controller-1 is running the new release. - - connection_string = get_connection_string(db_credentials, 'sysinv', True) - - conn = psycopg2.connect(connection_string) - cur = conn.cursor() - cur.execute( - "select id from i_host where hostname=%s;", (hostname,)) - host = cur.fetchone() - host_id = host[0] - cur.execute("select id from pci_devices " - "where sriov_numvfs > 0 and host_id=%s", - (host_id,)) - fec_device = cur.fetchone() - cur.execute("select id from interfaces " - "where forihostid=%s and iftype='ethernet' " - "and sriov_numvfs>0;", - (host_id,)) - interface = cur.fetchone() - if interface or fec_device: - # There are FEC devices/sriov vfs configured, apply the sriov manifest - LOG.info("Applying sriov/fec manifest") - personality = sysinv_constants.WORKER - classes = [ - 'platform::network::interfaces::sriov::runtime', - 'platform::devices::fpga::fec::runtime' - ] - config = {'classes': classes} - # create a temporary file to hold the runtime configuration values - fd, tmpfile = tempfile.mkstemp(suffix='.yaml') - with open(tmpfile, 'w') as f: - yaml.dump(config, f, default_flow_style=False) - puppet_common.puppet_apply_manifest( - hostname, personality, manifest='runtime', runtime=tmpfile) - os.close(fd) - os.remove(tmpfile) - - -def get_db_host_mgmt_ip(db_credentials, hostname): - """ Get the Hostname management IP from DB""" - - # the postgres server was stopped during the upgrade_controller - # need to use db_credentials to acess the DB - connection_string = get_connection_string(db_credentials, 'sysinv', True) - conn = psycopg2.connect(connection_string) - - db_hostname = hostname + "-mgmt" - try: - cur = conn.cursor() - cur.execute("SELECT address FROM addresses WHERE name='{}';".format( - db_hostname)) - row = cur.fetchone() - - if row is None: - msg = "MGMT IP not found for: '{}'".format(db_hostname) - raise Exception(msg) - - return row[0] - - except Exception as ex: - LOG.error("Failed to get MGMT IP for: '%s'" % db_hostname) - raise ex - - -def upgrade_controller(from_release, to_release): - """ Executed on the release N+1 side upgrade controller-1. """ - - if from_release == to_release: - raise Exception("Cannot upgrade from release %s to the same " - "release %s." 
% (from_release, to_release)) - - devnull = open(os.devnull, 'w') - - LOG.info("Upgrading controller from %s to %s" % (from_release, to_release)) - - # Stop sysinv-agent so it doesn't interfere - LOG.info("Stopping sysinv-agent") - try: - subprocess.check_call(["systemctl", "stop", "sysinv-agent"], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.error("Failed to stop %s service" % "sysinv-agent") - raise - - # Mount required filesystems from mate controller - LOG.info("Mounting filesystems") - nfs_mount_filesystem(PLATFORM_PATH) - nfs_mount_filesystem(utils.RABBIT_PATH) - os.mkdir(POSTGRES_MOUNT_PATH) - nfs_mount_filesystem(utils.POSTGRES_PATH, POSTGRES_MOUNT_PATH) - - # Migrate keyring data - print("Migrating keyring data...") - migrate_keyring_data(from_release, to_release) - - # Migrate pxeboot config - print("Migrating pxeboot configuration...") - migrate_pxeboot_config(from_release, to_release) - - # Migrate armada config - print("Migrating armada configuration...") - migrate_armada_config(from_release, to_release) - - # Migrate fluxcd config - print("Migrating fluxcd configuration...") - migrate_fluxcd_config(from_release, to_release) - - # Migrate helm config - print("Migrating helm configuration...") - migrate_helm_config(from_release, to_release) - - # Migrate sysinv data. - print("Migrating sysinv configuration...") - migrate_sysinv_data(from_release, to_release) - - # Prepare for database migration - print("Preparing for database migration...") - prepare_postgres_filesystems() - - # Create the postgres database - create_database() - - # Start the postgres server - try: - subprocess.check_call([ - 'sudo', - '-u', - 'postgres', - os.path.join(POSTGRES_BIN, 'pg_ctl'), - '-D', - utils.POSTGRES_DATA_DIR, - 'start'], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to start postgres service") - raise - - # Wait for postgres to start - # TODO: Make this deterministic (use wait_service?) - time.sleep(5) - - # Import databases - print("Importing databases...") - import_databases(from_release, to_release) - - role = get_system_role() - shared_services = get_shared_services() - - # Create /tmp/python_keyring - used by keystone manifest. 
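The removed upgrade_controller sleeps a fixed five seconds after pg_ctl start and carries a TODO to make the wait deterministic. One possible approach, sketched here with the standard pg_isready client utility (the helper itself is illustrative, not part of the removed code):

import os
import subprocess
import time

def wait_for_postgres(postgres_bin, timeout=30):
    """Poll until postgres accepts connections, instead of sleeping."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # pg_isready exits 0 once the server is accepting connections
        rc = subprocess.call([os.path.join(postgres_bin, "pg_isready"),
                              "-h", "127.0.0.1"])
        if rc == 0:
            return
        time.sleep(1)
    raise RuntimeError("postgres not ready after %s seconds" % timeout)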
- shutil.copytree(os.path.join(PLATFORM_PATH, ".keyring", to_release, - "python_keyring"), - "/tmp/python_keyring") - - # Copy admin.conf file from /opt/platform to /etc/kubernetes/admin.conf - # during upgrade - try: - subprocess.check_call( - ["cp", - os.path.join(PLATFORM_PATH, "config", to_release, - "kubernetes", utils.KUBERNETES_ADMIN_CONF_FILE), - os.path.join(utils.KUBERNETES_CONF_PATH, - utils.KUBERNETES_ADMIN_CONF_FILE)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to copy %s" % - os.path.join(utils.KUBERNETES_CONF_PATH, - utils.KUBERNETES_ADMIN_CONF_FILE)) - raise - - # Migrate hiera data - migrate_hiera_data(from_release, to_release, role=role) - utils.add_upgrade_entries_to_hiera_data(from_release) - - # Get database credentials - db_credentials = get_db_credentials( - shared_services, from_release, role=role) - - # Create any new databases - print("Creating new databases...") - create_databases(from_release, to_release, db_credentials) - - print("Migrating databases...") - # Migrate sysinv database - migrate_sysinv_database() - - # Migrate databases - migrate_databases(from_release, shared_services, db_credentials, role=role) - - print("Applying configuration...") - - # Execute migration scripts - utils.execute_migration_scripts( - from_release, to_release, utils.ACTION_MIGRATE) - - uuid = get_controller_1_uuid() - - update_platform_conf_file(uuid) - - # Stop postgres server - try: - subprocess.check_call([ - 'sudo', - '-u', - 'postgres', - os.path.join(POSTGRES_BIN, 'pg_ctl'), - '-D', - utils.POSTGRES_DATA_DIR, - 'stop'], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to stop postgres service") - raise - - # Apply "upgrades" manifest - LOG.info("Applying upgrades manifest") - myip = gethostaddress(utils.CONTROLLER_1_HOSTNAME) - utils.apply_upgrade_manifest(myip) - - # Remove manifest and keyring files - shutil.rmtree("/tmp/puppet") - shutil.rmtree("/tmp/python_keyring") - - # Generate config to be used by "regular" manifest - LOG.info("Generating config for %s" % utils.CONTROLLER_1_HOSTNAME) - try: - cutils.create_system_config() - cutils.create_host_config(utils.CONTROLLER_1_HOSTNAME) - except Exception as e: - LOG.exception(e) - LOG.info("Failed to update hiera configuration") - raise - - # this is just necessary for 22.12 - # since the old releases uses the hieradata/.yaml - # and the new one uses hieradata/.yaml - # during the AIO-DX upgrade, the controller-0 runs the old - # release to upgrade the controller-1 - # the controller-0 want to still use hieradata/.yaml - # but the controller-1 want to use hieradata/.yaml - # so rename the .yaml to .yaml - # and creates a symlink: .yaml -> .yaml - try: - ctrl1_mgmt_ip = get_db_host_mgmt_ip(db_credentials, - utils.CONTROLLER_1_HOSTNAME) - except Exception as e: - LOG.exception(e) - LOG.info("Failed to get MGMT IP for controller-1 during upgrade") - raise - - ctrl1_hostname_hieradata = constants.HIERADATA_PERMDIR + "/" \ - + utils.CONTROLLER_1_HOSTNAME + ".yaml" - - ctrl1_ipaddr_hieradata_file = ctrl1_mgmt_ip + ".yaml" - ctrl1_ipaddr_hieradata = constants.HIERADATA_PERMDIR + "/" \ - + ctrl1_ipaddr_hieradata_file - - os.rename(ctrl1_hostname_hieradata, ctrl1_ipaddr_hieradata) - os.symlink(ctrl1_ipaddr_hieradata_file, ctrl1_hostname_hieradata) - - apply_sriov_config(db_credentials, utils.CONTROLLER_1_HOSTNAME) - - # Remove /etc/kubernetes/admin.conf after it is used to generate - # the hiera data - admin_conf = os.path.join(utils.KUBERNETES_CONF_PATH, - 
utils.KUBERNETES_ADMIN_CONF_FILE) - try: - subprocess.check_call(["rm -f %s" % admin_conf], shell=True, - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to remove file %s" % admin_conf) - - print("Shutting down upgrade processes...") - - # Stop postgres service - LOG.info("Stopping postgresql service") - try: - subprocess.check_call(["systemctl", "stop", "postgresql"], - stdout=devnull) - - except subprocess.CalledProcessError: - LOG.exception("Failed to stop postgresql service") - raise - - # Stop rabbitmq-server service - LOG.info("Stopping rabbitmq-server service") - try: - subprocess.check_call(["systemctl", "stop", "rabbitmq-server"], - stdout=devnull) - - except subprocess.CalledProcessError: - LOG.exception("Failed to stop rabbitmq-server service") - raise - - # Copy upgraded database back to controller-0 - print("Writing upgraded databases...") - LOG.info("Copying upgraded database to controller-0") - try: - subprocess.check_call( - ["cp", - "-a", - os.path.join(utils.POSTGRES_PATH, to_release), - os.path.join(POSTGRES_MOUNT_PATH, to_release)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception( - "Failed to copy migrated postgres database to controller-0") - raise - - # Remove temporary filesystems - remove_temp_filesystem("cgts-vg", "dbdump-temp-lv", - POSTGRES_DUMP_MOUNT_PATH) - remove_temp_filesystem("cgts-vg", "postgres-temp-lv", utils.POSTGRES_PATH) - - # Remove mounts - LOG.info("Removing mounts") - unmount_filesystem(PLATFORM_PATH) - unmount_filesystem(utils.RABBIT_PATH) - unmount_filesystem(POSTGRES_MOUNT_PATH) - os.rmdir(POSTGRES_MOUNT_PATH) - - # Set upgrade flags on mate controller - LOG.info("Setting upgrade flags on mate controller") - os.mkdir("/tmp/etc_platform") - nfs_mount_filesystem("/etc/platform", "/tmp/etc_platform") - upgrade_complete_flag_file = os.path.join( - "/tmp/etc_platform", - os.path.basename(CONTROLLER_UPGRADE_COMPLETE_FLAG)) - open(upgrade_complete_flag_file, "w").close() - upgrade_flag_file = os.path.join( - "/tmp/etc_platform", os.path.basename(CONTROLLER_UPGRADE_FLAG)) - os.remove(upgrade_flag_file) - - upgrade_complete_flag_file = os.path.join( - "/tmp/etc_platform", os.path.basename(CONTROLLER_UPGRADE_STARTED_FLAG)) - os.remove(upgrade_complete_flag_file) - - unmount_filesystem("/tmp/etc_platform") - os.rmdir("/tmp/etc_platform") - - # Restart the sysinv agent to report the inventory status - # The sysinv.conf contains temporary parameters that are used for - # data-migration. By removing that sysinv.conf we trigger the sysinv-agent - # to load the correct conf from the drbd filesystem - os.remove("/etc/sysinv/sysinv.conf") - LOG.info("Starting sysinv-agent") - cutils.start_service("sysinv-agent") - - print("Controller-1 upgrade complete") - LOG.info("Controller-1 upgrade complete!!!") - - -def show_help(): - print("Usage: %s " % sys.argv[0]) - print("Upgrade controller-1. For internal use only.") - - -def main(): - - from_release = None - to_release = None - arg = 1 - while arg < len(sys.argv): - if sys.argv[arg] in ['--help', '-h', '-?']: - show_help() - exit(1) - elif arg == 1: - from_release = sys.argv[arg] - elif arg == 2: - to_release = sys.argv[arg] - else: - print("Invalid option %s. Use --help for more information." 
% - sys.argv[arg]) - exit(1) - arg += 1 - - log.configure() - - if not from_release or not to_release: - print("Both the FROM_RELEASE and TO_RELEASE must be specified") - exit(1) - - try: - upgrade_controller(from_release, to_release) - except Exception as e: - LOG.exception(e) - print("Upgrade failed: {}".format(e)) - - # Set upgrade fail flag on mate controller - LOG.info("Set upgrade fail flag on mate controller") - os.mkdir("/tmp/etc_platform") - nfs_mount_filesystem("/etc/platform", "/tmp/etc_platform") - upgrade_fail_flag_file = os.path.join( - "/tmp/etc_platform", - os.path.basename(CONTROLLER_UPGRADE_FAIL_FLAG)) - open(upgrade_fail_flag_file, "w").close() - unmount_filesystem("/tmp/etc_platform") - os.rmdir("/tmp/etc_platform") - - exit(1) - - -def extract_relative_directory(archive, member_path, dest_dir): - """ Extracts all members from the archive that match the path specified - Will strip the specified path from the member before copying to the - destination - """ - if not member_path.endswith('/'): - member_path += '/' - - # Remove leading /. Allows us to pass filesystem constants if needed - if member_path.startswith('/'): - member_path = member_path[1:] - - offset = len(member_path) - filtered_members = [copy.copy(member) for member in archive.getmembers() - if member.name.startswith(member_path)] - for member in filtered_members: - member.name = member.name[offset:] - - archive.extractall(dest_dir, filtered_members) - - -def extract_relative_file(archive, member_name, dest_dir): - """ Extracts the specified member to destination using only the filename - with no preceding paths - """ - # Remove leading /. Allows us to pass filesystem constants if needed - if member_name.startswith('/'): - member_name = member_name[1:] - - member = archive.getmember(member_name) - temp_member = copy.copy(member) - temp_member.name = os.path.basename(temp_member.name) - archive.extract(temp_member, dest_dir) - - -def extract_data_from_archive(archive, staging_dir, from_release, to_release): - """Extracts the data from the archive to the staging directory""" - from_sysinv_path = os.path.join(PLATFORM_PATH, "sysinv", from_release) - from_keyring_path = os.path.join(PLATFORM_PATH, ".keyring", - from_release) - - # 0755 permissions - dir_options = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | \ - stat.S_IROTH | stat.S_IXOTH - - # On newer release this part is handled in the upgrade playbook - if from_release in ["21.12", "22.06"]: - from_puppet_path = os.path.join(PLATFORM_PATH, "puppet", - from_release, "hieradata") - from_pxelinux_path = os.path.join(PLATFORM_PATH, "config", - from_release, "pxelinux.cfg") - - shutil.rmtree(from_puppet_path, ignore_errors=True) - shutil.rmtree( - os.path.join(PLATFORM_PATH, "config", to_release, "pxelinux.cfg"), - ignore_errors=True) - - os.makedirs(from_puppet_path, dir_options) - - extract_relative_directory( - archive, from_puppet_path, from_puppet_path) - extract_relative_directory( - archive, from_pxelinux_path, from_pxelinux_path) - - # During legacy upgrade the from_sysinv_path directory should be recreated. - # During optimized upgrade, the from_sysinv_path is already prepared. - # The only from_release that supports legacy upgrade is 22.06. 
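The removed extract_relative_directory above rewrites tarfile member names to strip a common leading path before extraction. A compact, self-contained sketch of the same idea (the function name and arguments here are illustrative):

import copy
import tarfile

def extract_stripped(archive_path, member_path, dest_dir):
    """Extract members under member_path into dest_dir without the prefix."""
    if not member_path.endswith('/'):
        member_path += '/'
    with tarfile.open(archive_path) as archive:
        # Work on copies so the archive's own member list stays untouched
        members = [copy.copy(m) for m in archive.getmembers()
                   if m.name.startswith(member_path)]
        for m in members:
            m.name = m.name[len(member_path):]
        archive.extractall(dest_dir, members)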
- if from_release == "22.06": - shutil.rmtree(from_sysinv_path, ignore_errors=True) - os.makedirs(from_sysinv_path, dir_options) - - shutil.rmtree(from_keyring_path, ignore_errors=True) - - os.makedirs(from_keyring_path, dir_options) - - extract_relative_directory(archive, from_keyring_path, from_keyring_path) - - os.makedirs( - os.path.join(PLATFORM_PATH, "config", to_release, "pxelinux.cfg"), - dir_options) - - # Extract etc files - archive.extract('etc/hostname', '/') - extract_relative_file(archive, PLATFORM_CONF_FILE, staging_dir) - - extract_relative_file( - archive, sysinv_constants.SYSINV_CONFIG_FILE_LOCAL, from_sysinv_path) - - -def extract_postgres_data(archive): - """ Extract postgres data to temp directory """ - postgres_data_dir = os.path.join(utils.POSTGRES_PATH, "upgrade") - ansible_start_path = 'opt/platform-backup/ansible' - ansible_path = '' - offset = len(ansible_start_path) - for member in archive.getmembers(): - if member.name.startswith(ansible_start_path): - if member.isdir() and not member.name.endswith("/"): - member.name = member.name + "/" - ansible_path = member.name[:member.name.index('/', offset)] - break - extract_relative_directory( - archive, ansible_path + "/postgres", postgres_data_dir) - - -def read_config_file_kvp(config_file): - """ A Generic method to read the .conf file. - - param config_file: Absolute path of the target file. - result: A dictionary with key value pairs retrieved from the target file. - """ - result = dict() - - with open(config_file, 'r') as temp_file: - for line in temp_file: - key, value = line.split('=', 1) - result.update({key: value}) - return result - - -def migrate_platform_conf(staging_dir): - """ Migrate platform.conf """ - backup_platform_conf_path = os.path.join(staging_dir, 'platform.conf') - temp_platform_conf_file = os.path.join(staging_dir, 'platform-temp.conf') - backup_platform_conf_values = read_config_file_kvp( - backup_platform_conf_path) - new_platform_conf_values = read_config_file_kvp(PLATFORM_CONF_FILE) - - # The following values are expected to preserve in the newly - # generated platform.conf file - skip_options = ['nodetype', - 'subfunction', - 'management_interface', - 'oam_interface', - 'sw_version', - 'INSTALL_UUID', - 'system_type'] - for key in skip_options: - if key in backup_platform_conf_values: - del backup_platform_conf_values[key] - new_platform_conf_values.update(backup_platform_conf_values) - with open(temp_platform_conf_file, 'w') as f: - for key, value in new_platform_conf_values.items(): - line = key + "=" + value - f.write(line) - shutil.move(temp_platform_conf_file, PLATFORM_CONF_FILE) - - -def get_simplex_metadata(archive, staging_dir): - """Gets the metadata from the archive""" - # Get the metadate path from the archive - metadata_filename = 'upgrades/metadata' - metadata_path = '' - for member in archive.getmembers(): - if member.name.endswith(metadata_filename): - metadata_path = member.name - break - - extract_relative_file(archive, metadata_path, staging_dir) - metadata_filename = os.path.join(staging_dir, 'metadata') - with open(metadata_filename, 'r') as metadata_file: - metadata_contents = metadata_file.read() - metadata = json.loads(metadata_contents) - - return metadata - - -def check_load_version(to_release): - """Ensure that the running release matches the archive metadata""" - if to_release != SW_VERSION: - raise Exception("Incorrect software load installed. 
Found: %s " - "expecting: %s" % (SW_VERSION, to_release)) - - -def upgrade_controller_simplex(backup_file): - """ Performs the data migration on controller-0. - We extract the data from the archive, restore the database, migrate - the databases and restore/migrate the rest of the platform data. - The ansible playbook takes care of bootstrapping the system and - restoring other data (eg ceph/etcd). - """ - - if not os.path.isfile(backup_file): - raise Exception("Backup file (%s) not found." % backup_file) - - if not os.path.isabs(backup_file): - backup_file = os.path.abspath(backup_file) - - devnull = open(os.devnull, 'w') - - print_log_info("Starting controller upgrade") - - staging_dir = tempfile.mkdtemp(dir='/tmp') - # Permission change required or postgres restore fails - subprocess.call(['chmod', 'a+rx', staging_dir], stdout=devnull) - os.chdir('/') - - try: - archive = tarfile.open(backup_file) - except tarfile.TarError as e: - LOG.exception(e) - raise Exception("Error opening backup file. Invalid backup file.") - - metadata = get_simplex_metadata(archive, staging_dir) - - from_release = metadata['upgrade']['from_release'] - to_release = metadata['upgrade']['to_release'] - - check_load_version(to_release) - - if from_release == to_release: - raise Exception("Cannot upgrade from release %s to the same " - "release %s." % (from_release, to_release)) - - print_log_info("Extracting data from archive") - extract_data_from_archive(archive, staging_dir, from_release, to_release) - - # Backup sysinv.conf - shutil.move("/etc/sysinv/sysinv.conf", "/etc/sysinv/sysinv-temp.conf") - # Backup fm.conf - shutil.move("/etc/fm/fm.conf", "/etc/fm/fm-temp.conf") - - migrate_platform_conf(staging_dir) - - # Migrate pxeboot config - print_log_info("Migrating pxeboot configuration...") - migrate_pxeboot_config(from_release, to_release) - - # Migrate armada config - print("Migrating armada configuration...") - migrate_armada_config(from_release, to_release) - - # Migrate fluxcd config - print("Migrating fluxcd configuration...") - migrate_fluxcd_config(from_release, to_release) - - # Migrate helm config - print("Migrating helm configuration...") - migrate_helm_config(from_release, to_release) - - # Migrate sysinv data. 
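The simplex path above locates the upgrades/metadata member inside the backup archive and reads the from/to releases out of its JSON payload. A condensed sketch of that lookup (member and key names mirror the removed code):

import json
import tarfile

def read_upgrade_metadata(archive_path):
    """Return (from_release, to_release) from a backup's metadata file."""
    with tarfile.open(archive_path) as archive:
        # The metadata member sits at a release-dependent path, so match
        # on its suffix as the removed get_simplex_metadata did
        member = next(m for m in archive.getmembers()
                      if m.name.endswith('upgrades/metadata'))
        metadata = json.load(archive.extractfile(member))
    return (metadata['upgrade']['from_release'],
            metadata['upgrade']['to_release'])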
- print_log_info("Migrating sysinv configuration...") - migrate_sysinv_data(from_release, to_release) - - # Simplex configurations can not have shared services - shared_services = [] - - role = get_system_role() - # Migrate hiera data - migrate_hiera_data(from_release, to_release, role=role) - db_credentials = get_db_credentials(shared_services, from_release) - - # On newer releases this part is handled in the upgrade playbook - if from_release in ["22.06"]: - extract_postgres_data(archive) - - # Import databases - print_log_info("Importing databases...") - import_databases(from_release, to_release, utils.POSTGRES_PATH, - simplex=True) - - # Create any new databases - print_log_info("Creating new databases...") - create_databases(from_release, to_release, db_credentials) - - print_log_info("Migrating databases...") - # Migrate sysinv database - migrate_sysinv_database() - - # Migrate databases - migrate_databases(from_release, shared_services, db_credentials, - simplex=True) - - print_log_info("Applying configuration...") - - # Execute migration scripts - utils.execute_migration_scripts( - from_release, to_release, utils.ACTION_MIGRATE) - - hostname = 'controller-0' - LOG.info("Generating config for %s" % hostname) - try: - cutils.create_system_config() - cutils.create_host_config(hostname) - except Exception as e: - LOG.exception(e) - LOG.info("Failed to update hiera configuration") - raise - - # Runtime manifests may modify platform.conf, so we'll back it up - temp_platform_conf = PLATFORM_CONF_FILE + ".backup" - shutil.copy(PLATFORM_CONF_FILE, temp_platform_conf) - apply_sriov_config(db_credentials, hostname) - - archive.close() - shutil.rmtree(staging_dir, ignore_errors=True) - - # Restore platform.conf - shutil.move(temp_platform_conf, PLATFORM_CONF_FILE) - # Restore sysinv.conf - shutil.move("/etc/sysinv/sysinv-temp.conf", "/etc/sysinv/sysinv.conf") - # Restore fm.conf - shutil.move("/etc/fm/fm-temp.conf", "/etc/fm/fm.conf") - - print_log_info("Data restore complete") - - -def print_log_info(string): - print(string) - LOG.info(string) - - -def show_help_simplex(): - print("Usage: %s " % sys.argv[0]) - print("Upgrade controller-0 simplex. For internal use only.") - - -def simplex_main(): - backup_file = None - arg = 1 - while arg < len(sys.argv): - if sys.argv[arg] in ['--help', '-h', '-?']: - show_help_simplex() - exit(1) - elif arg == 1: - backup_file = sys.argv[arg] - else: - print("Invalid option %s. Use --help for more information." % - sys.argv[arg]) - exit(1) - arg += 1 - - log.configure() - - if not backup_file: - print("The BACKUP_FILE must be specified") - exit(1) - - try: - upgrade_controller_simplex(backup_file) - except Exception as e: - LOG.exception(e) - print("Upgrade failed: {}".format(e)) - # TODO SET Upgrade fail flag - # Set upgrade fail flag on mate controller - exit(1) diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py deleted file mode 100644 index 302697a49b..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/management.py +++ /dev/null @@ -1,385 +0,0 @@ -# -# Copyright (c) 2015-2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# This file contains functions used by sysinv to manage upgrades. 
-# -import json -import glob -import os -import shutil -import subprocess -import yaml - -import tsconfig.tsconfig as tsc - -from sysinv.common import constants as sysinv_constants -from controllerconfig.upgrades import utils - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -def get_upgrade_databases(system_role, shared_services): - - UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv', - 'barbican', 'fm') - - UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (), - 'sysinv': (), - 'barbican': (), - 'fm': ('alarm',)} - - if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - UPGRADE_DATABASES += ('dcmanager', 'dcorch',) - UPGRADE_DATABASE_SKIP_TABLES.update({ - 'dcmanager': (), - 'dcorch': ('service', 'orch_job', 'orch_request',) - }) - - if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services: - UPGRADE_DATABASES += ('keystone',) - UPGRADE_DATABASE_SKIP_TABLES.update({'keystone': ('token',)}) - - return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES - - -def export_postgres(dest_dir, system_role, shared_services): - """ Export postgres databases """ - devnull = open(os.devnull, 'w') - try: - upgrade_databases, upgrade_database_skip_tables = \ - get_upgrade_databases(system_role, shared_services) - # Dump roles, table spaces and schemas for databases. - subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' + - '--schema-only > %s/%s' % - (dest_dir, 'postgres.postgreSql.config'))], - shell=True, stderr=devnull) - - # Dump data for databases. - for _a, db_elem in enumerate(upgrade_databases): - - db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts ' - db_cmd += '--disable-triggers --data-only %s ' % db_elem - - for _b, table_elem in \ - enumerate(upgrade_database_skip_tables[db_elem]): - db_cmd += '--exclude-table=%s ' % table_elem - - db_cmd += '> %s/%s.postgreSql.data' % (dest_dir, db_elem) - - subprocess.check_call([db_cmd], shell=True, stderr=devnull) - - except subprocess.CalledProcessError: - LOG.exception("Failed to export postgres databases for upgrade.") - raise - - -def export_vim(dest_dir): - """ Export VIM database """ - devnull = open(os.devnull, 'w') - try: - vim_cmd = ("nfv-vim-manage db-dump-data -d %s -f %s" % - (os.path.join(tsc.PLATFORM_PATH, 'nfv/vim', tsc.SW_VERSION), - os.path.join(dest_dir, 'vim.data'))) - subprocess.check_call([vim_cmd], shell=True, stderr=devnull) - - except subprocess.CalledProcessError: - LOG.exception("Failed to export VIM databases for upgrade.") - raise - - -def prepare_upgrade(from_load, to_load, i_system, mgmt_address): - """ Executed on the release N side to prepare for an upgrade. """ - devnull = open(os.devnull, 'w') - - LOG.info("Starting upgrade preparations - from: %s, to: %s" % - (from_load, to_load)) - dest_dir = os.path.join(utils.POSTGRES_PATH, "upgrade") - try: - os.mkdir(dest_dir, 0o755) - except OSError: - LOG.exception("Failed to create upgrade export directory %s." 
% - dest_dir) - raise - - # Export databases - shared_services = i_system.capabilities.get("shared_services", "") - export_postgres(dest_dir, i_system.distributed_cloud_role, shared_services) - export_vim(dest_dir) - - # Export filesystems so controller-1 can access them - try: - subprocess.check_call( - ["exportfs", - "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.POSTGRES_PATH), - "-o", - "rw,no_root_squash"], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to export %s" % utils.POSTGRES_PATH) - raise - try: - subprocess.check_call( - ["exportfs", - "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, utils.RABBIT_PATH), - "-o", - "rw,no_root_squash"], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to export %s" % utils.RABBIT_PATH) - raise - - # Migrate /opt/platform/config so controller-1 can access when it - # runs controller_config - try: - subprocess.check_call( - ["cp", - "-a", - os.path.join(tsc.PLATFORM_PATH, "config", from_load), - os.path.join(tsc.PLATFORM_PATH, "config", to_load)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to migrate %s" % os.path.join(tsc.PLATFORM_PATH, - "config")) - raise - - # Point N+1 etcd to N for now. We will migrate when both controllers are - # running N+1, during the swact back to controller-0. This solution will - # present some problems when we do upgrade etcd, so further development - # will be required at that time. - etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load) - etcd_from_dir = os.path.join(tsc.ETCD_PATH, from_load) - os.symlink(etcd_from_dir, etcd_to_dir) - - # Copy /etc/kubernetes/admin.conf so controller-1 can access - # during its upgrade - try: - subprocess.check_call( - ["cp", - os.path.join(utils.KUBERNETES_CONF_PATH, - utils.KUBERNETES_ADMIN_CONF_FILE), - os.path.join(tsc.PLATFORM_PATH, "config", to_load, - "kubernetes", utils.KUBERNETES_ADMIN_CONF_FILE)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to copy %s" % - os.path.join(utils.KUBERNETES_CONF_PATH, - utils.KUBERNETES_ADMIN_CONF_FILE)) - raise - - # Update admin.conf file to replace the cluster address with - # the floating management address - # This is a temporary change used in upgrade of N+1 node - admin_conf = os.path.join(tsc.PLATFORM_PATH, "config", to_load, - "kubernetes", utils.KUBERNETES_ADMIN_CONF_FILE) - with open(admin_conf, 'r') as yaml_file: - config = yaml.safe_load(yaml_file) - - for item, values in config.items(): - # update server address in cluster - if item == 'clusters': - if 'cluster' in values[0] and 'server' in values[0]['cluster']: - formatted_address = utils.format_url_address(mgmt_address) - # TODO use urlparse() to get url components and update - values[0]['cluster']['server'] = \ - "https://" + formatted_address + ":6443" - break # no need to iterate further - - with open(admin_conf, 'w') as yaml_file: - yaml.dump(config, yaml_file, default_flow_style=False) - - # Remove branding tar files from the release N+1 directory as branding - # files are not compatible between releases. 
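The admin.conf rewrite above splices the floating management address into the cluster server URL by hand; the inline TODO suggests using urlparse() instead. A hedged sketch of that suggestion (helper name hypothetical; assumes new_host is already RFC 2732 bracket-formatted for IPv6, as utils.format_url_address produces):

from urllib.parse import urlsplit, urlunsplit


def replace_server_host(server_url, new_host, default_port=6443):
    # Rebuild only the host portion of the kubeconfig server URL,
    # keeping the scheme and port -- what the TODO above hints at.
    parts = urlsplit(server_url)
    port = parts.port or default_port
    netloc = "%s:%s" % (new_host, port)
    return urlunsplit((parts.scheme, netloc, parts.path,
                       parts.query, parts.fragment))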
- branding_files = os.path.join( - tsc.PLATFORM_PATH, "config", to_load, "branding", "*.tgz") - try: - subprocess.check_call(["rm -f %s" % branding_files], shell=True, - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to remove branding files %s" % branding_files) - - # Execute migration scripts - utils.execute_migration_scripts( - from_load, to_load, utils.ACTION_START) - - LOG.info("Finished upgrade preparations") - - -def create_simplex_backup(software_upgrade): - """Creates the upgrade metadata and creates the system backup""" - backup_data = {} - upgrade_data = software_upgrade.as_dict() - if upgrade_data['created_at']: - upgrade_data['created_at'] = \ - upgrade_data['created_at'].replace( - microsecond=0).replace(tzinfo=None).isoformat() - if upgrade_data['updated_at']: - upgrade_data['updated_at'] = \ - upgrade_data['updated_at'].replace( - microsecond=0).replace(tzinfo=None).isoformat() - backup_data['upgrade'] = upgrade_data - json_data = json.dumps(backup_data) - metadata_path = os.path.join(tsc.CONFIG_PATH, 'upgrades') - os.mkdir(metadata_path) - metadata_filename = os.path.join(metadata_path, 'metadata') - with open(metadata_filename, 'w') as metadata_file: - metadata_file.write(json_data) - - upgrade_data, upgrade_images_data = get_upgrade_backup_filenames( - software_upgrade) - backup_vars = [ - "platform_backup_file=%s.tgz" % upgrade_data, - "user_images_backup_file=%s.tgz" % upgrade_images_data, - "backup_user_images=true", - "backup_dir=%s" % tsc.PLATFORM_BACKUP_PATH] - args = [ - 'ansible-playbook', - '-e', ' '.join(backup_vars), - sysinv_constants.ANSIBLE_PLATFORM_BACKUP_PLAYBOOK] - proc = subprocess.Popen(args, stdout=subprocess.PIPE, - universal_newlines=True) - out, _ = proc.communicate() - LOG.info(out) - if proc.returncode: - raise subprocess.CalledProcessError(proc.returncode, args) - LOG.info("Create simplex backup complete") - - -def get_upgrade_backup_filenames(software_upgrade): - """Generates the simplex upgrade backup filename""" - created_at_date = software_upgrade.created_at.replace( - microsecond=0).replace(tzinfo=None) - date_time = created_at_date.isoformat().replace(':', '') - suffix = date_time + '_' + software_upgrade.uuid - upgrade_data = 'upgrade_data_' + suffix - upgrade_images_data = 'upgrade_images_data_' + suffix - return upgrade_data, upgrade_images_data - - -def abort_upgrade(from_load, to_load, upgrade): - """ Executed on the release N side, cleans up data created for upgrade. 
""" - devnull = open(os.devnull, 'w') - LOG.info("Starting aborting upgrade - from: %s, to: %s" % - (from_load, to_load)) - - # remove upgrade flags - upgrade_flags = [tsc.CONTROLLER_UPGRADE_FLAG, - tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG, - tsc.CONTROLLER_UPGRADE_FAIL_FLAG, - tsc.CONTROLLER_UPGRADE_STARTED_FLAG, - ] - for flag in upgrade_flags: - try: - if os.path.isfile(flag): - os.remove(flag) - except OSError: - LOG.exception("Failed to remove upgrade flag %s" % flag) - - # unexport filesystems - export_list = [utils.POSTGRES_PATH, utils.RABBIT_PATH] - export_path = None - try: - for export_path in export_list: - subprocess.check_call( - ["exportfs", - "-u", - "%s:%s" % (utils.CONTROLLER_1_HOSTNAME, export_path)], - stdout=devnull) - except subprocess.CalledProcessError: - LOG.exception("Failed to unexport %s" % export_path) - except Exception: - LOG.exception("Failed to unexport filesystems") - - # Depending on where we are in the upgrade we may need to remove the - # symlink to the etcd directory - etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load) - if os.path.islink(etcd_to_dir): - LOG.info("Unlinking destination etcd directory: %s " % etcd_to_dir) - os.unlink(etcd_to_dir) - - # Remove upgrade directories - upgrade_dirs = [ - os.path.join(tsc.PLATFORM_PATH, "config", to_load), - os.path.join(tsc.PLATFORM_PATH, "armada", to_load), - os.path.join(tsc.PLATFORM_PATH, "helm", to_load), - os.path.join(tsc.ETCD_PATH, to_load), - os.path.join(utils.POSTGRES_PATH, "upgrade"), - os.path.join(utils.POSTGRES_PATH, to_load), - os.path.join(utils.RABBIT_PATH, to_load), - os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load), - os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load), - os.path.join(tsc.PLATFORM_PATH, "puppet", to_load), - os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load), - os.path.join(tsc.CONFIG_PATH, 'upgrades') - ] - - for directory in upgrade_dirs: - try: - if os.path.isdir(directory): - shutil.rmtree(directory) - except OSError: - LOG.exception("Failed to remove upgrade directory %s" % directory) - - remove_simplex_upgrade_data(upgrade) - - LOG.info("Finished upgrade abort") - - -def activate_upgrade(from_load, to_load, i_system): - """ Executed on release N+1, activate the upgrade on all nodes. """ - LOG.info("Starting upgrade activate - from: %s, to: %s" % - (from_load, to_load)) - utils.execute_migration_scripts(from_load, to_load, utils.ACTION_ACTIVATE) - - LOG.info("Finished upgrade activation") - - -def remove_simplex_upgrade_data(upgrade): - upgrade_data, upgrade_images_data = get_upgrade_backup_filenames(upgrade) - simplex_backup_files = glob.glob( - os.path.join(tsc.PLATFORM_BACKUP_PATH, upgrade_data + "*")) - simplex_backup_files += glob.glob( - os.path.join(tsc.PLATFORM_BACKUP_PATH, upgrade_images_data + "*")) - - for file in simplex_backup_files: - try: - LOG.info("Removing simplex upgrade file %s" % file) - os.remove(file) - except OSError: - LOG.exception("Failed to remove %s" % file) - - -def complete_upgrade(from_load, to_load, upgrade): - """ Executed on release N+1, cleans up data created for upgrade. 
""" - LOG.info("Starting upgrade complete - from: %s, to: %s" % - (from_load, to_load)) - - # Remove upgrade directories - upgrade_dirs = [ - os.path.join(tsc.PLATFORM_PATH, "config", from_load), - os.path.join(utils.POSTGRES_PATH, "upgrade"), - os.path.join(utils.POSTGRES_PATH, from_load), - os.path.join(utils.RABBIT_PATH, from_load), - os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load), - os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load), - os.path.join(tsc.PLATFORM_PATH, "puppet", from_load), - os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load), - os.path.join(tsc.PLATFORM_PATH, "armada", from_load), - os.path.join(tsc.PLATFORM_PATH, "helm", from_load), - os.path.join(tsc.ETCD_PATH, from_load) - ] - - for directory in upgrade_dirs: - try: - shutil.rmtree(directory) - except OSError: - LOG.exception("Failed to remove upgrade directory %s" % directory) - - remove_simplex_upgrade_data(upgrade) - - LOG.info("Finished upgrade complete") diff --git a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py b/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py deleted file mode 100644 index 565d4d6420..0000000000 --- a/controllerconfig/controllerconfig/controllerconfig/upgrades/utils.py +++ /dev/null @@ -1,523 +0,0 @@ -# -# Copyright (c) 2016-2023 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# -# This file contains common upgrades functions that can be used by both sysinv -# and during the upgrade of controller-1. -# - -import keyring -import os -import psycopg2 -from psycopg2.extras import RealDictCursor -import six -import subprocess -import tempfile -import yaml -import netaddr - -# WARNING: The controller-1 upgrade is done before any puppet manifests -# have been applied, so only the static entries from tsconfig can be used. -# (the platform.conf file will not have been updated with dynamic values). -from tsconfig.tsconfig import SW_VERSION -from tsconfig.tsconfig import PLATFORM_PATH - -from controllerconfig import utils as cutils -from controllerconfig.common import constants -from sysinv.common import constants as sysinv_constants -from sysinv.common import utils as sysinv_utils - -from oslo_log import log - -LOG = log.getLogger(__name__) - -POSTGRES_PATH = '/var/lib/postgresql' -POSTGRES_DATA_DIR = os.path.join(POSTGRES_PATH, SW_VERSION) -RABBIT_PATH = '/var/lib/rabbitmq' -CONTROLLER_1_HOSTNAME = "controller-1" -DB_CONNECTION = "postgresql://%s:%s@127.0.0.1/%s\n" -KUBERNETES_CONF_PATH = "/etc/kubernetes" -KUBERNETES_ADMIN_CONF_FILE = "admin.conf" -PLATFORM_LOG = '/var/log/platform.log' -ERROR_FILE = '/tmp/upgrade_fail_msg' - -# well-known default domain name -DEFAULT_DOMAIN_NAME = 'Default' - -# Migration script actions -ACTION_START = "start" -ACTION_MIGRATE = "migrate" -ACTION_ACTIVATE = "activate" - - -def execute_migration_scripts(from_release, to_release, action, - migration_script_dir="/etc/upgrade.d"): - """ Execute migration scripts with an action: - start: Prepare for upgrade on release N side. Called during - "system upgrade-start". - migrate: Perform data migration on release N+1 side. Called while - controller-1 is performing its upgrade. 
- """ - - LOG.info("Executing migration scripts with from_release: %s, " - "to_release: %s, action: %s" % (from_release, to_release, action)) - - # Get a sorted list of all the migration scripts - # Exclude any files that can not be executed, including .pyc and .pyo files - files = [f for f in os.listdir(migration_script_dir) - if os.path.isfile(os.path.join(migration_script_dir, f)) and - os.access(os.path.join(migration_script_dir, f), os.X_OK)] - # From file name, get the number to sort the calling sequence, - # abort when the file name format does not follow the pattern - # "nnn-*.*", where "nnn" string shall contain only digits, corresponding - # to a valid unsigned integer (first sequence of characters before "-") - try: - files.sort(key=lambda x: int(x.split("-")[0])) - except Exception: - LOG.exception("Migration script sequence validation failed, invalid " - "file name format") - raise - - MSG_SCRIPT_FAILURE = "Migration script %s failed with returncode %d" \ - "Script output:\n%s" - # Execute each migration script - for f in files: - migration_script = os.path.join(migration_script_dir, f) - try: - # needed to flag each execution in case of error - start_script_exec = "Executing migration script" - LOG.info("%s %s" % (start_script_exec, migration_script)) - # TODO(heitormatsui): remove py2 code when - # CentOS and zuul py2.7 are deprecated - if six.PY2: - subprocess.check_output([migration_script, - from_release, - to_release, - action], - stderr=subprocess.STDOUT, - universal_newlines=True) - else: - ret = subprocess.run([migration_script, - from_release, - to_release, - action], - stderr=subprocess.STDOUT, - stdout=subprocess.PIPE, - text=True) - if ret.returncode != 0: - script_output = ret.stdout.splitlines() - output_list = [] - for item in script_output: - if item not in output_list: - output_list.append(item) - output_script = "\n".join(output_list) - msg = MSG_SCRIPT_FAILURE % (migration_script, - ret.returncode, - output_script) - LOG.error(msg) - start_script_line = get_exec_start_line( - start_script_exec, PLATFORM_LOG) - error_message = search_script_output( - start_script_line, PLATFORM_LOG, f) - save_temp_file(msg, error_message) - raise Exception(msg) - - except subprocess.CalledProcessError as e: - # log script output if script executed but failed. - LOG.error(MSG_SCRIPT_FAILURE % - (migration_script, e.returncode, e.output)) - # Abort when a migration script fails - raise - except Exception as e: - # log exception if script not executed. - LOG.exception(e) - raise - - -def get_exec_start_line(start_script_exec, file_name): - """ Search the last ocurrence of the start of the script. - Get the line number and use it to find the last start - of script execution in logs. - - Used to prevent reading an outdated error log. - """ - cmd = [ - "awk", - '/{pattern_to_find}/ {{last_match = $0; start_line = NR}}' - 'END {{if (last_match) print start_line, last_match}}' - .format(pattern_to_find=start_script_exec), - file_name - ] - start_line = None - - try: - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, error = process.communicate() - last_match = output.decode().strip().splitlines() - start_line, last_match = last_match[0].split(' ', 1) - start_line = int(start_line) - except Exception: - LOG.error("Failed to exec cmd. \n %s" % error) - return None - return start_line - - -def search_script_output(start_script_line, file_name, script): - """Search error lines for this script. 
- - Then, compare the line number and just add the - lines after the start of the last execution. - """ - cmd = [ - "awk", - '/{script}/ && /error|ERROR/ {{print NR, $0}}'.format(script=script), - file_name - ] - error_list = [] - error_string = "" - - try: - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, error = process.communicate() - error_lines = output.decode().strip().splitlines() - # Compare the line numbers of each occurrence. - # If the line number is greater than 'start_script_line', then - # add this line to the output string - for i, current_line in enumerate(error_lines): - if i < (len(error_lines) - 1): - current_line, error_line = error_lines[i + 1].split(' ', 1) - current_line = int(current_line) - if current_line > start_script_line: - error_list.append(error_line) - error_string = '\n'.join(error_list) - except Exception: - LOG.error("Failed to exec cmd. \n %s" % error) - return None - return error_string - - -def save_temp_file(msg, error=None): - if os.path.isfile(ERROR_FILE): - os.remove(ERROR_FILE) - - MSG_FAILURE = '%s \n\n'\ - '%s \n\n'\ - 'Check specific service log or search for ' \ - 'this app in sysinv.log for details\n' - msg = MSG_FAILURE % (msg, - error) - try: - with open(ERROR_FILE, 'w+') as error_file: - error_file.write(msg) - except Exception: - LOG.warning("Error opening file %s" % ERROR_FILE) - return None - - -def get_db_connection(hiera_db_records, database): - username = hiera_db_records[database]['username'] - password = hiera_db_records[database]['password'] - return "postgresql://%s:%s@%s/%s" % ( - username, password, 'localhost', database) - - -def get_password_from_keyring(service, username): - """Retrieve password from keyring""" - password = "" - os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR - try: - password = keyring.get_password(service, username) - except Exception as e: - LOG.exception("Received exception when attempting to get password " - "for service %s, username %s: %s" % - (service, username, e)) - raise - finally: - del os.environ["XDG_DATA_HOME"] - return password - - -def set_password_in_keyring(service, username): - """Generate random password and store in keyring""" - os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR - try: - password = sysinv_utils.generate_random_password(length=16) - keyring.set_password(service, username, password) - except Exception as e: - LOG.exception("Received exception when attempting to generate " - "password for service %s, username %s: %s" % - (service, username, e)) - raise - finally: - del os.environ["XDG_DATA_HOME"] - return password - - -def get_upgrade_token(from_release, - config, - secure_config): - - # Get the system hiera data from the from release - from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release, - "hieradata") - system_file = os.path.join(from_hiera_path, "system.yaml") - with open(system_file, 'r') as s_file: - system_config = yaml.load(s_file, Loader=yaml.FullLoader) - - # during a controller-1 upgrade, keystone is running - # on the controller UNIT IP, however the service catalog - # that was migrated from controller-0 since lists the - # floating controller IP. Keystone operations that use - # the AUTH URL will hit this service URL and fail, - # therefore we have to issue an Upgrade token for - # all Keystone operations during an Upgrade. This token - # will allow us to circumvent the service catalog entry, by - # providing a bypass endpoint. 
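The comment block above explains why a bypass token is needed: the catalog migrated from controller-0 still lists the floating IP, so authentication must target the local keystone endpoint directly. The deleted code shells out to the openstack CLI for this; a keystoneauth1-based equivalent would look roughly like the following (a sketch under those assumptions, not the original implementation):

from keystoneauth1.identity import v3
from keystoneauth1 import session


def issue_bypass_token(auth_url, username, password,
                       user_domain, project_domain):
    # Authenticate straight against the given endpoint, sidestepping
    # the stale service catalog entry -- the same effect the CLI
    # "token issue" command below achieves.
    auth = v3.Password(auth_url=auth_url,
                       username=username,
                       password=password,
                       project_name='admin',
                       user_domain_name=user_domain,
                       project_domain_name=project_domain)
    return session.Session(auth=auth).get_token()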
- keystone_upgrade_url = "http://{}:5000/{}".format( - '127.0.0.1', - system_config['openstack::keystone::params::api_version']) - - admin_user_domain = system_config.get( - 'platform::client::params::admin_user_domain') - if admin_user_domain is None: - # This value wasn't present in R2. So may be missing in upgrades from - # that release - LOG.info("platform::client::params::admin_user_domain key not found. " - "Using Default.") - admin_user_domain = DEFAULT_DOMAIN_NAME - - admin_project_domain = system_config.get( - 'platform::client::params::admin_project_domain') - if admin_project_domain is None: - # This value wasn't present in R2. So may be missing in upgrades from - # that release - LOG.info("platform::client::params::admin_project_domain key not " - "found. Using Default.") - admin_project_domain = DEFAULT_DOMAIN_NAME - - admin_password = get_password_from_keyring("CGCS", "admin") - admin_username = system_config.get( - 'platform::client::params::admin_username') - - # the upgrade token command - keystone_upgrade_token = ( - "openstack " - "--os-username {} " - "--os-password '{}' " - "--os-auth-url {} " - "--os-project-name admin " - "--os-user-domain-name {} " - "--os-project-domain-name {} " - "--os-interface internal " - "--os-identity-api-version 3 " - "token issue -c id -f value".format( - admin_username, - admin_password, - keystone_upgrade_url, - admin_user_domain, - admin_project_domain - )) - - config.update({ - 'openstack::keystone::upgrade::upgrade_token_file': - '/etc/keystone/upgrade_token', - 'openstack::keystone::upgrade::url': keystone_upgrade_url - }) - - secure_config.update({ - 'openstack::keystone::upgrade::upgrade_token_cmd': - keystone_upgrade_token, - }) - - -def get_upgrade_data(from_release, - system_config, - secure_config): - """ Retrieve required data from the from-release, update system_config - and secure_config with them. - This function is needed for adding new service account and endpoints - during upgrade. - """ - # Get the system hiera data from the from release - from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release, - "hieradata") - system_file = os.path.join(from_hiera_path, "system.yaml") - with open(system_file, 'r') as s_file: - system_config_from_release = yaml.load(s_file, Loader=yaml.FullLoader) - - # Get keystone region - keystone_region = system_config_from_release.get( - 'keystone::endpoint::region') - - system_config.update({ - 'platform::client::params::identity_region': keystone_region, - # Retrieve keystone::auth::region from the from-release for the new - # service. - # 'newservice::keystone::auth::region': keystone_region, - }) - - # Generate password for the new service - # password = sysinv_utils.generate_random_password(16) - - secure_config.update({ - # Generate and set the keystone::auth::password for the new service. 
- # 'newservice::keystone::auth::password': password, - }) - - -def add_upgrade_entries_to_hiera_data(from_release): - """ Adds upgrade entries to the hiera data """ - - filename = 'static.yaml' - secure_filename = 'secure_static.yaml' - path = constants.HIERADATA_PERMDIR - - # Get the hiera data for this release - filepath = os.path.join(path, filename) - with open(filepath, 'r') as c_file: - config = yaml.load(c_file, Loader=yaml.FullLoader) - secure_filepath = os.path.join(path, secure_filename) - with open(secure_filepath, 'r') as s_file: - secure_config = yaml.load(s_file, Loader=yaml.FullLoader) - - # File for system.yaml - # This is needed for adding new service account and endpoints - # during upgrade. - system_filename = 'system.yaml' - system_filepath = os.path.join(path, system_filename) - - # Get a token and update the config - get_upgrade_token(from_release, config, secure_config) - - # Get required data from the from-release and add them in system.yaml. - # We don't carry system.yaml from the from-release. - # This is needed for adding new service account and endpoints - # during upgrade. - system_config = {} - get_upgrade_data(from_release, system_config, secure_config) - - # Update the hiera data on disk - try: - fd, tmppath = tempfile.mkstemp(dir=path, prefix=filename, - text=True) - with open(tmppath, 'w') as f: - yaml.dump(config, f, default_flow_style=False) - os.close(fd) - os.rename(tmppath, filepath) - except Exception: - LOG.exception("failed to write config file: %s" % filepath) - raise - - try: - fd, tmppath = tempfile.mkstemp(dir=path, prefix=secure_filename, - text=True) - with open(tmppath, 'w') as f: - yaml.dump(secure_config, f, default_flow_style=False) - os.close(fd) - os.rename(tmppath, secure_filepath) - except Exception: - LOG.exception("failed to write secure config: %s" % secure_filepath) - raise - - # Add required hiera data into system.yaml. - # This is needed for adding new service account and endpoints - # during upgrade. - try: - fd, tmppath = tempfile.mkstemp(dir=path, prefix=system_filename, - text=True) - with open(tmppath, 'w') as f: - yaml.dump(system_config, f, default_flow_style=False) - os.close(fd) - os.rename(tmppath, system_filepath) - except Exception: - LOG.exception("failed to write system config: %s" % system_filepath) - raise - - -def create_simplex_runtime_config(filename): - """ Create any runtime parameters needed for simplex upgrades""" - config = {} - # Here is an example from a previous release... 
- # config.update({'nova::db::sync_api::cellv2_setup': False}) - cutils.create_manifest_runtime_config(filename, config) - - -def apply_upgrade_manifest(controller_address): - """Apply puppet upgrade manifest files.""" - - cmd = [ - "/usr/local/bin/puppet-manifest-apply.sh", - constants.HIERADATA_PERMDIR, - str(controller_address), - sysinv_constants.CONTROLLER, - 'upgrade' - ] - - logfile = "/tmp/apply_manifest.log" - try: - with open(logfile, "w") as flog: - subprocess.check_call(cmd, stdout=flog, stderr=flog) - except subprocess.CalledProcessError: - msg = "Failed to execute upgrade manifest" - print(msg) - raise Exception(msg) - - -def format_url_address(address): - """Format the URL address according to RFC 2732""" - try: - addr = netaddr.IPAddress(address) - if addr.version == sysinv_constants.IPV6_FAMILY: - return "[%s]" % address - else: - return str(address) - except netaddr.AddrFormatError: - return address - - -def get_keystone_user_id(user_name): - """ Get the a keystone user id by name""" - - conn = psycopg2.connect("dbname='keystone' user='postgres'") - with conn: - with conn.cursor(cursor_factory=RealDictCursor) as cur: - cur.execute("SELECT user_id FROM local_user WHERE name='%s'" % - user_name) - user_id = cur.fetchone() - if user_id is not None: - return user_id['user_id'] - else: - return user_id - - -def get_keystone_project_id(project_name): - """ Get the a keystone project id by name""" - - conn = psycopg2.connect("dbname='keystone' user='postgres'") - with conn: - with conn.cursor(cursor_factory=RealDictCursor) as cur: - cur.execute("SELECT id FROM project WHERE name='%s'" % - project_name) - project_id = cur.fetchone() - if project_id is not None: - return project_id['id'] - else: - return project_id - - -def get_postgres_bin(): - """ Get the path to the postgres binaries""" - - try: - return subprocess.check_output( - ['pg_config', '--bindir']).decode().rstrip('\n') - except subprocess.CalledProcessError: - LOG.exception("Failed to get postgres bin directory.") - raise diff --git a/controllerconfig/controllerconfig/tox.ini b/controllerconfig/controllerconfig/tox.ini index ff456a94fc..de77d22d4d 100644 --- a/controllerconfig/controllerconfig/tox.ini +++ b/controllerconfig/controllerconfig/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. [tox] -envlist = flake8, pylint, py39 +envlist = flake8, pylint # Tox does not work if the path to the workdir is too long, so move it to /tmp toxworkdir = /tmp/{env:USER}_cctox stxdir = {toxinidir}/../../.. 
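The deleted get_keystone_user_id/get_keystone_project_id helpers above interpolate the name straight into the SQL string; psycopg2's parameter binding avoids the quoting pitfalls that pattern invites. A hedged rework of one of them, shown only to illustrate the safer form:

import psycopg2
from psycopg2.extras import RealDictCursor


def get_keystone_user_id(user_name):
    # Same lookup as the deleted helper, but with a parameterized
    # query so psycopg2 quotes the value itself.
    conn = psycopg2.connect("dbname='keystone' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("SELECT user_id FROM local_user WHERE name = %s",
                        (user_name,))
            row = cur.fetchone()
    return row['user_id'] if row else None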
diff --git a/devstack/lib/config b/devstack/lib/config index 08577d787b..eebde415f6 100644 --- a/devstack/lib/config +++ b/devstack/lib/config @@ -110,7 +110,6 @@ function cleanup_sysinv { sudo rm -f $SYSINV_ETC_GOENABLEDD/sysinv_goenabled_check.sh sudo rm -f $SYSINV_CONF_DIR/policy.yaml sudo rm -f $SYSINV_ETC_MOTDD/10-system - sudo rm -f $SYSINV_CONF_DIR/upgrades/delete_load.sh sudo rm -f $STX_OCF_ROOT/resource.d/platform/sysinv-api sudo rm -f $STX_OCF_ROOT/resource.d/platform/sysinv-conductor sudo rm -f $STX_SYSCONFDIR/systemd/system/sysinv-api.service @@ -251,7 +250,6 @@ function install_sysinv { sudo install -d -m 755 $SYSINV_ETC_MOTDD sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/motd-system $SYSINV_ETC_MOTDD/10-system sudo install -d -m 755 $SYSINV_CONF_DIR/upgrades - sudo install -p -D -m 755 $SYSINV_DIR/etc/sysinv/delete_load.sh $SYSINV_CONF_DIR/upgrades/delete_load.sh sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-api $STX_OCF_ROOT/resource.d/platform/sysinv-api sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-conductor $STX_OCF_ROOT/resource.d/platform/sysinv-conductor sudo install -p -D -m 755 $SYSINV_DIR/scripts/sysinv-api.service $STX_SYSCONFDIR/systemd/system/sysinv-api.service diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py index ed7632a9ad..4bb259bc1d 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py @@ -65,7 +65,6 @@ from cgtsclient.v1 import label from cgtsclient.v1 import license from cgtsclient.v1 import lldp_agent from cgtsclient.v1 import lldp_neighbour -from cgtsclient.v1 import load from cgtsclient.v1 import network from cgtsclient.v1 import network_addrpool from cgtsclient.v1 import partition @@ -145,7 +144,6 @@ class Client(object): self.isensor = isensor.isensorManager(self.http_client) self.isensorgroup = isensorgroup.isensorgroupManager(self.http_client) self.pci_device = pci_device.PciDeviceManager(self.http_client) - self.load = load.LoadManager(self.http_client) self.upgrade = upgrade.UpgradeManager(self.http_client) self.network = network.NetworkManager(self.http_client) self.network_addrpool = network_addrpool.NetworkAddrPoolManager(self.http_client) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py deleted file mode 100644 index e22fed6a25..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) 2015-2023 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from cgtsclient.common import base -from cgtsclient import exc - - -CREATION_ATTRIBUTES = ['software_version', 'compatible_version', - 'required_patches'] - -IMPORT_ATTRIBUTES = ['path_to_iso', 'path_to_sig', 'active', 'local', - 'inactive'] - - -class Load(base.Resource): - def __repr__(self): - return "" % self._info - - -class LoadManager(base.Manager): - resource_class = Load - - def list(self): - return self._list('/v1/loads/', "loads") - - def get(self, load_id): - path = '/v1/loads/%s' % load_id - try: - return self._list(path)[0] - except IndexError: - return None - - def _create_load(self, load, path): - if set(list(load.keys())) != set(CREATION_ATTRIBUTES): - raise exc.InvalidAttribute() - - return self._create(path, load) - - def create(self, load): - path = '/v1/loads/' - self._create_load(load, path) - - def import_load_metadata(self, load): - path = '/v1/loads/import_load_metadata' - return self._create_load(load, path) - - def import_load(self, **kwargs): - path = '/v1/loads/import_load' - local = kwargs.pop('local') - load_info = {} - - for key, value in kwargs.items(): - if key in IMPORT_ATTRIBUTES: - if isinstance(value, bool): - load_info[key] = str(value).lower() - else: - load_info[key] = value - else: - raise exc.InvalidAttribute(key) - - if local: - return self._create(path, body=load_info) - - data = { - 'active': load_info.pop('active', 'false'), - 'inactive': load_info.pop('inactive', 'false'), - } - - json_data = self._upload_multipart( - path, - body=load_info, - data=data, - check_exceptions=True, - ) - - return self.resource_class(self, json_data) - - def delete(self, load_id): - path = '/v1/loads/%s' % load_id - return self._delete(path) - - def update(self, load_id, patch): - path = '/v1/loads/%s' % load_id - return self._update(path, patch) diff --git a/sysinv/sysinv/debian/deb_folder/rules b/sysinv/sysinv/debian/deb_folder/rules index 6f2198207a..e3b9f42902 100755 --- a/sysinv/sysinv/debian/deb_folder/rules +++ b/sysinv/sysinv/debian/deb_folder/rules @@ -22,7 +22,6 @@ override_dh_install: -d $(CURDIR)/debian/sysinv-wheels/usr/share/python-wheel install -p -D -m 755 $(CURDIR)/etc/sysinv/motd-system $(CURDIR)/debian/tmp/etc/update-motd.d/10-system install -p -D -m 755 $(CURDIR)/etc/sysinv/sysinv_goenabled_check.sh $(CURDIR)/debian/tmp/etc/goenabled.d/sysinv_goenabled_check.sh - install -p -D -m 700 $(CURDIR)/etc/sysinv/delete_load.sh $(CURDIR)/debian/tmp/etc/sysinv/upgrades/delete_load.sh install -p -D -m 644 debian/tmpfiles.conf $(CURDIR)/debian/tmp/usr/lib/tmpfiles.d/sysinv.conf install -p -D -m 700 $(CURDIR)/scripts/kube-cert-rotation.sh $(CURDIR)/debian/tmp/usr/bin/kube-cert-rotation.sh install -p -D -m 700 $(CURDIR)/scripts/ipsec-cert-renew.sh $(CURDIR)/debian/tmp/usr/bin/ipsec-cert-renew.sh diff --git a/sysinv/sysinv/debian/deb_folder/sysinv.install b/sysinv/sysinv/debian/deb_folder/sysinv.install index e5eb7173f8..9354d5c18d 100644 --- a/sysinv/sysinv/debian/deb_folder/sysinv.install +++ b/sysinv/sysinv/debian/deb_folder/sysinv.install @@ -11,7 +11,6 @@ scripts/query_pci_id usr/bin scripts/ceph_k8s_update_monitors.sh usr/bin usr/lib/python*/dist-packages/* etc/goenabled.d/sysinv_goenabled_check.sh -etc/sysinv/upgrades/delete_load.sh etc/update-motd.d/10-system usr/bin/cert-alarm usr/bin/cert-mon diff --git a/sysinv/sysinv/sysinv/etc/sysinv/delete_load.sh b/sysinv/sysinv/sysinv/etc/sysinv/delete_load.sh deleted file mode 100644 index 7629b3199d..0000000000 --- 
a/sysinv/sysinv/sysinv/etc/sysinv/delete_load.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright (c) 2015-2018 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# This script is remove a load from a controller. -# The load version is passed in as the first variable. - -: ${1?"Usage $0 VERSION"} -VERSION=$1 - -FEED_DIR=/var/www/pages/feed/rel-$VERSION -PRESTAGE_DIR=/opt/platform/deploy/$VERSION -PLAYBOOKS_DIR=/opt/dc-vault/playbooks/$VERSION - -rm -f /var/pxeboot/pxelinux.cfg.files/*-$VERSION -rm -rf /var/pxeboot/rel-$VERSION - -rm -f /etc/pxeboot-update-$VERSION.sh - -rm -rf $FEED_DIR - -if [ -d $PRESTAGE_DIR ]; then - rm -rf $PRESTAGE_DIR -fi - -if [ -d $PLAYBOOKS_DIR ]; then - rm -rf $PLAYBOOKS_DIR -fi diff --git a/sysinv/sysinv/sysinv/sysinv/agent/manager.py b/sysinv/sysinv/sysinv/sysinv/agent/manager.py index ad619e2c5c..9b589fb1e0 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/manager.py @@ -2136,71 +2136,6 @@ class AgentManager(service.PeriodicService): # Update local puppet cache anyway to be consistent. self._update_local_puppet_cache(hieradata_path) - def delete_load(self, context, host_uuid, software_version): - """Remove the specified load - - :param context: request context - :param host_uuid: the host uuid - :param software_version: the version of the load to remove - """ - - LOG.debug("AgentManager.delete_load: %s" % (software_version)) - if self._ihost_uuid and self._ihost_uuid == host_uuid: - LOG.info("AgentManager removing load %s" % software_version) - - cleanup_script = constants.DELETE_LOAD_SCRIPT - if os.path.isfile(cleanup_script): - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call( # pylint: disable=not-callable - [cleanup_script, software_version], - stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - LOG.error("Failure during cleanup script") - else: - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - rpcapi.finalize_delete_load(context, software_version) - else: - LOG.error("Cleanup script %s does not exist." % cleanup_script) - - return - - def create_simplex_backup(self, context, software_upgrade): - """Creates the upgrade metadata and creates the system backup - - :param context: request context. - :param software_upgrade: software_upgrade object - :returns: none - """ - try: - from controllerconfig.upgrades import \ - management as upgrades_management - except ImportError: - LOG.error("Attempt to import during create_simplex_backup failed") - return - - if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX: - LOG.error("create_simplex_backup called for non-simplex system") - return - - LOG.info("Starting simplex upgrade data collection") - success = True - try: - upgrades_management.create_simplex_backup(software_upgrade) - except Exception as ex: - LOG.info("Exception during simplex upgrade data collection") - LOG.exception(ex) - success = False - else: - LOG.info("Simplex upgrade data collection complete") - - rpcapi = conductor_rpcapi.ConductorAPI( - topic=conductor_rpcapi.MANAGER_TOPIC) - rpcapi.complete_simplex_backup(context, success=success) - - return - def device_update_image(self, context, host_uuid, pci_addr, filename, transaction_id, retimer_included): """Write the device image to the device at the specified address. 
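The delete_load agent handler removed above follows a common pattern: run an external cleanup script when it exists, otherwise log an error, and report the outcome back to the conductor. A condensed sketch of that run-if-present pattern (illustrative only; the real handler also notified the conductor via RPC):

import logging
import os
import subprocess

LOG = logging.getLogger(__name__)


def run_cleanup_script(script, version):
    # Run an external cleanup script if present; log and report
    # failure otherwise, as the deleted delete_load handler did.
    if not os.path.isfile(script):
        LOG.error("Cleanup script %s does not exist.", script)
        return False
    try:
        subprocess.run([script, version], check=True,
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL)
        return True
    except subprocess.CalledProcessError:
        LOG.error("Failure during cleanup script")
        return False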
diff --git a/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py b/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py index eabaf6b228..e14fdf4435 100644 --- a/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/agent/rpcapi.py @@ -129,40 +129,6 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): config_dict=config_dict)) return retval - def delete_load(self, context, host_uuid, software_version): - """Asynchronously, have the agent remove the specified load - - :param context: request context. - :param host_uuid: the host uuid - :param software_version: the version of the load to remove - :returns: none ... uses asynchronous cast(). - """ - # fanout / broadcast message to all inventory agents - LOG.debug("AgentApi.delete_load: fanout_cast: sending " - "delete load to agent: (%s) (%s) " % - (host_uuid, software_version)) - retval = self.fanout_cast( - context, self.make_msg( - 'delete_load', - host_uuid=host_uuid, - software_version=software_version)) - - return retval - - def create_simplex_backup(self, context, software_upgrade): - """Asynchronously, have the agent create the simplex backup data - - :param context: request context. - :param software_upgrade: software_upgrade object - :returns: none - """ - retval = self.fanout_cast(context, - self.make_msg( - 'create_simplex_backup', - software_upgrade=software_upgrade)) - - return retval - def apply_tpm_config(self, context, tpm_context): """Asynchronously, have the agent apply the tpm config diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py index f302ff193b..9c3e2a468b 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py @@ -55,7 +55,6 @@ from sysinv.api.controllers.v1 import interface_network from sysinv.api.controllers.v1 import link from sysinv.api.controllers.v1 import lldp_agent from sysinv.api.controllers.v1 import lldp_neighbour -from sysinv.api.controllers.v1 import load from sysinv.api.controllers.v1 import lvg from sysinv.api.controllers.v1 import license from sysinv.api.controllers.v1 import memory @@ -656,12 +655,6 @@ class V1(base.APIBase): bookmark=True) ] - v1.loads = [link.Link.make_link('self', pecan.request.host_url, - 'loads', ''), - link.Link.make_link('bookmark', pecan.request.host_url, - 'loads', '', bookmark=True) - ] - v1.pci_devices = [link.Link.make_link('self', pecan.request.host_url, 'pci_devices', ''), @@ -1003,7 +996,6 @@ class Controller(rest.RestController): certificate = certificate.CertificateController() isensors = sensor.SensorController() isensorgroups = sensorgroup.SensorGroupController() - loads = load.LoadController() pci_devices = pci_device.PCIDeviceController() upgrade = upgrade.UpgradeController() networks = network.NetworkController() diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 1b366d9f62..d1ca5d1c55 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -41,7 +41,6 @@ import wsmeext.pecan as wsme_pecan from wsme import types as wtypes from fm_api import constants as fm_constants -from fm_api import fm_api from pecan import expose from pecan import rest @@ -551,12 +550,6 @@ class Host(base.APIBase): apparmor = wtypes.text "Enable/Disable apparmor state" - software_load = wtypes.text - "The current load software version" - - target_load = 
wtypes.text - "The target load software version" - install_state = wtypes.text "Represent the install state" @@ -615,7 +608,7 @@ class Host(base.APIBase): 'created_at', 'updated_at', 'boot_device', 'rootfs_device', 'hw_settle', 'install_output', 'console', 'tboot', 'vsc_controllers', 'ttys_dcd', - 'software_load', 'target_load', 'peers', 'peer_id', + 'peers', 'peer_id', 'install_state', 'install_state_info', 'iscsi_initiator_name', 'device_image_update', 'reboot_needed', 'inv_state', 'clock_synchronization', @@ -2829,16 +2822,11 @@ class HostController(rest.RestController): except exception.NotFound: return - loads = pecan.request.dbapi.load_get_list() - target_load = cutils.get_imported_load(loads) - if personality == constants.STORAGE: if hostname == constants.STORAGE_0_HOSTNAME: LOG.warn("Allow storage-0 add during upgrade") else: LOG.info("Adding storage, ensure controllers upgraded") - self._check_personality_load(constants.CONTROLLER, - target_load) @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Host, six.text_type, body=six.text_type) @@ -2868,19 +2856,6 @@ class HostController(rest.RestController): _("All worker and storage hosts not running a Ceph monitor " "must be locked and offline before this operation can proceed")) - # TODO(heitormatsui): used only by legacy upgrade endpoint, remove - def _check_personality_load(self, personality, load): - hosts = pecan.request.dbapi.ihost_get_by_personality(personality) - for host in hosts: - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, host.id) - if host_upgrade.target_load != load.id or \ - host_upgrade.software_load != load.id: - raise wsme.exc.ClientSideError( - _("All %s hosts must be using load %s before this " - "operation can proceed") - % (personality, load.software_version)) - def _check_max_cpu_mhz_configured(self, host): cpu_utils.check_power_manager(host.ihost_patch.get('uuid')) @@ -2923,105 +2898,6 @@ class HostController(rest.RestController): raise wsme.exc.ClientSideError( _("Host does not support configuration of Max CPU Frequency.")) - # TODO(heitormatsui): used only by legacy upgrade endpoint, remove - def _check_host_load(self, hostname, load): - host = pecan.request.dbapi.ihost_get_by_hostname(hostname) - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, host.id) - if host_upgrade.target_load != load.id or \ - host_upgrade.software_load != load.id: - raise wsme.exc.ClientSideError( - _("%s must be using load %s before this operation can proceed") - % (hostname, load.software_version)) - - # TODO(heitormatsui): used only by legacy upgrade endpoint, remove - def _check_storage_downgrade(self, load): - hosts = pecan.request.dbapi.ihost_get_by_personality(constants.STORAGE) - # Ensure all storage nodes are downgraded before storage-0 - for host in hosts: - if host.hostname != constants.STORAGE_0_HOSTNAME: - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, host.id) - if host_upgrade.target_load != load.id or \ - host_upgrade.software_load != load.id: - raise wsme.exc.ClientSideError( - _("All other %s hosts must be using load %s before " - "this operation can proceed") - % (constants.STORAGE, load.software_version)) - - # TODO(heitormatsui): used only by legacy upgrade endpoint, remove - def _update_load(self, uuid, body, new_target_load): - force = body.get('force', False) is True - - rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid) - - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, 
rpc_ihost.id) - - if host_upgrade.target_load == new_target_load.id: - raise wsme.exc.ClientSideError( - _("%s already targeted to install load %s") % - (rpc_ihost.hostname, new_target_load.software_version)) - - if rpc_ihost.administrative != constants.ADMIN_LOCKED: - raise wsme.exc.ClientSideError( - _("The host must be locked before performing this operation")) - elif rpc_ihost.invprovision not in [constants.UPGRADING, constants.PROVISIONED]: - raise wsme.exc.ClientSideError(_("The host must be provisioned " - "before performing this operation")) - elif not force and rpc_ihost.availability != "online": - raise wsme.exc.ClientSideError( - _("The host must be online to perform this operation")) - - if rpc_ihost.personality == constants.STORAGE: - istors = pecan.request.dbapi.istor_get_by_ihost(rpc_ihost.id) - for stor in istors: - istor_obj = objects.storage.get_by_uuid(pecan.request.context, - stor.uuid) - self._ceph.remove_osd_key(istor_obj['osdid']) - if utils.get_system_mode() != constants.SYSTEM_MODE_SIMPLEX: - pecan.request.rpcapi.upgrade_ihost(pecan.request.context, - rpc_ihost, - new_target_load) - host_upgrade.target_load = new_target_load.id - host_upgrade.save() - - # There may be alarms, clear them - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - rpc_ihost.hostname) - - fm_api_obj = fm_api.FaultAPIs() - fm_api_obj.clear_fault( - fm_constants.FM_ALARM_ID_HOST_VERSION_MISMATCH, - entity_instance_id) - - pecan.request.dbapi.ihost_update( - rpc_ihost.uuid, {'inv_state': constants.INV_STATE_REINSTALLING}) - - if rpc_ihost.availability == "online": - new_ihost_mtc = rpc_ihost.as_dict() - new_ihost_mtc.update({'operation': 'modify'}) - new_ihost_mtc.update({'action': constants.REINSTALL_ACTION}) - new_ihost_mtc = cutils.removekeys_nonmtce(new_ihost_mtc) - new_ihost_mtc['mgmt_ip'] = utils.get_mgmt_ip(rpc_ihost.hostname) - - mtc_response = mtce_api.host_modify( - self._api_token, self._mtc_address, self._mtc_port, - new_ihost_mtc, constants.MTC_ADD_TIMEOUT_IN_SECS) - - if mtc_response is None: - mtc_response = {'status': 'fail', - 'reason': 'no response', - 'action': 'retry'} - - if mtc_response['status'] != 'pass': - # Report mtc error - raise wsme.exc.ClientSideError(_("Maintenance has returned with " - "a status of %s, reason: %s, recommended action: %s") % ( - mtc_response.get('status'), - mtc_response.get('reason'), - mtc_response.get('action'))) - @staticmethod def _validate_ip_in_mgmt_network(ip): network = pecan.request.dbapi.network_get_by_type( diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py index 8a451d3760..560cb1d5a8 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py @@ -18,7 +18,6 @@ # Copyright (c) 2015-2021 Wind River Systems, Inc. 
# -import json import os import pecan @@ -26,7 +25,6 @@ from pecan import rest import psutil import six import shutil -import socket import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan @@ -34,19 +32,14 @@ import wsmeext.pecan as wsme_pecan from eventlet.green import subprocess from oslo_log import log from pecan import expose -from pecan import request from sysinv._i18n import _ from sysinv.api.controllers.v1 import base from sysinv.api.controllers.v1 import collection from sysinv.api.controllers.v1 import link from sysinv.api.controllers.v1 import types -from sysinv.api.controllers.v1 import utils from sysinv.common import constants -from sysinv.common import exception from sysinv.common import utils as cutils from sysinv import objects -from sysinv.openstack.common import rpc -from sysinv.openstack.common.rpc import common LOG = log.getLogger(__name__) @@ -154,29 +147,6 @@ class LoadController(rest.RestController): def __init__(self): self._api_token = None - def _get_loads_collection(self, marker, limit, sort_key, sort_dir, - expand=False, resource_url=None): - - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.load.get_by_uuid( - pecan.request.context, - marker) - - loads = pecan.request.dbapi.load_get_list( - limit, marker_obj, - sort_key=sort_key, - sort_dir=sort_dir) - - return LoadCollection.convert_with_links(loads, limit, - url=resource_url, - expand=expand, - sort_key=sort_key, - sort_dir=sort_dir) - @wsme_pecan.wsexpose(LoadCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): @@ -205,45 +175,6 @@ class LoadController(rest.RestController): raise wsme.exc.ClientSideError( _("Can not set state during create")) - @cutils.synchronized(LOCK_NAME) - @wsme_pecan.wsexpose(Load, body=Load) - def post(self, load): - """Create a new Load.""" - # This method is only used to populate the inital load for the system - # This is invoked during config_controller - # Loads after the first are added via import - # TODO(ShawnLi): This will be removed when we remove the Load table - loads = pecan.request.dbapi.load_get_list() - - if loads: - raise wsme.exc.ClientSideError(_("Aborting. Active load exits.")) - - patch = load.as_dict() - self._new_load_semantic_checks(patch) - patch['state'] = constants.ACTIVE_LOAD_STATE - - try: - new_load = pecan.request.dbapi.load_create(patch) - - # Controller-0 is added to the database before we add this load - # so we must add a host_upgrade entry for (at least) controller-0 - hosts = pecan.request.dbapi.ihost_get_list() - - for host in hosts: - values = dict() - values['forihostid'] = host.id - values['software_load'] = new_load.id - values['target_load'] = new_load.id - pecan.request.dbapi.host_upgrade_create(host.id, - new_load.software_version, - values) - - except exception.SysinvException as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_("Invalid data")) - - return load.convert_with_links(new_load) - @staticmethod def _upload_file(file_item): try: @@ -304,121 +235,7 @@ class LoadController(rest.RestController): raise NotImplementedError("This API is deprecated.") def _import_load(self): - """Create a new load from iso/sig files""" - - LOG.info("Load import request received.") - - # Only import loads on controller-0. This is required because the load - # is only installed locally and we will be booting controller-1 from - # this load during the upgrade. 
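As the deleted comments below note, _import_load accepted two request shapes: a plain JSON body from dc-api-proxy, where the files are already staged in the vault, and a multipart form from direct CLI uploads. A minimal sketch of that dispatch (hypothetical helper distilled from the removed branching):

import json


def parse_import_request(content_type, body, post_items):
    # Returns (request fields, is_multiform). JSON requests carry
    # only file locations; multipart requests carry the uploads.
    if content_type == "application/json":
        return dict(json.loads(body)), False
    return dict(post_items), True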
- if socket.gethostname() != constants.CONTROLLER_0_HOSTNAME: - raise wsme.exc.ClientSideError(_("A load can only be imported when" - " %s is active.") - % constants.CONTROLLER_0_HOSTNAME) - - req_content = dict() - load_files = dict() - is_multiform_req = True - import_type = None - - # Request coming from dc-api-proxy is not multiform, file transfer is handled - # by dc-api-proxy, the request contains only the vault file location - if request.content_type == "application/json": - req_content = dict(json.loads(request.body)) - is_multiform_req = False - else: - req_content = dict(request.POST.items()) - - if not req_content: - raise wsme.exc.ClientSideError(_("Empty request.")) - - active = req_content.get('active') - inactive = req_content.get('inactive') - - if active == 'true' and inactive == 'true': - raise wsme.exc.ClientSideError(_("Invalid use of --active and" - " --inactive arguments at" - " the same time.")) - - if active == 'true' or inactive == 'true': - isystem = pecan.request.dbapi.isystem_get_one() - - if isystem.distributed_cloud_role == \ - constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - LOG.info("System Controller allow start import_load") - - if active == 'true': - import_type = constants.ACTIVE_LOAD_IMPORT - elif inactive == 'true': - import_type = constants.INACTIVE_LOAD_IMPORT - - self._check_existing_loads(import_type=import_type) - - try: - for file in constants.IMPORT_LOAD_FILES: - if file not in req_content: - raise wsme.exc.ClientSideError(_("Missing required file for %s") - % file) - - if not is_multiform_req: - load_files.update({file: req_content[file]}) - else: - if file not in request.POST: - raise wsme.exc.ClientSideError(_("Missing required file for %s") - % file) - - file_item = request.POST[file] - if not file_item.filename: - raise wsme.exc.ClientSideError(_("No %s file uploaded") % file) - - file_location = self._upload_file(file_item) - if file_location: - load_files.update({file: file_location}) - except subprocess.CalledProcessError as ex: - raise wsme.exc.ClientSideError(str(ex)) - except Exception as ex: - raise wsme.exc.ClientSideError(_("Failed to save file %s to disk. Error: %s" - " Please check sysinv logs for" - " details." % (file_item.filename, str(ex)))) - - LOG.info("Load files: %s saved to disk." % load_files) - - exception_occured = False - try: - new_load = pecan.request.rpcapi.start_import_load( - pecan.request.context, - load_files[constants.LOAD_ISO], - load_files[constants.LOAD_SIGNATURE], - import_type, - ) - - if new_load is None: - raise wsme.exc.ClientSideError(_("Error importing load. Load not found")) - - if import_type != constants.ACTIVE_LOAD_IMPORT: - # Signature and upgrade path checks have passed, make rpc call - # to the conductor to run import script in the background. 
- pecan.request.rpcapi.import_load( - pecan.request.context, - load_files[constants.LOAD_ISO], - new_load, - import_type, - ) - except (rpc.common.Timeout, common.RemoteError) as e: - exception_occured = True - error = e.value if hasattr(e, 'value') else str(e) - raise wsme.exc.ClientSideError(error) - except Exception: - exception_occured = True - raise - finally: - if exception_occured and os.path.isdir(constants.LOAD_FILES_STAGING_DIR): - shutil.rmtree(constants.LOAD_FILES_STAGING_DIR) - - load_data = new_load.as_dict() - LOG.info("Load import request validated, returning new load data: %s" - % load_data) - return load_data + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Load, body=Load) @@ -427,51 +244,6 @@ class LoadController(rest.RestController): raise NotImplementedError("This API is deprecated.") - def _check_existing_loads(self, import_type=None): - # Only are allowed at one time: - # - the active load - # - an imported load regardless of its current state - # - an inactive load. - - loads = pecan.request.dbapi.load_get_list() - - if len(loads) <= constants.IMPORTED_LOAD_MAX_COUNT: - return - - for load in loads: - if load.state == constants.ACTIVE_LOAD_STATE: - continue - - load_state = load.state - - if load_state == constants.ERROR_LOAD_STATE: - err_msg = _("Please remove the load in error state " - "before importing a new one.") - - elif load_state == constants.DELETING_LOAD_STATE: - err_msg = _("Please wait for the current load delete " - "to complete before importing a new one.") - - elif load_state == constants.INACTIVE_LOAD_STATE: - if import_type != constants.INACTIVE_LOAD_IMPORT: - continue - - err_msg = _("An inactived load already exists. " - "Please, remove the inactive load " - "before trying to import a new one.") - - elif import_type == constants.ACTIVE_LOAD_IMPORT or \ - import_type == constants.INACTIVE_LOAD_IMPORT: - continue - - elif not err_msg: - # Already imported or being imported - err_msg = _("Max number of loads (2) reached. 
Please " - "remove the old or unused load before " - "importing a new one.") - - raise wsme.exc.ClientSideError(err_msg) - @cutils.synchronized(LOCK_NAME) @wsme.validate(six.text_type, [LoadPatchType]) @wsme_pecan.wsexpose(Load, six.text_type, diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py index 81764c68c8..76d4d55e99 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py @@ -21,11 +21,9 @@ from sysinv.api.controllers.v1 import base from sysinv.api.controllers.v1 import collection from sysinv.api.controllers.v1 import link from sysinv.api.controllers.v1 import types -from sysinv.api.controllers.v1 import utils from sysinv.common import exception from sysinv.common import utils as cutils from sysinv.common import constants -from sysinv import objects LOG = log.getLogger(__name__) @@ -69,7 +67,7 @@ class Upgrade(base.APIBase): "The load version that software upgrading to" def __init__(self, **kwargs): - self.fields = list(objects.software_upgrade.fields.keys()) + self.fields = list() for k in self.fields: if not hasattr(self, k): continue @@ -128,24 +126,6 @@ class UpgradeController(rest.RestController): def __init__(self, parent=None, **kwargs): self._parent = parent - def _get_upgrade_collection(self, marker=None, limit=None, - sort_key=None, sort_dir=None, - expand=False, resource_url=None): - limit = utils.validate_limit(limit) - sort_dir = utils.validate_sort_dir(sort_dir) - marker_obj = None - if marker: - marker_obj = objects.software_upgrade.get_by_uuid( - pecan.request.context, marker) - - upgrades = pecan.request.dbapi.software_upgrade_get_list( - limit=limit, marker=marker_obj, - sort_key=sort_key, sort_dir=sort_dir) - - return UpgradeCollection.convert_with_links( - upgrades, limit, url=resource_url, expand=expand, - sort_key=sort_key, sort_dir=sort_dir) - @staticmethod def check_restore_in_progress(): try: diff --git a/sysinv/sysinv/sysinv/sysinv/cmd/upgrade.py b/sysinv/sysinv/sysinv/sysinv/cmd/upgrade.py deleted file mode 100644 index e06f3e5704..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/cmd/upgrade.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2015-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -""" -Sysinv upgrade utilities. -""" - -import sys - -from oslo_config import cfg -from oslo_log import log -from sysinv._i18n import _ -from sysinv.common import constants -from sysinv.common import service -from sysinv.common import utils -from sysinv.db import api as dbapi - -from tsconfig.tsconfig import system_mode - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - - -# TODO(bqian): remove code below updating host_update, and software_upgrade table -# after USM transition completes. 
-def update_controller_state(skip_load_update): - mydbapi = dbapi.get_instance() - - LOG.info("Updating upgrades data in sysinv database") - hostname = constants.CONTROLLER_1_HOSTNAME - if system_mode == constants.SYSTEM_MODE_SIMPLEX: - hostname = constants.CONTROLLER_0_HOSTNAME - host = mydbapi.ihost_get_by_hostname(hostname) - - # Update the states for controller-1 - update_values = {'administrative': constants.ADMIN_UNLOCKED, - 'operational': constants.OPERATIONAL_ENABLED, - 'availability': constants.AVAILABILITY_AVAILABLE} - mydbapi.ihost_update(host.uuid, update_values) - - if skip_load_update: - return - - # Update the from and to load for controller-1 - loads = mydbapi.load_get_list() - target_load = utils.get_imported_load(loads) - host_upgrade = mydbapi.host_upgrade_get_by_host(host.id) - update_values = {'software_load': target_load.id, - 'target_load': target_load.id} - mydbapi.host_upgrade_update(host_upgrade.id, update_values) - - # Update the upgrade state - upgrade = mydbapi.software_upgrade_get_one() - upgrade_update = {'state': constants.UPGRADE_UPGRADING_CONTROLLERS} - mydbapi.software_upgrade_update(upgrade.uuid, upgrade_update) - - -def add_action_parsers(subparsers): - for action in ['update_controller_state']: - parser = subparsers.add_parser(action) - parser.set_defaults(func=globals()[action]) - - -CONF.register_cli_opt( - cfg.SubCommandOpt('action', - title='Action options', - help='Available upgrade options', - handler=add_action_parsers)) - - -def main(): - argv = sys.argv[:] - skip_load_update = False - for arg in sys.argv: - if arg == '--skip_load_update': - argv.remove(arg) - skip_load_update = True - - # Parse config file and command line options, then start logging - service.prepare_service(argv) - - if CONF.action.name in ['update_controller_state']: - msg = (_("Called '%(action)s'") % - {"action": CONF.action.name}) - LOG.info(msg) - CONF.action.func(skip_load_update) - else: - LOG.error(_("Unknown action: %(action)") % {"action": - CONF.action.name}) diff --git a/sysinv/sysinv/sysinv/sysinv/common/constants.py b/sysinv/sysinv/sysinv/sysinv/common/constants.py index 37dd2fb8a8..16ffe2042f 100644 --- a/sysinv/sysinv/sysinv/sysinv/common/constants.py +++ b/sysinv/sysinv/sysinv/sysinv/common/constants.py @@ -960,7 +960,6 @@ IMPORTED_LOAD_STATES = [ INACTIVE_LOAD_STATE, ] -DELETE_LOAD_SCRIPT = '/etc/sysinv/upgrades/delete_load.sh' IMPORTED_LOAD_MAX_COUNT = 1 LOAD_ISO = 'path_to_iso' LOAD_SIGNATURE = 'path_to_sig' diff --git a/sysinv/sysinv/sysinv/sysinv/common/health.py b/sysinv/sysinv/sysinv/sysinv/common/health.py index 59f9da6abb..04aec63f43 100755 --- a/sysinv/sysinv/sysinv/sysinv/common/health.py +++ b/sysinv/sysinv/sysinv/sysinv/common/health.py @@ -23,8 +23,6 @@ from sysinv.cert_alarm.audit import CertAlarmAudit from sysinv.api.controllers.v1 import patch_api from sysinv.api.controllers.v1 import vim_api -import tsconfig.tsconfig as tsc - import cgcs_patch.constants as patch_constants LOG = log.getLogger(__name__) @@ -774,36 +772,6 @@ class Health(object): output += msg health_ok = health_ok and success - loads = self._dbapi.load_get_list() - try: - imported_load = utils.get_imported_load(loads) - except Exception as e: - LOG.exception(e) - output += _('No imported load found. 
Unable to test further\n') - return health_ok, output - - upgrade_version = imported_load.software_version - if imported_load.required_patches: - patches = imported_load.required_patches.split('\n') - else: - patches = [] - - success, missing_patches = \ - self._check_required_patches_are_applied(patches) - output += _('Required patches are applied: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - output += _('Patches not applied: %s\n') \ - % ', '.join(missing_patches) - - health_ok = health_ok and success - - success = self._check_license(upgrade_version) - output += _('License valid for upgrade: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - - health_ok = health_ok and success - success, message = self._check_bootdevice() if not success: # Make this an invisible check for the bootdevice and rootfs device. @@ -856,15 +824,6 @@ class Health(object): % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) health_ok = health_ok and success - # TODO (luisbonatti): remove when CentOS to Debian upgrade is deprecated - if upgrade_version == tsc.SW_VERSION_22_12: - msg, success = self._check_free_space_for_upgrade() - output += \ - _('Disk space requirement: [%s]\n') \ - % (Health.SUCCESS_MSG if success else Health.FAIL_MSG) - if not success: - output += msg - health_ok = health_ok and success return health_ok, output diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py index fde431bdc0..82b7612478 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/manager.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/manager.py @@ -64,8 +64,6 @@ from urllib3.exceptions import MaxRetryError import tsconfig.tsconfig as tsc from collections import namedtuple from collections import OrderedDict -from cgcs_patch.patch_verify import verify_files -from controllerconfig.upgrades import management as upgrades_management from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import rsa @@ -86,7 +84,6 @@ from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import encodeutils -from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils from platform_util.license import license @@ -105,7 +102,6 @@ from sysinv.common import barbican_config from sysinv.common import fpga_constants from sysinv.common import constants from sysinv.common import ceph as cceph -from sysinv.common import dc_api from sysinv.common import device as dconstants from sysinv.common import etcd from sysinv.common import exception @@ -131,7 +127,6 @@ from sysinv.conductor import openstack from sysinv.conductor import docker_registry from sysinv.conductor import keystone_listener from sysinv.db import api as dbapi -from sysinv.loads.loads import LoadImport from sysinv import objects from sysinv.objects import kube_app as kubeapp_obj from sysinv.puppet import common as puppet_common @@ -140,7 +135,6 @@ from sysinv.puppet import interface as pinterface from sysinv.helm import helm from sysinv.helm.lifecycle_constants import LifecycleConstants from sysinv.helm.lifecycle_hook import LifecycleHookInfo -from sysinv.helm import common from sysinv.zmq_rpc.zmq_rpc import ZmqRpcServer MANAGER_TOPIC = 'sysinv.conductor_manager' @@ -465,8 +459,6 @@ class ConductorManager(service.PeriodicService): self._fernet = 
fernet.FernetOperator() # Upgrade start tasks - self._clear_stuck_loads() - self._upgrade_init_actions() self._kube_upgrade_init_actions() self._handle_restore_in_progress() @@ -738,46 +730,6 @@ class ConductorManager(service.PeriodicService): elif self.fm_api.get_faults_by_id(fm_constants.FM_ALARM_ID_K8S_RESOURCE_PV): greenthread.spawn(self._pvc_monitor_migration) - def _upgrade_init_actions(self): - """ Perform any upgrade related startup actions""" - try: - # NOTE(bqian) this is legacy upgrade only code - upgrade = self.dbapi.software_upgrade_get_one() - except exception.NotFound: - # Not upgrading. No need to update status - return - - hostname = socket.gethostname() - if hostname == constants.CONTROLLER_0_HOSTNAME: - if os.path.isfile(tsc.UPGRADE_ROLLBACK_FLAG): - self._set_state_for_rollback(upgrade) - elif os.path.isfile(tsc.UPGRADE_ABORT_FLAG): - self._set_state_for_abort(upgrade) - elif hostname == constants.CONTROLLER_1_HOSTNAME: - self._init_controller_for_upgrade(upgrade) - if upgrade.state == constants.UPGRADE_UPGRADING_CONTROLLERS: - # request report initial inventory as controller-1 has - # not had a chance to report inventory to upgraded system - context = ctx.RequestContext('admin', 'admin', is_admin=True) - ihost = self.dbapi.ihost_get_by_hostname(hostname) - rpcapi = agent_rpcapi.AgentAPI() - rpcapi.report_initial_inventory(context, ihost.uuid) - - system_mode = self.dbapi.isystem_get_one().system_mode - if system_mode == constants.SYSTEM_MODE_SIMPLEX: - self._init_controller_for_upgrade(upgrade) - - if upgrade.state in [constants.UPGRADE_ACTIVATION_REQUESTED, - constants.UPGRADE_ACTIVATING]: - # Reset to activation-failed if the conductor restarts. This could - # be due to a swact or the process restarting. Either way we'll - # need to rerun the activation. - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ACTIVATION_FAILED}) - - self._upgrade_default_service() - self._upgrade_default_service_parameter() - def _handle_restore_in_progress(self): if os.path.isfile(tsc.SKIP_CEPH_OSD_WIPING): LOG.info("Starting thread to fix storage nodes install uuid.") @@ -797,61 +749,6 @@ class ConductorManager(service.PeriodicService): for fname in glob.glob(files): cutils.remove(fname) - def _clear_stuck_loads(self): - load_stuck_states = [constants.IMPORTING_LOAD_STATE] - - loads = self.dbapi.load_get_list() - stuck_loads = [load for load in loads - if load.state in load_stuck_states] - if stuck_loads: - # set stuck isos state to error - for load in stuck_loads: - LOG.error("Unexpected restart during import of load %s for " - "release %s, please delete the load and try again." % - (load.id, load.software_version)) - self.dbapi.load_update(load.id, {'state': constants.ERROR_LOAD_STATE}) - cutils.unmount_stuck_isos() - - def _set_state_for_abort(self, upgrade): - """ Update the database to reflect the abort""" - LOG.info("Upgrade Abort detected. Correcting database state.") - - # Update the upgrade state - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ABORTING}) - - try: - os.remove(tsc.UPGRADE_ABORT_FLAG) - except OSError: - LOG.exception("Failed to remove upgrade rollback flag") - - def _set_state_for_rollback(self, upgrade): - """ Update the database to reflect the rollback""" - LOG.info("Upgrade Rollback detected. 
Correcting database state.") - - # Update the upgrade state - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ABORTING_ROLLBACK}) - - # At this point we are swacting to controller-0 which has just been - # downgraded. - # Before downgrading controller-0 all storage/worker nodes were locked - # The database of the from_load is not aware of this, so we set the - # state in the database to match the state of the system. This does not - # actually lock the nodes. - hosts = self.dbapi.ihost_get_list() - for host in hosts: - if host.personality not in [constants.WORKER, constants.STORAGE]: - continue - self.dbapi.ihost_update(host.uuid, { - 'administrative': constants.ADMIN_LOCKED}) - - # Remove the rollback flag, we only want to modify the database once - try: - os.remove(tsc.UPGRADE_ROLLBACK_FLAG) - except OSError: - LOG.exception("Failed to remove upgrade rollback flag") - def _init_controller_for_upgrade(self, upgrade): # Raise alarm to show an upgrade is in progress # After upgrading controller-1 and swacting to it, we must @@ -1265,15 +1162,12 @@ class ConductorManager(service.PeriodicService): if utils.is_host_active_controller(h): active_controller = h break - software_load = None if active_controller is not None: tboot_value = active_controller.get('tboot') if tboot_value is not None: values.update({'tboot': tboot_value}) - software_load = active_controller.software_load - LOG.info("create_ihost software_load=%s" % software_load) - ihost = self.dbapi.ihost_create(values, software_load=software_load) + ihost = self.dbapi.ihost_create(values) try: hostname = values.get("hostname") @@ -5295,9 +5189,9 @@ class ConductorManager(service.PeriodicService): # No upgrade in progress pass else: - if ihost.software_load != tsc.SW_VERSION: + if ihost.sw_version != tsc.SW_VERSION: LOG.info("Ignore updating lvg for host: %s. Version " - "%s mismatch." % (ihost.hostname, ihost.software_load)) + "%s mismatch." % (ihost.hostname, ihost.sw_version)) return elif (ihost.invprovision == constants.UPGRADING and ihost.personality != constants.STORAGE): @@ -5739,9 +5633,9 @@ class ConductorManager(service.PeriodicService): # No upgrade in progress pass else: - if db_host.software_load != tsc.SW_VERSION: + if db_host.sw_version != tsc.SW_VERSION: LOG.info("Ignore updating disk partition for host: %s. Version " - "%s mismatch." % (db_host.hostname, db_host.software_load)) + "%s mismatch." % (db_host.hostname, db_host.sw_version)) return elif (db_host.invprovision == constants.UPGRADING and db_host.personality != constants.STORAGE): @@ -6016,9 +5910,9 @@ class ConductorManager(service.PeriodicService): # No upgrade in progress pass else: - if ihost.software_load != tsc.SW_VERSION: + if ihost.sw_version != tsc.SW_VERSION: LOG.info("Ignore updating physical volume for host: %s. Version " - "%s mismatch." % (ihost.hostname, ihost.software_load)) + "%s mismatch." % (ihost.hostname, ihost.sw_version)) return elif (ihost.invprovision == constants.UPGRADING and ihost.personality != constants.STORAGE): @@ -6519,9 +6413,9 @@ class ConductorManager(service.PeriodicService): # No upgrade in progress pass else: - if ihost.software_load != tsc.SW_VERSION: + if ihost.sw_version != tsc.SW_VERSION: LOG.info("Ignore updating host-fs for host: %s. Version " - "%s mismatch." % (ihost.hostname, ihost.software_load)) + "%s mismatch." 
% (ihost.hostname, ihost.sw_version)) return if self._verify_restore_in_progress(): @@ -7355,65 +7249,6 @@ class ConductorManager(service.PeriodicService): val = {'vim_progress_status': vim_progress_status_str} self.dbapi.ihost_update(ihost.uuid, val) - @periodic_task.periodic_task(spacing=CONF.conductor_periodic_task_intervals.upgrade_status) - def _audit_upgrade_status(self, context): - """Audit upgrade related status""" - # NOTE(bqian) legacy upgrade only code - try: - upgrade = self.dbapi.software_upgrade_get_one() - except exception.NotFound: - # Not upgrading. No need to update status - return - - if upgrade.state == constants.UPGRADE_ACTIVATING_HOSTS: - hosts = self.dbapi.ihost_get_list() - out_of_date_hosts = [host for host in hosts - if host.config_target and host.config_target != host.config_applied] - if not out_of_date_hosts: - LOG.info("Manifests applied. Upgrade activation complete.") - self._upgrade_manifest_start_time = None - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_ACTIVATION_COMPLETE}) - else: - LOG.info("Upgrade manifests running, config out-of-date hosts: %s" % - str([host.hostname for host in out_of_date_hosts])) - # if the timeout interval is reached and hosts are - # still out-of-date then mark activation as failed - if not self._upgrade_manifest_start_time: - self._upgrade_manifest_start_time = datetime.utcnow() - if (datetime.utcnow() - self._upgrade_manifest_start_time).total_seconds() >= \ - constants.UPGRADE_ACTIVATION_MANIFEST_TIMEOUT_IN_SECS: - self._upgrade_manifest_start_time = None - LOG.error("Upgrade activation failed, upgrade manifests apply timeout.") - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_ACTIVATION_FAILED}) - - elif upgrade.state == constants.UPGRADE_DATA_MIGRATION: - # Progress upgrade state if necessary... - if os.path.isfile(tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG): - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_DATA_MIGRATION_COMPLETE}) - elif os.path.isfile(tsc.CONTROLLER_UPGRADE_FAIL_FLAG): - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_DATA_MIGRATION_FAILED}) - - elif upgrade.state == constants.UPGRADE_UPGRADING_CONTROLLERS: - # In CPE upgrades, after swacting to controller-1, we need to clear - # the VIM upgrade flag on Controller-0 to allow VMs to be migrated - # to controller-1. 
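The recurring rename in the hunks above follows one pattern: every per-host guard now reads the host's kept sw_version field instead of the dropped software_load relation. The shape of each updated call site, extracted for reference; skip_if_version_mismatch and the 'what' label are illustrative:

    from oslo_log import log
    import tsconfig.tsconfig as tsc

    LOG = log.getLogger(__name__)

    def skip_if_version_mismatch(ihost, what):
        # Runtime config updates are skipped for hosts still running the
        # other release; sw_version replaces the old software_load lookup.
        if ihost.sw_version != tsc.SW_VERSION:
            LOG.info("Ignore updating %s for host: %s. Version %s mismatch."
                     % (what, ihost.hostname, ihost.sw_version))
            return True
        return False
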
- if constants.WORKER in tsc.subfunctions: - try: - controller_0 = self.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - if not utils.is_host_active_controller(controller_0): - vim_api.set_vim_upgrade_state(controller_0, False) - except Exception: - LOG.exception("Unable to set VIM upgrade state to False") - @periodic_task.periodic_task(spacing=CONF.conductor_periodic_task_intervals.install_states) def _audit_install_states(self, context): # A node could shutdown during it's installation and the install_state @@ -12037,13 +11872,10 @@ class ConductorManager(service.PeriodicService): try: # TODO (bqian) change below report to USM if USM major release # deploy activate failed - upgrade = usm_service.get_platform_upgrade(self.dbapi) + usm_service.get_platform_upgrade(self.dbapi) except exception.NotFound: LOG.error("Upgrade record not found during config failure") return - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_ACTIVATION_FAILED}) def handle_kube_update_params_success(self, context, host_uuid): """ @@ -13904,7 +13736,7 @@ class ConductorManager(service.PeriodicService): # node before the "worker_config_complete" has been # executed. elif force: - if host.software_load == tsc.SW_VERSION: + if host.sw_version == tsc.SW_VERSION: try: # if active controller, update without check if utils.is_host_active_controller(host) and not skip_update_config: @@ -13921,7 +13753,7 @@ class ConductorManager(service.PeriodicService): elif (host.invprovision in [constants.PROVISIONED, constants.UPGRADING] or (host.invprovision == constants.PROVISIONING and host.personality == constants.CONTROLLER)): - if host.software_load == tsc.SW_VERSION: + if host.sw_version == tsc.SW_VERSION: # We will not generate the hieradata in runtime here if the # software load of the host is different from the active # controller. The Hieradata of a host during an upgrade/rollback @@ -14360,11 +14192,11 @@ class ConductorManager(service.PeriodicService): personalities = config_dict.get('personalities') for host in hosts: if host.personality in personalities: - if host.software_load == tsc.SW_VERSION: + if host.sw_version == tsc.SW_VERSION: host_uuids.append(host.uuid) else: LOG.info("Skip applying manifest for host: %s. Version %s mismatch." 
% - (host.hostname, host.software_load)) + (host.hostname, host.sw_version)) self._update_host_config_applied(context, host, config_uuid) if not host_uuids: @@ -14777,20 +14609,6 @@ class ConductorManager(service.PeriodicService): patches.append(patch_id) return patches - def _import_load_error(self, new_load): - """ - Update the load state to 'error' in the database - """ - patch = {'state': constants.ERROR_LOAD_STATE} - try: - self.dbapi.load_update(new_load['id'], patch) - - except exception.SysinvException as e: - LOG.exception(e) - raise exception.SysinvException(_("Error updating load in " - "database for load id: %s") - % new_load['id']) - @staticmethod def _unmount_iso(mounted_iso, mntdir): # We need to sleep here because the mount/umount is happening too @@ -14802,329 +14620,6 @@ class ConductorManager(service.PeriodicService): mounted_iso._umount_iso() shutil.rmtree(mntdir) - def start_import_load(self, context, path_to_iso, path_to_sig, - import_type=None): - """ - Mount the ISO and validate the load for import - """ - loads = self.dbapi.load_get_list() - - active_load = cutils.get_active_load(loads) - - if import_type != constants.ACTIVE_LOAD_IMPORT: - cutils.validate_loads_for_import(loads) - - current_version = active_load.software_version - - if not os.path.exists(path_to_iso): - raise exception.SysinvException(_("Specified path not found %s") % - path_to_iso) - - if not os.path.exists(path_to_sig): - raise exception.SysinvException(_("Specified path not found %s") % - path_to_sig) - - if not verify_files([path_to_iso], path_to_sig): - raise exception.SysinvException(_("Signature %s could not be verified") % - path_to_sig) - - mounted_iso = None - mntdir = tempfile.mkdtemp(dir='/tmp') - # Attempt to mount iso - try: - mounted_iso = cutils.ISO(path_to_iso, mntdir) - # Note: iso will be unmounted when object goes out of scope - - except subprocess.CalledProcessError: - raise exception.SysinvException(_( - "Unable to mount iso")) - - metadata_file_path = mntdir + '/upgrades/metadata.xml' - - if not os.path.exists(metadata_file_path): - self._unmount_iso(mounted_iso, mntdir) - raise exception.SysinvException(_("Metadata file not found")) - - # Read in the metadata file - try: - metadata_file = open(metadata_file_path, 'r') - root = ElementTree.fromstring(metadata_file.read()) - metadata_file.close() - except Exception: - self._unmount_iso(mounted_iso, mntdir) - raise exception.SysinvException(_( - "Unable to read metadata file")) - - new_version = root.findtext('version') - committed_patches = [] - if import_type == constants.INACTIVE_LOAD_IMPORT: - committed_patches = self._get_committed_patches_from_iso(new_version, mntdir) - - # unmount iso - self._unmount_iso(mounted_iso, mntdir) - - if import_type == constants.ACTIVE_LOAD_IMPORT: - if new_version != current_version: - raise exception.SysinvException( - _("Active version and import version must match (%s)") - % current_version) - - # return the matching (active) load in the database - loads = self.dbapi.load_get_list() - - for load in loads: - if load.software_version == new_version: - break - else: - raise exception.SysinvException( - _("Active load not found (%s)") % current_version) - - if os.path.exists(constants.LOAD_FILES_STAGING_DIR): - shutil.rmtree(constants.LOAD_FILES_STAGING_DIR) - - return load - - elif import_type == constants.INACTIVE_LOAD_IMPORT: - if LooseVersion(new_version) >= LooseVersion(current_version): - raise exception.SysinvException( - _("Inactive load (%s) must be an older load than the current 
active (%s).") - % (new_version, current_version)) - - supported_versions = self._get_current_supported_upgrade_versions() - is_version_upgradable = False - - for upgrade_path in supported_versions: - if new_version == upgrade_path["version"]: - is_version_upgradable = True - patches = upgrade_path['required_patch'] - for patch in patches: - if patch not in committed_patches: - is_version_upgradable = False - break - - if not is_version_upgradable: - msg = """ - Inactive version must be upgradable to the - current version (%s), please check the version - and patches. - """ % current_version - raise exception.SysinvException(_(msg)) - - self.dbapi.load_update(active_load['id'], {'compatible_version': new_version, - 'required_patches': '\n'.join(patches)}) - - patch = dict() - patch['state'] = constants.IMPORTING_LOAD_STATE - - patch['software_version'] = new_version - patch['compatible_version'] = "" - patch['required_patches'] = "" - new_load = self.dbapi.load_create(patch) - return new_load - - else: - if new_version == current_version: - raise exception.SysinvException( - _("Active version and import version match (%s)") - % current_version) - - supported_upgrades_elm = root.find('supported_upgrades') - if not supported_upgrades_elm: - raise exception.SysinvException( - _("Invalid Metadata XML")) - - path_found = False - upgrade_path = None - upgrade_paths = supported_upgrades_elm.findall('upgrade') - - for upgrade_element in upgrade_paths: - valid_from_version = upgrade_element.findtext('version') - valid_from_versions = valid_from_version.split(",") - if current_version in valid_from_versions: - path_found = True - upgrade_path = upgrade_element - break - - if not path_found: - raise exception.SysinvException( - _("No valid upgrade path found")) - - # Create a patch with the values from the metadata - patch = dict() - - patch['state'] = constants.IMPORTING_LOAD_STATE - patch['software_version'] = new_version - patch['compatible_version'] = current_version - - required_patches = [] - - if upgrade_path: - patch_elements = upgrade_path.findall('required_patch') - for patch_element in patch_elements: - required_patches.append(patch_element.text) - - patch['required_patches'] = "\n".join(required_patches) - - # create the new imported load in the database - new_load = self.dbapi.load_create(patch) - - return new_load - - def import_load(self, context, path_to_iso, new_load, - import_type=None): - """ - Run the import script and add the load to the database - """ - loads = self.dbapi.load_get_list() - - cutils.validate_loads_for_import(loads) - - if new_load is None: - raise exception.SysinvException( - _("Error importing load. 
Load not found")) - - if not os.path.exists(path_to_iso): - self._import_load_error(new_load) - raise exception.SysinvException(_("Specified path not found: %s") % - path_to_iso) - - mounted_iso = None - - mntdir = tempfile.mkdtemp(dir='/tmp') - # Attempt to mount iso - try: - mounted_iso = cutils.ISO(path_to_iso, mntdir) - # Note: iso will be unmounted when object goes out of scope - - except subprocess.CalledProcessError: - self._import_load_error(new_load) - raise exception.SysinvException(_("Unable to mount iso")) - - import_script = mntdir + "/upgrades/import.sh" - - if import_type == constants.INACTIVE_LOAD_IMPORT: - import_script = ["/opt/upgrades/import.sh", mntdir] - - # Run the upgrade script - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call(import_script, stdout=fnull, stderr=fnull) # pylint: disable=not-callable - except subprocess.CalledProcessError: - self._import_load_error(new_load) - raise exception.SysinvException(_( - "Failure during import script")) - - mounted_iso._umount_iso() - shutil.rmtree(mntdir) - - state = constants.IMPORTED_LOAD_STATE - - if import_type == constants.INACTIVE_LOAD_IMPORT: - state = constants.INACTIVE_LOAD_STATE - - try: - LoadImport.extract_files(new_load['software_version']) - except exception.SysinvException as error: - self._import_load_error(new_load) - raise exception.SysinvException( - "Failure during load extract_files: %s" % (error) - ) - - # Update the load status in the database - try: - self.dbapi.load_update(new_load['id'], {'state': state}) - - except exception.SysinvException as e: - LOG.exception(e) - raise exception.SysinvException(_("Error updating load in " - "database for load id: %s") - % new_load['id']) - - # Run the sw-patch init-release commands - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call(["/usr/sbin/sw-patch", # pylint: disable=not-callable - "init-release", - new_load['software_version']], - stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - self._import_load_error(new_load) - raise exception.SysinvException(_( - "Failure during sw-patch init-release")) - - if os.path.exists(constants.LOAD_FILES_STAGING_DIR): - shutil.rmtree(constants.LOAD_FILES_STAGING_DIR) - - LOG.info("Load import completed.") - return True - - def delete_load(self, context, load_id): - """ - Cleanup a load and remove it from the database - """ - load = self.dbapi.load_get(load_id) - - cutils.validate_load_for_delete(load) - - # We allow this command to be run again if the delete fails - if load.state != constants.DELETING_LOAD_STATE: - # Here we run the cleanup script locally - self._cleanup_load(load) - self.dbapi.load_update( - load_id, {'state': constants.DELETING_LOAD_STATE}) - - mate_hostname = cutils.get_mate_controller_hostname() - - try: - standby_controller = self.dbapi.ihost_get_by_hostname( - mate_hostname) - rpcapi = agent_rpcapi.AgentAPI() - rpcapi.delete_load( - context, standby_controller['uuid'], load.software_version) - except exception.NodeNotFound: - # The mate controller has not been configured so complete the - # deletion of the load now. - self.finalize_delete_load(context, load.software_version) - - LOG.info("Load (%s) deleted." 
% load.software_version) - - def _cleanup_load(self, load): - # Run the sw-patch del-release commands - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call(["/usr/sbin/sw-patch", # pylint: disable=not-callable - "del-release", - load.software_version], - stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - raise exception.SysinvException(_( - "Failure during sw-patch del-release")) - - cleanup_script = constants.DELETE_LOAD_SCRIPT - if os.path.isfile(cleanup_script): - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call( # pylint: disable=not-callable - [cleanup_script, load.software_version], - stdout=fnull, stderr=fnull) - except subprocess.CalledProcessError: - raise exception.SysinvException(_( - "Failure during cleanup script")) - else: - raise exception.SysinvException(_( - "Cleanup script %s does not exist.") % cleanup_script) - - def finalize_delete_load(self, context, sw_version): - # Clean up the staging directory in case an error occur during the - # import and this directory did not get cleaned up. - if os.path.exists(constants.LOAD_FILES_STAGING_DIR): - shutil.rmtree(constants.LOAD_FILES_STAGING_DIR) - - loads = self.dbapi.load_get_list() - for load in loads: - if load.software_version == sw_version: - self.dbapi.load_destroy(load.id) - def upgrade_ihost_pxe_config(self, context, host, load): """Upgrade a host. @@ -15229,11 +14724,6 @@ class ConductorManager(service.PeriodicService): upgrade.state == constants.UPGRADE_DATA_MIGRATION_COMPLETE): LOG.info("Finished upgrade of %s" % constants.CONTROLLER_1_HOSTNAME) - # Update upgrade state - upgrade_update = { - 'state': constants.UPGRADE_UPGRADING_CONTROLLERS} - self.dbapi.software_upgrade_update(upgrade.uuid, - upgrade_update) if (host.hostname == constants.CONTROLLER_0_HOSTNAME and host_upgrade.software_load == upgrade.to_load): @@ -15246,410 +14736,6 @@ class ConductorManager(service.PeriodicService): raise exception.SysinvException(_( "Failure clearing VIM host upgrade state")) - # If we are in the upgrading controllers state and controller-0 - # is running the new release, update the upgrade state - if upgrade.state == constants.UPGRADE_UPGRADING_CONTROLLERS: - upgrade_update = { - 'state': constants.UPGRADE_UPGRADING_HOSTS} - self.dbapi.software_upgrade_update(upgrade.uuid, - upgrade_update) - - LOG.info("Prepare for swact to controller-0") - # As a temporary solution we only migrate the etcd database - # when we swact to controller-0. This solution will present - # some problems when we do upgrade etcd, so further - # development will be required at that time. 
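The removed delete path above was two shell-outs in strict order, and a failure of either left the load in the deleting state so the command could be re-run. Condensed, with the script path inlined from the DELETE_LOAD_SCRIPT constant this patch removes from constants.py:

    import subprocess

    DELETE_LOAD_SCRIPT = '/etc/sysinv/upgrades/delete_load.sh'

    def cleanup_load(software_version):
        # 1) drop the release's patching metadata
        subprocess.check_call(
            ["/usr/sbin/sw-patch", "del-release", software_version])
        # 2) remove the installed load files via the per-release helper
        subprocess.check_call([DELETE_LOAD_SCRIPT, software_version])
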
- try: - with open(os.devnull, "w") as devnull: - call_args = [ - '/usr/bin/upgrade_swact_migration.py', - 'prepare_swact', - upgrade.from_release, - upgrade.to_release - ] - subprocess.check_call(call_args, stdout=devnull) # pylint: disable=not-callable - except subprocess.CalledProcessError as e: - LOG.exception(e) - raise exception.SysinvException( - "Failed upgrade_swact_migration prepare_swact") - - def start_upgrade(self, context, upgrade): - """ Start the upgrade""" - - from_load = self.dbapi.load_get(upgrade.from_load) - from_version = from_load.software_version - to_load = self.dbapi.load_get(upgrade.to_load) - to_version = to_load.software_version - - controller_0 = self.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - - # Prepare for upgrade - LOG.info("Preparing for upgrade from release: %s to release: %s" % - (from_version, to_version)) - - try: - if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX: - LOG.info("Generating agent request to create simplex upgrade " - "data") - # NOTE(bqian) this is legacy upgrade only code, so only fetch upgrade - # entity from sysinv db - software_upgrade = self.dbapi.software_upgrade_get_one() - rpcapi = agent_rpcapi.AgentAPI() - # In cases where there is no backup in progress alarm but the flag exists, - # it must be removed. So upgrade-start is not blocked. - if os.path.isfile(tsc.BACKUP_IN_PROGRESS_FLAG): - LOG.info("Backup in Progress flag was found, cleaning it.") - os.remove(tsc.BACKUP_IN_PROGRESS_FLAG) - LOG.info("Backup in Progress flag was cleaned.") - rpcapi.create_simplex_backup(context, software_upgrade) - return - else: - # Extract N+1 packages necessary for installation of controller-1 - # (ie. installer images, kickstarts) - subprocess.check_call(['/usr/sbin/upgrade-start-pkg-extract', # pylint: disable=not-callable - '-r', to_version]) - # get the floating management IP - mgmt_address = cutils.get_primary_address_by_name(self.dbapi, - cutils.format_address_name(constants.CONTROLLER_HOSTNAME, - constants.NETWORK_TYPE_MGMT), - constants.NETWORK_TYPE_MGMT, True) - i_system = self.dbapi.isystem_get_one() - upgrades_management.prepare_upgrade( - from_version, to_version, i_system, mgmt_address.address) - - LOG.info("Finished upgrade preparation") - except Exception: - LOG.exception("Upgrade preparation failed") - with excutils.save_and_reraise_exception(): - if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX: - vim_api.set_vim_upgrade_state(controller_0, False) - upgrades_management.abort_upgrade(from_version, to_version, - upgrade) - # Delete upgrade record - self.dbapi.software_upgrade_destroy(upgrade.uuid) - - # Raise alarm to show an upgrade is in progress - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - constants.CONTROLLER_HOSTNAME) - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, - entity_instance_id=entity_instance_id, - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text="System Upgrade in progress.", - # operational - alarm_type=fm_constants.FM_ALARM_TYPE_7, - # congestion - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8, - proposed_repair_action="No action required.", - service_affecting=False) - fm_api.FaultAPIs().set_fault(fault) - - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_STARTED}) - - @cutils.synchronized(LOCK_IMAGE_PULL) - def activate_upgrade(self, context, upgrade): - """Activate the upgrade. 
Generate and apply new manifests. - - """ - # TODO Move upgrade methods to another file - from_load = self.dbapi.load_get(upgrade.from_load) - from_version = from_load.software_version - to_load = self.dbapi.load_get(upgrade.to_load) - to_version = to_load.software_version - - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ACTIVATING}) - - # Ask upgrade management to activate the upgrade - try: - i_system = self.dbapi.isystem_get_one() - upgrades_management.activate_upgrade(from_version, - to_version, i_system) - LOG.info("Finished upgrade activation") - except Exception: - LOG.exception("Upgrade activation failed") - with excutils.save_and_reraise_exception(): - # mark the activation as failed. The intention - # is for the user to retry activation once they - # have resolved the cause for failure - self.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_ACTIVATION_FAILED}) - - # Remove platform-nfs-ip references if it exists - # TODO(fcorream): platform-nfs-ip is just necessary to allow an upgrade from - # StarlingX releases 6 or 7 to new releases. - # remove the plat_nfs_address_name and update_platform_nfs_ip_references when - # StarlingX rel. 6 or 7 are not being used anymore - plat_nfs_address_name = cutils.format_address_name("controller-platform-nfs", - constants.NETWORK_TYPE_MGMT) - try: - cutils.get_primary_address_by_name(self.dbapi, - plat_nfs_address_name, - constants.NETWORK_TYPE_MGMT, True) - LOG.info("platform-nfs-ip exists in the DB, updating all references") - self.update_platform_nfs_ip_references(context) - - except exception.AddressNotFoundByName: - LOG.debug("activate_upgrade: {} does not exist".format(plat_nfs_address_name)) - except Exception as e: - LOG.exception(e) - LOG.error("exception: update {} references could not be completed" - .format(plat_nfs_address_name)) - - manifests_applied = False - - if manifests_applied: - LOG.info("Running upgrade activation manifests") - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ACTIVATING_HOSTS}) - else: - LOG.info("Upgrade activation complete") - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ACTIVATION_COMPLETE}) - - def complete_upgrade(self, context, upgrade, state): - """ Complete the upgrade""" - - from_load = self.dbapi.load_get(upgrade.from_load) - from_version = from_load.software_version - to_load = self.dbapi.load_get(upgrade.to_load) - to_version = to_load.software_version - - controller_0 = self.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - - if state in [constants.UPGRADE_ABORTING, - constants.UPGRADE_ABORTING_ROLLBACK]: - if upgrade.state != constants.UPGRADE_ABORT_COMPLETING: - raise exception.SysinvException( - _("Unable to complete upgrade-abort: Upgrade not in %s " - "state.") % constants.UPGRADE_ABORT_COMPLETING) - LOG.info( - "Completing upgrade abort from release: %s to release: %s" % - (from_version, to_version)) - upgrades_management.abort_upgrade(from_version, to_version, upgrade) - - if (tsc.system_type == constants.SYSTEM_MODE_DUPLEX and - tsc.system_type == constants.TIS_AIO_BUILD and - state == constants.UPGRADE_ABORTING_ROLLBACK): - - # For AIO Case, VM goes into no state when Controller-0 becomes active - # after swact. 
nova clean up will fail the instance and restart - # nova-compute service - LOG.info("Calling nova cleanup") - with open(os.devnull, "w") as fnull: - try: - subprocess.check_call(["systemctl", "start", "nova-cleanup"], # pylint: disable=not-callable - stdout=fnull, - stderr=fnull) - except subprocess.CalledProcessError: - raise exception.SysinvException(_( - "Failed to call nova cleanup during AIO abort")) - - try: - vim_api.set_vim_upgrade_state(controller_0, False) - except Exception: - LOG.exception() - raise exception.SysinvException(_( - "upgrade-abort rejected: unable to reset VIM upgrade " - "state")) - LOG.info("Finished upgrade abort") - else: - if upgrade.state != constants.UPGRADE_COMPLETING: - raise exception.SysinvException( - _("Unable to complete upgrade: Upgrade not in %s state.") - % constants.UPGRADE_COMPLETING) - - # Mark "kube-system" namespace with platform label - body = { - "metadata": { - "labels": { - common.COMPONENT_LABEL_KEY: common.COMPONENT_LABEL_VALUE_PLATFORM - } - } - } - - try: - self._kube.kube_patch_namespace('kube-system', body) - except Exception as e: - LOG.error(e) - raise - - # Complete the restore procedure - if tsc.system_mode == constants.SYSTEM_MODE_SIMPLEX: - self.complete_restore(context) - - # Force all host_upgrade entries to use the new load - # In particular we may have host profiles created in the from load - # that we need to update before we can delete the load. - hosts = self.dbapi.host_upgrade_get_list() - for host_upgrade in hosts: - if (host_upgrade.target_load == from_load.id or - host_upgrade.software_load == from_load.id): - LOG.info(_("Updating host id: %s to use load id: %s") - % (host_upgrade.forihostid, upgrade.to_load)) - self.dbapi.host_upgrade_update( - host_upgrade.id, - {"software_load": upgrade.to_load, - "target_load": upgrade.to_load}) - - # Complete the upgrade - LOG.info("Completing upgrade from release: %s to release: %s" % - (from_version, to_version)) - upgrades_management.complete_upgrade(from_version, to_version, upgrade) - LOG.info("Finished completing upgrade") - # If applicable, notify dcmanager upgrade is complete - system = self.dbapi.isystem_get_one() - role = system.get('distributed_cloud_role') - if role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - dc_api.notify_dcmanager_platform_upgrade_completed() - - # Delete upgrade record - self.dbapi.software_upgrade_destroy(upgrade.uuid) - - # TODO(fcorream): This is just needed for upgrade from R7 to R8 - # need to remove the flag that disables the use of FQDN during the - # upgrade - if (tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX): - personalities = [constants.CONTROLLER] - config_uuid = self._config_update_hosts(context, personalities) - config_dict = { - "personalities": personalities, - "classes": ['platform::network::upgrade_fqdn_cleanup::runtime'], - } - self._config_apply_runtime_manifest(context, config_uuid, config_dict) - - # Clear upgrades alarm - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - constants.CONTROLLER_HOSTNAME) - fm_api.FaultAPIs().clear_fault( - fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS, - entity_instance_id) - - def abort_upgrade(self, context, upgrade): - """ Abort the upgrade""" - from_load = self.dbapi.load_get(upgrade.from_load) - from_version = from_load.software_version - to_load = self.dbapi.load_get(upgrade.to_load) - to_version = to_load.software_version - LOG.info("Aborted upgrade from release: %s to release: %s" % - (from_version, to_version)) - - updates = {'state': 
constants.UPGRADE_ABORTING} - - controller_0 = self.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - host_upgrade = self.dbapi.host_upgrade_get_by_host( - controller_0.id) - - if host_upgrade.target_load == to_load.id: - updates['state'] = constants.UPGRADE_ABORTING_ROLLBACK - - rpc_upgrade = self.dbapi.software_upgrade_update( - upgrade.uuid, updates) - # make sure the to/from loads are in the correct state - self.dbapi.set_upgrade_loads_state( - upgrade, - constants.IMPORTED_LOAD_STATE, - constants.ACTIVE_LOAD_STATE) - - self._puppet.update_system_config() - self._puppet.update_secure_system_config() - - # There are upgrade flags that are written to controller-0 that need to - # be removed before downgrading controller-1. As these flags reside on - # controller-0, we restrict this to abort actions started on that - # controller. When the abort is run on controller-1 the data-migration - # must be complete, and only the CONTROLLER_UPGRADE_COMPLETE_FLAG would - # remain. The CONTROLLER_UPGRADE_COMPLETE_FLAG does not interfere with - # the host-downgrade. Any remaining flags will be removed during - # upgrade-complete. - if utils.is_host_active_controller(controller_0): - upgrade_flag_files = [ - tsc.CONTROLLER_UPGRADE_FLAG, - tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG, - tsc.CONTROLLER_UPGRADE_FAIL_FLAG, - tsc.CONTROLLER_UPGRADE_STARTED_FLAG - ] - for file in upgrade_flag_files: - try: - if os.path.isfile(file): - os.remove(file) - except OSError: - LOG.exception("Failed to remove upgrade flag: %s" % file) - - # When we abort from controller-1 while controller-0 is running - # the previous release, controller-0 will not be aware of the abort. - # We set the following flag so controller-0 will know we're - # aborting the upgrade and can set it's database accordingly - if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX: - if updates['state'] == constants.UPGRADE_ABORTING: - controller_1 = self.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_1_HOSTNAME) - c1_host_upgrade = self.dbapi.host_upgrade_get_by_host( - controller_1.id) - if utils.is_host_active_controller(controller_1) and \ - c1_host_upgrade.target_load == to_load.id: - abort_flag = os.path.join( - tsc.PLATFORM_PATH, 'config', from_version, - tsc.UPGRADE_ABORT_FILE) - open(abort_flag, "w").close() - - return rpc_upgrade - - def complete_simplex_backup(self, context, success): - """Complete the simplex upgrade start process - - :param context: request context. 
- :param success: If the create_simplex_backup call completed - """ - try: - # NOTE(bqian) legacy upgrade only code - upgrade = self.dbapi.software_upgrade_get_one() - except exception.NotFound: - LOG.error("Software upgrade record not found") - return - - from_version = upgrade.from_release - to_version = upgrade.to_release - - if not success: - # The upgrade start data collection failed, stop the upgrade - upgrades_management.abort_upgrade(from_version, to_version, - upgrade) - # Delete upgrade record - self.dbapi.software_upgrade_destroy(upgrade.uuid) - LOG.info("Simplex upgrade start failed") - else: - LOG.info("Simplex upgrade start completed") - # Raise alarm to show an upgrade is in progress - entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_HOST, - constants.CONTROLLER_HOSTNAME) - fault = fm_api.Fault( - alarm_id=fm_constants.FM_ALARM_ID_UPGRADE_IN_PROGRESS, - alarm_state=fm_constants.FM_ALARM_STATE_SET, - entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST, - entity_instance_id=entity_instance_id, - severity=fm_constants.FM_ALARM_SEVERITY_MINOR, - reason_text="System Upgrade in progress.", - # operational - alarm_type=fm_constants.FM_ALARM_TYPE_7, - # congestion - probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8, - proposed_repair_action="No action required.", - service_affecting=False) - fm_api.FaultAPIs().set_fault(fault) - - self.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_STARTED}) - - return - def get_system_health(self, context, force=False, upgrade=False, kube_upgrade=False, kube_rootca_update=False, diff --git a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py index ebb08d5ecb..75d9f8a8e0 100644 --- a/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py +++ b/sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py @@ -1285,65 +1285,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): self.make_msg('update_apparmor_config', ihost_uuid=ihost_uuid)) - def start_import_load(self, context, path_to_iso, path_to_sig, - import_type=None, timeout=180): - """Synchronously, mount the ISO and validate the load for import - - :param context: request context. - :param path_to_iso: the file path of the iso on this host - :param path_to_sig: the file path of the iso's detached signature on - this host - :param import_type: the type of the import, the possible values are - constants.ACTIVE_LOAD_IMPORT for active load or - constants.INACTIVE_LOAD_IMPORT for inactive load. - :param timeout: rpc call timeout in seconds - :returns: the newly create load object. - """ - return self.call(context, - self.make_msg('start_import_load', - path_to_iso=path_to_iso, - path_to_sig=path_to_sig, - import_type=import_type), - timeout=timeout) - - def import_load(self, context, path_to_iso, new_load, - import_type=None): - """Asynchronously, import a load and add it to the database - - :param context: request context. - :param path_to_iso: the file path of the iso on this host - :param new_load: the load object - :param import_type: the type of the import (active or inactive) - :returns: none. - """ - return self.cast(context, - self.make_msg('import_load', - path_to_iso=path_to_iso, - new_load=new_load, - import_type=import_type)) - - def delete_load(self, context, load_id): - """Asynchronously, cleanup a load from both controllers - - :param context: request context. - :param load_id: id of load to be deleted - :returns: none. 
- """ - return self.cast(context, - self.make_msg('delete_load', - load_id=load_id)) - - def finalize_delete_load(self, context, sw_version): - """Asynchronously, delete the load from the database - - :param context: request context. - :param sw_version: software version of load to be deleted - :returns: none. - """ - return self.cast(context, - self.make_msg('finalize_delete_load', - sw_version=sw_version)) - def load_update_by_host(self, context, ihost_id, version): """Update the host_upgrade table with the running SW_VERSION @@ -1407,15 +1348,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy): return self.call(context, self.make_msg('abort_upgrade', upgrade=upgrade)) - def complete_simplex_backup(self, context, success): - """Asynchronously, complete the simplex upgrade start process - - :param context: request context. - :param success: If the create_simplex_backup call completed - """ - return self.cast(context, self.make_msg('complete_simplex_backup', - success=success)) - def get_system_health(self, context, force=False, upgrade=False, kube_upgrade=False, kube_rootca_update=False, alarm_ignore_list=None): diff --git a/sysinv/sysinv/sysinv/sysinv/db/api.py b/sysinv/sysinv/sysinv/sysinv/db/api.py index 9b7cde4032..c3878676a9 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/api.py +++ b/sysinv/sysinv/sysinv/sysinv/db/api.py @@ -127,7 +127,7 @@ class Connection(object): """ @abc.abstractmethod - def ihost_create(self, values, software_load=None): + def ihost_create(self, values): """Create a new ihost. :param values: A dict containing several items used to identify @@ -144,7 +144,6 @@ class Connection(object): 'availability': 'offduty', 'extra': { ... }, } - :param: software_load. The load software_version. :returns: A ihost. """ @@ -3680,75 +3679,6 @@ class Connection(object): :param sensorgroup_id: id (PK) of the sensorgroup. """ - @abc.abstractmethod - def load_create(self, values): - """Create a new Load. - - :param values: A dict containing several items used to identify - and track the load - { - 'software_version': '16.10', - 'compatible_version': '15.10', - 'required_patches': '001,002,003', - } - :returns: A load. - """ - - @abc.abstractmethod - def load_get(self, load): - """Returns a load. - - :param load: The id or uuid of a load. - :returns: A load. - """ - - @abc.abstractmethod - def load_get_by_version(self, version): - """Returns the load with the specified version. - - :param version: The software version of a load. - :returns: A load. - """ - - @abc.abstractmethod - def load_get_list(self, limit=None, marker=None, sort_key=None, - sort_dir=None): - """Return a list of loads. - - :param limit: Maximum number of loads to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - """ - - @abc.abstractmethod - def load_update(self, load, values): - """Update properties of a load. - - :param load: The id or uuid of a load. - :param values: Dict of values to update. - May be a partial list, - :returns: A load. - """ - - @abc.abstractmethod - def load_destroy(self, load): - """Destroy a load. - - :param load: The id or uuid of a load. - """ - - @abc.abstractmethod - def set_upgrade_loads_state(self, upgrade, to_state, from_state): - """Change the states of the loads in an upgrade. - - :param upgrade: An upgrade object. - :param to_state: The state of the 'to' load. 
- :param from_state: The state of the 'from' load. - """ - @abc.abstractmethod def fpga_device_create(self, hostid, values): """Create a new FPGA device for a host. @@ -3883,123 +3813,6 @@ class Connection(object): :param deviceid: The id or uuid of a pci device. """ - @abc.abstractmethod - def software_upgrade_create(self, values): - """Create a new software_upgrade entry - - :param values: A dict containing several items used to identify - and track the entry, and several dicts which are passed - into the Drivers when managing this node. For example: - - { - 'uuid': uuidutils.generate_uuid(), - 'state': 'start', 'migration_complete', 'activated', - 'complete', - 'from_load': '15.10', - 'to_load' : '16.10', - } - :returns: A software_uprade record. - """ - - @abc.abstractmethod - def software_upgrade_get(self, id): - """Return a software_upgrade entry for a given id - - :param _id: The id or uuid of a software_upgrade entry - :returns: a software_upgrade entry - """ - - @abc.abstractmethod - def software_upgrade_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - """Return a list of software_upgrade entries. - - :param limit: Maximum number of software_upgrade entries to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - """ - - @abc.abstractmethod - def software_upgrade_get_one(self): - """Return exactly one software_upgrade. - - :returns: A software_upgrade. - """ - - @abc.abstractmethod - def software_upgrade_update(self, uuid, values): - """Update properties of a software_upgrade. - - :param node: The uuid of a software_upgrade entry. - :param values: Dict of values to update. - { - 'state': 'complete', - } - :returns: A software_upgrade entry. - """ - - @abc.abstractmethod - def software_upgrade_destroy(self, id): - """Destroy a software_upgrade entry. - - :param id: The id or uuid of a software_upgrade entry. - """ - - @abc.abstractmethod - def host_upgrade_create(self, host_id, values): - """Create host_upgrade entry. - :param ihost_id: id of the host. - :param values: Dict of values to update. - { - 'software_load': 'load.id', - } - :returns: a host_upgrade - """ - - @abc.abstractmethod - def host_upgrade_get(self, id): - """Return a host_upgrade entry for a given host - - :param id: id or uuid of the host_upgrade entry. - :returns: a host_upgrade - """ - - @abc.abstractmethod - def host_upgrade_get_by_host(self, host_id): - """Return a host_upgrade entry for a given host - - :param id: id of the host entry. - :returns: a host_upgrade - """ - - @abc.abstractmethod - def host_upgrade_get_list(self, limit=None, marker=None, sort_key=None, - sort_dir=None): - """Return a list of host_upgrade entries. - - :param limit: Maximum number of host_upgrade to return. - :param marker: the last item of the previous page; we return the next - result set. - :param sort_key: Attribute by which results should be sorted. - :param sort_dir: direction in which results should be sorted. - (asc, desc) - """ - - @abc.abstractmethod - def host_upgrade_update(self, host_id, values): - """Update properties of a host_upgrade entry. - - :param host_id: The id of a host entry. - :param values: Dict of values to update. - { - 'software_load': 'load.id' - } - :returns: A host_upgrade entry. 
- """ - @abc.abstractmethod def service_parameter_create(self, values): """Create a new service_parameter entry diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py index 4059357022..901b4e6749 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py @@ -282,12 +282,7 @@ def add_filter_by_many_identities(query, model, values): def add_host_options(query): - return query. \ - options(joinedload(models.ihost.system)). \ - options(joinedload(models.ihost.host_upgrade). - joinedload(models.HostUpgrade.load_software)). \ - options(joinedload(models.ihost.host_upgrade). - joinedload(models.HostUpgrade.load_target)) + return query.options(joinedload(models.ihost.system)) def add_inode_filter_by_ihost(query, value): @@ -1351,7 +1346,7 @@ class Connection(api.Connection): raise exception.ServerNotFound(server=server) @db_objects.objectify(objects.host) - def ihost_create(self, values, software_load=None): + def ihost_create(self, values): if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() host = models.ihost() @@ -1362,7 +1357,6 @@ class Connection(api.Connection): session.flush() except db_exc.DBDuplicateEntry: raise exception.NodeAlreadyExists(uuid=values['uuid']) - self._host_upgrade_create(host.id, software_load) self._kube_host_upgrade_create(host.id) return self._host_get(values['uuid']) @@ -6986,242 +6980,6 @@ class Connection(api.Connection): def isensorgroup_discrete_destroy(self, sensorgroup_id): return self._isensorgroup_destroy(models.SensorGroupsDiscrete, sensorgroup_id) - @db_objects.objectify(objects.load) - def load_create(self, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - load = models.Load() - load.update(values) - with _session_for_write() as session: - try: - session.add(load) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.LoadAlreadyExists(uuid=values['uuid']) - return load - - @db_objects.objectify(objects.load) - def load_get(self, load): - # load may be passed as a string. It may be uuid or Int. 
- query = model_query(models.Load) - query = add_identity_filter(query, load) - - try: - result = query.one() - except NoResultFound: - raise exception.LoadNotFound(load=load) - - return result - - @db_objects.objectify(objects.load) - def load_get_by_version(self, version): - query = model_query(models.Load) - query = query.filter_by(software_version=version) - - try: - result = query.one() - except NoResultFound: - raise exception.LoadNotFound(load=version) - - return result - - @db_objects.objectify(objects.load) - def load_get_list(self, limit=None, marker=None, sort_key=None, - sort_dir=None): - - query = model_query(models.Load) - - return _paginate_query(models.Load, limit, marker, - sort_key, sort_dir, query) - - @db_objects.objectify(objects.load) - def load_update(self, load, values): - with _session_for_write() as session: - query = model_query(models.Load, session=session) - query = add_identity_filter(query, load) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.LoadNotFound(load=load) - return query.one() - - def load_destroy(self, load): - with _session_for_write() as session: - query = model_query(models.Load, session=session) - query = add_identity_filter(query, load) - - try: - query.one() - except NoResultFound: - raise exception.LoadNotFound(load=load) - - query.delete() - - def set_upgrade_loads_state(self, upgrade, to_state, from_state): - self.load_update(upgrade.from_load, {'state': from_state}) - self.load_update(upgrade.to_load, {'state': to_state}) - - def _software_upgrade_get(self, id): - query = model_query(models.SoftwareUpgrade) - if utils.is_uuid_like(id): - query = query.filter_by(uuid=id) - else: - query = query.filter_by(id=id) - - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No software upgrade entry found for %s" % id) - - return result - - @db_objects.objectify(objects.software_upgrade) - def software_upgrade_create(self, values): - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - upgrade = models.SoftwareUpgrade() - upgrade.update(values) - with _session_for_write() as session: - try: - session.add(upgrade) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.UpgradeAlreadyExists(uuid=values['uuid']) - - return self._software_upgrade_get(values['uuid']) - - @db_objects.objectify(objects.software_upgrade) - def software_upgrade_get(self, id): - return self._software_upgrade_get(id) - - @db_objects.objectify(objects.software_upgrade) - def software_upgrade_get_list(self, limit=None, marker=None, - sort_key=None, sort_dir=None): - - query = model_query(models.SoftwareUpgrade) - - return _paginate_query(models.SoftwareUpgrade, limit, marker, - sort_key, sort_dir, query) - - @db_objects.objectify(objects.software_upgrade) - def software_upgrade_get_one(self): - query = model_query(models.SoftwareUpgrade) - - try: - return query.one() - except NoResultFound: - raise exception.NotFound() - - @db_objects.objectify(objects.software_upgrade) - def software_upgrade_update(self, uuid, values): - with _session_for_write() as session: - query = model_query(models.SoftwareUpgrade, session=session) - query = query.filter_by(uuid=uuid) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.NotFound(id) - return query.one() - - def software_upgrade_destroy(self, id): - with _session_for_write() as session: - query = model_query(models.SoftwareUpgrade, session=session) - query = 
query.filter_by(uuid=id) - - try: - query.one() - except NoResultFound: - raise exception.NotFound(id) - - query.delete() - - def _host_upgrade_create(self, host_id, version, values=None): - if values is None: - values = dict() - if not version: - systems = self.isystem_get_list() - if systems is not None: - version = systems[0].software_version - LOG.info("_host_upgrade_create system version=%s" % version) - if version: - # get the load_id from the loads table - query = model_query(models.Load) - query = query.filter_by(software_version=version) - try: - result = query.one() - except NoResultFound: - LOG.info("Fail to get load id from load table %s" % - version) - return None - values['software_load'] = result.id - values['target_load'] = result.id - values['forihostid'] = host_id - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() - upgrade = models.HostUpgrade() - upgrade.update(values) - with _session_for_write() as session: - try: - session.add(upgrade) - session.flush() - except db_exc.DBDuplicateEntry: - raise exception.UpgradeAlreadyExists(uuid=values['uuid']) - return upgrade - - @db_objects.objectify(objects.host_upgrade) - def host_upgrade_create(self, host_id, version, values): - return self._host_upgrade_create(host_id, version, values) - - @db_objects.objectify(objects.host_upgrade) - def host_upgrade_get(self, id): - query = model_query(models.HostUpgrade) - - if utils.is_uuid_like(id): - query = query.filter_by(uuid=id) - else: - query = query.filter_by(id=id) - - try: - result = query.one() - except NoResultFound: - raise exception.InvalidParameterValue( - err="No host upgrade entry found for %s" % id) - - return result - - @db_objects.objectify(objects.host_upgrade) - def host_upgrade_get_by_host(self, host_id): - query = model_query(models.HostUpgrade) - query = query.filter_by(forihostid=host_id) - - try: - result = query.one() - except NoResultFound: - raise exception.NotFound(host_id) - - return result - - @db_objects.objectify(objects.host_upgrade) - def host_upgrade_get_list(self, limit=None, marker=None, sort_key=None, - sort_dir=None): - query = model_query(models.HostUpgrade) - - return _paginate_query(models.HostUpgrade, limit, marker, - sort_key, sort_dir, query) - - @db_objects.objectify(objects.host_upgrade) - def host_upgrade_update(self, object_id, values): - with _session_for_write() as session: - query = model_query(models.HostUpgrade, session=session) - query = query.filter_by(id=object_id) - - count = query.update(values, synchronize_session='fetch') - if count != 1: - raise exception.NotFound(id) - session.flush() - return query.one() - @db_objects.objectify(objects.service_parameter) def service_parameter_create(self, values): if not values.get('uuid'): diff --git a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py index 9d170e5c37..033704fcb3 100644 --- a/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py +++ b/sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py @@ -265,7 +265,6 @@ class ihost(Base): system = relationship("isystem", lazy="joined", join_depth=1) - host_upgrade = relationship("HostUpgrade", uselist=False) kube_host_upgrade = relationship("KubeHostUpgrade", uselist=False) ptp_instances = relationship( @@ -1639,22 +1638,6 @@ class SensorsAnalog(Sensors): } -class Load(Base): - __tablename__ = 'loads' - - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - - state = Column(String(255)) - - software_version = Column(String(255)) 
- compatible_version = Column(String(255)) - - required_patches = Column(String(2047)) - - UniqueConstraint('software_version') - - class PciDevice(Base): __tablename__ = 'pci_devices' @@ -1842,28 +1825,6 @@ class DeviceImageState(Base): "DeviceImage", lazy="joined", backref="device_image_state") -class SoftwareUpgrade(Base): - __tablename__ = 'software_upgrade' - - id = Column('id', Integer, primary_key=True, nullable=False) - uuid = Column('uuid', String(36), unique=True) - state = Column('state', String(128), nullable=False) - from_load = Column('from_load', Integer, ForeignKey('loads.id', - ondelete="CASCADE"), - nullable=False) - to_load = Column('to_load', Integer, ForeignKey('loads.id', - ondelete="CASCADE"), - nullable=False) - - # the from_load and to_load should have been named with an _id, but since - # they weren't we will just reverse the naming to not clash with the - # foreign key column - load_from = relationship("Load", lazy="joined", join_depth=1, - foreign_keys=[from_load]) - load_to = relationship("Load", lazy="joined", join_depth=1, - foreign_keys=[to_load]) - - class Restore(Base): __tablename__ = 'backup_restore' @@ -1873,27 +1834,6 @@ class Restore(Base): capabilities = Column(JSONEncodedDict) -class HostUpgrade(Base): - __tablename__ = 'host_upgrade' - - id = Column('id', Integer, primary_key=True, nullable=False) - uuid = Column('uuid', String(36), unique=True) - forihostid = Column('forihostid', Integer, ForeignKey('i_host.id', - ondelete="CASCADE")) - software_load = Column('software_load', Integer, ForeignKey('loads.id'), - nullable=False) - target_load = Column('target_load', Integer, ForeignKey('loads.id'), - nullable=False) - - # the software_load and target_load should have been named with an _id, - # but since they weren't we will just reverse the naming to not clash with - # the foreign key column - load_software = relationship("Load", lazy="joined", join_depth=1, - foreign_keys=[software_load]) - load_target = relationship("Load", lazy="joined", join_depth=1, - foreign_keys=[target_load]) - - class ServiceParameter(Base): __tablename__ = 'service_parameter' diff --git a/sysinv/sysinv/sysinv/sysinv/objects/__init__.py b/sysinv/sysinv/sysinv/sysinv/objects/__init__.py index 5f316e9180..85c524751f 100644 --- a/sysinv/sysinv/sysinv/sysinv/objects/__init__.py +++ b/sysinv/sysinv/sysinv/sysinv/objects/__init__.py @@ -36,7 +36,6 @@ from sysinv.objects import drbdconfig from sysinv.objects import port_ethernet from sysinv.objects import helm_overrides from sysinv.objects import host -from sysinv.objects import host_upgrade from sysinv.objects import kube_app from sysinv.objects import kube_app_bundle from sysinv.objects import kube_app_releases @@ -57,7 +56,6 @@ from sysinv.objects import label from sysinv.objects import lldp_agent from sysinv.objects import lldp_neighbour from sysinv.objects import lldp_tlv -from sysinv.objects import load from sysinv.objects import lvg from sysinv.objects import memory from sysinv.objects import network @@ -87,7 +85,6 @@ from sysinv.objects import sensorgroup from sysinv.objects import sensorgroup_analog from sysinv.objects import sensorgroup_discrete from sysinv.objects import service_parameter -from sysinv.objects import software_upgrade from sysinv.objects import storage from sysinv.objects import storage_backend from sysinv.objects import storage_ceph @@ -165,10 +162,7 @@ sensor_discrete = sensor_discrete.SensorDiscrete sensorgroup = sensorgroup.SensorGroup sensorgroup_analog = sensorgroup_analog.SensorGroupAnalog 
sensorgroup_discrete = sensorgroup_discrete.SensorGroupDiscrete -load = load.Load pci_device = pci_device.PCIDevice -software_upgrade = software_upgrade.SoftwareUpgrade -host_upgrade = host_upgrade.HostUpgrade service_parameter = service_parameter.ServiceParameter lldp_agent = lldp_agent.LLDPAgent lldp_neighbour = lldp_neighbour.LLDPNeighbour @@ -251,10 +245,7 @@ __all__ = ("system", "sensorgroup", "sensorgroup_analog", "sensorgroup_discrete", - "load", "pci_device", - "software_upgrade", - "host_upgrade", "network", "interface_network", "service_parameter", diff --git a/sysinv/sysinv/sysinv/sysinv/objects/host.py b/sysinv/sysinv/sysinv/sysinv/objects/host.py index a9134f5f4d..a6f746958e 100644 --- a/sysinv/sysinv/sysinv/sysinv/objects/host.py +++ b/sysinv/sysinv/sysinv/sysinv/objects/host.py @@ -13,16 +13,6 @@ from sysinv.objects import base from sysinv.objects import utils -def _get_software_load(field, db_object): - if db_object.host_upgrade: - return db_object.host_upgrade.load_software.software_version - - -def _get_target_load(field, db_object): - if db_object.host_upgrade: - return db_object.host_upgrade.load_target.software_version - - def _get_ptp_instance_names(field, db_object): instances = db_object['ptp_instances'] names = [] @@ -103,8 +93,6 @@ class Host(base.SysinvObject): 'tboot': utils.str_or_none, 'vsc_controllers': utils.str_or_none, 'ttys_dcd': utils.bool_or_none, - 'software_load': utils.str_or_none, - 'target_load': utils.str_or_none, 'install_state': utils.str_or_none, 'install_state_info': utils.str_or_none, 'iscsi_initiator_name': utils.str_or_none, @@ -119,9 +107,7 @@ class Host(base.SysinvObject): } _foreign_fields = { - 'isystem_uuid': 'system:uuid', - 'software_load': _get_software_load, - 'target_load': _get_target_load + 'isystem_uuid': 'system:uuid' } _optional_fields = { diff --git a/sysinv/sysinv/sysinv/sysinv/objects/host_upgrade.py b/sysinv/sysinv/sysinv/sysinv/objects/host_upgrade.py deleted file mode 100644 index 15fca2f0e2..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/objects/host_upgrade.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2015 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from sysinv.db import api as db_api -from sysinv.objects import base -from sysinv.objects import utils - - -class HostUpgrade(base.SysinvObject): - # VERSION 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = {'id': int, - 'uuid': utils.uuid_or_none, - 'forihostid': utils.int_or_none, - 'software_load': utils.int_or_none, - 'target_load': utils.int_or_none, - } - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.host_upgrade_get(uuid) - - @base.remotable_classmethod - def get_by_host_id(cls, context, host_id): - return cls.dbapi.host_upgrade_get_by_host(host_id) - - def save_changes(self, context, updates): - self.dbapi.host_upgrade_update(self.id, # pylint: disable=no-member - updates) diff --git a/sysinv/sysinv/sysinv/sysinv/objects/load.py b/sysinv/sysinv/sysinv/sysinv/objects/load.py deleted file mode 100644 index 37811afca9..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/objects/load.py +++ /dev/null @@ -1,37 +0,0 @@ -# -# Copyright (c) 2015 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from sysinv.db import api as db_api -from sysinv.objects import base -from sysinv.objects import utils - - -class Load(base.SysinvObject): - dbapi = db_api.get_instance() - - fields = { - 'id': int, - 'uuid': utils.str_or_none, - - 'state': utils.str_or_none, - - 'software_version': utils.str_or_none, - - 'compatible_version': utils.str_or_none, - 'required_patches': utils.str_or_none, - } - - @base.remotable_classmethod - def get_by_uuid(self, context, uuid): - return self.dbapi.load_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.load_update(self.uuid, # pylint: disable=no-member - updates) diff --git a/sysinv/sysinv/sysinv/sysinv/objects/software_upgrade.py b/sysinv/sysinv/sysinv/sysinv/objects/software_upgrade.py deleted file mode 100644 index ab64dc8596..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/objects/software_upgrade.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2015-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# coding=utf-8 -# - -from sysinv.db import api as db_api -from sysinv.objects import base -from sysinv.objects import utils - - -class SoftwareUpgrade(base.SysinvObject): - # VERSION 1.0: Initial version - VERSION = '1.0' - - dbapi = db_api.get_instance() - - fields = {'id': int, - 'uuid': utils.uuid_or_none, - 'state': utils.str_or_none, - 'from_load': utils.int_or_none, - 'to_load': utils.int_or_none, - 'from_release': utils.str_or_none, - 'to_release': utils.str_or_none, - } - - _foreign_fields = { - 'from_release': 'load_from:software_version', - 'to_release': 'load_to:software_version' - } - - @base.remotable_classmethod - def get_by_uuid(cls, context, uuid): - return cls.dbapi.software_upgrade_get(uuid) - - def save_changes(self, context, updates): - self.dbapi.software_upgrade_update(self.uuid, # pylint: disable=no-member - updates) diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/nfv.py b/sysinv/sysinv/sysinv/sysinv/puppet/nfv.py index f12f8a6875..3fbc18619e 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/nfv.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/nfv.py @@ -313,7 +313,7 @@ class NfvPuppet(openstack.OpenstackBasePuppet): def get_host_config(self, host): if (constants.CONTROLLER not in utils.get_personalities(host)): return {} - database_dir = "/opt/platform/nfv/vim/%s" % host.software_load + database_dir = "/opt/platform/nfv/vim/%s" % host.sw_version return { 'nfv::vim::database_dir': database_dir, } diff --git a/sysinv/sysinv/sysinv/sysinv/puppet/platform.py b/sysinv/sysinv/sysinv/sysinv/puppet/platform.py index b6cc5e5db6..d2cb821676 100644 --- a/sysinv/sysinv/sysinv/sysinv/puppet/platform.py +++ b/sysinv/sysinv/sysinv/sysinv/puppet/platform.py @@ -397,7 +397,7 @@ class PlatformPuppet(base.BasePuppet): # required parameters config = { 'platform::params::hostname': host.hostname, - 'platform::params::software_version': self.quoted_str(host.software_load), + 'platform::params::software_version': self.quoted_str(host.sw_version), 'platform::params::apparmor': host.apparmor, } diff --git a/sysinv/sysinv/sysinv/sysinv/tests/agent/test_manager.py b/sysinv/sysinv/sysinv/sysinv/tests/agent/test_manager.py index 3deb91fc74..e998882227 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/agent/test_manager.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/agent/test_manager.py @@ -15,14 +15,12 @@ from oslo_context import context from sysinv.agent.manager import 
AgentManager from sysinv.common import constants -from sysinv.common import exception from sysinv.tests import base class FakeConductorAPI(object): def __init__(self, isystem=None): - self.finalize_delete_load = mock.MagicMock() self.create_host_filesystems = mock.MagicMock() self.update_host_max_cpu_mhz_configured = mock.MagicMock() self.is_virtual_system_config_result = False @@ -473,60 +471,3 @@ class TestHostFileSystems(base.TestCase): self.agent_manager._ihost_uuid, expected_filesystems) self.assertEqual(self.agent_manager._prev_fs, expected_filesystems) - - -@mock.patch('sysinv.agent.manager.os.path.isfile', mock.MagicMock()) -@mock.patch('sysinv.agent.manager.subprocess.check_call', mock.MagicMock()) -class TestLoad(base.TestCase): - def setUp(self): - super(TestLoad, self).setUp() - - self.version = '1.0' - self.fake_uuid = 'FAKEUUID' - self.agent_manager = AgentManager('test-host', 'test-topic') - self.agent_manager._ihost_uuid = self.fake_uuid - self.context = context.get_admin_context() - - conductor = mock.patch('sysinv.agent.manager.conductor_rpcapi.ConductorAPI') - self.mock_conductor_api = conductor.start() - self.fake_conductor = FakeConductorAPI() - self.mock_conductor_api.return_value = self.fake_conductor - self.addCleanup(conductor.stop) - - def tearDown(self): - super(TestLoad, self).tearDown() - - def test_delete_load(self): - self.agent_manager.delete_load( - self.context, - self.fake_uuid, - self.version, - ) - - self.fake_conductor.finalize_delete_load.assert_called_once() - - def test_delete_load_without_delete_script(self): - with mock.patch('sysinv.agent.manager.os.path.isfile') as isfile: - isfile.return_value = False - - self.agent_manager.delete_load( - self.context, - self.fake_uuid, - self.version, - ) - - self.fake_conductor.finalize_delete_load.assert_not_called() - - def test_delete_load_script_exception(self): - with mock.patch('sysinv.agent.manager.subprocess.check_call') as check_call: - check_call.side_effect = exception.SysinvException() - - self.assertRaises( - exception.SysinvException, - self.agent_manager.delete_load, - self.context, - self.fake_uuid, - self.version, - ) - - self.fake_conductor.finalize_delete_load.assert_not_called() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_address_pool.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_address_pool.py index c83f0af69e..3ad385f7a9 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_address_pool.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_address_pool.py @@ -588,7 +588,6 @@ class TestPatchMixin(object): "0.0", "0.0") mock_get_platform_upgrade.return_value = usm_deploy - dbutils.create_test_upgrade(state=constants.UPGRADE_STARTING) addrpool = self.find_addrpool_by_networktype(constants.NETWORK_TYPE_OAM) response = self.patch_oam_fail(addrpool, http_client.BAD_REQUEST, controller1_address=str(self.oam_subnet[10])) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py index e881b44de5..f96dfc55b9 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_host.py @@ -2615,9 +2615,6 @@ class TestPatch(TestHost): operational=constants.OPERATIONAL_ENABLED, availability=constants.AVAILABILITY_ONLINE) - upgrade = dbutils.create_test_upgrade( - state=constants.DEPLOY_STATE_START - ) # Verify the error response on lock controller attempt response = self._patch_host_action(c1_host['hostname'], constants.LOCK_ACTION, @@ -2626,15 +2623,6 @@ class TestPatch(TestHost): 
self.assertEqual(response.content_type, 'application/json') self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) - self.assertIn("host-lock %s is not allowed during upgrade state '%s'" % - (c1_host['hostname'], upgrade.state), - response.json['error_message']) - - def test_lock_action_controller_during_upgrade_started(self): - dbutils.create_test_upgrade( - state=constants.UPGRADE_STARTED - ) - self._test_lock_action_controller() @mock.patch('os.path.isfile') def test_lock_action_controller_during_backup_in_progress(self, mock_os_is_file): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_datanetwork.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_datanetwork.py index 43fce30776..7dbea2fa71 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_datanetwork.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_datanetwork.py @@ -38,7 +38,6 @@ class InterfaceDataNetworkTestCase(base.FunctionalTest): self.addCleanup(p.stop) self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.controller = dbutils.create_test_ihost( id='1', uuid=None, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py index 52c43ad470..d530850f82 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_interface_network.py @@ -39,7 +39,6 @@ class InterfaceNetworkTestCase(base.FunctionalTest): self.addCleanup(p.stop) self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.controller = dbutils.create_test_ihost( id='1', uuid=None, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_rootca_update.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_rootca_update.py index 4a18f6dabc..c6defa241c 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_rootca_update.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_rootca_update.py @@ -305,10 +305,6 @@ class TestPostKubeRootCAUpdate(TestKubeRootCAUpdate, "0.0", "0.0") mock_get_platform_upgrade.return_value = usm_deploy - dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW, - compatible_version=dbutils.SW_VERSION, - state=constants.IMPORTED_LOAD_STATE) - dbutils.create_test_upgrade() create_dict = dbutils.post_get_test_kube_rootca_update() result = self.post_json('/kube_rootca_update?force=False', create_dict, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py index bd8737fc63..d447e04949 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_kube_upgrade.py @@ -378,10 +378,6 @@ class TestPostKubeUpgrade(TestKubeUpgrade, "0.0", "0.0") mock_get_platform_upgrade.return_value = usm_deploy - dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW, - compatible_version=dbutils.SW_VERSION, - state=constants.IMPORTED_LOAD_STATE) - dbutils.create_test_upgrade() create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2') result = self.post_json('/kube_upgrade', create_dict, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py index a07f4c2d4c..7a5e5e9894 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_label.py @@ -43,7 +43,6 @@ class 
LabelTestCase(base.FunctionalTest): super(LabelTestCase, self).setUp() self.dbapi = dbapi.get_instance() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() def _get_path(self, host=None, params=None): if host: diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py index b6d4b7ccbf..cdffa011c0 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_oamnetwork.py @@ -369,9 +369,6 @@ class TestPatchMixin(OAMNetworkTestCase): 'oam_c0_ip': str(oam_c0_ip), 'oam_c1_ip': str(oam_c1_ip), } - dbutils.create_test_upgrade( - state=constants.UPGRADE_STARTING - ) error_message = "Action rejected while a " \ "platform upgrade is in progress" self._test_patch_fail(patch_obj, http_client.BAD_REQUEST, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_partition.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_partition.py index eb28b4bd1c..120f7ffc2a 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_partition.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_partition.py @@ -82,7 +82,6 @@ class TestPartition(base.FunctionalTest): "sdn_enabled": False, "shared_services": "[]"} ) - self.load = dbutils.create_test_load() # Create controller-0 self.ihost = self._create_controller_0() self.disk = self._create_disk(self.ihost.id) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_ptp.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_ptp.py index aa69bd7df2..24f1142017 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_ptp.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_ptp.py @@ -21,7 +21,6 @@ class PTPTestCase(base.FunctionalTest): super(PTPTestCase, self).setUp() self.dbapi = dbapi.get_instance() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.controller = dbutils.create_test_ihost( id='1', uuid=None, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_route.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_route.py index e7ccb49608..2dee080b94 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_route.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_route.py @@ -170,11 +170,6 @@ class RouteTestCase(base.FunctionalTest, dbbase.BaseHostTestCase): self.assertEqual(response.status_code, status_code) self.assertIn(error_message, response.json['error_message']) - def _create_platform_upgrade(self): - self.upgrade = dbutils.create_test_upgrade( - state=constants.UPGRADE_STARTING - ) - class TestPost(RouteTestCase): def test_create_route(self): @@ -204,7 +199,6 @@ class TestPostUpgrade(RouteTestCase): def setUp(self): super(TestPostUpgrade, self).setUp() self.dbapi = db_api.get_instance() - self._create_platform_upgrade() @mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True) def test_create_route_during_disallowed_upgrade_state(self): @@ -276,7 +270,6 @@ class TestDeleteUpgrade(RouteTestCase): def setUp(self): super(TestDeleteUpgrade, self).setUp() self.dbapi = db_api.get_instance() - self._create_platform_upgrade() @mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True) def test_delete_route_during_disallowed_upgrade_state(self): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_sensorgroup.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_sensorgroup.py index 4768f4d721..2558e98b03 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_sensorgroup.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_sensorgroup.py @@ -16,7 +16,6 
@@ class sensorgroupTestCase(base.FunctionalTest): def setUp(self): super(sensorgroupTestCase, self).setUp() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) def assertDeleted(self, fullPath): diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_backends.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_backends.py index f9314b7a61..a90bf5812e 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_backends.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_backends.py @@ -83,7 +83,6 @@ class StorageBackendTestCases(base.FunctionalTest): self.system = dbutils.create_test_isystem() self.cluster = dbutils.create_test_cluster(system_id=self.system.id) self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id) - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) self.lvg = dbutils.create_test_lvg(lvm_vg_name='cgts-vg', forihostid=self.host.id) @@ -840,7 +839,6 @@ class StorageFileTestCases(base.FunctionalTest): def setUp(self): super(StorageFileTestCases, self).setUp() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) def assertDeleted(self, fullPath): @@ -1086,7 +1084,6 @@ class StorageLvmTestCases(base.FunctionalTest): def setUp(self): super(StorageLvmTestCases, self).setUp() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) def assertDeleted(self, fullPath): @@ -1248,7 +1245,6 @@ class StorageCephTestCases(base.FunctionalTest): self.system = dbutils.create_test_isystem() self.cluster = dbutils.create_test_cluster(system_id=self.system.id) self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id) - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) self.lvg = dbutils.create_test_lvg(lvm_vg_name='cgts-vg', forihostid=self.host.id) @@ -1403,7 +1399,6 @@ class StorageCephRookTestCases(base.FunctionalTest): self.system = dbutils.create_test_isystem() self.cluster = dbutils.create_test_cluster(system_id=self.system.id) self.tier = dbutils.create_test_storage_tier(forclusterid=self.cluster.id) - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) # Patch management network for ceph diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py index 0ce69cbfa2..df1d475dc8 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/api/test_storage_tier.py @@ -57,7 +57,6 @@ class StorageTierIndependentTCs(base.FunctionalTest): self.set_is_initial_config_patcher.return_value = True self.system = dbutils.create_test_isystem() self.cluster = dbutils.create_test_cluster(system_id=self.system.id, name='ceph_cluster') - self.load = dbutils.create_test_load() self.host = dbutils.create_test_ihost(forisystemid=self.system.id) def tearDown(self): @@ -560,7 +559,6 @@ class StorageTierDependentTCs(base.FunctionalTest): self.context = context.get_admin_context() self.dbapi = dbapi.get_instance() self.system = dbutils.create_test_isystem() - self.load = dbutils.create_test_load() self.host_index = -1 self.mon_index = -1 diff --git 
a/sysinv/sysinv/sysinv/sysinv/tests/common/test_usm_service.py b/sysinv/sysinv/sysinv/sysinv/tests/common/test_usm_service.py index f847897e45..1adae8fcde 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/common/test_usm_service.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/common/test_usm_service.py @@ -26,7 +26,6 @@ class TestUSMService(TestCase): "2.0") mock_get_software_upgrade.return_value = usm_deploy mock_dbapi = mock.Mock() - mock_dbapi.software_upgrade_get_one.return_value = None result = get_platform_upgrade(mock_dbapi) diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_ceph.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_ceph.py index 0c0680b912..ca7d9b6517 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_ceph.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_ceph.py @@ -52,7 +52,6 @@ class UpdateCephCluster(base.DbTestCase): self.context = context.get_admin_context() self.dbapi = dbapi.get_instance() self.system = utils.create_test_isystem() - self.load = utils.create_test_load() self.host_index = -1 self.mock_fix_crushmap = self.fix_crushmap_patcher.start() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py index ed9ca92e23..59303f4ee1 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/conductor/test_manager.py @@ -27,15 +27,12 @@ import json import mock import os.path import netaddr -import tempfile import uuid import threading from time import sleep from cryptography import x509 from cryptography.hazmat.backends import default_backend -from shutil import copy as shutil_copy -from shutil import rmtree from fm_api import constants as fm_constants from oslo_context import context @@ -48,10 +45,7 @@ from sysinv.common import kubernetes from sysinv.common import utils as cutils from sysinv.common import usm_service from sysinv.conductor import manager -from sysinv.db.sqlalchemy.api import Connection from sysinv.db import api as dbapi -from sysinv.loads.loads import LoadImport -from sysinv.objects.load import Load from sysinv.puppet import common as puppet_common from sysinv.tests.db import utils as dbutils from sysinv import objects @@ -377,7 +371,6 @@ class ManagerTestCase(base.DbTestCase): self.context = context.get_admin_context() self.dbapi = dbapi.get_instance() self.system = utils.create_test_isystem() - self.load = utils.create_test_load() self.dnsmasq_hosts_file = '/tmp/dnsmasq.hosts' # Mock the ceph operator @@ -5921,504 +5914,6 @@ class ManagerTestCase(base.DbTestCase): mock_update_cached_app_bundles_set.assert_called_once() -@mock.patch('sysinv.conductor.manager.verify_files', lambda x, y: True) -@mock.patch('sysinv.conductor.manager.cutils.ISO', mock.MagicMock()) -class ManagerStartLoadImportTest(base.BaseHostTestCase): - def setUp(self): - super(ManagerStartLoadImportTest, self).setUp() - - # Set up objects for testing - self.service = manager.ConductorManager('test-host', 'test-topic') - self.service.dbapi = dbapi.get_instance() - self.context = context.get_admin_context() - - self.tmp_dir = tempfile.mkdtemp(dir='/tmp') - - patch_mkdtemp = mock.patch('tempfile.mkdtemp') - mock_mkdtemp = patch_mkdtemp.start() - mock_mkdtemp.return_value = self.tmp_dir - self.addCleanup(patch_mkdtemp.stop) - - self.upgrades_path = '%s/upgrades' % self.tmp_dir - os.makedirs(self.upgrades_path, exist_ok=True) - - self.metadata = os.path.join( - os.path.dirname(__file__), "data", "metadata.xml" - ) - 
shutil_copy(self.metadata, self.upgrades_path) - - self.iso = os.path.join( - os.path.dirname(__file__), "data", "bootimage.iso" - ) - self.sig = os.path.join( - os.path.dirname(__file__), "data", "bootimage.sig" - ) - - load_update = mock.patch.object(Connection, 'load_update') - self.mock_load_update = load_update.start() - self.mock_load_update.return_value = mock.MagicMock() - self.addCleanup(load_update.stop) - - def test_start_import_load(self): - result = self.service.start_import_load( - self.context, - path_to_iso=self.iso, - path_to_sig=self.sig, - ) - - self.assertIsInstance(result, Load) - self.assertEqual(result.state, constants.IMPORTING_LOAD_STATE) - - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - def test_start_import_load_same_version(self, mock_get_active_load): - mock_get_active_load.return_value.software_version = '0.1' - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - ) - - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - def test_start_import_load_invalid_from_version(self, mock_get_active_load): - mock_get_active_load.return_value.software_version = '0.2' - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - ) - - @mock.patch.object(Connection, 'load_get_list') - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - def test_start_import_load_active(self, mock_get_active_load, mock_load_get_list): - mock_get_active_load.return_value.software_version = '0.1' - - load = utils.create_test_load(**{"software_version": "0.1"}) - mock_load_get_list.return_value = [load] - - result = self.service.start_import_load( - self.context, - path_to_iso=self.iso, - path_to_sig=self.sig, - import_type=constants.ACTIVE_LOAD_IMPORT, - ) - - self.assertIsInstance(result, Load) - self.assertEqual(result.state, constants.ACTIVE_LOAD_STATE) - - def test_start_import_load_active_invalid_version(self): - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - import_type=constants.ACTIVE_LOAD_IMPORT, - ) - - @mock.patch.object(Connection, 'load_get_list') - def test_start_import_load_active_load_not_found(self, mock_load_get_list): - load = utils.create_test_load(**{"software_version": "0.1"}) - mock_load_get_list.side_effect = [[load], []] - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - import_type=constants.ACTIVE_LOAD_IMPORT, - ) - - @mock.patch('os.path.exists', mock.MagicMock()) - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - @mock.patch('sysinv.conductor.manager.ConductorManager._get_committed_patches_from_iso') - def test_start_import_load_inactive(self, mock__get_committed_patches_from_iso, mock_get_active_load): - mock_get_active_load.return_value.software_version = '0.2' - mock_get_active_load.return_value.uuid = "11111111-1111-1111-1111-111111111111" - mock_get_active_load.return_value.id = '1' - mock_get_active_load.return_value.compatible_version = "" - mock_get_active_load.return_value.required_patches = "" - mock__get_committed_patches_from_iso.return_value = ["PATCH_0001"] - - loading_metadata = open(self.metadata, 'r').read() - current_metadata = ''' - \n0.2\n - \n\n0.1\nPATCH_0001 - \n\n\n - ''' - - mock_files = [ - mock.mock_open(read_data=loading_metadata).return_value, - 
mock.mock_open(read_data=current_metadata).return_value, - ] - mock_open = mock.mock_open() - mock_open.side_effect = mock_files - - with mock.patch('builtins.open', mock_open): - result = self.service.start_import_load( - self.context, - path_to_iso=self.iso, - path_to_sig=self.sig, - import_type=constants.INACTIVE_LOAD_IMPORT, - ) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'compatible_version': '0.1', 'required_patches': 'PATCH_0001'}, - ) - - self.assertIsInstance(result, Load) - self.assertEqual(result.state, constants.IMPORTING_LOAD_STATE) - - @mock.patch('sysinv.conductor.manager.open') - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - def test_start_import_load_inactive_incompatible_version(self, mock_get_active_load, mock_open): - mock_get_active_load.return_value.software_version = '0.3' - mock_get_active_load.return_value.uuid = "22222222-2222-2222-2222-222222222222" - mock_get_active_load.return_value.id = '1' - mock_get_active_load.return_value.compatible_version = "" - mock_get_active_load.return_value.required_patches = "" - - current_metadata = b''' - \n0.3\n - \n\n0.2\nPATCH_0001 - \n\n\n - ''' - - mock_files = [ - mock.mock_open(read_data=current_metadata).return_value, - ] - mock_open.side_effect = mock_files - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - path_to_iso=self.iso, - path_to_sig=self.sig, - import_type=constants.INACTIVE_LOAD_IMPORT, - ) - - def test_get_patch_id(self): - import tempfile - patches = {"PATCH_0001-metadata.xml": "PATCH_0001", - "PATCH_0002-metadata.xml": "PATCH_0002", } - - patch_ids = [] - with tempfile.TemporaryDirectory() as tempdir: - for fn, content in patches.items(): - filename = os.path.join(tempdir, fn) - with open(filename, 'w') as f: - f.write(content) - patch_id = self.service._get_patch_id(filename) - if patch_id: - patch_ids.append(patch_id) - self.assertEqual(patch_ids, ["PATCH_0001", "PATCH_0002"]) - - @mock.patch('os.path.exists', mock.MagicMock()) - # @mock.patch('sysinv.conductor.manager.open') - @mock.patch('sysinv.conductor.manager.cutils.get_active_load') - @mock.patch('sysinv.conductor.manager.ConductorManager._get_committed_patches_from_iso') - def test_start_import_load_inactive_invalid_patch(self, mock__get_committed_patches_from_iso, mock_get_active_load): - mock_get_active_load.return_value.software_version = '0.3' - mock_get_active_load.return_value.uuid = "f0905590-9c02-445a-87c7-568cba08c997" - mock_get_active_load.return_value.id = 1 - mock_get_active_load.return_value.compatible_version = "" - mock_get_active_load.return_value.required_patches = "" - mock__get_committed_patches_from_iso.return_value = ["PATCH_0001"] - - loading_metadata = open(self.metadata, 'r').read() - current_metadata = b''' - \n0.2\n - \n\n0.1\nPATCH_0002 - \n\n\n - ''' - - mock_files = [ - mock.mock_open(read_data=loading_metadata).return_value, - mock.mock_open(read_data=current_metadata).return_value, - ] - mock_open = mock.mock_open() - mock_open.side_effect = mock_files - - # load can be import, the restriction of required_patches only applies - # when upgrade starts - with mock.patch('builtins.open', mock_open): - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - path_to_iso=self.iso, - path_to_sig=self.sig, - import_type=constants.INACTIVE_LOAD_IMPORT, - ) - - def test_start_import_load_invalid_path(self): - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - 
self.context, - 'invalid/path/bootimage.iso', - 'invalid/path/bootimage.sig', - ) - - def test_start_import_load_invalid_files(self): - with mock.patch('sysinv.conductor.manager.verify_files', lambda x, y: False): - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - ) - - def test_start_import_load_without_metadata(self): - rmtree(self.upgrades_path, ignore_errors=True) - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - ) - - def test_start_import_load_invalid_metadata(self): - iso = os.path.join( - os.path.dirname(__file__), "data", "bootimage.iso" - ) - shutil_copy(iso, self.upgrades_path) - os.rename( - '%s/bootimage.iso' % self.upgrades_path, - '%s/metadata.xml' % self.upgrades_path, - ) - - self.assertRaises( - exception.SysinvException, - self.service.start_import_load, - self.context, - self.iso, - self.sig, - ) - - -@mock.patch('sysinv.conductor.manager.subprocess', mock.MagicMock()) -@mock.patch('sysinv.conductor.manager.cutils.ISO', mock.MagicMock()) -class ManagerLoadImportTest(base.BaseHostTestCase): - def setUp(self): - super(ManagerLoadImportTest, self).setUp() - - # Set up objects for testing - self.service = manager.ConductorManager('test-host', 'test-topic') - self.service.dbapi = dbapi.get_instance() - self.context = context.get_admin_context() - - self.iso = os.path.join( - os.path.dirname(__file__), "data", "bootimage.iso" - ) - - self.load = utils.create_test_load( - **{"software_version": "0.1"} - ) - - load_update = mock.patch.object(Connection, 'load_update') - self.mock_load_update = load_update.start() - self.mock_load_update.return_value = mock.MagicMock() - self.addCleanup(load_update.stop) - - extract_files = mock.patch.object(LoadImport, 'extract_files') - self.mock_extract_files = extract_files.start() - self.mock_extract_files.return_value = mock.MagicMock() - self.addCleanup(extract_files.stop) - - def test_import_load(self): - result = self.service.import_load( - self.context, - path_to_iso=self.iso, - new_load=self.load, - ) - - self.assertTrue(result) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'state': constants.IMPORTED_LOAD_STATE}, - ) - - @mock.patch('sysinv.conductor.manager.os.chmod', mock.Mock()) - @mock.patch('sysinv.conductor.manager.os.makedirs', mock.Mock()) - def test_import_load_inactive(self): - with mock.patch('builtins.open', mock.mock_open()): - result = self.service.import_load( - self.context, - path_to_iso=self.iso, - new_load=self.load, - import_type=constants.INACTIVE_LOAD_IMPORT, - ) - - self.assertTrue(result) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'state': constants.INACTIVE_LOAD_STATE}, - ) - - @mock.patch('sysinv.conductor.manager.os.chmod', mock.Mock()) - @mock.patch('sysinv.conductor.manager.os.makedirs', mock.Mock()) - def test_import_load_inactive_failed_extract_files(self): - self.mock_extract_files.side_effect = exception.SysinvException() - - with mock.patch('builtins.open', mock.mock_open()): - self.assertRaises( - exception.SysinvException, - self.service.import_load, - self.context, - path_to_iso=self.iso, - new_load=self.load, - import_type=constants.INACTIVE_LOAD_IMPORT, - ) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'state': constants.ERROR_LOAD_STATE}, - ) - - def test_import_load_empty_new_load(self): - self.assertRaises( - exception.SysinvException, - self.service.import_load, - self.context, - 
path_to_iso=self.iso, - new_load=None, - ) - - self.mock_load_update.assert_not_called() - - def test_import_load_invalid_iso_path(self): - self.assertRaises( - exception.SysinvException, - self.service.import_load, - self.context, - path_to_iso='invalid', - new_load=self.load, - ) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'state': constants.ERROR_LOAD_STATE}, - ) - - def test_import_load_load_update_failed(self): - self.mock_load_update.side_effect = exception.SysinvException() - - self.assertRaises( - exception.SysinvException, - self.service.import_load, - self.context, - path_to_iso=self.iso, - new_load=self.load, - ) - - self.mock_load_update.assert_called_once_with( - mock.ANY, - {'state': constants.IMPORTED_LOAD_STATE}, - ) - - -@mock.patch('sysinv.conductor.manager.os.path.isfile', mock.MagicMock()) -@mock.patch('sysinv.conductor.manager.subprocess.check_call', mock.MagicMock()) -class ManagerLoadDeleteTest(base.BaseHostTestCase): - def setUp(self): - super(ManagerLoadDeleteTest, self).setUp() - - self.context = context.get_admin_context() - self.service = manager.ConductorManager('test-host', 'test-topic') - self.service.dbapi = dbapi.get_instance() - - self.load = utils.create_test_load( - **{ - 'software_version': '0.1', - 'state': constants.INACTIVE_LOAD_STATE, - } - ) - - ihost = utils.create_test_ihost() - - controller_hostname = mock.patch.object( - cutils, - 'get_mate_controller_hostname', - lambda: ihost.hostname, - ) - self.mock_controller_hostname = controller_hostname.start() - self.addCleanup(controller_hostname.stop) - - rpcapi_delete_load = mock.patch.object( - agent_rpcapi.AgentAPI, - 'delete_load', - mock.MagicMock(), - ) - self.mocked_rpcapi_delete_load = rpcapi_delete_load.start() - self.addCleanup(rpcapi_delete_load.stop) - - def tearDown(self): - super(ManagerLoadDeleteTest, self).tearDown() - - def test_load_delete(self): - self.service.delete_load( - self.context, - self.load.id, - ) - - self.mocked_rpcapi_delete_load.assert_called_once() - - def test_load_delete_run_again(self): - utils.update_test_load( - self.load.id, - **{'state': constants.DELETING_LOAD_STATE}, - ) - - self.service.delete_load( - self.context, - self.load.id, - ) - - self.mocked_rpcapi_delete_load.assert_called_once() - - @mock.patch.object(cutils, 'get_mate_controller_hostname', lambda: '') - def test_load_delete_meta_controller_not_configured(self): - self.service.delete_load( - self.context, - self.load.id, - ) - - loads = self.dbapi.load_get_list() - - self.assertEqual(1, len(loads)) - - self.mocked_rpcapi_delete_load.assert_not_called() - - def test_load_delete_invalid_state(self): - utils.update_test_load( - self.load.id, - **{'state': constants.IMPORTING_LOAD_STATE}, - ) - - self.assertRaises( - exception.SysinvException, - self.service.delete_load, - self.context, - self.load.id, - ) - - self.mocked_rpcapi_delete_load.assert_not_called() - - class ManagerTestCaseInternal(base.BaseHostTestCase): def setUp(self): super(ManagerTestCaseInternal, self).setUp() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/base.py b/sysinv/sysinv/sysinv/sysinv/tests/db/base.py index 7f83ab577d..f496032940 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/base.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/base.py @@ -259,7 +259,6 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): def _create_test_common(self): self._create_test_system() - self._create_test_load() self._create_test_drbd() self._create_test_remotelogging() self._create_test_user() @@ -277,9 +276,6 @@ class 
BaseSystemTestCase(BaseIPv4Mixin, DbTestCase): system_type=self.system_type, system_mode=self.system_mode) - def _create_test_load(self): - self.load = dbutils.create_test_load() - def _create_test_drbd(self): self.drbd = dbutils.create_test_drbd( forisystemid=self.system.id) @@ -889,18 +885,3 @@ class OpenstackTestCase(AppTestCase): with open(os.path.join(os.getcwd(), "sysinv", "tests", "puppet", "fake_hieradata.yaml")) as fake_data: self.fake_hieradata = fake_data.read() - - -class PlatformUpgradeTestCase(OpenstackTestCase): - - def _create_platform_upgrade(self): - self.upgrade = dbutils.create_test_upgrade( - state=constants.UPGRADE_STARTING - ) - - def setUp(self): - super(PlatformUpgradeTestCase, self).setUp() - self._create_platform_upgrade() - - def tearDown(self): - super(PlatformUpgradeTestCase, self).tearDown() diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py b/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py index c4f4adce87..f88a75b5be 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/sqlalchemy/test_migrations.py @@ -1253,8 +1253,7 @@ class TestMigrations(BaseMigrationTestCase, WalkVersionsMixin): host_upgrades = db_utils.get_table(engine, 'host_upgrade') host_upgrades_col = { 'id': 'Integer', 'uuid': 'String', 'deleted_at': 'DateTime', - 'created_at': 'DateTime', 'updated_at': 'DateTime', 'forihostid': 'Integer', - 'software_load': 'Integer', 'target_load': 'Integer', + 'created_at': 'DateTime', 'updated_at': 'DateTime', 'forihostid': 'Integer' } for col, coltype in host_upgrades_col.items(): self.assertTrue(isinstance(host_upgrades.c[col].type, diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/test_sysinv.py b/sysinv/sysinv/sysinv/sysinv/tests/db/test_sysinv.py index a61b7b3dfc..a0832d3ef9 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/test_sysinv.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/test_sysinv.py @@ -23,7 +23,6 @@ class DbNodeTestCase(base.DbTestCase): super(DbNodeTestCase, self).setUp() self.dbapi = dbapi.get_instance() self.system = utils.create_test_isystem() - self.load = utils.create_test_load() def _create_test_ihost(self, **kwargs): # ensure the system ID for proper association diff --git a/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py b/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py index f079e37a4f..b3cf09c3f4 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/db/utils.py @@ -245,50 +245,6 @@ def update_test_isystem(system_dict): return dbapi.isystem_update(system_dict['uuid'], system_dict) -def get_test_load(**kw): - load = { - "software_version": kw.get("software_version", SW_VERSION), - "compatible_version": kw.get("compatible_version", "N/A"), - "required_patches": "N/A", - "state": kw.get("state", constants.ACTIVE_LOAD_STATE), - } - return load - - -def create_test_load(**kw): - load = get_test_load(**kw) - dbapi = db_api.get_instance() - return dbapi.load_create(load) - - -def get_test_load_values(**kw): - values = {} - - for key, value in kw.items(): - values[key] = value - - return values - - -def update_test_load(load_id, **kw): - values = get_test_load_values(**kw) - dbapi = db_api.get_instance() - return dbapi.load_update(load_id, values) - - -def get_test_upgrade(**kw): - upgrade = {'from_load': kw.get('from_load', 1), - 'to_load': kw.get('to_load', 2), - 'state': kw.get('state', constants.UPGRADE_STARTING)} - return upgrade - - -def create_test_upgrade(**kw): - upgrade = 
get_test_upgrade(**kw) - dbapi = db_api.get_instance() - return dbapi.software_upgrade_create(upgrade) - - def post_get_test_kube_upgrade(**kw): upgrade = get_test_kube_upgrade(**kw) del upgrade['id'] diff --git a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py index 7894b5d174..c3dd342de4 100644 --- a/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py +++ b/sysinv/sysinv/sysinv/sysinv/tests/puppet/test_puppet.py @@ -140,8 +140,7 @@ class PlatformCephBackendAIODuplexHostTestCase(PuppetOperatorTestSuiteMixin, # ============= Openstack environment tests ============== class PlatformUpgradeOpenstackAIODuplexHostTestCase(PuppetOperatorTestSuiteMixin, - dbbase.BaseCephStorageBackendMixin, - dbbase.PlatformUpgradeTestCase): + dbbase.BaseCephStorageBackendMixin): @mock.patch('sysinv.common.usm_service.is_usm_authapi_ready', lambda: True) @mock.patch('sysinv.common.usm_service.get_platform_upgrade')
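With the loads, software_upgrade and host_upgrade tables gone, the surviving tests obtain upgrade state from USM; sysinv.common.usm_service is the seam they mock (is_usm_authapi_ready and get_platform_upgrade, as in the decorators above). A minimal sketch of that replacement pattern follows, assuming the returned deploy object exposes from_release, to_release and state; only the two function calls are confirmed by this patch, the attribute names are assumptions:

    # Sketch only: query upgrade state through USM instead of the removed
    # software_upgrade table. get_platform_upgrade(dbapi) mirrors the call
    # exercised in test_usm_service.py; attribute names are assumptions.
    from sysinv.common import usm_service
    from sysinv.db import api as db_api

    dbapi = db_api.get_instance()
    if usm_service.is_usm_authapi_ready():  # gate on USM auth API availability
        deploy = usm_service.get_platform_upgrade(dbapi)
        if deploy is not None:
            # e.g. from_release='24.09', to_release='25.03' (hypothetical values)
            print(deploy.from_release, deploy.to_release, deploy.state)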