commit 85ecb712be (Davlet Panech)
build-tools: apt repo priority based on "Origin"
* build-docker-images/stx-debian/stx.preferences.part.in
* build-docker-images/build-base-image.sh
  This file sets base image priorities for apt repos that match certain
  properties. Formerly we used a higher priority for repos hosted by the
  build server, as opposed to debian.org or others. This doesn't work
  when using a Debian mirror hosted on the build server itself, since
  the hostnames of both repos are then equal.

  Solution: increase priority for repos whose "Release" file contains
  the field "Origin: $REPOMGR_ORIGIN" and make sure aptly adds that
  field to its Release file. The value comes from the environment and
  should be set by the build container.
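
  For illustration, the pin that the preferences part expresses looks
  roughly like this (the priority value is an example; the real template
  substitutes $REPOMGR_ORIGIN at image build time):

    Package: *
    Pin: release o=starlingx
    Pin-Priority: 900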

* stx/aptly_deb_usage.py:
  Add an "Origin" field to non-mirror publications; the value is taken
  from the REPOMGR_ORIGIN environment variable.
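
  Roughly the same effect expressed with the aptly CLI, shown only to
  illustrate the mechanism (the build drives aptly through its REST API
  via repo_manage; the repo and distribution names are examples):

    aptly publish repo -origin="$REPOMGR_ORIGIN" -distribution=bullseye deb-local-build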

* build-docker-images/stx-debian/Dockerfile.stable
  Improvements to package conflict resolution and docker FS
  caching-related issues:
  - Upgrade base packages to versions in managed repos before doing
    anything else
  - Install packages provided by upstream debian in a separate RUN
    command/docker FS layer
  - Make sure each "apt-get install" runs in its own RUN command and is
    preceded by "apt-get update", to avoid using stale metadata due to
    "docker build" FS layer caching (see the sketch below)

TESTS
======================
- Define REPOMGR_ORIGIN in the container environment
- Run downloader & build-pkgs & make sure the generated repos' Release
  files contain "Origin: starlingx"
- Build the base image & make sure its apt preferences contain the
  priority rule for "Origin: starlingx" (example checks below)

Story: 2010055
Task: 45729

Change-Id: Ibaafbfbeef408904d216265168daa466d90fc7f2
Signed-off-by: Davlet Panech <davlet.panech@windriver.com>
Date: 2022-07-05 20:39:40 -04:00


#!/usr/bin/python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2021-2022 Wind River Systems, Inc.
import argparse
import discovery
import getopt
import logging
import os
import re
import repo_manage
import shutil
import signal
import subprocess
import sys
import time
import utils
import yaml
STX_DEFAULT_DISTRO = discovery.STX_DEFAULT_DISTRO
ALL_LAYERS = discovery.get_all_layers(distro=STX_DEFAULT_DISTRO)
LAT_ROOT = '/localdisk/lat/'
REPO_ALL = 'deb-merge-all'
REPO_BINARY = 'deb-local-binary'
REPO_BUILD = 'deb-local-build'
DEB_CONFIG_DIR = 'stx-tools/debian-mirror-tools/config/'
PKG_LIST_DIR = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'), DEB_CONFIG_DIR)
img_pkgs = []
kernel_type = 'std'
stx_std_kernel = 'linux-image-5.10.0-6-amd64-unsigned'
stx_rt_kernel = 'linux-rt-image-5.10.0-6-rt-amd64-unsigned'
WAIT_TIME_BEFORE_CHECKING_LOG = 2
DEFAULT_TIME_WAIT_LOG = 15
logger = logging.getLogger('build-image')
utils.set_logger(logger)


def merge_local_repos(repomgr):
    logger.debug('Calls repo manager to create/update the snapshot %s which is merged from local repositories', REPO_ALL)
    # REPO_BUILD is higher priority than REPO_BINARY for repomgr to select package
    try:
        pubname = repomgr.merge(REPO_ALL, ','.join([REPO_BUILD, REPO_BINARY]))
    except Exception as e:
        logger.error(str(e))
        logger.error('Exception when repo_manager creates/updates snapshot %s', REPO_ALL)
        return False
    if pubname:
        logger.debug('repo manager successfully created/updated snapshot %s', REPO_ALL)
    else:
        logger.debug('repo manager failed to create/update snapshot %s', REPO_ALL)
        return False
    return True


def update_debootstrap_mirror(img_yaml):
    repomgr_url = os.environ.get('REPOMGR_DEPLOY_URL')
    if not repomgr_url:
        logger.error('REPOMGR_DEPLOY_URL is not set in the current environment')
        return False
    repo_all_url = '/'.join([repomgr_url, REPO_ALL])
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        if not yaml_doc['debootstrap-mirror']:
            logger.warning("There is no debootstrap-mirror in %s", img_yaml)
        else:
            mirror = yaml_doc['debootstrap-mirror']
            if mirror == REPO_ALL:
                yaml_doc['debootstrap-mirror'] = repo_all_url
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
        logger.debug('Updating %s, setting debootstrap_mirror to %s', img_yaml, repo_all_url)
        return True
    except IOError as e:
        logger.error(str(e))
        logger.debug('Failed to update %s, could not set debootstrap_mirror to %s', img_yaml, repo_all_url)
    return False


def update_ostree_osname(img_yaml):
    ostree_osname = os.environ.get('OSTREE_OSNAME')
    if ostree_osname is None:
        return False
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        yaml_doc['ostree']['ostree_osname'] = ostree_osname
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except IOError as e:
        logger.error(str(e))
        return False
    logger.debug(' '.join(['Update', img_yaml, 'to update the ostree_osname']))
    return True


def change_default_kernel(img_yaml, ktype):
    rt_kernel = std_kernel = None
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        multi_kernels = yaml_doc["multiple-kernels"].split(" ")
        default_kernel = yaml_doc["default-kernel"]
        if len(multi_kernels) == 1:
            return False
        for kernel in multi_kernels:
            if re.search("-rt-", kernel):
                rt_kernel = kernel
            else:
                std_kernel = kernel
        if ktype == "rt":
            if re.search("-rt-", default_kernel):
                return True
            elif rt_kernel is not None:
                yaml_doc["default-kernel"] = rt_kernel
            else:
                logger.error(f"No rt kernel is found in {multi_kernels}")
                return False
        elif ktype == "std":
            if not re.search("-rt-", default_kernel):
                return True
            elif std_kernel is not None:
                yaml_doc["default-kernel"] = std_kernel
            else:
                logger.error(f"No std kernel is found in {multi_kernels}")
                return False
        logger.debug(f'Set default kernel as {yaml_doc["default-kernel"]}')
        try:
            with open(img_yaml, 'w') as f:
                yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
        except IOError as e:
            logger.error(str(e))
            return False
    except IOError as e:
        logger.error(str(e))
        return False
    return True


def replace_in_yaml(dst_yaml, field, field_type, src_str, dst_str):
    logger.debug("Start to replace %s in field %s of yaml %s", src_str, field, dst_yaml)
    try:
        with open(dst_yaml) as f:
            main_doc = yaml.safe_load(f)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to open the yaml file %s", dst_yaml)
        return False
    else:
        if field_type == 'yaml_string':
            string_orig = main_doc[field]
            if not string_orig:
                logger.error("Failed to find the field %s", field)
                return False
            if not string_orig == src_str:
                logger.error("Found field %s, but the value %s does not match target %s", field, string_orig, src_str)
                return False
            main_doc[field] = dst_str
            logger.debug("Successfully updated the field %s with %s", field, dst_str)
        elif field_type == 'yaml_list':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                list_new.append(item.replace(src_str, dst_str))
            main_doc[field] = list_new
            logger.debug("Successfully updated the value %s of field %s with %s", src_str, field, dst_str)
        elif field_type == 'yaml_list_suffix':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                if src_str in item:
                    if '=' in item:
                        logger.error("Package version is defined, can't be appended with suffix %s", dst_str)
                        return False
                    list_new.append(item.strip() + dst_str)
                else:
                    list_new.append(item)
            main_doc[field] = list_new
            logger.debug("Successfully updated %s in field %s with %s suffix", src_str, field, dst_str)
    try:
        with open(dst_yaml, 'w') as f:
            yaml.safe_dump(main_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to write to %s", dst_yaml)
        return False
    logger.info("Successfully updated %s", dst_yaml)
    return True


def update_rt_kernel_in_main_yaml(main_yaml):
    return replace_in_yaml(main_yaml, 'rootfs-pre-scripts', 'yaml_list', stx_std_kernel, stx_rt_kernel)


def update_rt_kernel_in_initramfs_yaml(initramfs_yaml):
    ret = False
    # Updated the name of kernel module
    for layer in ALL_LAYERS:
        pkg_dirs = discovery.package_dir_list(distro=STX_DEFAULT_DISTRO, layer=layer, build_type='rt')
        if not pkg_dirs:
            continue
        for pkg_dir in pkg_dirs:
            pkg_name = discovery.package_dir_to_package_name(pkg_dir, STX_DEFAULT_DISTRO)
            if pkg_name and pkg_name != 'linux-rt':
                if replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list_suffix', pkg_name, '-rt'):
                    logger.debug("RT Initramfs: Updated %s with rt suffix", pkg_name)
                else:
                    logger.debug("RT Initramfs: Failed to update %s with rt suffix", pkg_name)
                    return ret
    ret = replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list', stx_std_kernel, stx_rt_kernel)
    return ret


def include_initramfs(img_yaml, ramfs_yaml_path):
    if not os.path.exists(img_yaml):
        logger.error("LAT yaml file %s does not exist", img_yaml)
        return False
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        yaml_doc['system'][0]['contains'][0] = ramfs_yaml_path
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to add %s to %s", ramfs_yaml_path, img_yaml)
        return False
    logger.debug("Successfully included %s in %s", ramfs_yaml_path, img_yaml)
    return True


def feed_lat_src_repos(img_yaml, repo_url):
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False
    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
    yaml_doc['package_feeds'].extend(repo_url)
    yaml_doc['package_feeds'] = list(set(yaml_doc['package_feeds']))
    yaml_doc['package_feeds'].sort()
    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    logger.debug(' '.join(['Update', img_yaml, 'to feed repos']))
    return True


def add_lat_packages(img_yaml, packages):
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False
    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
    yaml_doc['packages'].extend(packages)
    yaml_doc['packages'] = list(set(yaml_doc['packages']))
    yaml_doc['packages'].sort()
    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    logger.debug(' '.join(['Update', img_yaml, 'to add packages']))
    return True


def check_base_os_binaries(repomgr):
    base_bins_list = os.path.join(PKG_LIST_DIR,
                                  'debian/common/base-bullseye.lst')
    if not os.path.exists(base_bins_list):
        logger.error(' '.join(['Base OS packages list', base_bins_list,
                               'does not exist']))
        return False
    results = verify_pkgs_in_repo(repomgr, REPO_BINARY, base_bins_list)
    if results:
        logger.error("====OS binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====OS binaries missing end====\n")
        return False
    logger.info("====All OS binary packages are ready ====\n")
    return True


def check_stx_binaries(repomgr, btype='std'):
    stx_bins_list = ''.join([PKG_LIST_DIR, '/debian/distro/os-', btype,
                             '.lst'])
    if not os.path.exists(stx_bins_list):
        logger.warning(' '.join(['STX binary packages list', stx_bins_list,
                                 'does not exist']))
        # Assume no such list here means ok
        return True
    results = verify_pkgs_in_repo(repomgr, REPO_BINARY, stx_bins_list)
    if results:
        logger.error("====STX binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX binaries missing end====\n")
        return False
    logger.info("====All STX binary packages are ready ====\n")
    return True


def check_stx_patched(repomgr, btype='std'):
    stx_patched_list = ''.join([PKG_LIST_DIR, '/debian/distro/stx-', btype,
                                '.lst'])
    if not os.path.exists(stx_patched_list):
        logger.warning(' '.join(['STX patched packages list', stx_patched_list,
                                 'does not exist']))
        return False
    results = verify_pkgs_in_repo(repomgr, REPO_BUILD, stx_patched_list)
    if results:
        logger.error("====STX patched packages checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX patched packages missing end====\n")
        return False
    logger.info("====All STX patched packages are ready ====\n")
    return True


def verify_pkgs_in_repo(repomgr, repo_name, pkg_list_path):
    failed_pkgs = []
    with open(pkg_list_path, 'r') as flist:
        lines = list(line for line in (lpkg.strip() for lpkg in flist) if line)
    for pkg in lines:
        pkg = pkg.strip()
        if pkg.startswith('#'):
            continue
        pname_parts = pkg.split()
        name = pname_parts[0]
        if len(pname_parts) > 1:
            version = pname_parts[1]
            pkg_name = ''.join([name, '_', version])
            if repomgr.search_pkg(repo_name, name, version):
                img_pkgs.append(''.join([name, '=', version]))
                logger.debug(''.join(['Found package:name=', name,
                                      ' version=', version]))
            else:
                logger.debug(' '.join([pkg_name,
                                       'is missing in local binary repo']))
                failed_pkgs.append(pkg_name)
        else:
            if repomgr.search_pkg(repo_name, name, None, True):
                img_pkgs.append(name)
                logger.debug(''.join(['Found package with name:', name]))
            else:
                failed_pkgs.append(name)
    return failed_pkgs


def stop_latd():
    os.system("latc stop")
    time.sleep(2)
    cmd = 'latc status'
    try:
        status = subprocess.check_output(cmd, shell=True).decode()
    except Exception as e:
        logger.error(str(e))
    else:
        if status:
            if 'idle' in status:
                logger.info("Successfully stopped latd")
                return
    logger.info("Failed to stop latd, you may have to log in to pkgbuilder and kill it")


def user_signal_handler(signum, frame):
    stop_latd()
    sys.exit(1)


def user_register_signals():
    signal.signal(signal.SIGINT, user_signal_handler)
    signal.signal(signal.SIGHUP, user_signal_handler)
    signal.signal(signal.SIGTERM, user_signal_handler)
    signal.signal(signal.SIGPIPE, user_signal_handler)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="build-image helper")
    kernel_types = parser.add_mutually_exclusive_group()
    kernel_types.add_argument('--std', help="build standard image",
                              action='store_true')
    kernel_types.add_argument('--rt', help="build rt image",
                              action='store_true')
    args = parser.parse_args()

    if args.rt:
        kernel_type = 'rt'
    else:
        kernel_type = 'std'

    user_register_signals()

    rmg_logger = logging.getLogger('repo_manager')
    utils.set_logger(rmg_logger)
    repo_manager = repo_manage.RepoMgr('aptly', os.environ.get('REPOMGR_URL'),
                                       '/tmp/', os.environ.get('REPOMGR_ORIGIN'),
                                       rmg_logger)
    repo_manager.upload_pkg(REPO_BUILD, None)
    repo_manager.upload_pkg(REPO_BINARY, None)

    logger.info("\n")
    logger.info("=====Build Image start ......")
    logger.info("checking OS binary packages ......")
    base_bins_ready = check_base_os_binaries(repo_manager)
    logger.info("\nchecking STX binary packages ......")
    stx_bins_ready = check_stx_binaries(repo_manager, "std")
    logger.info("\nchecking STX patched packages ......")
    stx_patched_ready = check_stx_patched(repo_manager, "std")
    if not base_bins_ready or not stx_bins_ready or not stx_patched_ready:
        logger.error("Failed to prepare for the image build")
        sys.exit(1)

    base_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-bullseye.yaml')
    base_initramfs_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-initramfs-bullseye.yaml')
    os.environ["WORKSPACE_DIR"] = LAT_ROOT
    lat_root_type_dir = os.path.join(LAT_ROOT, kernel_type)
    lat_yaml = os.path.join(lat_root_type_dir, "lat.yaml")
    lat_initramfs_yaml = os.path.join(lat_root_type_dir, "lat-initramfs.yaml")

    for yaml_file in (base_yaml, base_initramfs_yaml):
        if not os.path.exists(yaml_file):
            logger.error(' '.join(['Base yaml file', yaml_file, 'does not exist']))
            sys.exit(1)
    if not os.path.exists(lat_root_type_dir):
        os.makedirs(lat_root_type_dir)
    try:
        shutil.copyfile(base_yaml, lat_yaml)
        shutil.copyfile(base_initramfs_yaml, lat_initramfs_yaml)
    except IOError as e:
        logger.error(str(e))
        logger.error('Failed to copy yaml files to %s/%s', LAT_ROOT, kernel_type)
        sys.exit(1)

    include_initramfs(lat_yaml, lat_initramfs_yaml)

    if merge_local_repos(repo_manager):
        if update_debootstrap_mirror(lat_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_yaml)
        if update_debootstrap_mirror(lat_initramfs_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_initramfs_yaml)

    binary_repo_url = ''.join(['deb ',
                               os.environ.get('REPOMGR_DEPLOY_URL'),
                               REPO_BINARY, ' bullseye main'])
    build_repo_url = ''.join(['deb ',
                              os.environ.get('REPOMGR_DEPLOY_URL'),
                              REPO_BUILD, ' bullseye main'])
    for yaml_file in (lat_yaml, lat_initramfs_yaml):
        if not feed_lat_src_repos(yaml_file, [binary_repo_url, build_repo_url]):
            logger.error(' '.join(['Failed to set local repos to', yaml_file]))
            sys.exit(1)
        else:
            logger.info(' '.join(['Successfully set local repos to', yaml_file]))

    update_ostree_osname(lat_yaml)

    if not change_default_kernel(lat_yaml, kernel_type):
        logger.error("Failed to change the default boot kernel")
        sys.exit(1)

    ret = 1
    if not add_lat_packages(lat_yaml, img_pkgs):
        logger.error("Failed to add packages into image YAML config")
        sys.exit(ret)

    os.system(' '.join(['latc --file=' + lat_yaml, '-t', kernel_type, 'build']))
    # Sleep here to wait for the log file to be created and fed by latd.
    # Note that latd does not write to the log right from its start.
    time.sleep(WAIT_TIME_BEFORE_CHECKING_LOG)
    lat_log = os.path.join(LAT_ROOT, kernel_type, "log/log.appsdk")
    time_to_wait = DEFAULT_TIME_WAIT_LOG
    time_counter = 0
    while not os.path.exists(lat_log):
        time.sleep(1)
        time_counter += 1
        if time_counter > time_to_wait:
            break
    if not os.path.exists(lat_log):
        logger.info('The wait for %s has timed out, please wait a moment,' % lat_log)
        logger.info('then run: tail -f %s to check the progress.' % lat_log)
        sys.exit(ret)
    else:
        log_printer = subprocess.Popen("tail -f " + lat_log,
                                       stdout=subprocess.PIPE, shell=True,
                                       universal_newlines=True)
        while log_printer.poll() is None:
            line = log_printer.stdout.readline()
            line = line.strip()
            if line:
                print(line)
                if "ERROR: " in line:
                    logger.info("Failed to build image, check the log %s", lat_log)
                    break
                if "DEBUG: Deploy ovmf.qcow2" in line:
                    logger.info("build-image successfully done, check the output in %s/%s", LAT_ROOT, kernel_type)
                    ret = 0
                    break

    # stop latd
    stop_latd()
    sys.exit(ret)