From 14c94f85de3be58b26bdbf69ce59bfd8587a0fd1 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Tue, 14 Apr 2015 21:49:16 -0400 Subject: [PATCH] devtest_overcloud.sh: split out image building This patch removes the majority (everything except for the user image) of image building from devtest_overcloud.sh and provides a hook to use devtest_overcloud_images.sh or a new build-images script. The motivation for this split out was to decrease the complexity of the devtest_overcloud.sh script while also increasing the flexibility of our devtest_overcloud image building process. For example: Some configuration tools (tripleo-image-elements) require that each role have very specific sets of elements to enable or disable functionality. Other configuration tools (puppet) allow images to be built in a variety of ways. You could use a very minimal base image and install packages at deployment time. Or alternately you could use images where packages get pre-installed based on the role. Or you could use just one image for everything (all the OpenStack packages get pre-installed). This is an initial step towards making devtest_overcloud.sh more flexible and configurable with regards to image building. 
Change-Id: Idccd180813498401898f10b502106585b1f18eb3 --- doc/source/index.rst | 1 + scripts/devtest_overcloud.sh | 186 ++++++++++------------------ scripts/devtest_overcloud_images.sh | 124 +++++++++++++++++++ 3 files changed, 187 insertions(+), 124 deletions(-) create mode 100755 scripts/devtest_overcloud_images.sh diff --git a/doc/source/index.rst b/doc/source/index.rst index f57742bf..52b450ee 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -29,6 +29,7 @@ Detailed notes devtest_seed devtest_undercloud devtest_overcloud + devtest_overcloud_images devtest_end Further Information diff --git a/scripts/devtest_overcloud.sh b/scripts/devtest_overcloud.sh index 868e320f..f15c720b 100755 --- a/scripts/devtest_overcloud.sh +++ b/scripts/devtest_overcloud.sh @@ -9,6 +9,7 @@ SCRIPT_HOME=$(dirname $0) BUILD_ONLY= DEBUG_LOGGING= HEAT_ENV= +DISK_IMAGES_CONFIG=${OVERCLOUD_DISK_IMAGES_CONFIG:-''} COMPUTE_FLAVOR="baremetal" CONTROL_FLAVOR="baremetal" BLOCKSTORAGE_FLAVOR="baremetal" @@ -66,6 +67,7 @@ while true ; do shift 1 ;; --heat-env) HEAT_ENV="$2"; shift 2;; + --disk-images-config) DISK_IMAGES_CONFIG="$2"; shift 2;; --compute-flavor) COMPUTE_FLAVOR="$2"; shift 2;; --control-flavor) CONTROL_FLAVOR="$2"; shift 2;; --block-storage-flavor) BLOCKSTORAGE_FLAVOR="$2"; shift 2;; @@ -77,9 +79,44 @@ while true ; do done set -x -if [ -z "$BUILD_ONLY" ]; then - OS_PASSWORD=${OS_PASSWORD:?"OS_PASSWORD is not set. Undercloud credentials are required"} + +### --include +## devtest_overcloud +## ================= + +## #. Build images. There are two helper scripts which can be +## used to build images. The first method uses environment +## variables to create a specific image for each overcloud +## role. This method works best if you are using tripleo-image-elements +## for configuration (which requires per role image customization). +## See :doc:`devtest_overcloud_images` for documentation. +## This method is currently the default. 
+ + +## Another option is to make use of the build-images script which +## dynamically creates a set of images using a YAML (or JSON) config +## file (see the build-images script for details and the expected config +## file format). This method is typically preferred when using +## tripleo-puppet-elements (Puppet) for configuration which +## allows the contents and number of images used to deploy an +## overcloud to be more flexibly defined. Example: + +## build-images -d -c $DISK_IMAGES_CONFIG + +### --end +USE_CACHE=${USE_CACHE:-0} +if [ -n "$DISK_IMAGES_CONFIG" ]; then + USE_CACHE=$USE_CACHE build-images -d -c $DISK_IMAGES_CONFIG +else + USE_CACHE=$USE_CACHE devtest_overcloud_images.sh fi +if [ -n "$BUILD_ONLY" ]; then + echo "--build-only is deprecated. Please use devtest_overcloud_images.sh instead." + exit 0 +fi + + +OS_PASSWORD=${OS_PASSWORD:?"OS_PASSWORD is not set. Undercloud credentials are required"} # Parameters for tripleo-cd - see the tripleo-cd element. # NOTE(rpodolyaka): retain backwards compatibility by accepting both positional @@ -102,24 +139,8 @@ SSLBASE=${11:-''} OVERCLOUD_SSL_CERT=${SSLBASE:+$(<$SSLBASE.crt)} OVERCLOUD_SSL_KEY=${SSLBASE:+$(<$SSLBASE.key)} PUBLIC_API_URL=${12:-''} -SSL_ELEMENT=${SSLBASE:+openstack-ssl} -USE_CACHE=${USE_CACHE:-0} -DIB_COMMON_ELEMENTS=${DIB_COMMON_ELEMENTS:-'stackuser'} -OVERCLOUD_CONTROL_DIB_ELEMENTS=${OVERCLOUD_CONTROL_DIB_ELEMENTS:-'ntp hosts baremetal boot-stack cinder-api ceilometer-collector ceilometer-api ceilometer-agent-central ceilometer-agent-notification ceilometer-alarm-notifier ceilometer-alarm-evaluator os-collect-config horizon neutron-network-node dhcp-all-interfaces swift-proxy swift-storage keepalived haproxy sysctl'} -OVERCLOUD_CONTROL_DIB_EXTRA_ARGS=${OVERCLOUD_CONTROL_DIB_EXTRA_ARGS:-'rabbitmq-server cinder-tgt'} -OVERCLOUD_COMPUTE_DIB_ELEMENTS=${OVERCLOUD_COMPUTE_DIB_ELEMENTS:-'ntp hosts baremetal nova-compute nova-kvm neutron-openvswitch-agent os-collect-config dhcp-all-interfaces 
sysctl'} -OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS=${OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS:-''} - -OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS=${OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS:-'ntp hosts baremetal os-collect-config dhcp-all-interfaces sysctl'} -OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS=${OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS:-'cinder-tgt'} TE_DATAFILE=${TE_DATAFILE:?"TE_DATAFILE must be defined before calling this script!"} -if [ "${USE_MARIADB:-}" = 1 ] ; then - OVERCLOUD_CONTROL_DIB_EXTRA_ARGS="$OVERCLOUD_CONTROL_DIB_EXTRA_ARGS mariadb-rpm" - OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS="$OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS mariadb-dev-rpm" - OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS="$OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS mariadb-dev-rpm" -fi - # A client-side timeout in minutes for creating or updating the overcloud # Heat stack. OVERCLOUD_STACK_TIMEOUT=${OVERCLOUD_STACK_TIMEOUT:-60} @@ -129,124 +150,30 @@ OVERCLOUD_FIXED_RANGE_CIDR=${OVERCLOUD_FIXED_RANGE_CIDR:-"10.0.0.0/8"} OVERCLOUD_FIXED_RANGE_GATEWAY=${OVERCLOUD_FIXED_RANGE_GATEWAY:-"10.0.0.1"} OVERCLOUD_FIXED_RANGE_NAMESERVER=${OVERCLOUD_FIXED_RANGE_NAMESERVER:-"8.8.8.8"} -### --include -## devtest_overcloud -## ================= - -## #. Create your overcloud control plane image. - -## This is the image the undercloud -## will deploy to become the KVM (or QEMU, Xen, etc.) cloud control plane. - -## ``$OVERCLOUD_*_DIB_EXTRA_ARGS`` (CONTROL, COMPUTE, BLOCKSTORAGE) are -## meant to be used to pass additional build-time specific arguments to -## ``disk-image-create``. - -## ``$SSL_ELEMENT`` is used when building a cloud with SSL endpoints - it should be -## set to openstack-ssl in that situation. -## :: - NODE_ARCH=$(os-apply-config -m $TE_DATAFILE --key arch --type raw) -## #. Undercloud UI needs SNMPd for monitoring of every Overcloud node +### --include + +## #. Load the controller image into Glance (if present). 
## :: -if [ "$USE_UNDERCLOUD_UI" -ne 0 ] ; then - OVERCLOUD_CONTROL_DIB_EXTRA_ARGS="$OVERCLOUD_CONTROL_DIB_EXTRA_ARGS snmpd" - OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS="$OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS snmpd" - OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS="$OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS snmpd" -fi - -if [ ! -e $TRIPLEO_ROOT/overcloud-control.qcow2 -o "$USE_CACHE" == "0" ] ; then #nodocs - $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ - -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-control \ - $OVERCLOUD_CONTROL_DIB_ELEMENTS \ - $DIB_COMMON_ELEMENTS $OVERCLOUD_CONTROL_DIB_EXTRA_ARGS ${SSL_ELEMENT:-} 2>&1 | \ - tee $TRIPLEO_ROOT/dib-overcloud-control.log -fi #nodocs - -## #. Unless you are just building the images, load the image into Glance. -## :: - -if [ -z "$BUILD_ONLY" ]; then #nodocs +if [ -f "$TRIPLEO_ROOT/overcloud-control.qcow2" ]; then #nodocs OVERCLOUD_CONTROL_ID=$(load-image -d $TRIPLEO_ROOT/overcloud-control.qcow2) fi #nodocs -## #. Create your block storage image if some block storage nodes are to be used. This -## is the image the undercloud deploys for the additional cinder-volume instances. +## #. Load the block storage image into Glance (if present). ## :: -if [ $OVERCLOUD_BLOCKSTORAGESCALE -gt 0 ]; then - if [ ! -e $TRIPLEO_ROOT/overcloud-cinder-volume.qcow2 -o "$USE_CACHE" == "0" ]; then #nodocs - $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ - -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-cinder-volume \ - $OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS $DIB_COMMON_ELEMENTS \ - $OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS 2>&1 | \ - tee $TRIPLEO_ROOT/dib-overcloud-cinder-volume.log - fi #nodocs - -## #. And again load the image into Glance, unless you are just building the images. - -## :: - - if [ -z "$BUILD_ONLY" ]; then #nodocs - OVERCLOUD_BLOCKSTORAGE_ID=$(load-image -d $TRIPLEO_ROOT/overcloud-cinder-volume.qcow2) - fi #nodocs -fi - -## #. 
We enable the automatic relocation of L3 routers in Neutron by default, -## alternatively you can use the L3 agents high availability mechanism -## (only works with three or more controller nodes) or the distributed virtual -## routing mechanism (deploying routers on compute nodes). Set the environment -## variable ``OVERCLOUD_L3`` to ``relocate``, ``ha`` or ``dvr``. -## :: - -OVERCLOUD_L3=${OVERCLOUD_L3:-'relocate'} - -### --end - -OVERCLOUD_L3_HA=${OVERCLOUD_L3_HA:-'False'} -OVERCLOUD_DISTRIBUTED_ROUTERS=${OVERCLOUD_DISTRIBUTED_ROUTERS:-'False'} -case $OVERCLOUD_L3 in - "ha") - OVERCLOUD_L3_HA="True" - ;; - "dvr") - OVERCLOUD_DISTRIBUTED_ROUTERS="True" - ;; -esac - -# NOTE: If enabling distributed virtual routing for Neutron on the overcloud the -# compute node must have the ``neutron-router`` element installed. -if [ $OVERCLOUD_DISTRIBUTED_ROUTERS == "True" ]; then - ComputeNeutronRole=' neutron-router' - OVERCLOUD_COMPUTE_DIB_ELEMENTS=$OVERCLOUD_COMPUTE_DIB_ELEMENTS$ComputeNeutronRole -fi - -### --include -## #. Create your overcloud compute image. This is the image the undercloud -## deploys to host KVM (or QEMU, Xen, etc.) instances. -## :: - -if [ ! -e $TRIPLEO_ROOT/overcloud-compute.qcow2 -o "$USE_CACHE" == "0" ]; then #nodocs - $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ - -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-compute \ - $OVERCLOUD_COMPUTE_DIB_ELEMENTS $DIB_COMMON_ELEMENTS \ - $OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS 2>&1 | \ - tee $TRIPLEO_ROOT/dib-overcloud-compute.log +if [ -f "$TRIPLEO_ROOT/overcloud-cinder-volume.qcow2" ]; then #nodocs +OVERCLOUD_BLOCKSTORAGE_ID=$(load-image -d $TRIPLEO_ROOT/overcloud-cinder-volume.qcow2) fi #nodocs -## #. Load the image into Glance. If you are just building the images you are done. +## #. Load the compute image into Glance. 
(if present) ## :: -### --end - -if [ -n "$BUILD_ONLY" ]; then - exit 0 -fi - -### --include +if [ -f "$TRIPLEO_ROOT/overcloud-compute.qcow2" ]; then #nodocs OVERCLOUD_COMPUTE_ID=$(load-image -d $TRIPLEO_ROOT/overcloud-compute.qcow2) +fi #nodocs ## #. For running an overcloud in VM's. For Physical machines, set to kvm: ## :: @@ -435,11 +362,21 @@ if [ $OVERCLOUD_BLOCKSTORAGESCALE -gt 0 ]; then }' <<< $ENV_JSON) fi + +## #. We enable the automatic relocation of L3 routers in Neutron by default, +## alternatively you can use the L3 agents high availability mechanism +## (only works with three or more controller nodes) or the distributed virtual +## routing mechanism (deploying routers on compute nodes). Set the environment +## variable ``OVERCLOUD_L3`` to ``relocate``, ``ha`` or ``dvr``. +## :: + +OVERCLOUD_L3=${OVERCLOUD_L3:-'relocate'} + ## #. If enabling distributed virtual routing on the overcloud, some values need ## to be set so that Neutron DVR will work. ## :: -if [ $OVERCLOUD_DISTRIBUTED_ROUTERS == "True" ]; then +if [ ${OVERCLOUD_DISTRIBUTED_ROUTERS:-'False'} == "True" -o $OVERCLOUD_L3 == "dvr" ]; then ENV_JSON=$(jq '.parameters = {} + .parameters + { "NeutronDVR": "True", "NeutronTunnelTypes": "vxlan", @@ -449,7 +386,8 @@ if [ $OVERCLOUD_DISTRIBUTED_ROUTERS == "True" ]; then }' <<< $ENV_JSON) fi -if [ $OVERCLOUD_L3_HA == "True" ]; then + +if [ ${OVERCLOUD_L3_HA:-'False'} == "True" -o $OVERCLOUD_L3 == "ha" ]; then ENV_JSON=$(jq '.parameters = {} + .parameters + { "NeutronL3HA": "True", "NeutronAllowL3AgentFailover": "False", diff --git a/scripts/devtest_overcloud_images.sh b/scripts/devtest_overcloud_images.sh new file mode 100755 index 00000000..bbe86f44 --- /dev/null +++ b/scripts/devtest_overcloud_images.sh @@ -0,0 +1,124 @@ +#!/bin/bash + +set -eu +set -o pipefail +SCRIPT_NAME=$(basename $0) + +function show_options { + echo "Usage: $SCRIPT_NAME [options]" + echo + echo "Builds overcloud images using defined environment variables." 
+ echo + echo "Options:" + echo " -h -- this help" + echo " -c -- re-use existing source/images if they exist." + exit $1 +} + +TEMP=$(getopt -o c,h,help -n $SCRIPT_NAME -- "$@") +if [ $? != 0 ] ; then + echo "Terminating..." >&2; + exit 1; +fi + +# Note the quotes around `$TEMP': they are essential! +eval set -- "$TEMP" + +while true ; do + case "$1" in + -c) USE_CACHE=1; shift 1;; + -h | --help) show_options 0;; + --) shift ; break ;; + *) echo "Error: unsupported option $1." ; exit 1 ;; + esac +done + +USE_CACHE=${USE_CACHE:-0} +DIB_COMMON_ELEMENTS=${DIB_COMMON_ELEMENTS:-'stackuser'} +OVERCLOUD_CONTROL_DIB_ELEMENTS=${OVERCLOUD_CONTROL_DIB_ELEMENTS:-'ntp hosts baremetal boot-stack cinder-api ceilometer-collector ceilometer-api ceilometer-agent-central ceilometer-agent-notification ceilometer-alarm-notifier ceilometer-alarm-evaluator os-collect-config horizon neutron-network-node dhcp-all-interfaces swift-proxy swift-storage keepalived haproxy sysctl'} +OVERCLOUD_CONTROL_DIB_EXTRA_ARGS=${OVERCLOUD_CONTROL_DIB_EXTRA_ARGS:-'rabbitmq-server cinder-tgt'} +OVERCLOUD_COMPUTE_DIB_ELEMENTS=${OVERCLOUD_COMPUTE_DIB_ELEMENTS:-'ntp hosts baremetal nova-compute nova-kvm neutron-openvswitch-agent os-collect-config dhcp-all-interfaces sysctl'} +OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS=${OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS:-''} + +OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS=${OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS:-'ntp hosts baremetal os-collect-config dhcp-all-interfaces sysctl'} +OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS=${OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS:-'cinder-tgt'} +SSL_ELEMENT=${SSLBASE:+openstack-ssl} +TE_DATAFILE=${TE_DATAFILE:?"TE_DATAFILE must be defined before calling this script!"} + +if [ "${USE_MARIADB:-}" = 1 ] ; then + OVERCLOUD_CONTROL_DIB_EXTRA_ARGS="$OVERCLOUD_CONTROL_DIB_EXTRA_ARGS mariadb-rpm" + OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS="$OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS mariadb-dev-rpm" + OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS="$OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS mariadb-dev-rpm" 
+fi + +NODE_ARCH=$(os-apply-config -m $TE_DATAFILE --key arch --type raw) + +### --include +## devtest_overcloud_images +## ======================== +## Build images with environment variables. This script works best +## when using tripleo-image-elements for Overcloud configuration. + +## #. Undercloud UI needs SNMPd for monitoring of every Overcloud node +## :: + +if [ "$USE_UNDERCLOUD_UI" -ne 0 ] ; then + OVERCLOUD_CONTROL_DIB_EXTRA_ARGS="$OVERCLOUD_CONTROL_DIB_EXTRA_ARGS snmpd" + OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS="$OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS snmpd" + OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS="$OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS snmpd" +fi + +## #. Create your overcloud control plane image. + +## ``$OVERCLOUD_*_DIB_EXTRA_ARGS`` (CONTROL, COMPUTE, BLOCKSTORAGE) are +## meant to be used to pass additional build-time specific arguments to +## ``disk-image-create``. + +## ``$SSL_ELEMENT`` is used when building a cloud with SSL endpoints - it should be +## set to openstack-ssl in that situation. +## :: + +if [ ! -e $TRIPLEO_ROOT/overcloud-control.qcow2 -o "$USE_CACHE" == "0" ] ; then + $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ + -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-control \ + $OVERCLOUD_CONTROL_DIB_ELEMENTS \ + $DIB_COMMON_ELEMENTS $OVERCLOUD_CONTROL_DIB_EXTRA_ARGS ${SSL_ELEMENT:-} 2>&1 | \ + tee $TRIPLEO_ROOT/dib-overcloud-control.log +fi + +## #. Create your block storage image if some block storage nodes are to be used. This +## is the image the undercloud deploys for the additional cinder-volume nodes. +## :: + +if [ ${OVERCLOUD_BLOCKSTORAGESCALE:-0} -gt 0 ]; then + if [ ! 
-e $TRIPLEO_ROOT/overcloud-cinder-volume.qcow2 -o "$USE_CACHE" == "0" ]; then + $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ + -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-cinder-volume \ + $OVERCLOUD_BLOCKSTORAGE_DIB_ELEMENTS $DIB_COMMON_ELEMENTS \ + $OVERCLOUD_BLOCKSTORAGE_DIB_EXTRA_ARGS 2>&1 | \ + tee $TRIPLEO_ROOT/dib-overcloud-cinder-volume.log + fi +fi + +## If enabling distributed virtual routing for Neutron on the overcloud the compute node +## must have the ``neutron-router`` element installed. +## :: + +OVERCLOUD_DISTRIBUTED_ROUTERS=${OVERCLOUD_DISTRIBUTED_ROUTERS:-'False'} +OVERCLOUD_L3=${OVERCLOUD_L3:-'relocate'} +if [ $OVERCLOUD_DISTRIBUTED_ROUTERS == "True" -o $OVERCLOUD_L3 == "dvr" ]; then + OVERCLOUD_COMPUTE_DIB_ELEMENTS="$OVERCLOUD_COMPUTE_DIB_ELEMENTS neutron-router" +fi + +## #. Create your overcloud compute image. This is the image the undercloud +## deploys to host the overcloud Nova compute hypervisor components. +## :: + +if [ ! -e $TRIPLEO_ROOT/overcloud-compute.qcow2 -o "$USE_CACHE" == "0" ]; then + $TRIPLEO_ROOT/diskimage-builder/bin/disk-image-create $NODE_DIST \ + -a $NODE_ARCH -o $TRIPLEO_ROOT/overcloud-compute \ + $OVERCLOUD_COMPUTE_DIB_ELEMENTS $DIB_COMMON_ELEMENTS \ + $OVERCLOUD_COMPUTE_DIB_EXTRA_ARGS 2>&1 | \ + tee $TRIPLEO_ROOT/dib-overcloud-compute.log +fi +### --end