Yoshiro Watanabe a0b2a6cbaf Change repository for k8s, cri-o
The legacy k8s repository was retired on March 26, 2024 [1].
The cri-o project followed k8s's lead and moved its builds to a new
repository [2].

This patch changes the location of the k8s and cri-o packages to be
installed, for Ubuntu-based deployments only. It also changes the value
of the apiVersion parameter in the kubeadm configuration, because the
new repository can also install k8s 1.27.x and later, which no longer
support the v1beta2 and earlier APIs.
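For example, the kubeadm configuration generated by this plugin now
starts with:

    apiVersion: kubeadm.k8s.io/v1beta3
    kind: ClusterConfiguration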

The versions of the packages to be installed can be specified using the
K8S_VERSION and CRIO_VERSION variables. The default values of
K8S_VERSION and CRIO_VERSION have also been changed, and the tacker
project FT (functional tests) has been confirmed to work with the
changed versions.
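For example, the versions can be pinned in a devstack local.conf
(illustrative values; only the K8S_VERSION and CRIO_VERSION variables
themselves come from this change):

    [[local|localrc]]
    K8S_VERSION="1.30.5"
    CRIO_VERSION="1.30.5"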

[1] https://kubernetes.io/blog/2023/08/31/legacy-package-repository-deprecation/
[2] https://kubernetes.io/blog/2023/10/10/cri-o-community-package-infrastructure/

Change-Id: I0ce9fd2bcb5d79ebad2cecafabf8c9f33b106647
2024-10-01 09:18:19 +00:00


#!/bin/bash
# Dependencies:
#
# - functions
# - ``STACK_USER`` must be defined
#
# stack.sh
# --------
# - install_k8s
#
# The following variables are assumed to be defined by certain functions:
#
# - ``http_proxy`` ``https_proxy`` ``no_proxy``
# Save trace setting
_XTRACE_DOCKER=$(set +o | grep xtrace)
set +o xtrace
# Defaults
# --------
K8S_TOKEN=${K8S_TOKEN:-""}
K8S_API_SERVER_IP=${K8S_API_SERVER_IP:-$SERVICE_HOST}
K8S_NODE_IP=${K8S_NODE_IP:-$HOST_IP}
K8S_API_SERVER_PORT=${K8S_API_SERVER_PORT:-6443}
K8S_POD_NETWORK_CIDR=${K8S_POD_NETWORK_CIDR:-10.244.0.0/16}
K8S_SERVICE_NETWORK_CIDR=${K8S_SERVICE_NETWORK_CIDR:-10.96.0.0/12}
K8S_VERSION=${K8S_VERSION:-"1.30.5"}
K8S_NETWORK_ADDON=${K8S_NETWORK_ADDON:-flannel}
# Functions
# ---------
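
# is_k8s_enabled() - Returns 0 (true) if any "k8s-" service appears in
# ENABLED_SERVICES, 1 otherwise.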
function is_k8s_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"k8s-" ]] && return 0
    return 1
}
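
# install_kubeadm() - On Ubuntu, add the pkgs.k8s.io apt repository matching
# the K8S_VERSION minor release, install pinned kubelet/kubeadm/kubectl
# packages, hold them, and point kubelet at K8S_NODE_IP via
# KUBELET_EXTRA_ARGS.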
function install_kubeadm {
    if is_ubuntu; then
        local stream="https://pkgs.k8s.io/core:/stable:/v${K8S_VERSION%.*}"
        local key_path="/etc/apt/keyrings/kubernetes-apt-keyring.gpg"
        apt_get install apt-transport-https ca-certificates curl gpg
        curl -fsSL "${stream}/deb/Release.key" | sudo gpg --dearmor -o "${key_path}"
        echo "deb [signed-by=${key_path}] ${stream}/deb/ /" | \
            sudo tee /etc/apt/sources.list.d/kubernetes.list
        REPOS_UPDATED=False apt_get_update
        kube_pkg_version=$(sudo apt-cache show kubeadm | grep "Version: $K8S_VERSION-" | awk '{ print $2 }' | head -n 1)
        apt_get install kubelet="${kube_pkg_version}" kubeadm="${kube_pkg_version}" kubectl="${kube_pkg_version}"
        sudo apt-mark hold kubelet kubeadm kubectl
        # NOTE(hongbin): This works around an issue where kubelet picks the
        # wrong IP address if the node has multiple network interfaces.
        # See https://github.com/kubernetes/kubeadm/issues/203
        echo "KUBELET_EXTRA_ARGS=--node-ip=$K8S_NODE_IP" | sudo tee -a /etc/default/kubelet
        sudo systemctl daemon-reload && sudo systemctl restart kubelet
    else
        (>&2 echo "WARNING: kubeadm installation is not supported in this distribution.")
    fi
}
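
# kubeadm_init() - Write a kubeadm v1beta3 configuration (external etcd,
# pod/service CIDRs, CRI socket and cgroup driver matching CONTAINER_ENGINE),
# run "kubeadm init", copy the admin kubeconfig to $HOME/.kube/config and,
# if K8S_NETWORK_ADDON is flannel, deploy the flannel network add-on.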
function kubeadm_init {
    local kubeadm_config_file
    kubeadm_config_file=$(mktemp)

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
        CRI_SOCKET="unix:///var/run/crio/crio.sock"
    else
        # docker is used
        CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
        CRI_SOCKET="/var/run/dockershim.sock"
    fi
    cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
imageRepository: "${KUBEADMIN_IMAGE_REPOSITORY}"
etcd:
  external:
    endpoints:
    - "http://${SERVICE_HOST}:${ETCD_PORT}"
networking:
  podSubnet: "${K8S_POD_NETWORK_CIDR}"
  serviceSubnet: "${K8S_SERVICE_NETWORK_CIDR}"
---
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- token: "${K8S_TOKEN}"
  ttl: 0s
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "${K8S_API_SERVER_IP}"
  bindPort: ${K8S_API_SERVER_PORT}
nodeRegistration:
  criSocket: "$CRI_SOCKET"
  kubeletExtraArgs:
    enable-server: "true"
  taints:
    []
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
    sudo kubeadm config images pull --image-repository=${KUBEADMIN_IMAGE_REPOSITORY}
    sudo kubeadm init --config $kubeadm_config_file --ignore-preflight-errors Swap

    local kube_config_file=$HOME/.kube/config
    sudo mkdir -p $(dirname ${kube_config_file})
    sudo cp /etc/kubernetes/admin.conf $kube_config_file
    safe_chown $STACK_USER:$STACK_USER $kube_config_file

    if [[ "$K8S_NETWORK_ADDON" == "flannel" ]]; then
        kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
    fi
}
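
# kubeadm_join() - Write a kubeadm v1beta3 JoinConfiguration that uses the
# shared bootstrap token, then join this node to the control plane at
# K8S_API_SERVER_IP:K8S_API_SERVER_PORT.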
function kubeadm_join {
    local kubeadm_config_file
    kubeadm_config_file=$(mktemp)

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        CGROUP_DRIVER=$(iniget "/etc/crio/crio.conf" crio.runtime cgroup_manager)
        CRI_SOCKET="unix:///var/run/crio/crio.sock"
    else
        # docker is used
        CGROUP_DRIVER=$(docker info -f '{{.CgroupDriver}}')
        CRI_SOCKET="/var/run/dockershim.sock"
    fi
    cat <<EOF | tee $kubeadm_config_file >/dev/null
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: "${K8S_API_SERVER_IP}:${K8S_API_SERVER_PORT}"
    token: "${K8S_TOKEN}"
    unsafeSkipCAVerification: true
  tlsBootstrapToken: "${K8S_TOKEN}"
nodeRegistration:
  criSocket: "$CRI_SOCKET"
  kubeletExtraArgs:
    enable-server: "true"
  taints:
    []
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false
address: "0.0.0.0"
enableServer: true
cgroupDriver: $CGROUP_DRIVER
EOF
    sudo kubeadm join --config $kubeadm_config_file --ignore-preflight-errors Swap
}
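
# start_collect_logs() - Wait for the core kube-system pods to become Ready,
# then stream their logs through run_process.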
function start_collect_logs {
    wait_for_kube_service 180 component=kube-controller-manager
    wait_for_kube_service 60 component=kube-apiserver
    wait_for_kube_service 30 component=kube-scheduler
    wait_for_kube_service 30 k8s-app=kube-proxy

    run_process kube-controller-manager "/usr/bin/kubectl logs -n kube-system -f -l component=kube-controller-manager"
    run_process kube-apiserver "/usr/bin/kubectl logs -n kube-system -f -l component=kube-apiserver"
    run_process kube-scheduler "/usr/bin/kubectl logs -n kube-system -f -l component=kube-scheduler"
    run_process kube-proxy "/usr/bin/kubectl logs -n kube-system -f -l k8s-app=kube-proxy"
}
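
# wait_for_kube_service() - Wait up to $1 seconds for a kube-system pod
# matching the label selector $2 to become Ready; on timeout, return non-zero
# and dump the matching pods.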
function wait_for_kube_service {
    local timeout=$1
    local selector=$2
    local rval=0

    time_start "wait_for_service"
    timeout $timeout bash -x <<EOF || rval=$?
NAME=""
while [[ "\$NAME" == "" ]]; do
    sleep 1
    NAME=\$(kubectl wait --for=condition=Ready pod -n kube-system -l $selector -o name)
done
EOF
    time_stop "wait_for_service"

    # Figure out what's happening on platforms where this doesn't work
    if [[ "$rval" != 0 ]]; then
        echo "Didn't find kube service after $timeout seconds"
        kubectl get pods -n kube-system -l $selector
    fi
    return $rval
}
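
# kubeadm_reset() - Revert the changes made by "kubeadm init" or "kubeadm join".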
function kubeadm_reset {
    sudo kubeadm reset --force
}
# Restore xtrace
$_XTRACE_DOCKER