Disable ceph-pools-audit chart by default

Since most customers do not need this audit, the chart is now
disabled by default, which prevents an unnecessary periodic
CronJob from being triggered. Customers can still enable the
audit via the 'system helm-chart-attribute-modify' command if
desired, as shown below:

# List the chart status:
system helm-override-list platform-integ-apps -l

# Enable the audit:
system helm-chart-attribute-modify --enabled true \
platform-integ-apps ceph-pools-audit kube-system

# Disable the audit:
system helm-chart-attribute-modify --enabled false \
platform-integ-apps ceph-pools-audit kube-system

# Apply the change:
system application-apply platform-integ-apps

Additionally, the cephfs and rbd storage-init scripts have been
changed to set the 'min_size' of their respective pools.
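
A minimal sketch of the added step, mirroring the storage-init
templates in the patches further down (the cephfs template uses
POOL, the rbd one POOL_NAME; POOL_MIN_REPLICATION is the new
environment variable wired in from the storage class values):

    echo "- Setting the minimum number of replicas:"
    ceph osd pool set "${POOL}" min_size "${POOL_MIN_REPLICATION}"
    RETURN_CODE=$?
    if [ ${RETURN_CODE} -ne 0 ]; then
        echo "Error setting the minimum number of pool replicas (return code: ${RETURN_CODE})"
        exit ${RETURN_CODE}
    fi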

Finally, the ceph-pools-audit chart's CronJob was replaced by a
Deployment to avoid processing spikes every time the job was
executed, and the audit script was optimized to execute the
minimum number of ceph commands.
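
In outline, the audit now runs as a long-lived pod that wakes up
every AUDIT_INTERVAL seconds and fetches all pool attributes with
a single 'ceph osd pool ls detail -f json' call, instead of one
'ceph osd pool get' per pool and attribute. A condensed sketch of
the loop (simplified from the configmap-bin script in the diff
below; min_size is corrected the same way as size):

    TIMEOUT_CEPH_CLI=60
    while true; do
        sleep "${AUDIT_INTERVAL}"
        # one listing per cycle covers name, crush rule, size and min_size
        POOLS=$(timeout ${TIMEOUT_CEPH_CLI} ceph osd pool ls detail -f json) || continue
        for pool in $(echo "${POOLS}" | jq -r '.[] | tostring | @base64'); do
            pool_name=$(echo "${pool}" | base64 -di | jq -r '.pool_name')
            pool_size=$(echo "${pool}" | base64 -di | jq -r '.size')
            if [ "${pool_size}" != "${POOL_REPLICATION}" ]; then
                timeout ${TIMEOUT_CEPH_CLI} ceph osd pool set "${pool_name}" size "${POOL_REPLICATION}"
            fi
        done
    done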

NOTE: The chart is disabled by default for new uploads/applies
only (i.e. initial installs). If a version of the app is already
applied, the chart keeps whatever state is reported by the
'system helm-override-list' command.

PS: Patch 0017 has been removed and its change was made directly
in patch 0008, since the affected file is a template created by
us, not an upstream one.

Test Plan:
 PASS: On a fresh install, apply the app and verify that
       the chart is disabled.
 PASS: Check the chart status with
       'system helm-override-list platform-integ-apps -l',
       update the app and confirm the status remains
       the same.
 PASS: Change backend capabilities (replication, min_replication)
       and verify that the pools are updated accordingly
       (see the example below).
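       For example, the capabilities were changed with a command
       like the following (the backend name 'ceph-store' is the
       usual default and may differ):
         system storage-backend-modify ceph-store \
           replication=2 min_replication=1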

* Tests were performed on AIO-SX/AIO-DX with k8s v1.29

Story: 2011353
Task: 51825

Depends-On: https://review.opendev.org/c/starlingx/config/+/943688

Change-Id: Id86ee8902fbc860917725741f86097c196a894fd
Signed-off-by: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>


@ -7,4 +7,4 @@ apiVersion: v1
appVersion: "1.0"
description: Ceph RBD pool replication monitor chart
name: ceph-pools-audit
version: 0.3.0
version: 1.0.0


@ -1,6 +1,6 @@
{{/*
#
# Copyright (c) 2019-2022 Wind River Systems, Inc.
# Copyright (c) 2019-2022,2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -19,56 +19,94 @@ data:
{{/*
#
# Copyright (c) 2019 Wind River Systems, Inc.
# Copyright (c) 2019,2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check Pod logs for details."
exit 1
fi
TIMEOUT_CEPH_CLI=60
touch /etc/ceph/ceph.client.admin.keyring
while true
do
echo -e "\nWaiting ${AUDIT_INTERVAL} seconds to check pools...\n"
sleep "${AUDIT_INTERVAL}"
echo "RBD_POOL_CRUSH_RULE_NAME: ${RBD_POOL_CRUSH_RULE_NAME}"
if [ -z "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
echo "Error: No Ceph crush rule name specified"
exit 1
fi
ceph osd crush rule ls | grep -q "${RBD_POOL_CRUSH_RULE_NAME}"
if [ $? -ne 0 ]; then
echo "Error: Ceph crush rule ${RBD_POOL_CRUSH_RULE_NAME} not found, exit"
exit 1
fi
POOLS=( $(ceph osd pool ls) )
for pool in "${POOLS[@]}"; do
echo "Check for pool name: $pool"
pool_rule=$(ceph osd pool get $pool crush_rule | awk '{print $2}')
echo "Pool crush rule name: ${pool_rule}"
if [ "${pool_rule}" != "${RBD_POOL_CRUSH_RULE_NAME}" ]; then
timeout $TIMEOUT_CEPH_CLI ceph -s
if [ $? -ne 0 ]; then
echo "Error: Ceph cluster is not accessible, check ceph mon logs for details."
continue
fi
pool_size=$(ceph osd pool get $pool size | awk '{print $2}')
pool_min_size=$(ceph osd pool get $pool min_size | awk '{print $2}')
echo "===> pool_size: ${pool_size} pool_min_size: ${pool_min_size}"
if [ "${pool_size}" != "${RBD_POOL_REPLICATION}" ]; then
echo "Set size for $pool to ${RBD_POOL_REPLICATION}"
ceph osd pool set $pool size "${RBD_POOL_REPLICATION}"
echo "POOL_CRUSH_RULE_NAME: ${POOL_CRUSH_RULE_NAME}"
if [ -z "${POOL_CRUSH_RULE_NAME}" ]; then
echo "Error: No ceph crush rule name specified"
exit
fi
if [ "${pool_min_size}" != "${RBD_POOL_MIN_REPLICATION}" ]; then
echo "Set min_size for $pool to ${RBD_POOL_MIN_REPLICATION}"
ceph osd pool set $pool min_size "${RBD_POOL_MIN_REPLICATION}"
CRUSH_RULE_DUMP=$(timeout $TIMEOUT_CEPH_CLI ceph osd crush rule dump)
if [ $? -ne 0 ]; then
echo "Error: Failed to get crush rules."
continue
fi
for crush_rule in $(echo $CRUSH_RULE_DUMP | jq -r '.[] | tostring | @base64'); do
_jq() {
echo "${crush_rule}" | base64 -di | jq -r "${1}"
}
rule_name=$(_jq '.rule_name')
if [ "${rule_name}" == "${POOL_CRUSH_RULE_NAME}" ]; then
POOL_CRUSH_RULE_ID=$(_jq '.rule_id')
echo "POOL_CRUSH_RULE_ID: ${POOL_CRUSH_RULE_ID}"
break
fi
done
if [ -z "${POOL_CRUSH_RULE_ID}" ]; then
echo "Error: Ceph crush rule ${POOL_CRUSH_RULE_NAME} not found."
continue
fi
POOLS=$(timeout $TIMEOUT_CEPH_CLI ceph osd pool ls detail -f json)
if [ $? -ne 0 ]; then
echo "Error: Failed to list pools."
continue
fi
for pool in $(echo $POOLS | jq -r '.[] | tostring | @base64'); do
_jq() {
echo "${pool}" | base64 -di | jq -r "${1}"
}
pool_name=$(_jq '.pool_name')
echo -e "\nChecking the pool: ${pool_name}"
crush_rule_id=$(_jq '.crush_rule')
if [ "${crush_rule_id}" != "${POOL_CRUSH_RULE_ID}" ]; then
echo "The pool is not associated with the crush rule, skipping."
continue
fi
pool_size=$(_jq '.size')
pool_min_size=$(_jq '.min_size')
echo "===> pool_size: ${pool_size} pool_min_size: ${pool_min_size}"
if [ "${pool_size}" != "${POOL_REPLICATION}" ]; then
echo "Set size for $pool_name to ${POOL_REPLICATION}"
timeout $TIMEOUT_CEPH_CLI ceph osd pool set $pool_name size "${POOL_REPLICATION}"
if [ $? -ne 0 ]; then
echo "Error: Failed to set size for pool $pool_name."
fi
fi
if [ "${pool_min_size}" != "${POOL_MIN_REPLICATION}" ]; then
echo "Set min_size for $pool_name to ${POOL_MIN_REPLICATION}"
timeout $TIMEOUT_CEPH_CLI ceph osd pool set $pool_name min_size "${POOL_MIN_REPLICATION}"
if [ $? -ne 0 ]; then
echo "Error: Failed to set minimum size for pool $pool_name."
fi
fi
done
done
{{- end }}


@ -0,0 +1,95 @@
{{/*
#
# Copyright (c) 2020-2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.job_ceph_pools_audit }}
{{- $envAll := . }}
{{- $serviceAccountName := "ceph-pools-audit" }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $serviceAccountName }}
namespace: {{ $envAll.Release.namespace }}
imagePullSecrets:
- name: default-registry-key
---
#
# The CronJob makes sure all the Ceph pools have the right replication,
# as present in the attributes of the Ceph backends.
# This is needed for:
# - charts that don't manage pool configuration
# - pools created dynamically by services that may not have the current
# pool configuration uploaded (ex: swift)
# - when replication is changed and we don't want to reinstall all the
# charts that created Ceph pools
#
apiVersion: apps/v1
kind: Deployment
metadata:
name: ceph-pools-audit
spec:
selector:
matchLabels:
app: ceph-pools-audit
template:
metadata:
name: "{{$envAll.Release.Name}}"
namespace: {{ $envAll.Release.namespace }}
labels:
app: ceph-pools-audit
spec:
serviceAccountName: {{ $serviceAccountName }}
nodeSelector:
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | toString | quote }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 12 }}
{{- end }}
volumes:
- name: ceph-pools-bin
configMap:
name: ceph-pools-bin
defaultMode: 0555
- name: etcceph
emptyDir: {}
- name: ceph-etc
configMap:
name: {{ $envAll.Values.ceph_client.configmap }}
defaultMode: 0444
containers:
{{- range $tierConfig := $envAll.Values.conf.ceph.storage_tiers }}
- name: ceph-pools-audit-{{- $tierConfig.name }}
image: {{ $envAll.Values.images.tags.ceph_config_helper | quote }}
env:
- name: POOL_REPLICATION
value: {{ $tierConfig.replication | quote }}
- name: POOL_MIN_REPLICATION
value: {{ $tierConfig.min_replication | quote }}
- name: POOL_CRUSH_RULE_NAME
value: {{ $tierConfig.crush_rule_name | quote }}
- name: AUDIT_INTERVAL
value: {{ $envAll.Values.manifests.audit_interval | quote }}
command:
- /tmp/ceph-pools-audit.sh
volumeMounts:
- name: ceph-pools-bin
mountPath: /tmp/ceph-pools-audit.sh
subPath: ceph-pools-audit.sh
readOnly: true
- name: etcceph
mountPath: /etc/ceph
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
{{- end }}
{{- end }}


@ -1,101 +0,0 @@
{{/*
#
# Copyright (c) 2020-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
*/}}
{{- if .Values.manifests.job_ceph_pools_audit }}
{{- $envAll := . }}
{{- $serviceAccountName := "ceph-pools-audit" }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $serviceAccountName }}
namespace: {{ $envAll.Release.namespace }}
imagePullSecrets:
- name: default-registry-key
---
#
# The CronJob makes sure all the Ceph pools have the right replication,
# as present in the attributes of the Ceph backends.
# This is needed for:
# - charts that don't manage pool configuration
# - pools created dynamically by services that may not have the current
# pool configuration uploaded (ex: swift)
# - when replication is changed and we don't want to reinstall all the
# charts that created Ceph pools
#
apiVersion: batch/v1
kind: CronJob
metadata:
name: ceph-pools-audit
spec:
schedule: {{ .Values.jobs.job_ceph_pools_audit.cron | quote }}
successfulJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.success }}
failedJobsHistoryLimit: {{ .Values.jobs.job_ceph_pools_audit.history.failed }}
concurrencyPolicy: Forbid
startingDeadlineSeconds: {{ .Values.jobs.job_ceph_pools_audit.startingDeadlineSeconds }}
jobTemplate:
metadata:
name: "{{$envAll.Release.Name}}"
namespace: {{ $envAll.Release.namespace }}
labels:
app: ceph-pools-audit
spec:
template:
metadata:
labels:
app: ceph-pools-audit
spec:
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value | toString | quote }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 12 }}
{{- end }}
volumes:
- name: ceph-pools-bin
configMap:
name: ceph-pools-bin
defaultMode: 0555
- name: etcceph
emptyDir: {}
- name: ceph-etc
configMap:
name: {{ $envAll.Values.ceph_client.configmap }}
defaultMode: 0444
containers:
{{- range $tierConfig := $envAll.Values.conf.ceph.storage_tiers }}
- name: ceph-pools-audit-{{- $tierConfig.name }}
image: {{ $envAll.Values.images.tags.ceph_config_helper | quote }}
env:
- name: RBD_POOL_REPLICATION
value: {{ $tierConfig.replication | quote }}
- name: RBD_POOL_MIN_REPLICATION
value: {{ $tierConfig.min_replication | quote }}
- name: RBD_POOL_CRUSH_RULE_NAME
value: {{ $tierConfig.crush_rule_name | quote }}
command:
- /tmp/ceph-pools-audit.sh
volumeMounts:
- name: ceph-pools-bin
mountPath: /tmp/ceph-pools-audit.sh
subPath: ceph-pools-audit.sh
readOnly: true
- name: etcceph
mountPath: /etc/ceph
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
subPath: ceph.conf
readOnly: true
{{- end }}
{{- end }}


@ -49,14 +49,6 @@ dependencies:
- endpoint: internal
service: local_image_registry
jobs:
job_ceph_pools_audit:
cron: "*/5 * * * *"
startingDeadlineSeconds: 200
history:
success: 3
failed: 1
resources: {}
nodeSelector: { node-role.kubernetes.io/control-plane: "" }
@ -69,3 +61,4 @@ manifests:
job_ceph_pools_audit: true
configmap_bin: true
configmap_ceph_conf: true
audit_interval: 300 # in seconds


@ -1,3 +1,9 @@
ceph-pools-audit-helm (1.0-0) unstable; urgency=medium
* Change CronJob to Deployment and improves audit script performance.
-- Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com> Fri, 6 Mar 2025 10:57:10 +0000
ceph-pools-audit-helm (0.3-0) unstable; urgency=medium
* Update of ceph-config-helper image.


@ -1,9 +1,9 @@
---
debname: ceph-pools-audit-helm
debver: 0.3-0
debver: 1.0-0
src_path: ceph-pools-audit-helm
revision:
dist: $STX_DIST
GITREVCOUNT:
SRC_DIR: ${MY_REPO}/stx/platform-armada-app/helm-charts/custom/ceph-pools-audit-helm/ceph-pools-audit-helm/ceph-pools-audit
BASE_SRCREV: e8d5daf4af6e74420258769a49cdb5b5b5e896a4
BASE_SRCREV: b9b1d8a515f539be66d50341ca1b4b015c5507cd


@ -1,18 +1,18 @@
From fcd8a2e8f92526af3243724ae53389c5d0d23bb5 Mon Sep 17 00:00:00 2001
From: Gabriel Przybysz Gonçalves Júnior <gabriel.przybyszgoncalvesjunior@windriver.com>
Date: Wed, 4 Dec 2024 09:15:12 -0300
From 56abdf56024a46a2ea09a72a11a85f3bcdc61fd5 Mon Sep 17 00:00:00 2001
From: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
Date: Thu, 27 Feb 2025 20:16:53 -0300
Subject: [PATCH] ceph-csi-cephfs: add default fields to values.yaml
This patch adds fields and values necessary for the proper use
of the cephfs chart in the system.
Signed-off-by: Gabriel Przybysz Gonçalves Júnior <gabriel.przybyszgoncalvesjunior@windriver.com>
Signed-off-by: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
---
charts/ceph-csi-cephfs/values.yaml | 39 ++++++++++++++++++++++++++++--
1 file changed, 37 insertions(+), 2 deletions(-)
charts/ceph-csi-cephfs/values.yaml | 41 ++++++++++++++++++++++++++++--
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml
index caf29f4..b340892 100644
index f30e2b7..160b7da 100644
--- a/charts/ceph-csi-cephfs/values.yaml
+++ b/charts/ceph-csi-cephfs/values.yaml
@@ -290,10 +290,10 @@ provisioner:
@ -28,7 +28,7 @@ index caf29f4..b340892 100644
# Define which node labels to use as CRUSH location.
# This should correspond to the values set in the CRUSH map.
# NOTE: the value here serves as an example
@@ -321,6 +321,24 @@ storageClass:
@@ -375,6 +375,26 @@ storageClass:
# mountOptions:
# - discard
@ -38,6 +38,8 @@ index caf29f4..b340892 100644
+ userSecretName: ceph-secret-kube
+ # Pool replication
+ replication: 1
+ # Pool min replication
+ min_replication: 1
+ # Pool crush rule name
+ crush_rule_name: storage_tier_ruleset
+ # Pool chunk size / PG_NUM
@ -53,7 +55,7 @@ index caf29f4..b340892 100644
secret:
# Specifies whether the secret should be created
create: false
@@ -431,6 +431,23 @@ externallyManagedConfigmap: false
@@ -431,6 +451,23 @@ externallyManagedConfigmap: false
cephConfConfigMapName: ceph-config
# Name of the configmap used for encryption kms configuration
kmsConfigMapName: ceph-csi-encryption-kms-config
@ -78,4 +80,4 @@ index caf29f4..b340892 100644
# radosNamespaceCephFS: csi
# Unique ID distinguishing this instance of Ceph CSI among other instances,
--
2.25.1
2.34.1


@ -1,21 +1,20 @@
From f380234343cbb6cb65ad54574099ca5adbca9cb4 Mon Sep 17 00:00:00 2001
From: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
Date: Thu, 4 Jan 2024 15:02:15 -0300
From b0f74d567f5004058564f24261eed00207af023c Mon Sep 17 00:00:00 2001
From: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
Date: Thu, 27 Feb 2025 20:21:59 -0300
Subject: [PATCH] ceph-csi-cephfs: add storage-init.yaml
Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
Signed-off-by: Gabriel de Araújo Cabral <gabriel.cabral@windriver.com>
Signed-off-by: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
---
.../templates/storage-init.yaml | 386 ++++++++++++++++++
1 file changed, 386 insertions(+)
.../templates/storage-init.yaml | 396 ++++++++++++++++++
1 file changed, 396 insertions(+)
create mode 100644 charts/ceph-csi-cephfs/templates/storage-init.yaml
diff --git a/charts/ceph-csi-cephfs/templates/storage-init.yaml b/charts/ceph-csi-cephfs/templates/storage-init.yaml
new file mode 100644
index 0000000..3ffa153
index 0000000..9538cf5
--- /dev/null
+++ b/charts/ceph-csi-cephfs/templates/storage-init.yaml
@@ -0,0 +1,386 @@
@@ -0,0 +1,396 @@
+{{/*
+#
+# Copyright (c) 2020-2024 Wind River Systems, Inc.
@ -156,6 +155,14 @@ index 0000000..3ffa153
+ exit ${RETURN_CODE}
+ fi
+
+ echo "- Setting the minimum number of replicas:"
+ ceph osd pool set "${POOL}" min_size "${POOL_MIN_REPLICATION}"
+ RETURN_CODE=$?
+ if [ ${RETURN_CODE} -ne 0 ]; then
+ echo "Error setting the minimum number of pool replicas (return code: ${RETURN_CODE})"
+ exit ${RETURN_CODE}
+ fi
+
+ echo "- Assigning crush rule:"
+ ceph osd pool set "${POOL}" crush_rule "${POOL_CRUSH_RULE_NAME}"
+ RETURN_CODE=$?
@ -385,6 +392,8 @@ index 0000000..3ffa153
+ value: {{ $sc.chunk_size | quote }}
+ - name: POOL_REPLICATION
+ value: {{ $sc.replication | quote }}
+ - name: POOL_MIN_REPLICATION
+ value: {{ $sc.min_replication | quote }}
+ - name: POOL_CRUSH_RULE_NAME
+ value: {{ $sc.crush_rule_name | quote }}
+ - name: CSI_DRIVER_NAME


@ -1,21 +1,21 @@
From 1a94b0f0f7bcc17afc77b278f1cf767a346a22f4 Mon Sep 17 00:00:00 2001
From: Gabriel Przybysz Gonçalves Júnior <gabriel.przybyszgoncalvesjunior@windriver.com>
Date: Wed, 4 Dec 2024 09:22:02 -0300
From d7571be3b5312c170ffd6a18f71ef6072064bf46 Mon Sep 17 00:00:00 2001
From: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
Date: Thu, 27 Feb 2025 20:12:39 -0300
Subject: [PATCH] ceph-csi-rbd: add default fields to values.yaml
This patch adds fields and values necessary for the proper use
of the rbd chart in the system.
Signed-off-by: Gabriel Przybysz Gonçalves Júnior <gabriel.przybyszgoncalvesjunior@windriver.com>
Signed-off-by: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
---
charts/ceph-csi-rbd/values.yaml | 40 +++++++++++++++++++++++++++++++--
1 file changed, 38 insertions(+), 2 deletions(-)
charts/ceph-csi-rbd/values.yaml | 42 +++++++++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/charts/ceph-csi-rbd/values.yaml b/charts/ceph-csi-rbd/values.yaml
index 28a7ba6..28c2854 100644
index 91d4ad2..e835624 100644
--- a/charts/ceph-csi-rbd/values.yaml
+++ b/charts/ceph-csi-rbd/values.yaml
@@ -308,10 +308,10 @@ topology:
@@ -337,10 +337,10 @@ topology:
# - topology.kubernetes.io/region
# - topology.kubernetes.io/zone
@ -28,7 +28,7 @@ index 28a7ba6..28c2854 100644
# Define which node labels to use as CRUSH location.
# This should correspond to the values set in the CRUSH map.
# NOTE: the value here serves as an example
@@ -495,6 +495,22 @@ storageClass:
@@ -526,6 +526,24 @@ storageClass:
# mountOptions:
# - discard
@ -38,6 +38,8 @@ index 28a7ba6..28c2854 100644
+ userSecretName: ceph-secret-kube
+ # Pool replication
+ replication: 1
+ # Pool min replication
+ min_replication: 1
+ # Pool crush rule name
+ crush_rule_name: storage_tier_ruleset
+ # Pool chunk size / PG_NUM
@ -51,7 +53,7 @@ index 28a7ba6..28c2854 100644
# Mount the host /etc/selinux inside pods to support
# selinux-enabled filesystems
selinuxMount: true
@@ -553,6 +569,26 @@ externallyManagedConfigmap: false
@@ -582,6 +600,26 @@ externallyManagedConfigmap: false
cephConfConfigMapName: ceph-config
# Name of the configmap used for encryption kms configuration
kmsConfigMapName: ceph-csi-encryption-kms-config
@ -79,4 +81,4 @@ index 28a7ba6..28c2854 100644
# when sharing Ceph clusters across CSI instances for provisioning
# instanceID: default
--
2.25.1
2.34.1


@ -1,23 +1,20 @@
From 080cad7da551b36ee22139c558770cf6520a0d55 Mon Sep 17 00:00:00 2001
From: Gabriel de Araújo Cabral <gabriel.cabral@windriver.com>
Date: Wed, 6 Dec 2023 14:17:19 -0300
From e3f7a30bdc7a46657dabb64dc3914c20220d183f Mon Sep 17 00:00:00 2001
From: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
Date: Thu, 27 Feb 2025 20:23:37 -0300
Subject: [PATCH] ceph-csi-rbd: add storage-init.yaml
Signed-off-by: Hediberto Cavalcante da Silva <hediberto.cavalcantedasilva@windriver.com>
Signed-off-by: Felipe Sanches Zanoni <Felipe.SanchesZanoni@windriver.com>
Signed-off-by: Erickson Silva de Oliveira <Erickson.SilvadeOliveira@windriver.com>
Signed-off-by: Gabriel de Araújo Cabral <gabriel.cabral@windriver.com>
---
.../ceph-csi-rbd/templates/storage-init.yaml | 394 ++++++++++++++++++
1 file changed, 394 insertions(+)
.../ceph-csi-rbd/templates/storage-init.yaml | 404 ++++++++++++++++++
1 file changed, 404 insertions(+)
create mode 100644 charts/ceph-csi-rbd/templates/storage-init.yaml
diff --git a/charts/ceph-csi-rbd/templates/storage-init.yaml b/charts/ceph-csi-rbd/templates/storage-init.yaml
new file mode 100644
index 000000000..6abd8ec3f
index 0000000..cd91bfe
--- /dev/null
+++ b/charts/ceph-csi-rbd/templates/storage-init.yaml
@@ -0,0 +1,394 @@
@@ -0,0 +1,404 @@
+{{/*
+#
+# Copyright (c) 2020-2023 Wind River Systems, Inc.
@ -218,6 +215,14 @@ index 000000000..6abd8ec3f
+ exit ${RETURN_CODE}
+ fi
+
+ echo "- Setting the minimum number of replicas:"
+ ceph osd pool set "${POOL_NAME}" min_size "${POOL_MIN_REPLICATION}"
+ RETURN_CODE=$?
+ if [ ${RETURN_CODE} -ne 0 ]; then
+ echo "Error setting the minimum number of pool replicas (return code: ${RETURN_CODE})"
+ exit ${RETURN_CODE}
+ fi
+
+ echo "- Assigning crush rule:"
+ ceph osd pool set "${POOL_NAME}" crush_rule "${POOL_CRUSH_RULE_NAME}"
+ RETURN_CODE=$?
@ -364,7 +369,7 @@ index 000000000..6abd8ec3f
+ labels:
+ heritage: {{ .Release.Service | quote }}
+ release: {{ .Release.Name | quote }}
+ chart: "{{ .Chart.Name }}-{{- .Chart.Version }}"
+ chart: {{ include "ceph-csi-rbd.chart" . }}
+ spec:
+ serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
+ restartPolicy: OnFailure
@ -394,6 +399,8 @@ index 000000000..6abd8ec3f
+ value: {{ $sc.pool_name }}
+ - name: POOL_REPLICATION
+ value: {{ $sc.replication | quote }}
+ - name: POOL_MIN_REPLICATION
+ value: {{ $sc.min_replication | quote }}
+ - name: POOL_CRUSH_RULE_NAME
+ value: {{ $sc.crush_rule_name | quote }}
+ - name: POOL_CHUNK_SIZE
@ -413,4 +420,4 @@ index 000000000..6abd8ec3f
+{{ toYaml . | indent 8 }}
+{{- end }}
--
2.25.1
2.34.1


@ -1,30 +0,0 @@
From: Robert Church <robert.church@windriver.com>
Date: Sun, 31 Dec 2023 12:49:27 -0600
Subject: Fix chart label for rbd-storage-init Job
Update the chart label on the rbd-storage-init Job to align with all the
other chart labels used throughout the chart.
This one-off implementation resulted in an invalid
label (ceph-csi-rbd-3.9.0+STX.14) based on the new chart auto-versioning
scheme. Using the common include mechanism provides a valid
label (ceph-csi-rbd-3.9.0_STX.14).
Signed-off-by: Robert Church <robert.church@windriver.com>
---
charts/ceph-csi-rbd/templates/storage-init.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/charts/ceph-csi-rbd/templates/storage-init.yaml b/charts/ceph-csi-rbd/templates/storage-init.yaml
index 6abd8ec..0b36c58 100644
--- a/charts/ceph-csi-rbd/templates/storage-init.yaml
+++ b/charts/ceph-csi-rbd/templates/storage-init.yaml
@@ -344,7 +344,7 @@ spec:
labels:
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
- chart: "{{ .Chart.Name }}-{{- .Chart.Version }}"
+ chart: {{ include "ceph-csi-rbd.chart" . }}
spec:
serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
restartPolicy: OnFailure


@ -14,8 +14,7 @@
0014-ceph-csi-cephfs-add-snapshotclass.patch
0015-ceph-csi-rbd-add-snapshotclass.patch
0016-ceph-csi-cephfs-remove-mountoptions-debug-on-pvs.patch
0017-Fix-chart-label-for-rbd-storage-init-Job.patch
0018-ceph-csi-cephfs-add-conditional-for-csi-snapshotter-creation.patch
0019-ceph-csi-rbd-add-conditional-for-csi-snapshotter-creation.patch
0020-ceph-csi-cephfs-add-conditional-to-enable-leader-election.patch
0021-ceph-csi-rbd-add-conditional-to-enable-leader-election.patch
0017-ceph-csi-cephfs-add-conditional-for-csi-snapshotter-creation.patch
0018-ceph-csi-rbd-add-conditional-for-csi-snapshotter-creation.patch
0019-ceph-csi-cephfs-add-conditional-to-enable-leader-election.patch
0020-ceph-csi-rbd-add-conditional-to-enable-leader-election.patch


@ -9,7 +9,7 @@ src_files:
- platform-helm/files/Makefile
revision:
dist: $STX_DIST
stx_patch: 20
stx_patch: 19
GITREVCOUNT:
BASE_SRCREV: bc01cdee112dbb9a5511a180c07a5e3513b48692
SRC_DIR: ${MY_REPO}/stx/platform-armada-app/helm-charts/upstream/platform-helm


@ -114,15 +114,6 @@ class CephFSProvisionerHelm(base.FluxCDBaseHelm):
SERVICE_NAME = app_constants.HELM_CHART_CEPH_FS_PROVISIONER
SERVICE_PORT_MON = 6789
def execute_manifest_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
if not self._is_enabled(operator.APP, self.CHART,
app_constants.HELM_NS_CEPH_FS_PROVISIONER):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def execute_kustomize_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
@ -185,6 +176,7 @@ class CephFSProvisionerHelm(base.FluxCDBaseHelm):
"userSecretName": user_secret_name or class_defaults["adminSecretName"],
"chunk_size": 64,
"replication": int(bk.capabilities.get("replication")),
"min_replication": int(bk.capabilities.get("min_replication")),
"crush_rule_name": rule_name,
"additionalNamespaces": ['default', 'kube-public']
}


@ -27,15 +27,6 @@ class CephPoolsAuditHelm(base.FluxCDBaseHelm):
SERVICE_NAME = 'ceph-pools'
def execute_manifest_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
if not self._is_enabled(operator.APP, self.CHART,
common.HELM_NS_RBD_PROVISIONER):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def execute_kustomize_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user


@ -31,15 +31,6 @@ class RbdProvisionerHelm(base.FluxCDBaseHelm):
SERVICE_NAME = app_constants.HELM_CHART_RBD_PROVISIONER
SERVICE_PORT_MON = 6789
def execute_manifest_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
if not self._is_enabled(operator.APP, self.CHART,
common.HELM_NS_RBD_PROVISIONER):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def execute_kustomize_updates(self, operator):
# On application load this chart is enabled. Only disable if specified
# by the user
@ -99,6 +90,7 @@ class RbdProvisionerHelm(base.FluxCDBaseHelm):
"userSecretName": user_secret_name,
"chunk_size": 64,
"replication": int(bk.capabilities.get("replication")),
"min_replication": int(bk.capabilities.get("min_replication")),
"crush_rule_name": rule_name,
"additionalNamespaces": ['default', 'kube-public']
}


@ -2,6 +2,10 @@ app_name: APP_REPLACE_NAME
app_version: APP_REPLACE_VERSION
helm_repo: HELM_REPLACE_REPO
maintain_user_overrides: true
maintain_attributes: true
disabled_charts:
- ceph-pools-audit
upgrades:
auto_update: true
@ -16,6 +20,7 @@ behavior:
triggers:
- type: runtime-apply-puppet # TODO(dvoicule): optimize triggers
- type: host-availability-updated
- type: storage-backend-modify
- type: kube-upgrade-complete
filters:
- availability: services-enabled