From e911599abe3bf161779cc65c4affe42c77421954 Mon Sep 17 00:00:00 2001 From: Guillaume Boutry Date: Mon, 7 Oct 2024 16:29:59 +0200 Subject: [PATCH] Migrate to unified charmcraft.yaml Charmcraft 3 moves towards a single charmcraft.yaml, this is needed for 24.04 migration. Change-Id: I743712752aaf37bf68730b64bd6c147dfad370e2 Signed-off-by: Guillaume Boutry --- charms/aodh-k8s/actions.yaml | 2 - charms/aodh-k8s/charmcraft.yaml | 124 +++++- charms/aodh-k8s/config.yaml | 21 - charms/aodh-k8s/metadata.yaml | 92 ----- charms/aodh-k8s/tests/config.yaml | 1 - charms/barbican-k8s/charmcraft.yaml | 93 ++++- charms/barbican-k8s/config.yaml | 9 - charms/barbican-k8s/metadata.yaml | 74 ---- charms/barbican-k8s/tests/config.yaml | 1 - charms/ceilometer-k8s/actions.yaml | 2 - charms/ceilometer-k8s/charmcraft.yaml | 87 ++++- charms/ceilometer-k8s/config.yaml | 9 - charms/ceilometer-k8s/metadata.yaml | 65 ---- charms/ceilometer-k8s/tests/actions.yaml | 1 - charms/ceilometer-k8s/tests/config.yaml | 1 - charms/cinder-ceph-k8s/actions.yaml | 2 - charms/cinder-ceph-k8s/charmcraft.yaml | 340 +++++++++++++++- charms/cinder-ceph-k8s/config.yaml | 268 ------------- charms/cinder-ceph-k8s/metadata.yaml | 61 --- .../tests/unit/test_cinder_ceph_charm.py | 3 - charms/cinder-k8s/actions.yaml | 2 - charms/cinder-k8s/charmcraft.yaml | 94 ++++- charms/cinder-k8s/config.yaml | 9 - charms/cinder-k8s/metadata.yaml | 73 ---- charms/cinder-k8s/tests/config.yaml | 1 - charms/designate-bind-k8s/actions.yaml | 2 - charms/designate-bind-k8s/charmcraft.yaml | 66 +++- charms/designate-bind-k8s/config.yaml | 5 - charms/designate-bind-k8s/metadata.yaml | 45 --- charms/designate-k8s/actions.yaml | 2 - charms/designate-k8s/charmcraft.yaml | 97 ++++- charms/designate-k8s/config.yaml | 16 - charms/designate-k8s/metadata.yaml | 65 ---- charms/designate-k8s/tests/config.yaml | 1 - charms/glance-k8s/actions.yaml | 8 - charms/glance-k8s/charmcraft.yaml | 364 +++++++++++++++++- charms/glance-k8s/config.yaml | 261 ------------- charms/glance-k8s/metadata.yaml | 88 ----- charms/glance-k8s/tests/actions.yaml | 1 - charms/glance-k8s/tests/config.yaml | 1 - charms/gnocchi-k8s/actions.yaml | 2 - charms/gnocchi-k8s/charmcraft.yaml | 323 +++++++++++++++- charms/gnocchi-k8s/config.yaml | 239 ------------ charms/gnocchi-k8s/metadata.yaml | 71 ---- charms/gnocchi-k8s/tests/config.yaml | 1 - charms/heat-k8s/actions.yaml | 2 - charms/heat-k8s/charmcraft.yaml | 93 ++++- charms/heat-k8s/config.yaml | 9 - charms/heat-k8s/metadata.yaml | 70 ---- charms/heat-k8s/tests/config.yaml | 1 - charms/horizon-k8s/actions.yaml | 2 - charms/horizon-k8s/charmcraft.yaml | 251 +++++++++++- charms/horizon-k8s/config.yaml | 169 -------- charms/horizon-k8s/metadata.yaml | 63 --- charms/horizon-k8s/tests/actions.yaml | 1 - charms/keystone-k8s/actions.yaml | 60 --- charms/keystone-k8s/charmcraft.yaml | 186 ++++++++- charms/keystone-k8s/config.yaml | 39 -- charms/keystone-k8s/metadata.yaml | 98 ----- charms/keystone-k8s/tests/actions.yaml | 1 - charms/keystone-k8s/tests/config.yaml | 1 - charms/keystone-ldap-k8s/charmcraft.yaml | 73 +++- charms/keystone-ldap-k8s/config.yaml | 28 -- charms/keystone-ldap-k8s/metadata.yaml | 22 -- charms/keystone-ldap-k8s/tests/config.yaml | 1 - charms/magnum-k8s/actions.yaml | 2 - charms/magnum-k8s/charmcraft.yaml | 91 ++++- charms/magnum-k8s/config.yaml | 19 - charms/magnum-k8s/metadata.yaml | 67 ---- charms/magnum-k8s/tests/config.yaml | 1 - charms/masakari-k8s/charmcraft.yaml | 142 +++---- charms/masakari-k8s/tests/unit/test_charm.py | 12 - 
charms/neutron-k8s/actions.yaml | 2 - charms/neutron-k8s/charmcraft.yaml | 176 ++++++++- charms/neutron-k8s/config.yaml | 83 ---- charms/neutron-k8s/metadata.yaml | 76 ---- charms/neutron-k8s/tests/config.yaml | 1 - charms/nova-k8s/actions.yaml | 2 - charms/nova-k8s/charmcraft.yaml | 138 ++++++- charms/nova-k8s/config.yaml | 9 - charms/nova-k8s/metadata.yaml | 113 ------ charms/nova-k8s/tests/config.yaml | 1 - charms/octavia-k8s/actions.yaml | 2 - charms/octavia-k8s/charmcraft.yaml | 114 +++++- charms/octavia-k8s/config.yaml | 9 - charms/octavia-k8s/metadata.yaml | 92 ----- charms/octavia-k8s/tests/config.yaml | 1 - charms/openstack-exporter-k8s/actions.yaml | 2 - charms/openstack-exporter-k8s/charmcraft.yaml | 71 +++- charms/openstack-exporter-k8s/config.yaml | 5 - charms/openstack-exporter-k8s/metadata.yaml | 50 --- .../openstack-exporter-k8s/tests/config.yaml | 1 - charms/openstack-hypervisor/actions.yaml | 15 - charms/openstack-hypervisor/charmcraft.yaml | 103 ++++- charms/openstack-hypervisor/config.yaml | 30 -- charms/openstack-hypervisor/metadata.yaml | 48 --- .../openstack-hypervisor/tests/actions.yaml | 1 - .../tests/unit/config.yaml | 1 - .../tests/unit/test_charm.py | 3 - .../openstack-images-sync-k8s/charmcraft.yaml | 46 +-- .../tests/unit/test_charm.py | 9 - charms/ovn-central-k8s/actions.yaml | 2 - charms/ovn-central-k8s/charmcraft.yaml | 139 ++++++- charms/ovn-central-k8s/config.yaml | 29 -- charms/ovn-central-k8s/metadata.yaml | 90 ----- charms/ovn-central-k8s/tests/config.yaml | 1 - charms/ovn-relay-k8s/actions.yaml | 2 - charms/ovn-relay-k8s/charmcraft.yaml | 71 +++- charms/ovn-relay-k8s/metadata.yaml | 51 --- charms/ovn-relay-k8s/tests/actions.yaml | 1 - charms/placement-k8s/actions.yaml | 2 - charms/placement-k8s/charmcraft.yaml | 85 +++- charms/placement-k8s/config.yaml | 9 - charms/placement-k8s/metadata.yaml | 60 --- charms/placement-k8s/tests/config.yaml | 1 - charms/sunbeam-clusterd/charmcraft.yaml | 74 ++-- charms/sunbeam-machine/charmcraft.yaml | 40 +- charms/sunbeam-machine/config.yaml | 17 - charms/sunbeam-machine/metadata.yaml | 17 - charms/sunbeam-machine/tests/unit/config.yaml | 1 - .../sunbeam-machine/tests/unit/test_charm.py | 3 - charms/tempest-k8s/charmcraft.yaml | 125 +++--- charms/watcher-k8s/charmcraft.yaml | 125 +++--- ops-sunbeam/ops_sunbeam/test_utils.py | 14 +- .../shared_code/config-ceph-options.yaml | 10 +- .../metadata.yaml | 2 +- test-requirements.txt | 2 + zuul.d/zuul.yaml | 2 +- 128 files changed, 3249 insertions(+), 3527 deletions(-) delete mode 100644 charms/aodh-k8s/actions.yaml delete mode 100644 charms/aodh-k8s/config.yaml delete mode 100644 charms/aodh-k8s/metadata.yaml delete mode 120000 charms/aodh-k8s/tests/config.yaml delete mode 100644 charms/barbican-k8s/config.yaml delete mode 100644 charms/barbican-k8s/metadata.yaml delete mode 120000 charms/barbican-k8s/tests/config.yaml delete mode 100644 charms/ceilometer-k8s/actions.yaml delete mode 100644 charms/ceilometer-k8s/config.yaml delete mode 100644 charms/ceilometer-k8s/metadata.yaml delete mode 120000 charms/ceilometer-k8s/tests/actions.yaml delete mode 120000 charms/ceilometer-k8s/tests/config.yaml delete mode 100644 charms/cinder-ceph-k8s/actions.yaml delete mode 100644 charms/cinder-ceph-k8s/config.yaml delete mode 100644 charms/cinder-ceph-k8s/metadata.yaml delete mode 100644 charms/cinder-k8s/actions.yaml delete mode 100644 charms/cinder-k8s/config.yaml delete mode 100644 charms/cinder-k8s/metadata.yaml delete mode 120000 charms/cinder-k8s/tests/config.yaml delete mode 
100644 charms/designate-bind-k8s/actions.yaml delete mode 100644 charms/designate-bind-k8s/config.yaml delete mode 100644 charms/designate-bind-k8s/metadata.yaml delete mode 100644 charms/designate-k8s/actions.yaml delete mode 100644 charms/designate-k8s/config.yaml delete mode 100644 charms/designate-k8s/metadata.yaml delete mode 120000 charms/designate-k8s/tests/config.yaml delete mode 100644 charms/glance-k8s/actions.yaml delete mode 100644 charms/glance-k8s/config.yaml delete mode 100644 charms/glance-k8s/metadata.yaml delete mode 120000 charms/glance-k8s/tests/actions.yaml delete mode 120000 charms/glance-k8s/tests/config.yaml delete mode 100644 charms/gnocchi-k8s/actions.yaml delete mode 100644 charms/gnocchi-k8s/config.yaml delete mode 100644 charms/gnocchi-k8s/metadata.yaml delete mode 120000 charms/gnocchi-k8s/tests/config.yaml delete mode 100644 charms/heat-k8s/actions.yaml delete mode 100644 charms/heat-k8s/config.yaml delete mode 100644 charms/heat-k8s/metadata.yaml delete mode 120000 charms/heat-k8s/tests/config.yaml delete mode 100644 charms/horizon-k8s/actions.yaml delete mode 100644 charms/horizon-k8s/config.yaml delete mode 100644 charms/horizon-k8s/metadata.yaml delete mode 120000 charms/horizon-k8s/tests/actions.yaml delete mode 100644 charms/keystone-k8s/actions.yaml delete mode 100644 charms/keystone-k8s/config.yaml delete mode 100644 charms/keystone-k8s/metadata.yaml delete mode 120000 charms/keystone-k8s/tests/actions.yaml delete mode 120000 charms/keystone-k8s/tests/config.yaml delete mode 100644 charms/keystone-ldap-k8s/config.yaml delete mode 100644 charms/keystone-ldap-k8s/metadata.yaml delete mode 120000 charms/keystone-ldap-k8s/tests/config.yaml delete mode 100644 charms/magnum-k8s/actions.yaml delete mode 100644 charms/magnum-k8s/config.yaml delete mode 100644 charms/magnum-k8s/metadata.yaml delete mode 120000 charms/magnum-k8s/tests/config.yaml delete mode 100644 charms/neutron-k8s/actions.yaml delete mode 100644 charms/neutron-k8s/config.yaml delete mode 100644 charms/neutron-k8s/metadata.yaml delete mode 120000 charms/neutron-k8s/tests/config.yaml delete mode 100644 charms/nova-k8s/actions.yaml delete mode 100644 charms/nova-k8s/config.yaml delete mode 100644 charms/nova-k8s/metadata.yaml delete mode 120000 charms/nova-k8s/tests/config.yaml delete mode 100644 charms/octavia-k8s/actions.yaml delete mode 100644 charms/octavia-k8s/config.yaml delete mode 100644 charms/octavia-k8s/metadata.yaml delete mode 120000 charms/octavia-k8s/tests/config.yaml delete mode 100644 charms/openstack-exporter-k8s/actions.yaml delete mode 100644 charms/openstack-exporter-k8s/config.yaml delete mode 100644 charms/openstack-exporter-k8s/metadata.yaml delete mode 120000 charms/openstack-exporter-k8s/tests/config.yaml delete mode 100644 charms/openstack-hypervisor/actions.yaml delete mode 100644 charms/openstack-hypervisor/config.yaml delete mode 100644 charms/openstack-hypervisor/metadata.yaml delete mode 120000 charms/openstack-hypervisor/tests/actions.yaml delete mode 120000 charms/openstack-hypervisor/tests/unit/config.yaml delete mode 100644 charms/ovn-central-k8s/actions.yaml delete mode 100644 charms/ovn-central-k8s/config.yaml delete mode 100644 charms/ovn-central-k8s/metadata.yaml delete mode 120000 charms/ovn-central-k8s/tests/config.yaml delete mode 100644 charms/ovn-relay-k8s/actions.yaml delete mode 100644 charms/ovn-relay-k8s/metadata.yaml delete mode 120000 charms/ovn-relay-k8s/tests/actions.yaml delete mode 100644 charms/placement-k8s/actions.yaml delete mode 100644 
charms/placement-k8s/config.yaml delete mode 100644 charms/placement-k8s/metadata.yaml delete mode 120000 charms/placement-k8s/tests/config.yaml delete mode 100644 charms/sunbeam-machine/config.yaml delete mode 100644 charms/sunbeam-machine/metadata.yaml delete mode 120000 charms/sunbeam-machine/tests/unit/config.yaml diff --git a/charms/aodh-k8s/actions.yaml b/charms/aodh-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/aodh-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/aodh-k8s/charmcraft.yaml b/charms/aodh-k8s/charmcraft.yaml index f6459be6..9919a131 100644 --- a/charms/aodh-k8s/charmcraft.yaml +++ b/charms/aodh-k8s/charmcraft.yaml @@ -1,11 +1,115 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: aodh-k8s +summary: OpenStack aodh service +description: | + OpenStack aodh provides an HTTP service for managing, selecting, + and claiming providers of classes of inventory representing available + resources in a cloud. + . +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-aodh-k8s + issues: + - https://bugs.launchpad.net/charm-aodh-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + alarm-history-time-to-live: + default: -1 + description: | + Number of seconds that alarm histories are kept in the database for (<= 0 + means forever) + type: int + alarm-histories-delete-batch-size: + default: 0 + description: | + Number of alarm histories to be deleted in one iteration from the database (0 + means all). 
(integer value) + type: int + +containers: + aodh-api: + resource: aodh-api-image + aodh-evaluator: + resource: aodh-evaluator-image + aodh-notifier: + resource: aodh-notifier-image + aodh-listener: + resource: aodh-listener-image + aodh-expirer: + resource: aodh-expirer-image + +resources: + aodh-api-image: + type: oci-image + description: OCI image for OpenStack aodh api service + upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 + aodh-evaluator-image: + type: oci-image + description: OCI image for OpenStack aodh evaluator service + upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 + aodh-notifier-image: + type: oci-image + description: OCI image for OpenStack aodh notifier service + upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 + aodh-listener-image: + type: oci-image + description: OCI image for OpenStack aodh listener service + upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 + aodh-expirer-image: + type: oci-image + description: OCI image for OpenStack aodh expirer service + upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + amqp: + interface: rabbitmq + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + aodh: + interface: aodh + +peers: + peers: + interface: aodh-peer + parts: update-certificates: plugin: nil @@ -13,9 +117,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/aodh-k8s/config.yaml b/charms/aodh-k8s/config.yaml deleted file mode 100644 index 654ce012..00000000 --- a/charms/aodh-k8s/config.yaml +++ /dev/null @@ -1,21 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string - alarm-history-time-to-live: - default: -1 - description: | - Number of seconds that alarm histories are kept in the database for (<= 0 - means forever) - type: int - alarm-histories-delete-batch-size: - default: 0 - description: | - Number of alarm histories to be deleted in one iteration from the database (0 - means all). (integer value) - type: int diff --git a/charms/aodh-k8s/metadata.yaml b/charms/aodh-k8s/metadata.yaml deleted file mode 100644 index 3ac850a4..00000000 --- a/charms/aodh-k8s/metadata.yaml +++ /dev/null @@ -1,92 +0,0 @@ -name: aodh-k8s -summary: OpenStack aodh service -maintainer: OpenStack Charmers -description: | - OpenStack aodh provides an HTTP service for managing, selecting, - and claiming providers of classes of inventory representing available - resources in a cloud. - . 
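The pattern above repeats for every charm in this series: content previously split across metadata.yaml, config.yaml and actions.yaml now lives under top-level keys of a single charmcraft.yaml, and the old bases build-on/run-on block becomes base plus platforms. A minimal sketch of the mapping, using a placeholder charm name and placeholder URLs rather than any specific charm from this patch:

# Unified charmcraft.yaml layout (Charmcraft 3 style); names/URLs are placeholders
type: charm
name: example-k8s              # previously metadata.yaml: name
summary: Example service       # previously metadata.yaml: summary
description: |
  Example description (previously metadata.yaml: description).
assumes:                       # carried over unchanged from metadata.yaml
  - k8s-api
  - juju >= 3.1
links:                         # replaces metadata.yaml: source / issues
  source:
    - https://opendev.org/openstack/charm-example-k8s
  issues:
    - https://bugs.launchpad.net/charm-example-k8s
base: ubuntu@22.04             # replaces the old bases: build-on / run-on block
platforms:
  amd64:
config:                        # previously the standalone config.yaml options block
  options:
    debug:
      type: boolean
      default: false
      description: Enable debug logging.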
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-aodh-k8s -issues: https://bugs.launchpad.net/charm-aodh-k8s - -containers: - aodh-api: - resource: aodh-api-image - aodh-evaluator: - resource: aodh-evaluator-image - aodh-notifier: - resource: aodh-notifier-image - aodh-listener: - resource: aodh-listener-image - aodh-expirer: - resource: aodh-expirer-image - -resources: - aodh-api-image: - type: oci-image - description: OCI image for OpenStack aodh api service - # ghcr.io/canonical/aodh-consolidated:2024.1 - upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 - aodh-evaluator-image: - type: oci-image - description: OCI image for OpenStack aodh evaluator service - # ghcr.io/canonical/aodh-consolidated:2024.1 - upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 - aodh-notifier-image: - type: oci-image - description: OCI image for OpenStack aodh notifier service - # ghcr.io/canonical/aodh-consolidated:2024.1 - upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 - aodh-listener-image: - type: oci-image - description: OCI image for OpenStack aodh listener service - # ghcr.io/canonical/aodh-consolidated:2024.1 - upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 - aodh-expirer-image: - type: oci-image - description: OCI image for OpenStack aodh expirer service - # ghcr.io/canonical/aodh-consolidated:2024.1 - upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - amqp: - interface: rabbitmq - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - aodh: - interface: aodh - -peers: - peers: - interface: aodh-peer diff --git a/charms/aodh-k8s/tests/config.yaml b/charms/aodh-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/aodh-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/barbican-k8s/charmcraft.yaml b/charms/barbican-k8s/charmcraft.yaml index cc24191b..f800c024 100644 --- a/charms/barbican-k8s/charmcraft.yaml +++ b/charms/barbican-k8s/charmcraft.yaml @@ -1,14 +1,87 @@ -# This file configures Charmcraft. -# See https://juju.is/docs/sdk/charmcraft-config for guidance. - type: charm -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" +title: Barbican +name: barbican-k8s +summary: Openstack Key Manager service +description: | + Barbican is the OpenStack Key Manager service. + It provides secure storage, provisioning and management of secret data. + This includes keying material such as Symmetric Keys, Asymmetric Keys, Certificates and raw binary data. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-barbican-k8s + issues: + - https://bugs.launchpad.net/charm-barbican-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + barbican-api: + resource: barbican-api-image + barbican-worker: + resource: barbican-worker-image + +resources: + barbican-api-image: + type: oci-image + description: OCI image for OpenStack Barbican API + upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1 + barbican-worker-image: + type: oci-image + description: OCI image for OpenStack Barbican worker + upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1 + +requires: + ingress-internal: + interface: ingress + limit: 1 + optional: true + ingress-public: + interface: ingress + limit: 1 + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + identity-ops: + interface: keystone-resources + optional: true + amqp: + interface: rabbitmq + vault-kv: + interface: vault-kv + limit: 1 + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: barbican-peer + parts: charm: build-packages: diff --git a/charms/barbican-k8s/config.yaml b/charms/barbican-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/barbican-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/barbican-k8s/metadata.yaml b/charms/barbican-k8s/metadata.yaml deleted file mode 100644 index 91e21ac0..00000000 --- a/charms/barbican-k8s/metadata.yaml +++ /dev/null @@ -1,74 +0,0 @@ -name: barbican-k8s -display-name: Barbican -summary: Openstack Key Manager service -description: | - Barbican is the OpenStack Key Manager service. - It provides secure storage, provisioning and management of secret data. - This includes keying material such as Symmetric Keys, Asymmetric Keys, Certificates and raw binary data. 
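Alongside the metadata consolidation, each charm keeps the same ca-certificates bootstrap in its parts section; the only change there is writing after: as a block list instead of flow style. The recurring pattern, abridged to the lines visible in this patch:

parts:
  update-certificates:
    plugin: nil
    override-build: |
      apt update
      apt install -y ca-certificates
      update-ca-certificates
  charm:
    after:
      - update-certificates    # refresh CA certificates before building the charm part
    build-packages:
      - git
      - libffi-dev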
-maintainer: Openstack Charmers -source: https://opendev.org/openstack/charm-barbican-k8s -issues: https://bugs.launchpad.net/charm-barbican-k8s - -bases: - - name: ubuntu - channel: 22.04/stable - -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - secrets - - misc - -requires: - ingress-internal: - interface: ingress - limit: 1 - optional: true - ingress-public: - interface: ingress - limit: 1 - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - identity-ops: - interface: keystone-resources - optional: true - amqp: - interface: rabbitmq - vault-kv: - interface: vault-kv - limit: 1 - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: barbican-peer - -containers: - barbican-api: - resource: barbican-api-image - barbican-worker: - resource: barbican-worker-image - -resources: - barbican-api-image: - type: oci-image - description: OCI image for OpenStack Barbican API - upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1 - barbican-worker-image: - type: oci-image - description: OCI image for OpenStack Barbican worker - upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1 diff --git a/charms/barbican-k8s/tests/config.yaml b/charms/barbican-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/barbican-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/ceilometer-k8s/actions.yaml b/charms/ceilometer-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/ceilometer-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/ceilometer-k8s/charmcraft.yaml b/charms/ceilometer-k8s/charmcraft.yaml index 98bf024f..0ee6de06 100644 --- a/charms/ceilometer-k8s/charmcraft.yaml +++ b/charms/ceilometer-k8s/charmcraft.yaml @@ -1,11 +1,78 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: ceilometer-k8s +summary: OpenStack ceilometer service +description: | + OpenStack ceilometer provides an HTTP service for managing, selecting, + and claiming providers of classes of inventory representing available + resources in a cloud. + . +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-ceilometer-k8s + issues: + - https://bugs.launchpad.net/charm-ceilometer-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + ceilometer-central: + resource: ceilometer-central-image + ceilometer-notification: + resource: ceilometer-notification-image + +resources: + ceilometer-central-image: + type: oci-image + description: OCI image for OpenStack ceilometer central + upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1 + ceilometer-notification-image: + type: oci-image + description: OCI image for OpenStack ceilometer + upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1 + +requires: + amqp: + interface: rabbitmq + identity-credentials: + interface: keystone-credentials + limit: 1 + gnocchi-db: + interface: gnocchi + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + ceilometer-service: + interface: ceilometer + +peers: + peers: + interface: ceilometer-peer + parts: update-certificates: plugin: nil @@ -13,9 +80,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/ceilometer-k8s/config.yaml b/charms/ceilometer-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/ceilometer-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/ceilometer-k8s/metadata.yaml b/charms/ceilometer-k8s/metadata.yaml deleted file mode 100644 index ac3f40ce..00000000 --- a/charms/ceilometer-k8s/metadata.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: ceilometer-k8s -summary: OpenStack ceilometer service -maintainer: OpenStack Charmers -description: | - OpenStack ceilometer provides an HTTP service for managing, selecting, - and claiming providers of classes of inventory representing available - resources in a cloud. - . 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-ceilometer-k8s -issues: https://bugs.launchpad.net/charm-ceilometer-k8s - -containers: - ceilometer-central: - resource: ceilometer-central-image - ceilometer-notification: - resource: ceilometer-notification-image - -resources: - ceilometer-central-image: - type: oci-image - description: OCI image for OpenStack ceilometer central - # ghcr.io/canonical/ceilometer-central:2024.1 - upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1 - ceilometer-notification-image: - type: oci-image - description: OCI image for OpenStack ceilometer notification - # ghcr.io/canonical/ceilometer-notification:2024.1 - description: OCI image for OpenStack ceilometer - upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1 - -provides: - ceilometer-service: - interface: ceilometer - -requires: - amqp: - interface: rabbitmq - identity-credentials: - interface: keystone-credentials - limit: 1 - gnocchi-db: - interface: gnocchi - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: ceilometer-peer diff --git a/charms/ceilometer-k8s/tests/actions.yaml b/charms/ceilometer-k8s/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/ceilometer-k8s/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/ceilometer-k8s/tests/config.yaml b/charms/ceilometer-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/ceilometer-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/cinder-ceph-k8s/actions.yaml b/charms/cinder-ceph-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/cinder-ceph-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/cinder-ceph-k8s/charmcraft.yaml b/charms/cinder-ceph-k8s/charmcraft.yaml index d5efdc7b..19afd61c 100644 --- a/charms/cinder-ceph-k8s/charmcraft.yaml +++ b/charms/cinder-ceph-k8s/charmcraft.yaml @@ -1,11 +1,331 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: cinder-ceph-k8s +summary: OpenStack volume service - Ceph backend +description: | + Cinder is the OpenStack project that provides volume management for + instances. This charm provides integration with Ceph storage + backends. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-cinder-ceph-k8s + issues: + - https://bugs.launchpad.net/charm-cinder-ceph-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + type: boolean + default: false + description: Enable debug logging. + use-syslog: + type: boolean + default: true + description: | + Setting this to False will disable logging to syslog (the default). + ceph-osd-replication-count: + default: 3 + type: int + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the cinder rbd pool. Of course, this only + applies if using Ceph as a backend store. 
Note that once the cinder + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 40 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the ephemeral volumes + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + volume-backend-name: + default: null + type: string + description: | + Volume backend name for the backend. The default value is the + application name in the Juju model, e.g. "cinder-ceph-mybackend" + if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. + A common backend name can be set to multiple backends with the + same characters so that those can be treated as a single virtual + backend associated with a single volume type. + backend-availability-zone: + default: null + type: string + description: | + Availability zone name of this volume backend. If set, it will + override the default availability zone. Supported for Pike or + newer releases. + restrict-ceph-pools: + default: false + type: boolean + description: | + Optionally restrict Ceph key permissions to access pools as required. + rbd-pool-name: + default: null + type: string + description: | + Optionally specify an existing rbd pool that cinder should map to. + rbd-flatten-volume-from-snapshot: + default: false + type: boolean + description: | + Flatten volumes created from snapshots to remove dependency from + volume to snapshot. + rbd-mirroring-mode: + type: string + default: pool + description: | + The RBD mirroring mode used for the Ceph pool. This option is only used + with 'replicated' pool type, as it's not supported for 'erasure-coded' + pool type - valid values: 'pool' and 'image' + pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include `replicated` + and `erasure-coded`. + ec-profile-name: + type: string + default: null + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: null + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. 
K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: null + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: null + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: null + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range. The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: null + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. Larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: null + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this applications pool. The following list of + plugins acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: null + description: | + EC profile technique used for this applications pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are `reed_sol_van`, `reed_sol_r6_op`, + `cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure, + `reed_sol_van`, `cauchy` for isa and `single`, `multiple` + for shec. + ec-profile-device-class: + type: string + default: null + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). + bluestore-compression-algorithm: + type: string + default: null + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: null + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. 
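To make the interplay of these pool options concrete, a hypothetical deployment overlay that switches this backend to an erasure-coded pool might look as follows; the application name and all values are illustrative, not charm defaults:

applications:
  cinder-ceph:                     # placeholder application name
    charm: cinder-ceph-k8s
    options:
      pool-type: erasure-coded     # instead of the default replicated pool
      ec-profile-plugin: jerasure
      ec-profile-k: 4              # data chunks
      ec-profile-m: 2              # coding chunks; k+m must not exceed the available failure domains
      ec-profile-device-class: ssd # optional CRUSH device class

Even with an erasure-coded data pool, the RBD metadata pool stays replicated, as the ec-rbd-metadata-pool description above notes.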
+ bluestore-compression-required-ratio: + type: float + default: null + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: null + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: null + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. + bluestore-compression-max-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. + rabbit-user: + type: string + default: null + description: Username to request access on rabbitmq-server. + rabbit-vhost: + type: string + default: null + description: RabbitMQ virtual host to request access on rabbitmq-server. + enable-telemetry-notifications: + type: boolean + default: false + description: Enable notifications to send to telemetry. + image-volume-cache-enabled: + type: boolean + default: false + description: | + Enable the image volume cache. + image-volume-cache-max-size-gb: + type: int + default: 0 + description: | + Max size of the image volume cache in GB. 0 means unlimited. + image-volume-cache-max-count: + type: int + default: 0 + description: | + Max number of entries allowed in the image volume cache. 0 means + unlimited. + +containers: + cinder-volume: + resource: cinder-volume-image + +resources: + cinder-volume-image: + type: oci-image + description: OCI image for OpenStack Cinder Volume + upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 + +requires: + amqp: + interface: rabbitmq + ceph: + interface: ceph-client + database: + interface: mysql_client + limit: 1 + identity-credentials: + interface: keystone-credentials + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + ceph-access: + interface: cinder-ceph-key + storage-backend: + interface: cinder-backend + +peers: + peers: + interface: cinder-peer + parts: update-certificates: plugin: nil @@ -13,9 +333,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/cinder-ceph-k8s/config.yaml b/charms/cinder-ceph-k8s/config.yaml deleted file mode 100644 index eb25fbe6..00000000 --- a/charms/cinder-ceph-k8s/config.yaml +++ /dev/null @@ -1,268 +0,0 @@ -options: - debug: - type: boolean - default: False - description: Enable debug logging. 
- use-syslog: - type: boolean - default: True - description: | - Setting this to False will disable logging to syslog (the default). - ceph-osd-replication-count: - default: 3 - type: int - description: | - This value dictates the number of replicas ceph must make of any - object it stores within the cinder rbd pool. Of course, this only - applies if using Ceph as a backend store. Note that once the cinder - rbd pool has been created, changing this value will not have any - effect (although it can be changed in ceph by manually configuring - your ceph cluster). - ceph-pool-weight: - type: int - default: 40 - description: | - Defines a relative weighting of the pool as a percentage of the total - amount of data in the Ceph cluster. This effectively weights the number - of placement groups for the pool created to be appropriately portioned - to the amount of data expected. For example, if the ephemeral volumes - for the OpenStack compute instances are expected to take up 20% of the - overall configuration then this value would be specified as 20. Note - - it is important to choose an appropriate value for the pool weight as - this directly affects the number of placement groups which will be - created for the pool. The number of placement groups for a pool can - only be increased, never decreased - so it is important to identify the - percent of data that will likely reside in the pool. - volume-backend-name: - default: - type: string - description: | - Volume backend name for the backend. The default value is the - application name in the Juju model, e.g. "cinder-ceph-mybackend" - if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. - A common backend name can be set to multiple backends with the - same characters so that those can be treated as a single virtual - backend associated with a single volume type. - backend-availability-zone: - default: - type: string - description: | - Availability zone name of this volume backend. If set, it will - override the default availability zone. Supported for Pike or - newer releases. - restrict-ceph-pools: - default: False - type: boolean - description: | - Optionally restrict Ceph key permissions to access pools as required. - rbd-pool-name: - default: - type: string - description: | - Optionally specify an existing rbd pool that cinder should map to. - rbd-flatten-volume-from-snapshot: - default: - type: boolean - default: False - description: | - Flatten volumes created from snapshots to remove dependency from - volume to snapshot. - rbd-mirroring-mode: - type: string - default: pool - description: | - The RBD mirroring mode used for the Ceph pool. This option is only used - with 'replicated' pool type, as it's not supported for 'erasure-coded' - pool type - valid values: 'pool' and 'image' - pool-type: - type: string - default: replicated - description: | - Ceph pool type to use for storage - valid values include ‘replicated’ - and ‘erasure-coded’. - ec-profile-name: - type: string - default: - description: | - Name for the EC profile to be created for the EC pools. If not defined - a profile name will be generated based on the name of the pool used by - the application. - ec-rbd-metadata-pool: - type: string - default: - description: | - Name of the metadata pool to be created (for RBD use-cases). If not - defined a metadata pool name will be generated based on the name of - the data pool used by the application. The metadata pool is always - replicated, not erasure coded. 
- ec-profile-k: - type: int - default: 1 - description: | - Number of data chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-m: - type: int - default: 2 - description: | - Number of coding chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-locality: - type: int - default: - description: | - (lrc plugin - l) Group the coding and data chunks into sets of size l. - For instance, for k=4 and m=2, when l=3 two groups of three are created. - Each set can be recovered without reading chunks from another set. Note - that using the lrc plugin does incur more raw storage usage than isa or - jerasure in order to reduce the cost of recovery operations. - ec-profile-crush-locality: - type: string - default: - description: | - (lrc plugin) The type of the crush bucket in which each set of chunks - defined by l will be stored. For instance, if it is set to rack, each - group of l chunks will be placed in a different rack. It is used to - create a CRUSH rule step such as step choose rack. If it is not set, - no such grouping is done. - ec-profile-durability-estimator: - type: int - default: - description: | - (shec plugin - c) The number of parity chunks each of which includes - each data chunk in its calculation range. The number is used as a - durability estimator. For instance, if c=2, 2 OSDs can be down - without losing data. - ec-profile-helper-chunks: - type: int - default: - description: | - (clay plugin - d) Number of OSDs requested to send data during - recovery of a single chunk. d needs to be chosen such that - k+1 <= d <= k+m-1. Larger the d, the better the savings. - ec-profile-scalar-mds: - type: string - default: - description: | - (clay plugin) specifies the plugin that is used as a building - block in the layered construction. It can be one of jerasure, - isa, shec (defaults to jerasure). - ec-profile-plugin: - type: string - default: jerasure - description: | - EC plugin to use for this applications pool. The following list of - plugins acceptable - jerasure, lrc, isa, shec, clay. - ec-profile-technique: - type: string - default: - description: | - EC profile technique used for this applications pool - will be - validated based on the plugin configured via ec-profile-plugin. - Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, - ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, - ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ - for shec. - ec-profile-device-class: - type: string - default: - description: | - Device class from CRUSH map to use for placement groups for - erasure profile - valid values: ssd, hdd or nvme (or leave - unset to not use a device class). - bluestore-compression-algorithm: - type: string - default: - description: | - Compressor to use (if any) for pools requested by this charm. - . - NOTE: The ceph-osd charm sets a global default for this value (defaults - to 'lz4' unless configured by the end user) which will be used unless - specified for individual pools. - bluestore-compression-mode: - type: string - default: - description: | - Policy for using compression on pools requested by this charm. - . - 'none' means never use compression. - 'passive' means use compression when clients hint that data is - compressible. - 'aggressive' means use compression unless clients hint that - data is not compressible. 
- 'force' means use compression under all circumstances even if the clients - hint that the data is not compressible. - bluestore-compression-required-ratio: - type: float - default: - description: | - The ratio of the size of the data chunk after compression relative to the - original size must be at least this small in order to store the - compressed version on pools requested by this charm. - bluestore-compression-min-blob-size: - type: int - default: - description: | - Chunks smaller than this are never compressed on pools requested by - this charm. - bluestore-compression-min-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression min blob size for rotational media on - pools requested by this charm. - bluestore-compression-min-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression min blob size for solid state media on - pools requested by this charm. - bluestore-compression-max-blob-size: - type: int - default: - description: | - Chunks larger than this are broken into smaller blobs sizing bluestore - compression max blob size before being compressed on pools requested by - this charm. - bluestore-compression-max-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression max blob size for rotational media on - pools requested by this charm. - bluestore-compression-max-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression max blob size for solid state media on - pools requested by this charm. - rabbit-user: - type: string - default: - description: Username to request access on rabbitmq-server. - rabbit-vhost: - type: string - default: - description: RabbitMQ virtual host to request access on rabbitmq-server. - enable-telemetry-notifications: - type: boolean - default: False - description: Enable notifications to send to telemetry. - image-volume-cache-enabled: - type: boolean - default: False - description: | - Enable the image volume cache. - image-volume-cache-max-size-gb: - type: int - default: 0 - description: | - Max size of the image volume cache in GB. 0 means unlimited. - image-volume-cache-max-count: - type: int - default: 0 - description: | - Max number of entries allowed in the image volume cache. 0 means - unlimited. diff --git a/charms/cinder-ceph-k8s/metadata.yaml b/charms/cinder-ceph-k8s/metadata.yaml deleted file mode 100644 index 10ef4381..00000000 --- a/charms/cinder-ceph-k8s/metadata.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2021 Canonical Ltd -# See LICENSE file for licensing details. -name: cinder-ceph-k8s -summary: OpenStack volume service - Ceph backend -maintainer: Openstack Charmers -description: | - Cinder is the OpenStack project that provides volume management for - instances. This charm provides integration with Ceph storage - backends. 
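One small normalization worth noting while reading these hunks: options that had a bare default: in the old config.yaml carry an explicit default: null in the unified file. Both spellings parse to the same value, so option behaviour is unchanged. Using rbd-pool-name from above as the example:

---
# old config.yaml spelling (empty value parses as null)
rbd-pool-name:
  default:
  type: string
---
# unified charmcraft.yaml spelling (explicit null, same meaning)
rbd-pool-name:
  default: null
  type: string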
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - storage - - misc -source: https://opendev.org/openstack/charm-cinder-ceph-k8s -issues: https://bugs.launchpad.net/charm-cinder-ceph-k8s - -containers: - cinder-volume: - resource: cinder-volume-image - -resources: - cinder-volume-image: - type: oci-image - description: OCI image for OpenStack Cinder Volume - upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 - -requires: - amqp: - interface: rabbitmq - ceph: - interface: ceph-client - database: - interface: mysql_client - limit: 1 - identity-credentials: - interface: keystone-credentials - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - ceph-access: - interface: cinder-ceph-key - storage-backend: - interface: cinder-backend - -peers: - peers: - interface: cinder-peer diff --git a/charms/cinder-ceph-k8s/tests/unit/test_cinder_ceph_charm.py b/charms/cinder-ceph-k8s/tests/unit/test_cinder_ceph_charm.py index 6fc7d4ca..6995f4d5 100644 --- a/charms/cinder-ceph-k8s/tests/unit/test_cinder_ceph_charm.py +++ b/charms/cinder-ceph-k8s/tests/unit/test_cinder_ceph_charm.py @@ -83,12 +83,9 @@ class TestCinderCephOperatorCharm(test_utils.CharmTestCase): """Setup fixtures ready for testing.""" super().setUp(charm, self.PATCHES) self.mock_event = MagicMock() - with open("config.yaml", "r") as f: - config_data = f.read() self.harness = test_utils.get_harness( _CinderCephOperatorCharm, container_calls=self.container_calls, - charm_config=config_data, ) mock_get_platform = patch( "charmhelpers.osplatform.get_platform", return_value="ubuntu" diff --git a/charms/cinder-k8s/actions.yaml b/charms/cinder-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/cinder-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/cinder-k8s/charmcraft.yaml b/charms/cinder-k8s/charmcraft.yaml index f6459be6..2a6f4173 100644 --- a/charms/cinder-k8s/charmcraft.yaml +++ b/charms/cinder-k8s/charmcraft.yaml @@ -1,11 +1,85 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: cinder-k8s +summary: OpenStack volume service +description: | + Cinder is the OpenStack project that provides volume management for + instances. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-cinder-k8s + issues: + - https://bugs.launchpad.net/charm-cinder-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + cinder-api: + resource: cinder-api-image + cinder-scheduler: + resource: cinder-scheduler-image + +resources: + cinder-api-image: + type: oci-image + description: OCI image for OpenStack Cinder API + upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 + cinder-scheduler-image: + type: oci-image + description: OCI image for OpenStack Cinder Scheduler + upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + identity-service: + interface: keystone + limit: 1 + amqp: + interface: rabbitmq + storage-backend: + interface: cinder-backend + image-service: + interface: glance + optional: true + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: cinder-peer + parts: update-certificates: plugin: nil @@ -13,9 +87,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/cinder-k8s/config.yaml b/charms/cinder-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/cinder-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/cinder-k8s/metadata.yaml b/charms/cinder-k8s/metadata.yaml deleted file mode 100644 index dfdf4b66..00000000 --- a/charms/cinder-k8s/metadata.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2021 Canonical Ltd -# See LICENSE file for licensing details. -name: cinder-k8s -summary: OpenStack volume service -maintainer: Openstack Charmers -description: | - Cinder is the OpenStack project that provides volume management for - instances. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - storage - - misc -source: https://opendev.org/openstack/charm-cinder-k8s -issues: https://bugs.launchpad.net/charm-cinder-k8s - -containers: - cinder-api: - resource: cinder-api-image - cinder-scheduler: - resource: cinder-scheduler-image - -resources: - cinder-api-image: - type: oci-image - description: OCI image for OpenStack Cinder API - upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 - cinder-scheduler-image: - type: oci-image - description: OCI image for OpenStack Cinder Scheduler - upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - identity-service: - interface: keystone - limit: 1 - amqp: - interface: rabbitmq - storage-backend: - interface: cinder-backend - image-service: - interface: glance - optional: true - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: cinder-peer diff --git a/charms/cinder-k8s/tests/config.yaml b/charms/cinder-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/cinder-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/designate-bind-k8s/actions.yaml b/charms/designate-bind-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/designate-bind-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/designate-bind-k8s/charmcraft.yaml b/charms/designate-bind-k8s/charmcraft.yaml index e24ab1f5..cc11aecc 100644 --- a/charms/designate-bind-k8s/charmcraft.yaml +++ b/charms/designate-bind-k8s/charmcraft.yaml @@ -1,11 +1,59 @@ type: charm -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" +name: designate-bind-k8s +summary: OpenStack designate-bind service +description: + "Domain Name Service (DNS) is an Internet service that maps IP addresses\ + \ and fully qualified domain names (FQDN) to one another.\nIn this way, DNS alleviates\ + \ the need to remember IP addresses. Computers that run DNS are called name servers.\ + \ \nUbuntu ships with BIND (Berkley Internet Naming Daemon), the most common program\ + \ used for maintaining a name server on Linux.\n" +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-designate-bind-k8s + issues: + - https://bugs.launchpad.net/charm-designate-bind-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + +containers: + designate-bind: + resource: designate-bind-image + +resources: + designate-bind-image: + type: oci-image + description: OCI image for bind9 + upstream-source: ubuntu/bind9:9.18-22.04_beta + +requires: + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + dns-backend: + interface: bind-rndc + +peers: + peers: + interface: bind-peer + parts: update-certificates: plugin: nil @@ -13,9 +61,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/designate-bind-k8s/config.yaml b/charms/designate-bind-k8s/config.yaml deleted file mode 100644 index 91aa6408..00000000 --- a/charms/designate-bind-k8s/config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean diff --git a/charms/designate-bind-k8s/metadata.yaml b/charms/designate-bind-k8s/metadata.yaml deleted file mode 100644 index febe9867..00000000 --- a/charms/designate-bind-k8s/metadata.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: designate-bind-k8s -summary: OpenStack designate-bind service -maintainer: OpenStack Charmers -description: | - Domain Name Service (DNS) is an Internet service that maps IP addresses and fully qualified domain names (FQDN) to one another. - In this way, DNS alleviates the need to remember IP addresses. Computers that run DNS are called name servers. - Ubuntu ships with BIND (Berkley Internet Naming Daemon), the most common program used for maintaining a name server on Linux. -version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-designate-bind-k8s -issues: https://bugs.launchpad.net/charm-designate-bind-k8s - -containers: - designate-bind: - resource: designate-bind-image - -resources: - designate-bind-image: - type: oci-image - description: OCI image for bind9 - upstream-source: ubuntu/bind9:9.18-22.04_beta - -provides: - dns-backend: - interface: bind-rndc - -requires: - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: bind-peer diff --git a/charms/designate-k8s/actions.yaml b/charms/designate-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/designate-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/designate-k8s/charmcraft.yaml b/charms/designate-k8s/charmcraft.yaml index f6459be6..b68f883f 100644 --- a/charms/designate-k8s/charmcraft.yaml +++ b/charms/designate-k8s/charmcraft.yaml @@ -1,11 +1,88 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: designate-k8s +summary: OpenStack designate service +description: | + Designate is a multi-tenant DNSaaS service for OpenStack. It provides a REST API with integrated Keystone authentication. + It can be configured to auto-generate records based on Nova and Neutron actions. + Designate supports a variety of DNS servers including Bind9 and PowerDNS 4. 
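The conversion pattern shown above repeats for every charm in this patch: metadata.yaml, config.yaml and actions.yaml are folded into a single charmcraft.yaml, and the old bases list becomes the base and platforms keys. As a rough sanity check of any one converted charm (a sketch only, assuming charmcraft 3.x is installed and the command is run from that charm's directory, for example charms/designate-k8s):

    charmcraft pack

A successful pack only confirms that the unified charmcraft.yaml parses and builds; it does not exercise the charm at runtime.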
+assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-designate-k8s + issues: + - https://bugs.launchpad.net/charm-designate-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + nameservers: + type: string + default: null + description: | + Space delimited list of nameservers. These are the nameservers that have + been provided to the domain registrar in order to delegate the domain to + Designate. e.g. "ns1.example.com. ns2.example.com." + +containers: + designate: + resource: designate-image + +resources: + designate-image: + type: oci-image + description: OCI image for OpenStack designate + upstream-source: ghcr.io/canonical/designate-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + amqp: + interface: rabbitmq + dns-backend: + interface: bind-rndc + limit: 1 + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + dnsaas: + interface: designate + +peers: + peers: + interface: designate-peer + parts: update-certificates: plugin: nil @@ -13,9 +90,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/designate-k8s/config.yaml b/charms/designate-k8s/config.yaml deleted file mode 100644 index 1f4a51fe..00000000 --- a/charms/designate-k8s/config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string - nameservers: - type: string - default: - description: | - Space delimited list of nameservers. These are the nameservers that have - been provided to the domain registrar in order to delegate the domain to - Designate. e.g. "ns1.example.com. ns2.example.com." diff --git a/charms/designate-k8s/metadata.yaml b/charms/designate-k8s/metadata.yaml deleted file mode 100644 index aa3dbd21..00000000 --- a/charms/designate-k8s/metadata.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: designate-k8s -summary: OpenStack designate service -maintainer: OpenStack Charmers -description: | - Designate is a multi-tenant DNSaaS service for OpenStack. It provides a REST API with integrated Keystone authentication. - It can be configured to auto-generate records based on Nova and Neutron actions. - Designate supports a variety of DNS servers including Bind9 and PowerDNS 4. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-designate-k8s -issues: https://bugs.launchpad.net/charm-designate-k8s - -containers: - designate: - resource: designate-image - -resources: - designate-image: - type: oci-image - description: OCI image for OpenStack designate - upstream-source: ghcr.io/canonical/designate-consolidated:2024.1 - -provides: - dnsaas: - interface: designate - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - amqp: - interface: rabbitmq - dns-backend: - interface: bind-rndc - limit: 1 - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: designate-peer diff --git a/charms/designate-k8s/tests/config.yaml b/charms/designate-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/designate-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/glance-k8s/actions.yaml b/charms/glance-k8s/actions.yaml deleted file mode 100644 index 29d2d86c..00000000 --- a/charms/glance-k8s/actions.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# NOTE: no actions yet! - -describe-status: - description: | - See an expanded view of the compound status. - For a neat human readable summary: - - juju run-action --wait glance/0 describe-status --format=json | jq -r '.[].results.output' diff --git a/charms/glance-k8s/charmcraft.yaml b/charms/glance-k8s/charmcraft.yaml index f6459be6..e8ff9a30 100644 --- a/charms/glance-k8s/charmcraft.yaml +++ b/charms/glance-k8s/charmcraft.yaml @@ -1,11 +1,355 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: glance-k8s +summary: OpenStack Image Registry and Delivery Service +description: | + The Glance project provides an image registration and discovery service + and an image delivery service. These services are used in conjunction + by Nova to deliver images from object stores, such as OpenStack's Swift + service, to Nova's compute nodes. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-glance-k8s + issues: + - https://bugs.launchpad.net/charm-glance-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + ceph-osd-replication-count: + default: 3 + type: int + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the cinder rbd pool. Of course, this only + applies if using Ceph as a backend store. Note that once the cinder + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 40 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. 
This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the ephemeral volumes + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + volume-backend-name: + default: null + type: string + description: | + Volume backend name for the backend. The default value is the + application name in the Juju model, e.g. "cinder-ceph-mybackend" + if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. + A common backend name can be set to multiple backends with the + same characters so that those can be treated as a single virtual + backend associated with a single volume type. + backend-availability-zone: + default: null + type: string + description: | + Availability zone name of this volume backend. If set, it will + override the default availability zone. Supported for Pike or + newer releases. + restrict-ceph-pools: + default: false + type: boolean + description: | + Optionally restrict Ceph key permissions to access pools as required. + rbd-pool-name: + default: null + type: string + description: | + Optionally specify an existing rbd pool that cinder should map to. + rbd-flatten-volume-from-snapshot: + default: false + type: boolean + description: | + Flatten volumes created from snapshots to remove dependency from + volume to snapshot. Supported on Queens+ + rbd-mirroring-mode: + type: string + default: pool + description: | + The RBD mirroring mode used for the Ceph pool. This option is only used + with 'replicated' pool type, as it's not supported for 'erasure-coded' + pool type - valid values: 'pool' and 'image' + pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include `replicated` + and `erasure-coded`. + ec-profile-name: + type: string + default: null + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: null + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: null + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. 
Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: null + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: null + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range. The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: null + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. Larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: null + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this applications pool. The following list of + plugins acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: null + description: | + EC profile technique used for this applications pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are `reed_sol_van`, `reed_sol_r6_op`, + `cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure, + `reed_sol_van`, `cauchy` for isa and `single`, `multiple` + for shec. + ec-profile-device-class: + type: string + default: null + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). + bluestore-compression-algorithm: + type: string + default: null + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: null + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. + bluestore-compression-required-ratio: + type: float + default: null + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: null + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. 
+ bluestore-compression-min-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: null + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. + bluestore-compression-max-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. + enable-telemetry-notifications: + type: boolean + default: false + description: Enable notifications to send to telemetry. + image-size-cap: + type: string + default: 5GB + description: | + Maximum size of image a user can upload. Defaults to 5GB + (5368709120 bytes). Example values: 500M, 500MB, 5G, 5TB. + Valid units: K, KB, M, MB, G, GB, T, TB, P, PB. If no units provided, + bytes are assumed. + . + WARNING: this value should only be increased after careful consideration + and must be set to a value under 8EB (9223372036854775808 bytes). + image-conversion: + type: boolean + default: false + description: | + Enable conversion of all images to raw format during image import. + This only works on imported images (for example using 'openstack image create --import'). + Does not work on regular image uploads (like 'openstack image create') + +actions: + describe-status: + description: | + See an expanded view of the compound status. + For a neat human readable summary: + + juju run-action --wait glance/0 describe-status --format=json | jq -r '.[].results.output' + +containers: + glance-api: + resource: glance-api-image + mounts: + - storage: local-repository + location: /var/lib/glance/images + +resources: + glance-api-image: + type: oci-image + description: OCI image for OpenStack Glance + upstream-source: ghcr.io/canonical/glance-api:2024.1 + +storage: + local-repository: + type: filesystem + minimum-size: 10GiB + description: | + A local filesystem storage repository for glance images to be saved to. + Note, this must be shared storage in order to support a highly + available glance image registry. 
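To illustrate how the storage declaration above is satisfied at deploy time (a sketch under assumptions: the application is deployed under the name glance, and the Kubernetes storage class or pool backing the request actually provides the shared ReadWriteMany storage the description calls for; a single-unit deployment does not strictly need shared storage):

    juju deploy glance-k8s glance --storage local-repository=10G

The requested size must meet the 10GiB minimum-size declared above.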
+ +requires: + database: + interface: mysql_client + limit: 1 + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + identity-service: + interface: keystone + limit: 1 + amqp: + interface: rabbitmq + optional: true + ceph: + interface: ceph-client + optional: true + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + image-service: + interface: glance + +peers: + peers: + interface: glance-peer + parts: update-certificates: plugin: nil @@ -13,9 +357,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/glance-k8s/config.yaml b/charms/glance-k8s/config.yaml deleted file mode 100644 index c4f16b6d..00000000 --- a/charms/glance-k8s/config.yaml +++ /dev/null @@ -1,261 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string - ceph-osd-replication-count: - default: 3 - type: int - description: | - This value dictates the number of replicas ceph must make of any - object it stores within the cinder rbd pool. Of course, this only - applies if using Ceph as a backend store. Note that once the cinder - rbd pool has been created, changing this value will not have any - effect (although it can be changed in ceph by manually configuring - your ceph cluster). - ceph-pool-weight: - type: int - default: 40 - description: | - Defines a relative weighting of the pool as a percentage of the total - amount of data in the Ceph cluster. This effectively weights the number - of placement groups for the pool created to be appropriately portioned - to the amount of data expected. For example, if the ephemeral volumes - for the OpenStack compute instances are expected to take up 20% of the - overall configuration then this value would be specified as 20. Note - - it is important to choose an appropriate value for the pool weight as - this directly affects the number of placement groups which will be - created for the pool. The number of placement groups for a pool can - only be increased, never decreased - so it is important to identify the - percent of data that will likely reside in the pool. - volume-backend-name: - default: - type: string - description: | - Volume backend name for the backend. The default value is the - application name in the Juju model, e.g. "cinder-ceph-mybackend" - if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. - A common backend name can be set to multiple backends with the - same characters so that those can be treated as a single virtual - backend associated with a single volume type. - backend-availability-zone: - default: - type: string - description: | - Availability zone name of this volume backend. If set, it will - override the default availability zone. Supported for Pike or - newer releases. - restrict-ceph-pools: - default: False - type: boolean - description: | - Optionally restrict Ceph key permissions to access pools as required. - rbd-pool-name: - default: - type: string - description: | - Optionally specify an existing rbd pool that cinder should map to. 
- rbd-flatten-volume-from-snapshot: - default: - type: boolean - default: False - description: | - Flatten volumes created from snapshots to remove dependency from - volume to snapshot. Supported on Queens+ - rbd-mirroring-mode: - type: string - default: pool - description: | - The RBD mirroring mode used for the Ceph pool. This option is only used - with 'replicated' pool type, as it's not supported for 'erasure-coded' - pool type - valid values: 'pool' and 'image' - pool-type: - type: string - default: replicated - description: | - Ceph pool type to use for storage - valid values include ‘replicated’ - and ‘erasure-coded’. - ec-profile-name: - type: string - default: - description: | - Name for the EC profile to be created for the EC pools. If not defined - a profile name will be generated based on the name of the pool used by - the application. - ec-rbd-metadata-pool: - type: string - default: - description: | - Name of the metadata pool to be created (for RBD use-cases). If not - defined a metadata pool name will be generated based on the name of - the data pool used by the application. The metadata pool is always - replicated, not erasure coded. - ec-profile-k: - type: int - default: 1 - description: | - Number of data chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-m: - type: int - default: 2 - description: | - Number of coding chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-locality: - type: int - default: - description: | - (lrc plugin - l) Group the coding and data chunks into sets of size l. - For instance, for k=4 and m=2, when l=3 two groups of three are created. - Each set can be recovered without reading chunks from another set. Note - that using the lrc plugin does incur more raw storage usage than isa or - jerasure in order to reduce the cost of recovery operations. - ec-profile-crush-locality: - type: string - default: - description: | - (lrc plugin) The type of the crush bucket in which each set of chunks - defined by l will be stored. For instance, if it is set to rack, each - group of l chunks will be placed in a different rack. It is used to - create a CRUSH rule step such as step choose rack. If it is not set, - no such grouping is done. - ec-profile-durability-estimator: - type: int - default: - description: | - (shec plugin - c) The number of parity chunks each of which includes - each data chunk in its calculation range. The number is used as a - durability estimator. For instance, if c=2, 2 OSDs can be down - without losing data. - ec-profile-helper-chunks: - type: int - default: - description: | - (clay plugin - d) Number of OSDs requested to send data during - recovery of a single chunk. d needs to be chosen such that - k+1 <= d <= k+m-1. Larger the d, the better the savings. - ec-profile-scalar-mds: - type: string - default: - description: | - (clay plugin) specifies the plugin that is used as a building - block in the layered construction. It can be one of jerasure, - isa, shec (defaults to jerasure). - ec-profile-plugin: - type: string - default: jerasure - description: | - EC plugin to use for this applications pool. The following list of - plugins acceptable - jerasure, lrc, isa, shec, clay. 
- ec-profile-technique: - type: string - default: - description: | - EC profile technique used for this applications pool - will be - validated based on the plugin configured via ec-profile-plugin. - Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, - ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, - ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ - for shec. - ec-profile-device-class: - type: string - default: - description: | - Device class from CRUSH map to use for placement groups for - erasure profile - valid values: ssd, hdd or nvme (or leave - unset to not use a device class). - bluestore-compression-algorithm: - type: string - default: - description: | - Compressor to use (if any) for pools requested by this charm. - . - NOTE: The ceph-osd charm sets a global default for this value (defaults - to 'lz4' unless configured by the end user) which will be used unless - specified for individual pools. - bluestore-compression-mode: - type: string - default: - description: | - Policy for using compression on pools requested by this charm. - . - 'none' means never use compression. - 'passive' means use compression when clients hint that data is - compressible. - 'aggressive' means use compression unless clients hint that - data is not compressible. - 'force' means use compression under all circumstances even if the clients - hint that the data is not compressible. - bluestore-compression-required-ratio: - type: float - default: - description: | - The ratio of the size of the data chunk after compression relative to the - original size must be at least this small in order to store the - compressed version on pools requested by this charm. - bluestore-compression-min-blob-size: - type: int - default: - description: | - Chunks smaller than this are never compressed on pools requested by - this charm. - bluestore-compression-min-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression min blob size for rotational media on - pools requested by this charm. - bluestore-compression-min-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression min blob size for solid state media on - pools requested by this charm. - bluestore-compression-max-blob-size: - type: int - default: - description: | - Chunks larger than this are broken into smaller blobs sizing bluestore - compression max blob size before being compressed on pools requested by - this charm. - bluestore-compression-max-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression max blob size for rotational media on - pools requested by this charm. - bluestore-compression-max-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression max blob size for solid state media on - pools requested by this charm. - enable-telemetry-notifications: - type: boolean - default: False - description: Enable notifications to send to telemetry. - image-size-cap: - type: string - default: 5GB - description: | - Maximum size of image a user can upload. Defaults to 5GB - (5368709120 bytes). Example values: 500M, 500MB, 5G, 5TB. - Valid units: K, KB, M, MB, G, GB, T, TB, P, PB. If no units provided, - bytes are assumed. - . - WARNING: this value should only be increased after careful consideration - and must be set to a value under 8EB (9223372036854775808 bytes). - image-conversion: - type: boolean - default: False - description: | - Enable conversion of all images to raw format during image import. 
- This only works on imported images (for example using 'openstack image create --import'). - Does not work on regular image uploads (like 'openstack image create') diff --git a/charms/glance-k8s/metadata.yaml b/charms/glance-k8s/metadata.yaml deleted file mode 100644 index a5dda819..00000000 --- a/charms/glance-k8s/metadata.yaml +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2021 Canonical Ltd -# See LICENSE file for licensing details. -name: glance-k8s -maintainer: OpenStack Charmers -summary: OpenStack Image Registry and Delivery Service -description: | - The Glance project provides an image registration and discovery service - and an image delivery service. These services are used in conjunction - by Nova to deliver images from object stores, such as OpenStack's Swift - service, to Nova's compute nodes. -version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - storage - - misc -source: https://opendev.org/openstack/charm-glance-k8s -issues: https://bugs.launchpad.net/charm-glance-k8s - -containers: - glance-api: - resource: glance-api-image - mounts: - - storage: local-repository - # A persistent storage place to store glance images in a local file - # based repository. This must be shared storage in order to support a - # highly available glance registry. - location: /var/lib/glance/images - -resources: - glance-api-image: - type: oci-image - description: OCI image for OpenStack Glance - # ghcr.io/canonical/glance-api:2024.1 - upstream-source: ghcr.io/canonical/glance-api:2024.1 - -storage: - local-repository: - type: filesystem - minimum-size: 10GiB - description: | - A local filesystem storage repository for glance images to be saved to. - Note, this must be shared storage in order to support a highly - available glance image registry. - -requires: - database: - interface: mysql_client - limit: 1 - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - identity-service: - interface: keystone - limit: 1 - amqp: - interface: rabbitmq - optional: true - ceph: - interface: ceph-client - optional: true - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - image-service: - interface: glance - -peers: - peers: - interface: glance-peer diff --git a/charms/glance-k8s/tests/actions.yaml b/charms/glance-k8s/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/glance-k8s/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/glance-k8s/tests/config.yaml b/charms/glance-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/glance-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/gnocchi-k8s/actions.yaml b/charms/gnocchi-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/gnocchi-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! 
-{ } diff --git a/charms/gnocchi-k8s/charmcraft.yaml b/charms/gnocchi-k8s/charmcraft.yaml index f6459be6..4e255701 100644 --- a/charms/gnocchi-k8s/charmcraft.yaml +++ b/charms/gnocchi-k8s/charmcraft.yaml @@ -1,11 +1,314 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: gnocchi-k8s +summary: OpenStack gnocchi service +description: | + OpenStack gnocchi provides an HTTP service for managing, selecting, + and claiming providers of classes of inventory representing available + resources in a cloud. + . +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-gnocchi-k8s + issues: + - https://bugs.launchpad.net/charm-gnocchi-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + ceph-osd-replication-count: + default: 3 + type: int + description: | + This value dictates the number of replicas ceph must make of any + object it stores within the cinder rbd pool. Of course, this only + applies if using Ceph as a backend store. Note that once the cinder + rbd pool has been created, changing this value will not have any + effect (although it can be changed in ceph by manually configuring + your ceph cluster). + ceph-pool-weight: + type: int + default: 40 + description: | + Defines a relative weighting of the pool as a percentage of the total + amount of data in the Ceph cluster. This effectively weights the number + of placement groups for the pool created to be appropriately portioned + to the amount of data expected. For example, if the ephemeral volumes + for the OpenStack compute instances are expected to take up 20% of the + overall configuration then this value would be specified as 20. Note - + it is important to choose an appropriate value for the pool weight as + this directly affects the number of placement groups which will be + created for the pool. The number of placement groups for a pool can + only be increased, never decreased - so it is important to identify the + percent of data that will likely reside in the pool. + volume-backend-name: + default: null + type: string + description: | + Volume backend name for the backend. The default value is the + application name in the Juju model, e.g. "cinder-ceph-mybackend" + if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. + A common backend name can be set to multiple backends with the + same characters so that those can be treated as a single virtual + backend associated with a single volume type. + backend-availability-zone: + default: null + type: string + description: | + Availability zone name of this volume backend. If set, it will + override the default availability zone. Supported for Pike or + newer releases. + restrict-ceph-pools: + default: false + type: boolean + description: | + Optionally restrict Ceph key permissions to access pools as required. + rbd-pool-name: + default: null + type: string + description: | + Optionally specify an existing rbd pool that cinder should map to. + rbd-flatten-volume-from-snapshot: + default: false + type: boolean + description: | + Flatten volumes created from snapshots to remove dependency from + volume to snapshot. Supported on Queens+ + rbd-mirroring-mode: + type: string + default: pool + description: | + The RBD mirroring mode used for the Ceph pool. 
This option is only used + with 'replicated' pool type, as it's not supported for 'erasure-coded' + pool type - valid values: 'pool' and 'image' + pool-type: + type: string + default: replicated + description: | + Ceph pool type to use for storage - valid values include `replicated` + and `erasure-coded`. + ec-profile-name: + type: string + default: null + description: | + Name for the EC profile to be created for the EC pools. If not defined + a profile name will be generated based on the name of the pool used by + the application. + ec-rbd-metadata-pool: + type: string + default: null + description: | + Name of the metadata pool to be created (for RBD use-cases). If not + defined a metadata pool name will be generated based on the name of + the data pool used by the application. The metadata pool is always + replicated, not erasure coded. + ec-profile-k: + type: int + default: 1 + description: | + Number of data chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-m: + type: int + default: 2 + description: | + Number of coding chunks that will be used for EC data pool. K+M factors + should never be greater than the number of available zones (or hosts) + for balancing. + ec-profile-locality: + type: int + default: null + description: | + (lrc plugin - l) Group the coding and data chunks into sets of size l. + For instance, for k=4 and m=2, when l=3 two groups of three are created. + Each set can be recovered without reading chunks from another set. Note + that using the lrc plugin does incur more raw storage usage than isa or + jerasure in order to reduce the cost of recovery operations. + ec-profile-crush-locality: + type: string + default: null + description: | + (lrc plugin) The type of the crush bucket in which each set of chunks + defined by l will be stored. For instance, if it is set to rack, each + group of l chunks will be placed in a different rack. It is used to + create a CRUSH rule step such as step choose rack. If it is not set, + no such grouping is done. + ec-profile-durability-estimator: + type: int + default: null + description: | + (shec plugin - c) The number of parity chunks each of which includes + each data chunk in its calculation range. The number is used as a + durability estimator. For instance, if c=2, 2 OSDs can be down + without losing data. + ec-profile-helper-chunks: + type: int + default: null + description: | + (clay plugin - d) Number of OSDs requested to send data during + recovery of a single chunk. d needs to be chosen such that + k+1 <= d <= k+m-1. Larger the d, the better the savings. + ec-profile-scalar-mds: + type: string + default: null + description: | + (clay plugin) specifies the plugin that is used as a building + block in the layered construction. It can be one of jerasure, + isa, shec (defaults to jerasure). + ec-profile-plugin: + type: string + default: jerasure + description: | + EC plugin to use for this applications pool. The following list of + plugins acceptable - jerasure, lrc, isa, shec, clay. + ec-profile-technique: + type: string + default: null + description: | + EC profile technique used for this applications pool - will be + validated based on the plugin configured via ec-profile-plugin. + Supported techniques are `reed_sol_van`, `reed_sol_r6_op`, + `cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure, + `reed_sol_van`, `cauchy` for isa and `single`, `multiple` + for shec. 
+ ec-profile-device-class: + type: string + default: null + description: | + Device class from CRUSH map to use for placement groups for + erasure profile - valid values: ssd, hdd or nvme (or leave + unset to not use a device class). + bluestore-compression-algorithm: + type: string + default: null + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: null + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. + bluestore-compression-required-ratio: + type: float + default: null + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: null + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: null + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: null + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. + bluestore-compression-max-blob-size-ssd: + type: int + default: null + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. 
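As a worked example of the erasure-coding options above (a sketch, not a recommendation; the application name gnocchi and the K/M values are assumptions, and as the descriptions note, K+M should never exceed the number of available hosts or zones):

    juju config gnocchi pool-type=erasure-coded ec-profile-k=4 ec-profile-m=2

With k=4 and m=2 the pool tolerates the loss of any two failure domains and stores data at roughly 1.5x raw overhead, compared with 3x for a replicated pool at ceph-osd-replication-count=3.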
+ +containers: + gnocchi-api: + resource: gnocchi-api-image + gnocchi-metricd: + resource: gnocchi-metricd-image + +resources: + gnocchi-api-image: + type: oci-image + description: OCI image for OpenStack Gnocchi api service + upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1 + gnocchi-metricd-image: + type: oci-image + description: OCI image for OpenStack Gnocchi Metric service + upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + ceph: + interface: ceph-client + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + gnocchi-service: + interface: gnocchi + +peers: + peers: + interface: gnocchi-peer + parts: update-certificates: plugin: nil @@ -13,9 +316,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/gnocchi-k8s/config.yaml b/charms/gnocchi-k8s/config.yaml deleted file mode 100644 index df480b17..00000000 --- a/charms/gnocchi-k8s/config.yaml +++ /dev/null @@ -1,239 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string - ceph-osd-replication-count: - default: 3 - type: int - description: | - This value dictates the number of replicas ceph must make of any - object it stores within the cinder rbd pool. Of course, this only - applies if using Ceph as a backend store. Note that once the cinder - rbd pool has been created, changing this value will not have any - effect (although it can be changed in ceph by manually configuring - your ceph cluster). - ceph-pool-weight: - type: int - default: 40 - description: | - Defines a relative weighting of the pool as a percentage of the total - amount of data in the Ceph cluster. This effectively weights the number - of placement groups for the pool created to be appropriately portioned - to the amount of data expected. For example, if the ephemeral volumes - for the OpenStack compute instances are expected to take up 20% of the - overall configuration then this value would be specified as 20. Note - - it is important to choose an appropriate value for the pool weight as - this directly affects the number of placement groups which will be - created for the pool. The number of placement groups for a pool can - only be increased, never decreased - so it is important to identify the - percent of data that will likely reside in the pool. - volume-backend-name: - default: - type: string - description: | - Volume backend name for the backend. The default value is the - application name in the Juju model, e.g. "cinder-ceph-mybackend" - if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`. - A common backend name can be set to multiple backends with the - same characters so that those can be treated as a single virtual - backend associated with a single volume type. - backend-availability-zone: - default: - type: string - description: | - Availability zone name of this volume backend. If set, it will - override the default availability zone. 
Supported for Pike or - newer releases. - restrict-ceph-pools: - default: False - type: boolean - description: | - Optionally restrict Ceph key permissions to access pools as required. - rbd-pool-name: - default: - type: string - description: | - Optionally specify an existing rbd pool that cinder should map to. - rbd-flatten-volume-from-snapshot: - default: - type: boolean - default: False - description: | - Flatten volumes created from snapshots to remove dependency from - volume to snapshot. Supported on Queens+ - rbd-mirroring-mode: - type: string - default: pool - description: | - The RBD mirroring mode used for the Ceph pool. This option is only used - with 'replicated' pool type, as it's not supported for 'erasure-coded' - pool type - valid values: 'pool' and 'image' - pool-type: - type: string - default: replicated - description: | - Ceph pool type to use for storage - valid values include ‘replicated’ - and ‘erasure-coded’. - ec-profile-name: - type: string - default: - description: | - Name for the EC profile to be created for the EC pools. If not defined - a profile name will be generated based on the name of the pool used by - the application. - ec-rbd-metadata-pool: - type: string - default: - description: | - Name of the metadata pool to be created (for RBD use-cases). If not - defined a metadata pool name will be generated based on the name of - the data pool used by the application. The metadata pool is always - replicated, not erasure coded. - ec-profile-k: - type: int - default: 1 - description: | - Number of data chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-m: - type: int - default: 2 - description: | - Number of coding chunks that will be used for EC data pool. K+M factors - should never be greater than the number of available zones (or hosts) - for balancing. - ec-profile-locality: - type: int - default: - description: | - (lrc plugin - l) Group the coding and data chunks into sets of size l. - For instance, for k=4 and m=2, when l=3 two groups of three are created. - Each set can be recovered without reading chunks from another set. Note - that using the lrc plugin does incur more raw storage usage than isa or - jerasure in order to reduce the cost of recovery operations. - ec-profile-crush-locality: - type: string - default: - description: | - (lrc plugin) The type of the crush bucket in which each set of chunks - defined by l will be stored. For instance, if it is set to rack, each - group of l chunks will be placed in a different rack. It is used to - create a CRUSH rule step such as step choose rack. If it is not set, - no such grouping is done. - ec-profile-durability-estimator: - type: int - default: - description: | - (shec plugin - c) The number of parity chunks each of which includes - each data chunk in its calculation range. The number is used as a - durability estimator. For instance, if c=2, 2 OSDs can be down - without losing data. - ec-profile-helper-chunks: - type: int - default: - description: | - (clay plugin - d) Number of OSDs requested to send data during - recovery of a single chunk. d needs to be chosen such that - k+1 <= d <= k+m-1. Larger the d, the better the savings. - ec-profile-scalar-mds: - type: string - default: - description: | - (clay plugin) specifies the plugin that is used as a building - block in the layered construction. It can be one of jerasure, - isa, shec (defaults to jerasure). 
- ec-profile-plugin: - type: string - default: jerasure - description: | - EC plugin to use for this applications pool. The following list of - plugins acceptable - jerasure, lrc, isa, shec, clay. - ec-profile-technique: - type: string - default: - description: | - EC profile technique used for this applications pool - will be - validated based on the plugin configured via ec-profile-plugin. - Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, - ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, - ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ - for shec. - ec-profile-device-class: - type: string - default: - description: | - Device class from CRUSH map to use for placement groups for - erasure profile - valid values: ssd, hdd or nvme (or leave - unset to not use a device class). - bluestore-compression-algorithm: - type: string - default: - description: | - Compressor to use (if any) for pools requested by this charm. - . - NOTE: The ceph-osd charm sets a global default for this value (defaults - to 'lz4' unless configured by the end user) which will be used unless - specified for individual pools. - bluestore-compression-mode: - type: string - default: - description: | - Policy for using compression on pools requested by this charm. - . - 'none' means never use compression. - 'passive' means use compression when clients hint that data is - compressible. - 'aggressive' means use compression unless clients hint that - data is not compressible. - 'force' means use compression under all circumstances even if the clients - hint that the data is not compressible. - bluestore-compression-required-ratio: - type: float - default: - description: | - The ratio of the size of the data chunk after compression relative to the - original size must be at least this small in order to store the - compressed version on pools requested by this charm. - bluestore-compression-min-blob-size: - type: int - default: - description: | - Chunks smaller than this are never compressed on pools requested by - this charm. - bluestore-compression-min-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression min blob size for rotational media on - pools requested by this charm. - bluestore-compression-min-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression min blob size for solid state media on - pools requested by this charm. - bluestore-compression-max-blob-size: - type: int - default: - description: | - Chunks larger than this are broken into smaller blobs sizing bluestore - compression max blob size before being compressed on pools requested by - this charm. - bluestore-compression-max-blob-size-hdd: - type: int - default: - description: | - Value of bluestore compression max blob size for rotational media on - pools requested by this charm. - bluestore-compression-max-blob-size-ssd: - type: int - default: - description: | - Value of bluestore compression max blob size for solid state media on - pools requested by this charm. diff --git a/charms/gnocchi-k8s/metadata.yaml b/charms/gnocchi-k8s/metadata.yaml deleted file mode 100644 index b4a52939..00000000 --- a/charms/gnocchi-k8s/metadata.yaml +++ /dev/null @@ -1,71 +0,0 @@ -name: gnocchi-k8s -summary: OpenStack gnocchi service -maintainer: OpenStack Charmers -description: | - OpenStack gnocchi provides an HTTP service for managing, selecting, - and claiming providers of classes of inventory representing available - resources in a cloud. - . 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-gnocchi-k8s -issues: https://bugs.launchpad.net/charm-gnocchi-k8s - -containers: - gnocchi-api: - resource: gnocchi-api-image - gnocchi-metricd: - resource: gnocchi-metricd-image - -resources: - gnocchi-api-image: - type: oci-image - description: OCI image for OpenStack Gnocchi api service - # ghcr.io/canonical/gnocchi-api:2024.1 - upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1 - gnocchi-metricd-image: - type: oci-image - description: OCI image for OpenStack Gnocchi Metric service - # ghcr.io/canonical/gnocchi-metricd:2024.1 - upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - ceph: - interface: ceph-client - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - gnocchi-service: - interface: gnocchi - -peers: - peers: - interface: gnocchi-peer diff --git a/charms/gnocchi-k8s/tests/config.yaml b/charms/gnocchi-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/gnocchi-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/heat-k8s/actions.yaml b/charms/heat-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/heat-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/heat-k8s/charmcraft.yaml b/charms/heat-k8s/charmcraft.yaml index f6459be6..c09708ae 100644 --- a/charms/heat-k8s/charmcraft.yaml +++ b/charms/heat-k8s/charmcraft.yaml @@ -1,11 +1,84 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: heat-k8s +summary: OpenStack heat service +description: | + Heat is the main project in the OpenStack Orchestration program. It implements an + orchestration engine to launch multiple composite cloud applications based on + templates in the form of text files that can be treated like code. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-heat-k8s + issues: + - https://bugs.launchpad.net/charm-heat-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + heat-api: + resource: heat-api-image + heat-api-cfn: + resource: heat-api-image + heat-engine: + resource: heat-engine-image + +resources: + heat-api-image: + type: oci-image + description: OCI image for OpenStack Heat + upstream-source: ghcr.io/canonical/heat-consolidated:2024.1 + heat-engine-image: + type: oci-image + description: OCI image for OpenStack Heat Engine + upstream-source: ghcr.io/canonical/heat-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + traefik-route-internal: + interface: traefik_route + optional: true + limit: 1 + traefik-route-public: + interface: traefik_route + limit: 1 + amqp: + interface: rabbitmq + identity-ops: + interface: keystone-resources + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: heat-peer + parts: update-certificates: plugin: nil @@ -13,9 +86,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/heat-k8s/config.yaml b/charms/heat-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/heat-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/heat-k8s/metadata.yaml b/charms/heat-k8s/metadata.yaml deleted file mode 100644 index 72f3c136..00000000 --- a/charms/heat-k8s/metadata.yaml +++ /dev/null @@ -1,70 +0,0 @@ -name: heat-k8s -summary: OpenStack heat service -maintainer: OpenStack Charmers -description: | - Heat is the main project in the OpenStack Orchestration program. It implements an - orchestration engine to launch multiple composite cloud applications based on - templates in the form of text files that can be treated like code. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-heat-k8s -issues: https://bugs.launchpad.net/charm-heat-k8s - -containers: - heat-api: - resource: heat-api-image - heat-api-cfn: - resource: heat-api-image - heat-engine: - resource: heat-engine-image - -resources: - heat-api-image: - type: oci-image - description: OCI image for OpenStack Heat - # ghcr.io/canonical/heat-api:2024.1 - upstream-source: ghcr.io/canonical/heat-consolidated:2024.1 - heat-engine-image: - type: oci-image - description: OCI image for OpenStack Heat Engine - # ghcr.io/canonical/heat-engine:2024.1 - upstream-source: ghcr.io/canonical/heat-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - traefik-route-internal: - interface: traefik_route - optional: true - limit: 1 - traefik-route-public: - interface: traefik_route - limit: 1 - amqp: - interface: rabbitmq - identity-ops: - interface: keystone-resources - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: heat-peer diff --git a/charms/heat-k8s/tests/config.yaml b/charms/heat-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/heat-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/horizon-k8s/actions.yaml b/charms/horizon-k8s/actions.yaml deleted file mode 100644 index 6d1a31bb..00000000 --- a/charms/horizon-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -get-dashboard-url: - description: URL for access to the Horizon OpenStack Dashboard. diff --git a/charms/horizon-k8s/charmcraft.yaml b/charms/horizon-k8s/charmcraft.yaml index f6459be6..d60d0979 100644 --- a/charms/horizon-k8s/charmcraft.yaml +++ b/charms/horizon-k8s/charmcraft.yaml @@ -1,11 +1,242 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: horizon-k8s +summary: OpenStack Horizon service +description: | + OpenStack Horizon provides an HTTP service for managing, selecting, + and claiming providers of classes of inventory representing available + resources in a cloud. + . +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-horizon-k8s + issues: + - https://bugs.launchpad.net/charm-horizon-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + session-timeout: + type: int + default: 3600 + description: + A method to supersede the token timeout with a shorter dashboard + session timeout in seconds. For example, if your token expires in 60 minutes, + a value of 1800 will log users out after 30 minutes. + default-role: + type: string + default: member + description: | + Default role for Horizon operations that will be created in + Keystone upon introduction of an identity-service relation. + default-domain: + type: string + default: null + description: | + Default domain when authenticating with Horizon. Disables the domain + field in the login page. + secret: + type: string + default: null + description: | + Secret for Horizon to use when securing internal data; set this when + using multiple dashboard units. 
+ dropdown-max-items: + type: int + default: 30 + description: | + Max dropdown items to show in dropdown controls. + NOTE: This setting is supported >= OpenStack Liberty. + profile: + type: string + default: null + description: Default profile for the dashboard. Eg. cisco. + disable-instance-snapshot: + type: boolean + default: false + description: | + This setting disables Snapshots as a valid boot source for launching + instances. Snapshots sources won't show up in the Launch Instance modal + dialogue box. This option works from the Newton release, and has no + effect on earlier OpenStack releases. + cinder-backup: + type: boolean + default: false + description: Enable cinder backup panel. + password-retrieve: + type: boolean + default: false + description: Enable "Retrieve password" instance action. + customization-module: + type: string + default: "" + description: | + This option provides a means to enable customisation modules to modify + existing dashboards and panels. + allow-password-autocompletion: + type: boolean + default: false + description: | + Setting this to True will allow password form autocompletion by browser. + default-create-volume: + type: boolean + default: true + description: | + The default value for the option of creating a new volume in the + workflow for image and instance snapshot sources when launching an + instance. This option has an effect only to Ocata or newer + releases. + hide-create-volume: + type: boolean + default: false + description: | + Hide the "Create New Volume" option and rely on the + default-create-volume value during instance creation. + image-formats: + type: string + default: "" + description: | + The image-formats setting can be used to alter the default list of + advertised image formats. Many installations cannot use all the formats + that Glance recognizes, restricting the list here prevents unwanted + formats from being listed in Horizon which can lead to confusion. + . + This setting takes a space separated list, for example: iso qcow2 raw + . + Supported formats are: aki, ami, ari, docker, iso, ova, qcow2, raw, vdi, + vhd, vmdk. + . + If not provided, leave the option unconfigured which enables all of the + above. + api-result-limit: + type: int + default: 1000 + description: | + The maximum number of objects (e.g. Swift objects or Glance images) to + display on a single page before providing a paging element (a "more" link) + to paginate results. + enable-fip-topology-check: + type: boolean + default: true + description: + By default Horizon checks that a project has a router attached + to an external network before allowing FIPs to be attached to a VM. Some use + cases will not meet this constraint, e.g. if the router is owned by a different + project. Setting this to False removes this check from Horizon. + disable-password-reveal: + type: boolean + default: false + description: | + If enabled, the reveal button for passwords is removed. + enforce-password-check: + type: boolean + default: true + description: | + If True, displays an `Admin Password` field on the Change Password form + to verify that it is indeed the admin logged-in who wants to change the password. + site-name: + type: string + default: "" + description: | + An unique site name for OpenStack deployment to be passed via the + application-dashboard relation + site-branding: + type: string + default: null + description: | + A brand name to be shown in the HTML title. The default value is + "OpenStack Dashboard", e.g. 
"Instance Overview - OpenStack Dashboard" + site-branding-link: + type: string + default: null + description: | + A custom hyperlink when the logo in the dashboard is clicked, e.g. + https://mycloud.example.com/. The default value is + "horizon:user_home" to open the top level of the dashboard. + help-url: + type: string + default: null + description: | + A custom hyperlink for the "Help" menu, e.g. + https://mycloud.example.com/help. The default value is + https://docs.openstack.org/ + create-instance-flavor-sort-key: + type: string + default: null + description: | + This option can be used to customise the order instances are sorted in. + Support values include: id, name, ram, disk, and vcpus. + See https://docs.openstack.org/horizon/latest/configuration/settings.html#create-instance-flavor-sort + for more details. + create-instance-flavor-sort-reverse: + type: boolean + default: false + description: | + This option can be used to set the instance sorting to either ascending or descending. + Set True to sort in ascending order or False for descending order. + plugins: + type: string + default: "[]" + description: | + This option can be used to enable plugins for Horizon. The value should be a + JSON formatted list of plugin names. + +actions: + get-dashboard-url: + description: URL for access to the Horizon OpenStack Dashboard. + +containers: + horizon: + resource: horizon-image + +resources: + horizon-image: + type: oci-image + description: OCI image for Horizon + upstream-source: ghcr.io/canonical/horizon:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + ingress-internal: + interface: ingress + limit: 1 + ingress-public: + interface: ingress + optional: true + limit: 1 + identity-credentials: + interface: keystone-credentials + limit: 1 + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + horizon: + interface: horizon + +peers: + peers: + interface: horizon-peer + parts: update-certificates: plugin: nil @@ -13,9 +244,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/horizon-k8s/config.yaml b/charms/horizon-k8s/config.yaml deleted file mode 100644 index 76445775..00000000 --- a/charms/horizon-k8s/config.yaml +++ /dev/null @@ -1,169 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - session-timeout: - type: int - default: 3600 - description: - A method to supersede the token timeout with a shorter dashboard session - timeout in seconds. For example, if your token expires in 60 minutes, a - value of 1800 will log users out after 30 minutes. - default-role: - type: string - default: "member" - description: | - Default role for Horizon operations that will be created in - Keystone upon introduction of an identity-service relation. - default-domain: - type: string - default: - description: | - Default domain when authenticating with Horizon. Disables the domain - field in the login page. - secret: - type: string - default: - description: | - Secret for Horizon to use when securing internal data; set this when - using multiple dashboard units. - dropdown-max-items: - type: int - default: 30 - description: | - Max dropdown items to show in dropdown controls. - NOTE: This setting is supported >= OpenStack Liberty. 
- profile: - type: string - default: - description: Default profile for the dashboard. Eg. cisco. - disable-instance-snapshot: - type: boolean - default: False - description: | - This setting disables Snapshots as a valid boot source for launching - instances. Snapshots sources won’t show up in the Launch Instance modal - dialogue box. This option works from the Newton release, and has no - effect on earlier OpenStack releases. - cinder-backup: - type: boolean - default: False - description: Enable cinder backup panel. - password-retrieve: - type: boolean - default: False - description: Enable "Retrieve password" instance action. - customization-module: - type: string - default: "" - description: | - This option provides a means to enable customisation modules to modify - existing dashboards and panels. - allow-password-autocompletion: - type: boolean - default: False - description: | - Setting this to True will allow password form autocompletion by browser. - default-create-volume: - type: boolean - default: True - description: | - The default value for the option of creating a new volume in the - workflow for image and instance snapshot sources when launching an - instance. This option has an effect only to Ocata or newer - releases. - hide-create-volume: - type: boolean - default: False - description: | - Hide the "Create New Volume" option and rely on the - default-create-volume value during instance creation. - image-formats: - type: string - default: "" - description: | - The image-formats setting can be used to alter the default list of - advertised image formats. Many installations cannot use all the formats - that Glance recognizes, restricting the list here prevents unwanted - formats from being listed in Horizon which can lead to confusion. - . - This setting takes a space separated list, for example: iso qcow2 raw - . - Supported formats are: aki, ami, ari, docker, iso, ova, qcow2, raw, vdi, - vhd, vmdk. - . - If not provided, leave the option unconfigured which enables all of the - above. - api-result-limit: - type: int - default: 1000 - description: | - The maximum number of objects (e.g. Swift objects or Glance images) to - display on a single page before providing a paging element (a "more" link) - to paginate results. - enable-fip-topology-check: - type: boolean - default: true - description: - By default Horizon checks that a project has a router attached to an - external network before allowing FIPs to be attached to a VM. Some use - cases will not meet this constraint, e.g. if the router is owned by a - different project. Setting this to False removes this check from Horizon. - disable-password-reveal: - type: boolean - default: false - description: | - If enabled, the reveal button for passwords is removed. - enforce-password-check: - type: boolean - default: True - description: | - If True, displays an ‘Admin Password’ field on the Change Password form - to verify that it is indeed the admin logged-in who wants to change the password. - site-name: - type: string - default: '' - description: | - An unique site name for OpenStack deployment to be passed via the - application-dashboard relation - site-branding: - type: string - default: - description: | - A brand name to be shown in the HTML title. The default value is - "OpenStack Dashboard", e.g. "Instance Overview - OpenStack Dashboard" - site-branding-link: - type: string - default: - description: | - A custom hyperlink when the logo in the dashboard is clicked, e.g. - https://mycloud.example.com/. 
The default value is - "horizon:user_home" to open the top level of the dashboard. - help-url: - type: string - default: - description: | - A custom hyperlink for the "Help" menu, e.g. - https://mycloud.example.com/help. The default value is - https://docs.openstack.org/ - create-instance-flavor-sort-key: - type: string - default: - description: | - This option can be used to customise the order instances are sorted in. - Support values include: id, name, ram, disk, and vcpus. - See https://docs.openstack.org/horizon/latest/configuration/settings.html#create-instance-flavor-sort - for more details. - create-instance-flavor-sort-reverse: - type: boolean - default: False - description: | - This option can be used to set the instance sorting to either ascending or descending. - Set True to sort in ascending order or False for descending order. - plugins: - type: string - default: '[]' - description: | - This option can be used to enable plugins for Horizon. The value should be a - JSON formatted list of plugin names. diff --git a/charms/horizon-k8s/metadata.yaml b/charms/horizon-k8s/metadata.yaml deleted file mode 100644 index 0ec41265..00000000 --- a/charms/horizon-k8s/metadata.yaml +++ /dev/null @@ -1,63 +0,0 @@ -name: horizon-k8s -summary: OpenStack Horizon service -maintainer: OpenStack Charmers -description: | - OpenStack Horizon provides an HTTP service for managing, selecting, - and claiming providers of classes of inventory representing available - resources in a cloud. - . -version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-horizon-k8s -issues: https://bugs.launchpad.net/charm-horizon-k8s - -containers: - horizon: - resource: horizon-image - -resources: - horizon-image: - type: oci-image - description: OCI image for Horizon - # ghcr.io/canonical/horizon:2024.1 - upstream-source: ghcr.io/canonical/horizon:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - ingress-internal: - interface: ingress - limit: 1 - ingress-public: - interface: ingress - optional: true - limit: 1 - identity-credentials: - interface: keystone-credentials - limit: 1 - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - horizon: - interface: horizon - -peers: - peers: - interface: horizon-peer diff --git a/charms/horizon-k8s/tests/actions.yaml b/charms/horizon-k8s/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/horizon-k8s/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/keystone-k8s/actions.yaml b/charms/keystone-k8s/actions.yaml deleted file mode 100644 index d73153a6..00000000 --- a/charms/keystone-k8s/actions.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -get-admin-password: - description: Get the password for the Keystone Admin user - -get-admin-account: - description: Get full access details for the Keystone Admin user - -get-service-account: - description: Create/get details for a new/existing service account. - params: - username: - type: string - description: The username for the service account. - required: - - username - additionalProperties: False - -regenerate-password: - description: | - Regenerate password for the given user. 
- params: - username: - type: string - description: The username for the account. - required: - - username - additionalProperties: False - -add-ca-certs: - description: | - Add CA certs for transfer - params: - name: - type: string - description: Name of CA certs bundle - ca: - type: string - description: Base64 encoded CA certificate - chain: - type: string - description: Base64 encoded CA Chain - required: - - name - - ca - additionalProperties: False -remove-ca-certs: - description: | - Remove CA certs - params: - name: - type: string - description: Name of CA certs bundle - required: - - name - additionalProperties: False -list-ca-certs: - description: | - List CA certs uploaded for transfer diff --git a/charms/keystone-k8s/charmcraft.yaml b/charms/keystone-k8s/charmcraft.yaml index d5efdc7b..edf1c0b2 100644 --- a/charms/keystone-k8s/charmcraft.yaml +++ b/charms/keystone-k8s/charmcraft.yaml @@ -1,11 +1,177 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: keystone-k8s +summary: OpenStack identity service +description: | + Keystone is an OpenStack project that provides Identity, Token, Catalog and + Policy services for use specifically by projects in the OpenStack family. It + implements OpenStack's Identity API. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-keystone-k8s + issues: + - https://bugs.launchpad.net/charm-keystone-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + log-level: + default: WARNING + type: string + description: Log level (WARNING, INFO, DEBUG, ERROR) + region: + default: RegionOne + description: Name of the OpenStack region + type: string + catalog-cache-expiration: + type: int + default: 60 + description: Amount of time (in seconds) the catalog should be cached for. + dogpile-cache-expiration: + type: int + default: 60 + description: | + Amount of time (in seconds) to cache items in the dogpile.cache. This only applies + to cached methods that do not have an explicitly defined cache expiration time. + identity-backend: + type: string + default: sql + description: | + Keystone identity backend, valid options are sql and pam + enable-telemetry-notifications: + type: boolean + default: false + description: Enable notifications to send to telemetry. + +actions: + get-admin-password: + description: Get the password for the Keystone Admin user + get-admin-account: + description: Get full access details for the Keystone Admin user + get-service-account: + description: Create/get details for a new/existing service account. + params: + username: + type: string + description: The username for the service account. + required: + - username + additionalProperties: false + regenerate-password: + description: | + Regenerate password for the given user. + params: + username: + type: string + description: The username for the account. 
+ required: + - username + additionalProperties: false + add-ca-certs: + description: | + Add CA certs for transfer + params: + name: + type: string + description: Name of CA certs bundle + ca: + type: string + description: Base64 encoded CA certificate + chain: + type: string + description: Base64 encoded CA Chain + required: + - name + - ca + additionalProperties: false + remove-ca-certs: + description: | + Remove CA certs + params: + name: + type: string + description: Name of CA certs bundle + required: + - name + additionalProperties: false + list-ca-certs: + description: | + List CA certs uploaded for transfer + +containers: + keystone: + resource: keystone-image + mounts: + - storage: fernet-keys + location: /etc/keystone/fernet-keys/ + - storage: credential-keys + location: /etc/keystone/credential-keys/ + +resources: + keystone-image: + type: oci-image + description: OCI image for OpenStack Keystone + upstream-source: ghcr.io/canonical/keystone:2024.1 + +storage: + fernet-keys: + type: filesystem + description: | + Persistent storage for the location of fernet keys + minimum-size: 5M + credential-keys: + type: filesystem + description: | + Persistent storage for the location of credential keys + minimum-size: 5M + +requires: + database: + interface: mysql_client + limit: 1 + ingress-internal: + interface: ingress + limit: 1 + optional: true + ingress-public: + interface: ingress + limit: 1 + amqp: + interface: rabbitmq + optional: true + domain-config: + interface: keystone-domain-config + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + identity-service: + interface: keystone + identity-credentials: + interface: keystone-credentials + identity-ops: + interface: keystone-resources + send-ca-cert: + interface: certificate_transfer + +peers: + peers: + interface: keystone-peer + parts: update-certificates: plugin: nil @@ -13,9 +179,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/keystone-k8s/config.yaml b/charms/keystone-k8s/config.yaml deleted file mode 100644 index d122b49f..00000000 --- a/charms/keystone-k8s/config.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. -# -options: - debug: - default: False - description: Enable debug logging. - type: boolean - log-level: - default: WARNING - type: string - description: Log level (WARNING, INFO, DEBUG, ERROR) - - region: - default: RegionOne - description: Name of the OpenStack region - type: string - - catalog-cache-expiration: - type: int - default: 60 - description: Amount of time (in seconds) the catalog should be cached for. - dogpile-cache-expiration: - type: int - default: 60 - description: | - Amount of time (in seconds) to cache items in the dogpile.cache. This only applies - to cached methods that do not have an explicitly defined cache expiration time. - - identity-backend: - type: string - default: "sql" - description: | - Keystone identity backend, valid options are sql and pam - - enable-telemetry-notifications: - type: boolean - default: False - description: Enable notifications to send to telemetry. 
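Editor's note: the keystone actions now declared in charmcraft.yaml keep the same parameter schema; a sketch of a params file for the add-ca-certs action, typically passed to juju run keystone-k8s/leader add-ca-certs as key=value arguments or a params file. The bundle name and certificate contents are placeholders; base64 encoding is required by the schema above:

# params.yaml, illustrative values only
name: custom-ca
ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t...    # base64-encoded CA certificate (placeholder)
chain: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t... # base64-encoded CA chain (optional, placeholder)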
diff --git a/charms/keystone-k8s/metadata.yaml b/charms/keystone-k8s/metadata.yaml deleted file mode 100644 index 28a59cb0..00000000 --- a/charms/keystone-k8s/metadata.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -name: keystone-k8s -summary: OpenStack identity service -maintainer: Openstack Charmers -description: | - Keystone is an OpenStack project that provides Identity, Token, Catalog and - Policy services for use specifically by projects in the OpenStack family. It - implements OpenStack's Identity API. -version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - identity - - misc -source: https://opendev.org/openstack/charm-keystone-k8s -issues: https://bugs.launchpad.net/charm-keystone-k8s - -provides: - identity-service: - interface: keystone - identity-credentials: - interface: keystone-credentials - identity-ops: - interface: keystone-resources - send-ca-cert: - interface: certificate_transfer - -requires: - database: - interface: mysql_client - limit: 1 - ingress-internal: - interface: ingress - limit: 1 - optional: true - ingress-public: - interface: ingress - limit: 1 - amqp: - interface: rabbitmq - optional: true - domain-config: - interface: keystone-domain-config - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: keystone-peer - -storage: - fernet-keys: - type: filesystem - description: | - Persistent storage for the location of fernet keys - minimum-size: 5M - - credential-keys: - type: filesystem - description: | - Persistent storage for the location of credential keys - minimum-size: 5M - -containers: - # The primary container that runs the keystone services - keystone: - resource: keystone-image - mounts: - - storage: fernet-keys - # The fernet keys used for generated tokens are stored here. With a - # mounted storage option, the fernet keys are persisted across - # container restarts. - location: /etc/keystone/fernet-keys/ - - - storage: credential-keys - # The credential keys used for generated credentials are stored here. - # With a mounted storage option, the credential keys are persisted - # across container restarts. 
- location: /etc/keystone/credential-keys/ - -resources: - keystone-image: - type: oci-image - description: OCI image for OpenStack Keystone - # ghcr.io/canonical/keystone:2024.1 - upstream-source: ghcr.io/canonical/keystone:2024.1 diff --git a/charms/keystone-k8s/tests/actions.yaml b/charms/keystone-k8s/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/keystone-k8s/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/keystone-k8s/tests/config.yaml b/charms/keystone-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/keystone-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/keystone-ldap-k8s/charmcraft.yaml b/charms/keystone-ldap-k8s/charmcraft.yaml index 12fc2d53..7ce3751b 100644 --- a/charms/keystone-ldap-k8s/charmcraft.yaml +++ b/charms/keystone-ldap-k8s/charmcraft.yaml @@ -1,11 +1,64 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +title: Keystone LDAP integration +name: keystone-ldap-k8s +summary: Keystone Domain backend for LDAP or Active Directory +description: | + Keystone support the use of domain specific identity drivers, + allowing different types of authentication backend to be deployed in a single Keystone + deployment. This charm supports use of LDAP or Active Directory domain backends, + with configuration details provided by charm configuration options. + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + domain-name: + type: string + default: null + description: | + Name of the keystone domain to configure; defaults to the deployed + application name. + ldap-config-flags: + type: string + default: null + description: | + The are ~50 LDAP configuration options supported by keystone. + Use a json like string with double quotes + and braces around all the options and single quotes around complex values. + "{user_tree_dn: 'DC=dc1,DC=ad,DC=example,DC=com', + user_allow_create: False, + user_allow_delete: False}" + See the README for more details. + tls-ca-ldap: + type: string + default: null + description: | + This option controls which certificate (or a chain) will be used to connect + to an ldap server(s) over TLS. Certificate contents should be either used + directly or included via include-file:// + An LDAP url should also be considered as ldaps and StartTLS are both valid + methods of using TLS (see RFC 4513) with StartTLS using a non-ldaps url which, + of course, still requires a CA certificate. + +requires: + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + domain-config: + interface: keystone-domain-config + +peers: + peers: + interface: keystone-dc-peer + parts: update-certificates: plugin: nil @@ -13,9 +66,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/keystone-ldap-k8s/config.yaml b/charms/keystone-ldap-k8s/config.yaml deleted file mode 100644 index 2d291337..00000000 --- a/charms/keystone-ldap-k8s/config.yaml +++ /dev/null @@ -1,28 +0,0 @@ -options: - domain-name: - type: string - default: - description: | - Name of the keystone domain to configure; defaults to the deployed - application name. 
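Editor's note: the ldap-config-flags format carried over into the unified file above is easiest to see with a concrete overlay; a sketch reusing the DN example from the option description, with an assumed domain name:

applications:
  keystone-ldap-k8s:
    options:
      domain-name: ad-users   # assumed name, defaults to the application name if unset
      ldap-config-flags: "{user_tree_dn: 'DC=dc1,DC=ad,DC=example,DC=com', user_allow_create: False, user_allow_delete: False}"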
- ldap-config-flags: - type: string - default: - description: | - The are ~50 LDAP configuration options supported by keystone. - Use a json like string with double quotes - and braces around all the options and single quotes around complex values. - "{user_tree_dn: 'DC=dc1,DC=ad,DC=example,DC=com', - user_allow_create: False, - user_allow_delete: False}" - See the README for more details. - tls-ca-ldap: - type: string - default: null - description: | - This option controls which certificate (or a chain) will be used to connect - to an ldap server(s) over TLS. Certificate contents should be either used - directly or included via include-file:// - An LDAP url should also be considered as ldaps and StartTLS are both valid - methods of using TLS (see RFC 4513) with StartTLS using a non-ldaps url which, - of course, still requires a CA certificate. diff --git a/charms/keystone-ldap-k8s/metadata.yaml b/charms/keystone-ldap-k8s/metadata.yaml deleted file mode 100644 index b131776f..00000000 --- a/charms/keystone-ldap-k8s/metadata.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: keystone-ldap-k8s -display-name: Keystone LDAP integration -summary: Keystone Domain backend for LDAP or Active Directory -description: | - Keystone support the use of domain specific identity drivers, - allowing different types of authentication backend to be deployed in a single Keystone - deployment. This charm supports use of LDAP or Active Directory domain backends, - with configuration details provided by charm configuration options. -peers: - peers: - interface: keystone-dc-peer -provides: - domain-config: - interface: keystone-domain-config -requires: - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 diff --git a/charms/keystone-ldap-k8s/tests/config.yaml b/charms/keystone-ldap-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/keystone-ldap-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/magnum-k8s/actions.yaml b/charms/magnum-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/magnum-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/magnum-k8s/charmcraft.yaml b/charms/magnum-k8s/charmcraft.yaml index d284ee46..2f7bb059 100644 --- a/charms/magnum-k8s/charmcraft.yaml +++ b/charms/magnum-k8s/charmcraft.yaml @@ -1,11 +1,86 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: magnum-k8s +summary: OpenStack magnum service +description: | + Magnum is an OpenStack project which offers container orchestration engines + for deploying and managing containers as first class resources in OpenStack. +assumes: + - k8s-api + - juju >= 3.1 + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + cluster-user-trust: + type: boolean + default: false + description: | + Controls whether to assign a trust to the cluster user or not. You will + need to set it to True for clusters with volume_driver=cinder or + registry_enabled=true in the underlying cluster template to work. This is + a potential security risk since the trust gives instances OpenStack API + access to the cluster's project. Note that this setting does not affect + per-cluster trusts assigned to the Magnum service user. + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + magnum-api: + resource: magnum-api-image + magnum-conductor: + resource: magnum-conductor-image + +resources: + magnum-api-image: + type: oci-image + description: OCI image for OpenStack magnum + upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1 + magnum-conductor-image: + type: oci-image + description: OCI image for OpenStack magnum + upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + identity-ops: + interface: keystone-resources + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + amqp: + interface: rabbitmq + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: magnum-peer + parts: charm: build-packages: diff --git a/charms/magnum-k8s/config.yaml b/charms/magnum-k8s/config.yaml deleted file mode 100644 index d45fe862..00000000 --- a/charms/magnum-k8s/config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -options: - cluster-user-trust: - type: boolean - default: False - description: | - Controls whether to assign a trust to the cluster user or not. You will - need to set it to True for clusters with volume_driver=cinder or - registry_enabled=true in the underlying cluster template to work. This is - a potential security risk since the trust gives instances OpenStack API - access to the cluster's project. Note that this setting does not affect - per-cluster trusts assigned to the Magnum service user. - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/magnum-k8s/metadata.yaml b/charms/magnum-k8s/metadata.yaml deleted file mode 100644 index a72d3efa..00000000 --- a/charms/magnum-k8s/metadata.yaml +++ /dev/null @@ -1,67 +0,0 @@ -name: magnum-k8s -summary: OpenStack magnum service -maintainer: OpenStack Charmers -description: | - Magnum is an OpenStack project which offers container orchestration engines - for deploying and managing containers as first class resources in OpenStack. 
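Editor's note: as the cluster-user-trust description above spells out, the trust must be enabled for cluster templates using volume_driver=cinder or registry_enabled=true; a hedged overlay sketch with illustrative values:

applications:
  magnum-k8s:
    options:
      cluster-user-trust: true   # required for volume_driver=cinder per the description above
      region: RegionOne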
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack - - container-orchestration - - misc - -containers: - magnum-api: - resource: magnum-api-image - magnum-conductor: - resource: magnum-conductor-image - -resources: - magnum-api-image: - type: oci-image - description: OCI image for OpenStack magnum - # ghcr.io/canonical/magnum-consolidated:2024.1 - upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1 - magnum-conductor-image: - type: oci-image - description: OCI image for OpenStack magnum - # ghcr.io/canonical/magnum-consolidated:2024.1 - upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - identity-ops: - interface: keystone-resources - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - amqp: - interface: rabbitmq - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: magnum-peer diff --git a/charms/magnum-k8s/tests/config.yaml b/charms/magnum-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/magnum-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/masakari-k8s/charmcraft.yaml b/charms/masakari-k8s/charmcraft.yaml index bc06ba08..a04e497c 100644 --- a/charms/masakari-k8s/charmcraft.yaml +++ b/charms/masakari-k8s/charmcraft.yaml @@ -1,34 +1,6 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" -parts: - update-certificates: - plugin: nil - override-build: | - apt update - apt install -y ca-certificates - update-ca-certificates - charm: - after: [update-certificates] - build-packages: - - git - - libffi-dev - - libssl-dev - - rustc - - cargo - - pkg-config - charm-binary-python-packages: - - cryptography - - jsonschema - - pydantic - - jinja2 -name: masakari-k8s +type: charm title: OpenStack masakari service +name: masakari-k8s summary: Masakari - Instances High Availability Service description: | Masakari provides Instances High Availability Service for OpenStack clouds by @@ -36,25 +8,52 @@ description: | KVM-based Virtual Machine(VM)s from failure events such as VM process down, provisioning process down, and nova-compute host failure. Masakari also provides an API service to manage and control the automated rescue mechanism. -links: - source: https://opendev.org/openstack/sunbeam-charms - issues: https://bugs.launchpad.net/sunbeam-charms assumes: - k8s-api - juju >= 3.5 +links: + source: https://opendev.org/openstack/sunbeam-charms + issues: https://bugs.launchpad.net/sunbeam-charms + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + evacuation-delay: + type: int + default: 60 + description: | + Number of seconds to wait before evacuation after a service is + enabled or disabled. 
+ evacuate-all-instances: + type: boolean + default: true + description: | + Whether to restrict instance evacuation to instances with ha enabled + in their metadata + region: + type: string + default: RegionOne + description: Name of the OpenStack region + containers: masakari-api: resource: masakari-image masakari-engine: resource: masakari-image - # Note(mylesjp): disabled until implemented - # host-monitor: - # resource: masakari-image + resources: masakari-image: description: OCI image for OpenStack Masakari services type: oci-image upstream-source: ghcr.io/canonical/masakari-consolidated:2024.1 + requires: amqp: interface: rabbitmq @@ -84,41 +83,42 @@ requires: interface: tracing limit: 1 optional: true - # Note(mylesjp): consul disabled until charm is published - # consul-management: - # interface: consul-client - # limit: 1 - # consul-tenant: # Name TBD - # interface: consul-client - # limit: 1 - # optional: true - # consul-storage: - # interface: consul-client - # limit: 1 - # optional: true +# Note(mylesjp): consul disabled until charm is published +# consul-management: +# interface: consul-client +# limit: 1 +# consul-tenant: # Name TBD +# interface: consul-client +# limit: 1 +# optional: true +# consul-storage: +# interface: consul-client +# limit: 1 +# optional: true + peers: peers: interface: masakari-peer -config: - options: - debug: - default: False - description: Enable debug logging. - type: boolean - evacuation-delay: - type: int - default: 60 - description: | - Number of seconds to wait before evacuation after a service is - enabled or disabled. - evacuate-all-instances: - type: boolean - default: True - description: | - Whether to restrict instance evacuation to instances with ha enabled - in their metadata - region: - type: string - default: RegionOne - description: Name of the OpenStack region -actions: {} + +parts: + update-certificates: + plugin: nil + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + charm: + after: + - update-certificates + build-packages: + - git + - libffi-dev + - libssl-dev + - rustc + - cargo + - pkg-config + charm-binary-python-packages: + - cryptography + - jsonschema + - pydantic + - jinja2 diff --git a/charms/masakari-k8s/tests/unit/test_charm.py b/charms/masakari-k8s/tests/unit/test_charm.py index efd17ee7..ac35bf3a 100644 --- a/charms/masakari-k8s/tests/unit/test_charm.py +++ b/charms/masakari-k8s/tests/unit/test_charm.py @@ -16,17 +16,8 @@ """Tests for masakari-k8s charm.""" -from pathlib import ( - Path, -) - import charm import ops_sunbeam.test_utils as test_utils -import yaml - -charmcraft = (Path(__file__).parents[2] / "charmcraft.yaml").read_text() -config = yaml.dump(yaml.safe_load(charmcraft)["config"]) -actions = yaml.dump(yaml.safe_load(charmcraft)["actions"]) class _MasakariOperatorCharm(charm.MasakariOperatorCharm): @@ -57,9 +48,6 @@ class TestMasakariOperatorCharm(test_utils.CharmTestCase): self.harness = test_utils.get_harness( _MasakariOperatorCharm, container_calls=self.container_calls, - charm_metadata=charmcraft, - charm_config=config, - charm_actions=actions, ) from charms.data_platform_libs.v0.data_interfaces import ( diff --git a/charms/neutron-k8s/actions.yaml b/charms/neutron-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/neutron-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! 
-{ } diff --git a/charms/neutron-k8s/charmcraft.yaml b/charms/neutron-k8s/charmcraft.yaml index a699281d..090d13f1 100644 --- a/charms/neutron-k8s/charmcraft.yaml +++ b/charms/neutron-k8s/charmcraft.yaml @@ -1,11 +1,167 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: neutron-k8s +summary: OpenStack Networking API service +description: | + Neutron is a virtual network service for OpenStack, and a part of + Netstack. Just like OpenStack Nova provides an API to dynamically + request and configure virtual servers, Neutron provides an API to + dynamically request and configure virtual networks. These networks + connect "interfaces" from other OpenStack services (e.g., virtual NICs + from Nova VMs). The Neutron API supports extensions to provide + advanced network capabilities (e.g., QoS, ACLs, network monitoring, + etc.) + . + This charm provides the OpenStack Neutron API service. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-neutron-k8s + issues: + - https://bugs.launchpad.net/charm-neutron-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + dns-domain: + default: cloud.sunbeam.internal. + description: | + Specifies the dns domain name that should be used for building instance + hostnames. The value of 'openstacklocal' will cause + the dhcp agents to broadcast the default domain of openstacklocal and + will not enable internal cloud dns resolution. This value should end + with a '.', e.g. 'cloud.example.org.'. + type: string + region: + default: RegionOne + description: Name of the OpenStack region + type: string + vlan-ranges: + default: physnet1:1:4094 + description: | + Space-delimited list of :: or + specifying physical_network names usable for VLAN + provider and tenant networks, as well as ranges of VLAN tags on each + available for allocation to tenant networks. + type: string + enable-igmp-snooping: + default: false + description: Enable IGMP snooping for integration bridge. + type: boolean + global-physnet-mtu: + type: int + default: 1500 + description: | + MTU of the underlying physical network. Neutron uses this value to + calculate MTU for all virtual network components. For flat and + VLAN networks, neutron uses this value without modification. For + overlay networks such as Geneve, neutron automatically subtracts + the overlay protocol overhead from this value. + path-mtu: + type: int + default: 1500 + description: | + Maximum size of an IP packet (MTU) that can traverse the + underlying physical network infrastructure without fragmentation + when using an overlay/tunnel protocol. This option allows + specifying a physical network MTU value that differs from the + default global-physnet-mtu value. + physical-network-mtus: + type: string + default: null + description: | + Space-delimited list of : pairs specifying MTU for + individual physical networks. + . + Use this if a subset of your flat or VLAN provider networks have a MTU + that differ with what is set in global-physnet-mtu. + reverse-dns-lookup: + default: false + description: | + A boolean value specifying whether to enable or not the creation of + reverse lookup (PTR) records. + . + NOTE: Use only when integrating neutron-k8s charm to designate charm. 
+ type: boolean + ipv4-ptr-zone-prefix-size: + default: 24 + description: | + The size in bits of the prefix for the IPv4 reverse lookup (PTR) zones. + Valid size has to be multiple of 8, with maximum value of 24 and minimum + value of 8. + . + NOTE: Use only when "reverse-dns-lookup" option is set to "True". + type: int + ipv6-ptr-zone-prefix-size: + default: 64 + description: | + The size in bits of the prefix for the IPv6 reverse lookup (PTR) zones. + Valid size has to be multiple of 4, with maximum value of 124 and minimum + value of 4. + . + NOTE: Use only when "reverse-dns-lookup" option is set to "True". + type: int + +containers: + neutron-server: + resource: neutron-server-image + +resources: + neutron-server-image: + type: oci-image + description: OCI image for OpenStack Neutron API + upstream-source: ghcr.io/canonical/neutron-server:2024.1 + +requires: + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + database: + interface: mysql_client + limit: 1 + amqp: + interface: rabbitmq + identity-service: + interface: keystone + ovsdb-cms: + interface: ovsdb-cms + certificates: + interface: tls-certificates + optional: true + receive-ca-cert: + interface: certificate_transfer + optional: true + external-dns: + interface: designate + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + neutron-api: + interface: neutron-api + +peers: + peers: + interface: neutron-peer + parts: update-certificates: plugin: nil @@ -13,9 +169,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/neutron-k8s/config.yaml b/charms/neutron-k8s/config.yaml deleted file mode 100644 index e9b05d70..00000000 --- a/charms/neutron-k8s/config.yaml +++ /dev/null @@ -1,83 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - dns-domain: - default: cloud.sunbeam.internal. - description: | - Specifies the dns domain name that should be used for building instance - hostnames. The value of 'openstacklocal' will cause - the dhcp agents to broadcast the default domain of openstacklocal and - will not enable internal cloud dns resolution. This value should end - with a '.', e.g. 'cloud.example.org.'. - type: string - region: - default: RegionOne - description: Name of the OpenStack region - type: string - vlan-ranges: - default: "physnet1:1:4094" - description: | - Space-delimited list of :: or - specifying physical_network names usable for VLAN - provider and tenant networks, as well as ranges of VLAN tags on each - available for allocation to tenant networks. - type: string - enable-igmp-snooping: - default: False - description: Enable IGMP snooping for integration bridge. - type: boolean - global-physnet-mtu: - type: int - default: 1500 - description: | - MTU of the underlying physical network. Neutron uses this value to - calculate MTU for all virtual network components. For flat and - VLAN networks, neutron uses this value without modification. For - overlay networks such as Geneve, neutron automatically subtracts - the overlay protocol overhead from this value. 
- path-mtu: - type: int - default: 1500 - description: | - Maximum size of an IP packet (MTU) that can traverse the - underlying physical network infrastructure without fragmentation - when using an overlay/tunnel protocol. This option allows - specifying a physical network MTU value that differs from the - default global-physnet-mtu value. - physical-network-mtus: - type: string - default: - description: | - Space-delimited list of : pairs specifying MTU for - individual physical networks. - . - Use this if a subset of your flat or VLAN provider networks have a MTU - that differ with what is set in global-physnet-mtu. - reverse-dns-lookup: - default: False - description: | - A boolean value specifying whether to enable or not the creation of - reverse lookup (PTR) records. - . - NOTE: Use only when integrating neutron-k8s charm to designate charm. - type: boolean - ipv4-ptr-zone-prefix-size: - default: 24 - description: | - The size in bits of the prefix for the IPv4 reverse lookup (PTR) zones. - Valid size has to be multiple of 8, with maximum value of 24 and minimum - value of 8. - . - NOTE: Use only when "reverse-dns-lookup" option is set to "True". - type: int - ipv6-ptr-zone-prefix-size: - default: 64 - description: | - The size in bits of the prefix for the IPv6 reverse lookup (PTR) zones. - Valid size has to be multiple of 4, with maximum value of 124 and minimum - value of 4. - . - NOTE: Use only when "reverse-dns-lookup" option is set to "True". - type: int diff --git a/charms/neutron-k8s/metadata.yaml b/charms/neutron-k8s/metadata.yaml deleted file mode 100644 index 090077fa..00000000 --- a/charms/neutron-k8s/metadata.yaml +++ /dev/null @@ -1,76 +0,0 @@ -name: neutron-k8s -summary: OpenStack Networking API service -maintainer: OpenStack Charmers -description: | - Neutron is a virtual network service for OpenStack, and a part of - Netstack. Just like OpenStack Nova provides an API to dynamically - request and configure virtual servers, Neutron provides an API to - dynamically request and configure virtual networks. These networks - connect "interfaces" from other OpenStack services (e.g., virtual NICs - from Nova VMs). The Neutron API supports extensions to provide - advanced network capabilities (e.g., QoS, ACLs, network monitoring, - etc.) - . - This charm provides the OpenStack Neutron API service. 
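Editor's note: the MTU and VLAN options retained in the neutron-k8s charmcraft.yaml above use space-delimited formats; a sketch with assumed physnet names and jumbo-frame values:

applications:
  neutron-k8s:
    options:
      vlan-ranges: "physnet1:1000:2000 physnet2"   # <physnet>:<min>:<max>, or bare <physnet>
      global-physnet-mtu: 9000                     # assumed jumbo-frame underlay MTU
      physical-network-mtus: "physnet1:9000"       # per-physnet override, <physnet>:<mtu> pairs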
-tags: - - openstack -source: https://opendev.org/openstack/charm-neutron-k8s -issues: https://bugs.launchpad.net/charm-neutron-k8s -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -containers: - neutron-server: - resource: neutron-server-image - -resources: - neutron-server-image: - type: oci-image - description: OCI image for OpenStack Neutron API - # ghcr.io/canonical/neutron-server:2024.1 - upstream-source: ghcr.io/canonical/neutron-server:2024.1 - -provides: - neutron-api: - interface: neutron-api - -requires: - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - database: - interface: mysql_client - limit: 1 - amqp: - interface: rabbitmq - identity-service: - interface: keystone - ovsdb-cms: - interface: ovsdb-cms - certificates: - interface: tls-certificates - optional: true - receive-ca-cert: - interface: certificate_transfer - optional: true - external-dns: - interface: designate - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: neutron-peer diff --git a/charms/neutron-k8s/tests/config.yaml b/charms/neutron-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/neutron-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/nova-k8s/actions.yaml b/charms/nova-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/nova-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/nova-k8s/charmcraft.yaml b/charms/nova-k8s/charmcraft.yaml index d5efdc7b..fbf18f28 100644 --- a/charms/nova-k8s/charmcraft.yaml +++ b/charms/nova-k8s/charmcraft.yaml @@ -1,11 +1,129 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: nova-k8s +summary: OpenStack Compute - Nova cloud controller service +description: | + OpenStack is a reliable cloud infrastructure. Its mission is to produce + the ubiquitous cloud computing platform that will meet the needs of public + and private cloud providers regardless of size, by being simple to implement + and massively scalable. + . + OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In + addition to its "native" API (the OpenStack API), it also supports the Amazon + EC2 API. + . + This charm provides the cloud controller service for OpenStack Nova and includes + nova-scheduler, nova-api and nova-conductor services. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-nova-k8s + issues: + - https://bugs.launchpad.net/charm-nova-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + nova-api: + resource: nova-api-image + nova-scheduler: + resource: nova-scheduler-image + nova-conductor: + resource: nova-conductor-image + nova-spiceproxy: + resource: nova-spiceproxy-image + +resources: + nova-api-image: + type: oci-image + description: OCI image for OpenStack Nova API + upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 + nova-scheduler-image: + type: oci-image + description: OCI image for OpenStack Nova Scheduler + upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 + nova-conductor-image: + type: oci-image + description: OCI image for OpenStack Nova Conductor + upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 + nova-spiceproxy-image: + type: oci-image + description: OCI image for OpenStack Nova Spice proxy + upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 + +requires: + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + traefik-route-internal: + interface: traefik_route + optional: true + limit: 1 + traefik-route-public: + interface: traefik_route + limit: 1 + database: + interface: mysql_client + limit: 1 + api-database: + interface: mysql_client + limit: 1 + cell-database: + interface: mysql_client + limit: 1 + amqp: + interface: rabbitmq + image-service: + interface: glance + identity-service: + interface: keystone + cloud-compute: + interface: nova-compute + cinder-volume-service: + interface: cinder + neutron-network-service: + interface: neutron + neutron-api: + interface: neutron-api + placement: + interface: placement + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + nova-service: + interface: nova + +peers: + peers: + interface: nova-peer + parts: update-certificates: plugin: nil @@ -13,9 +131,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/nova-k8s/config.yaml b/charms/nova-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/nova-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/nova-k8s/metadata.yaml b/charms/nova-k8s/metadata.yaml deleted file mode 100644 index 4058fc5b..00000000 --- a/charms/nova-k8s/metadata.yaml +++ /dev/null @@ -1,113 +0,0 @@ -name: nova-k8s -summary: OpenStack Compute - Nova cloud controller service -maintainer: OpenStack Charmers -description: | - OpenStack is a reliable cloud infrastructure. Its mission is to produce - the ubiquitous cloud computing platform that will meet the needs of public - and private cloud providers regardless of size, by being simple to implement - and massively scalable. - . - OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In - addition to its "native" API (the OpenStack API), it also supports the Amazon - EC2 API. - . - This charm provides the cloud controller service for OpenStack Nova and includes - nova-scheduler, nova-api and nova-conductor services. 
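Editor's note: nova-k8s keeps three separate mysql_client endpoints (database, api-database, cell-database) in the unified file above; a bundle relations sketch, where the counterpart application and its endpoint name are assumptions, not taken from this patch:

relations:
- ["nova-k8s:database", "mysql:database"]        # main database endpoint
- ["nova-k8s:api-database", "mysql:database"]    # API database endpoint
- ["nova-k8s:cell-database", "mysql:database"]   # cell database endpoint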
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: - - openstack -source: https://opendev.org/openstack/charm-nova-k8s -issues: https://bugs.launchpad.net/charm-nova-k8s - -containers: - nova-api: - resource: nova-api-image - nova-scheduler: - resource: nova-scheduler-image - nova-conductor: - resource: nova-conductor-image - nova-spiceproxy: - resource: nova-spiceproxy-image - -resources: - nova-api-image: - type: oci-image - description: OCI image for OpenStack Nova API - upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 - nova-scheduler-image: - type: oci-image - description: OCI image for OpenStack Nova Scheduler - upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 - nova-conductor-image: - type: oci-image - description: OCI image for OpenStack Nova Conductor - upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 - nova-spiceproxy-image: - type: oci-image - description: OCI image for OpenStack Nova Spice proxy - upstream-source: ghcr.io/canonical/nova-consolidated:2024.1 - -requires: - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - traefik-route-internal: - interface: traefik_route - optional: true - limit: 1 - traefik-route-public: - interface: traefik_route - limit: 1 - database: - interface: mysql_client - limit: 1 - api-database: - interface: mysql_client - limit: 1 - cell-database: - interface: mysql_client - limit: 1 - amqp: - interface: rabbitmq - image-service: - interface: glance - identity-service: - interface: keystone - cloud-compute: - interface: nova-compute - cinder-volume-service: - interface: cinder - neutron-network-service: - interface: neutron - neutron-api: - interface: neutron-api - placement: - interface: placement - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - nova-service: - interface: nova - -peers: - peers: - interface: nova-peer diff --git a/charms/nova-k8s/tests/config.yaml b/charms/nova-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/nova-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/octavia-k8s/actions.yaml b/charms/octavia-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/octavia-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/octavia-k8s/charmcraft.yaml b/charms/octavia-k8s/charmcraft.yaml index f6459be6..2d490ea0 100644 --- a/charms/octavia-k8s/charmcraft.yaml +++ b/charms/octavia-k8s/charmcraft.yaml @@ -1,11 +1,105 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: octavia-k8s +summary: OpenStack Octavia service +description: | + OpenStack Octavia provides loadbalancing service for an OpenStack cloud. + Currently OVN Octavia provider driver is supported. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-octavia-k8s + issues: + - https://bugs.launchpad.net/charm-octavia-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. 
+ type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + octavia-api: + resource: octavia-api-image + mounts: + - storage: agent-sockets + location: /var/run/octavia/ + octavia-driver-agent: + resource: octavia-driver-agent-image + mounts: + - storage: agent-sockets + location: /var/run/octavia/ + octavia-housekeeping: + resource: octavia-housekeeping-image + +resources: + octavia-api-image: + type: oci-image + description: OCI image for OpenStack octavia + upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 + octavia-driver-agent-image: + type: oci-image + description: OCI image for OpenStack Octavia Driver Agent + upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 + octavia-housekeeping-image: + type: oci-image + description: OCI image for OpenStack Octavia Housekeeping + upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 + +storage: + agent-sockets: + type: filesystem + description: | + Storage for the location of agent sockets shared between octavia-api + and octavia-driver-agent + minimum-size: 100M + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + ovsdb-cms: + interface: ovsdb-cms + certificates: + interface: tls-certificates + optional: true + identity-ops: + interface: keystone-resources + optional: true + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: octavia-peer + parts: update-certificates: plugin: nil @@ -13,9 +107,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/octavia-k8s/config.yaml b/charms/octavia-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/octavia-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/octavia-k8s/metadata.yaml b/charms/octavia-k8s/metadata.yaml deleted file mode 100644 index 90efbcd1..00000000 --- a/charms/octavia-k8s/metadata.yaml +++ /dev/null @@ -1,92 +0,0 @@ -name: octavia-k8s -summary: OpenStack Octavia service -maintainer: OpenStack Charmers -description: | - OpenStack Octavia provides loadbalancing service for an OpenStack cloud. - Currently OVN Octavia provider driver is supported. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-octavia-k8s -issues: https://bugs.launchpad.net/charm-octavia-k8s - -storage: - agent-sockets: - type: filesystem - description: | - Storage for the location of agent sockets shared between octavia-api - and octavia-driver-agent - minimum-size: 100M - -containers: - octavia-api: - resource: octavia-api-image - mounts: - - storage: agent-sockets - location: /var/run/octavia/ - octavia-driver-agent: - resource: octavia-driver-agent-image - mounts: - - storage: agent-sockets - location: /var/run/octavia/ - octavia-housekeeping: - resource: octavia-housekeeping-image - -resources: - octavia-api-image: - type: oci-image - description: OCI image for OpenStack octavia - # ghcr.io/canonical/octavia-api:2024.1 - upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 - octavia-driver-agent-image: - type: oci-image - description: OCI image for OpenStack Octavia Driver Agent - # ghcr.io/canonical/octavia-driver-agent:2024.1 - upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 - octavia-housekeeping-image: - type: oci-image - description: OCI image for OpenStack Octavia Housekeeping - # ghcr.io/canonical/octavia-housekeeping:2024.1 - upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - ingress-internal: - interface: ingress - optional: true - limit: 1 - ingress-public: - interface: ingress - limit: 1 - ovsdb-cms: - interface: ovsdb-cms - certificates: - interface: tls-certificates - optional: true - identity-ops: - interface: keystone-resources - optional: true - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -peers: - peers: - interface: octavia-peer diff --git a/charms/octavia-k8s/tests/config.yaml b/charms/octavia-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/octavia-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/openstack-exporter-k8s/actions.yaml b/charms/openstack-exporter-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/openstack-exporter-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/openstack-exporter-k8s/charmcraft.yaml b/charms/openstack-exporter-k8s/charmcraft.yaml index f6459be6..e05efef2 100644 --- a/charms/openstack-exporter-k8s/charmcraft.yaml +++ b/charms/openstack-exporter-k8s/charmcraft.yaml @@ -1,11 +1,62 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: openstack-exporter-k8s +summary: OpenStack openstack-exporter service +description: | + OpenStack openstack-exporter provides endpoint metrics for OpenStack services. 
+assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-openstack-exporter-k8s + issues: + - https://bugs.launchpad.net/charm-openstack-exporter-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + openstack-exporter: + resource: openstack-exporter-image + +resources: + openstack-exporter-image: + type: oci-image + description: OCI image for OpenStack openstack-exporter + upstream-source: ghcr.io/canonical/openstack-exporter:1.7.0 + +requires: + identity-ops: + interface: keystone-resources + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + metrics-endpoint: + interface: prometheus_scrape + grafana-dashboard: + interface: grafana_dashboard + +peers: + peers: + interface: openstack-exporter-peer + parts: update-certificates: plugin: nil @@ -13,9 +64,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/openstack-exporter-k8s/config.yaml b/charms/openstack-exporter-k8s/config.yaml deleted file mode 100644 index 6dd367d0..00000000 --- a/charms/openstack-exporter-k8s/config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -options: - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/openstack-exporter-k8s/metadata.yaml b/charms/openstack-exporter-k8s/metadata.yaml deleted file mode 100644 index 9218515f..00000000 --- a/charms/openstack-exporter-k8s/metadata.yaml +++ /dev/null @@ -1,50 +0,0 @@ -name: openstack-exporter-k8s -summary: OpenStack openstack-exporter service -maintainer: OpenStack Charmers -description: | - OpenStack openstack-exporter provides endpoint metrics for OpenStack services. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-openstack-exporter-k8s -issues: https://bugs.launchpad.net/charm-openstack-exporter-k8s - -containers: - openstack-exporter: - resource: openstack-exporter-image - -resources: - openstack-exporter-image: - type: oci-image - description: OCI image for OpenStack openstack-exporter - upstream-source: ghcr.io/canonical/openstack-exporter:1.7.0 - -requires: - identity-ops: - interface: keystone-resources - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - metrics-endpoint: - interface: prometheus_scrape - grafana-dashboard: - interface: grafana_dashboard - -peers: - peers: - interface: openstack-exporter-peer diff --git a/charms/openstack-exporter-k8s/tests/config.yaml b/charms/openstack-exporter-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/openstack-exporter-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/openstack-hypervisor/actions.yaml b/charms/openstack-hypervisor/actions.yaml deleted file mode 100644 index 98ea1af0..00000000 --- a/charms/openstack-hypervisor/actions.yaml +++ /dev/null @@ -1,15 +0,0 @@ -set-hypervisor-local-settings: - description: | - Apply settings specific to this hypervisor unit - params: - external-nic: - type: string - description: NIC that hypervisor will configure for North/South traffic - spice-proxy-address: - type: string - description: IP address to use for configuration of SPICE consoles in instances. - ip-address: - type: string - description: IP address to use for service configuration - additionalProperties: false - diff --git a/charms/openstack-hypervisor/charmcraft.yaml b/charms/openstack-hypervisor/charmcraft.yaml index bb52d4e0..c9aea95a 100644 --- a/charms/openstack-hypervisor/charmcraft.yaml +++ b/charms/openstack-hypervisor/charmcraft.yaml @@ -1,14 +1,97 @@ -# This file configures Charmcraft. -# See https://juju.is/docs/sdk/charmcraft-config for guidance. - type: charm -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" +title: OpenStack Hypervisor +name: openstack-hypervisor +summary: Deploy the OpenStack hypervisor +description: | + Configure machine to run VMs as part of an OpenStack cloud. +assumes: + - juju >= 3.1 + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + snap-channel: + default: 2024.1/edge + type: string + debug: + default: false + type: boolean + resume-on-boot: + default: true + description: | + Whether to resume the guest VMs when the host boots. + type: boolean + dns-servers: + default: 8.8.8.8 + type: string + external-bridge: + default: br-ex + type: string + external-bridge-address: + default: 10.20.20.1/24 + type: string + physnet-name: + default: physnet1 + type: string + use-migration-binding: + default: false + type: boolean + use-data-binding: + default: false + type: boolean + +actions: + set-hypervisor-local-settings: + description: | + Apply settings specific to this hypervisor unit + params: + external-nic: + type: string + description: NIC that hypervisor will configure for North/South traffic + spice-proxy-address: + type: string + description: IP address to use for configuration of SPICE consoles in instances. 
+ ip-address: + type: string + description: IP address to use for service configuration + additionalProperties: false + +requires: + amqp: + interface: rabbitmq + identity-credentials: + interface: keystone-credentials + ovsdb-cms: + interface: ovsdb-cms + certificates: + interface: tls-certificates + optional: true + ceilometer-service: + interface: ceilometer + optional: true + ceph-access: + interface: cinder-ceph-key + optional: true + receive-ca-cert: + interface: certificate_transfer + optional: true + nova-service: + interface: nova + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + cos-agent: + interface: cos_agent + +extra-bindings: + migration: null + data: null parts: charm: diff --git a/charms/openstack-hypervisor/config.yaml b/charms/openstack-hypervisor/config.yaml deleted file mode 100644 index e54b5581..00000000 --- a/charms/openstack-hypervisor/config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -options: - snap-channel: - default: "2024.1/edge" - type: string - debug: - default: False - type: boolean - resume-on-boot: - default: True - description: | - Whether to resume the guest VMs when the host boots. - type: boolean - dns-servers: - default: "8.8.8.8" - type: string - external-bridge: - default: "br-ex" - type: string - external-bridge-address: - default: "10.20.20.1/24" - type: string - physnet-name: - default: "physnet1" - type: string - use-migration-binding: - default: False - type: boolean - use-data-binding: - default: False - type: boolean diff --git a/charms/openstack-hypervisor/metadata.yaml b/charms/openstack-hypervisor/metadata.yaml deleted file mode 100644 index a7c27098..00000000 --- a/charms/openstack-hypervisor/metadata.yaml +++ /dev/null @@ -1,48 +0,0 @@ -name: openstack-hypervisor - -display-name: OpenStack Hypervisor - -summary: Deploy the OpenStack hypervisor - -description: | - Configure machine to run VMs as part of an OpenStack cloud. - -requires: - amqp: - interface: rabbitmq - identity-credentials: - interface: keystone-credentials - ovsdb-cms: - interface: ovsdb-cms - certificates: - interface: tls-certificates - optional: true - ceilometer-service: - interface: ceilometer - optional: true - ceph-access: - interface: cinder-ceph-key - optional: true - receive-ca-cert: - interface: certificate_transfer - optional: true - nova-service: - interface: nova - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - cos-agent: - interface: cos_agent - -extra-bindings: - migration: - data: # this binding will be used to support project networking between hypervisors - -# This charm has no peer relation by design. This charm needs to scale to -# hundreds of units and this is limited by the peer relation. 
- -assumes: - - juju >= 3.1 diff --git a/charms/openstack-hypervisor/tests/actions.yaml b/charms/openstack-hypervisor/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/openstack-hypervisor/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/openstack-hypervisor/tests/unit/config.yaml b/charms/openstack-hypervisor/tests/unit/config.yaml deleted file mode 120000 index 82800a03..00000000 --- a/charms/openstack-hypervisor/tests/unit/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../../config.yaml \ No newline at end of file diff --git a/charms/openstack-hypervisor/tests/unit/test_charm.py b/charms/openstack-hypervisor/tests/unit/test_charm.py index 3c6f71da..b5b83572 100644 --- a/charms/openstack-hypervisor/tests/unit/test_charm.py +++ b/charms/openstack-hypervisor/tests/unit/test_charm.py @@ -40,12 +40,9 @@ class TestCharm(test_utils.CharmTestCase): def setUp(self): """Setup OpenStack Hypervisor tests.""" super().setUp(charm, self.PATCHES) - with open("config.yaml", "r") as f: - config_data = f.read() self.harness = test_utils.get_harness( _HypervisorOperatorCharm, container_calls=self.container_calls, - charm_config=config_data, ) self.addCleanup(self.harness.cleanup) diff --git a/charms/openstack-images-sync-k8s/charmcraft.yaml b/charms/openstack-images-sync-k8s/charmcraft.yaml index e95d41c9..2dde2e58 100644 --- a/charms/openstack-images-sync-k8s/charmcraft.yaml +++ b/charms/openstack-images-sync-k8s/charmcraft.yaml @@ -1,40 +1,18 @@ -name: openstack-images-sync-k8s - type: charm - title: OpenStack Images Sync K8S - +name: openstack-images-sync-k8s summary: Keep OpenStack images in sync with the latest versions - description: | Openstack Images Sync operator allows synchronization from a SimpleStreams source to an OpenStack cloud. -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" - -parts: - charm: - build-packages: - - git - - libffi-dev - - libssl-dev - - pkg-config - - rustc - - cargo - charm-binary-python-packages: - - cryptography - - jsonschema - - jinja2 +base: ubuntu@22.04 +platforms: + amd64: config: options: debug: - default: False + default: false type: boolean frequency: default: hourly @@ -94,3 +72,17 @@ requires: interface: tracing optional: true limit: 1 + +parts: + charm: + build-packages: + - git + - libffi-dev + - libssl-dev + - pkg-config + - rustc + - cargo + charm-binary-python-packages: + - cryptography + - jsonschema + - jinja2 diff --git a/charms/openstack-images-sync-k8s/tests/unit/test_charm.py b/charms/openstack-images-sync-k8s/tests/unit/test_charm.py index 576a5605..572cd304 100644 --- a/charms/openstack-images-sync-k8s/tests/unit/test_charm.py +++ b/charms/openstack-images-sync-k8s/tests/unit/test_charm.py @@ -15,16 +15,9 @@ # limitations under the License. 
"""Tests for openstack images sync charm.""" -import pathlib import charm import ops_sunbeam.test_utils as test_utils -import yaml - -charmcraft = ( - pathlib.Path(__file__).parents[2] / "charmcraft.yaml" -).read_text() -config = yaml.dump(yaml.safe_load(charmcraft)["config"]) class _OISOperatorCharm(charm.OpenstackImagesSyncK8SCharm): @@ -61,9 +54,7 @@ class TestOISOperatorCharm(test_utils.CharmTestCase): super().setUp(charm, self.PATCHES) self.harness = test_utils.get_harness( _OISOperatorCharm, - charm_metadata=charmcraft, container_calls=self.container_calls, - charm_config=config, ) self.addCleanup(self.harness.cleanup) diff --git a/charms/ovn-central-k8s/actions.yaml b/charms/ovn-central-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/ovn-central-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/ovn-central-k8s/charmcraft.yaml b/charms/ovn-central-k8s/charmcraft.yaml index e67d37b5..fb5e8505 100644 --- a/charms/ovn-central-k8s/charmcraft.yaml +++ b/charms/ovn-central-k8s/charmcraft.yaml @@ -1,13 +1,128 @@ -# Learn more about charmcraft.yaml configuration at: -# https://juju.is/docs/sdk/charmcraft-config -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: ovn-central-k8s +summary: Open Virtual Network for Open vSwitch +description: | + Principal charm that deploys ovn-northd, the OVN central control daemon, + and ovsdb-server, the Open vSwitch Database (OVSDB). + + The ovn-northd daemon is responsible for translating the high-level OVN + configuration into logical configuration consumable by daemons such as + ovn-controller. + + The ovn-northd process talks to OVN Northbound- and Southbound- databases. + + The ovsdb-server exposes endpoints over relations implemented by the ovsdb + interface. + + The charm supports clustering of the OVSDB, you must have a odd number of + units for this to work. Note that write performance decreases as you + increase the number of units. + + Running multiple ovn-northd daemons is supported and they will operate in + active/passive mode. The daemon uses a locking feature in the OVSDB to + automatically choose a single active instance. +assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/x/charm-ovn-central-k8s + issues: + - https://bugs.launchpad.net/charm-ovn-central-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + ovsdb-server-election-timer: + default: 4 + type: int + description: | + Raft leader election timeout in seconds. The charm allows a value between + 1 and 60 seconds. + . + The Open vSwitch ovsdb-server default of 1 second may not be sufficient + for a loaded cluster where the database server may be too busy serving + requests to respond to elections in time. + . + Using a higher value will increase the time to discover a real failure, + but you must weigh that against the risk of spurious leader flapping and + the unwanted churn that entails. + . + NOTE: The ovsdb-server will refuse to decrease or increase the value of + this timer more than 2x the current value. The charm will compensate for + this and decrease / increase the timer in increments, but care should be + taken to not decrease / increase the value too much in one operation. + ovsdb-server-inactivity-probe: + default: 60 + type: int + description: | + Maximum number of seconds of idle time on connection to client before + sending an inactivity probe message. 
+ + The Open vSwitch ovsdb-server default of 5 seconds may not be sufficient + depending on type and load of the CMS you want to connect to OVN. + +containers: + ovn-sb-db-server: + resource: ovn-sb-db-server-image + mounts: + - storage: databases + location: /var/lib/ovn + ovn-nb-db-server: + resource: ovn-nb-db-server-image + mounts: + - storage: databases + location: /var/lib/ovn + ovn-northd: + resource: ovn-northd-image + +resources: + ovn-sb-db-server-image: + type: oci-image + description: OCI image for OVN Southbound Database Server + upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 + ovn-nb-db-server-image: + type: oci-image + description: OCI image for OVN Northbound Database Server + upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 + ovn-northd-image: + type: oci-image + description: OCI image for OVN Northd + upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 + +storage: + databases: + type: filesystem + description: | + Persistent storage for OVN Northbound and Southbound databases + minimum-size: 100M + +requires: + certificates: + interface: tls-certificates + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + ovsdb: + interface: ovsdb + ovsdb-cms: + interface: ovsdb-cms + ovsdb-server: + interface: ovsdb-cluster + +peers: + peers: + interface: ovn-central-peer + parts: update-certificates: plugin: nil @@ -15,9 +130,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/ovn-central-k8s/config.yaml b/charms/ovn-central-k8s/config.yaml deleted file mode 100644 index b6b6e8fd..00000000 --- a/charms/ovn-central-k8s/config.yaml +++ /dev/null @@ -1,29 +0,0 @@ -options: - ovsdb-server-election-timer: - default: 4 - type: int - description: | - Raft leader election timeout in seconds. The charm allows a value between - 1 and 60 seconds. - . - The Open vSwitch ovsdb-server default of 1 second may not be sufficient - for a loaded cluster where the database server may be too busy serving - requests to respond to elections in time. - . - Using a higher value will increase the time to discover a real failure, - but you must weigh that against the risk of spurious leader flapping and - the unwanted churn that entails. - . - NOTE: The ovsdb-server will refuse to decrease or increase the value of - this timer more than 2x the current value. The charm will compensate for - this and decrease / increase the timer in increments, but care should be - taken to not decrease / increase the value too much in one operation. - ovsdb-server-inactivity-probe: - default: 60 - type: int - description: | - Maximum number of seconds of idle time on connection to client before - sending an inactivity probe message. - - The Open vSwitch ovsdb-server default of 5 seconds may not be sufficient - depending on type and load of the CMS you want to connect to OVN. diff --git a/charms/ovn-central-k8s/metadata.yaml b/charms/ovn-central-k8s/metadata.yaml deleted file mode 100644 index 2a649ef8..00000000 --- a/charms/ovn-central-k8s/metadata.yaml +++ /dev/null @@ -1,90 +0,0 @@ -name: ovn-central-k8s -summary: Open Virtual Network for Open vSwitch -maintainer: OpenStack Charmers -description: | - Principal charm that deploys ovn-northd, the OVN central control daemon, - and ovsdb-server, the Open vSwitch Database (OVSDB). 
- - The ovn-northd daemon is responsible for translating the high-level OVN - configuration into logical configuration consumable by daemons such as - ovn-controller. - - The ovn-northd process talks to OVN Northbound- and Southbound- databases. - - The ovsdb-server exposes endpoints over relations implemented by the ovsdb - interface. - - The charm supports clustering of the OVSDB, you must have a odd number of - units for this to work. Note that write performance decreases as you - increase the number of units. - - Running multiple ovn-northd daemons is supported and they will operate in - active/passive mode. The daemon uses a locking feature in the OVSDB to - automatically choose a single active instance. -tags: -- networking -source: https://opendev.org/x/charm-ovn-central-k8s -issues: https://bugs.launchpad.net/charm-ovn-central-k8s -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -storage: - databases: - type: filesystem - description: | - Persistent storage for OVN Northbound and Southbound databases - minimum-size: 100M - -containers: - ovn-sb-db-server: - resource: ovn-sb-db-server-image - mounts: - - storage: databases - location: /var/lib/ovn - ovn-nb-db-server: - resource: ovn-nb-db-server-image - mounts: - - storage: databases - location: /var/lib/ovn - ovn-northd: - resource: ovn-northd-image - -resources: - ovn-sb-db-server-image: - type: oci-image - description: OCI image for OVN Southbound Database Server - upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 - ovn-nb-db-server-image: - type: oci-image - description: OCI image for OVN Northbound Database Server - upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 - ovn-northd-image: - type: oci-image - description: OCI image for OVN Northd - upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 - -requires: - certificates: - interface: tls-certificates - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - ovsdb: - interface: ovsdb - ovsdb-cms: - interface: ovsdb-cms - ovsdb-server: - interface: ovsdb-cluster - -peers: - peers: - interface: ovn-central-peer diff --git a/charms/ovn-central-k8s/tests/config.yaml b/charms/ovn-central-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/ovn-central-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/ovn-relay-k8s/actions.yaml b/charms/ovn-relay-k8s/actions.yaml deleted file mode 100644 index 5756cbf2..00000000 --- a/charms/ovn-relay-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -get-southbound-db-url: - description: Southbound DB Connection URL for access to OVN via the relay. diff --git a/charms/ovn-relay-k8s/charmcraft.yaml b/charms/ovn-relay-k8s/charmcraft.yaml index e67d37b5..4715036a 100644 --- a/charms/ovn-relay-k8s/charmcraft.yaml +++ b/charms/ovn-relay-k8s/charmcraft.yaml @@ -1,13 +1,60 @@ -# Learn more about charmcraft.yaml configuration at: -# https://juju.is/docs/sdk/charmcraft-config -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: ovn-relay-k8s +summary: Relay for Open Virtual Network for Open vSwitch +description: | + The charm that deploys OVSDB Relay service. + + The OVSDB Relay handles all read-only requests and forwards + transactions that requires database modifications to ovsdb + server. 
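The ovsdb-server-election-timer option carried over above for ovn-central-k8s notes that ovsdb-server refuses any change of more than 2x the current value, so the charm walks the timer toward the requested value in increments. A minimal Python sketch of that stepping idea, assuming the 1-60 second range the option allows; the function name is illustrative and this is not the charm's actual implementation:

def election_timer_steps(current: int, target: int) -> list[int]:
    """Return the intermediate values used to walk the election timer.

    Illustrative only: ovsdb-server rejects changes of more than 2x the
    current value, so each step at most doubles or halves the timer.
    """
    assert 1 <= current <= 60 and 1 <= target <= 60  # range the charm allows
    steps = []
    while current != target:
        if target > current:
            current = min(target, current * 2)          # at most double per step
        else:
            current = max(target, (current + 1) // 2)   # at most halve per step
        steps.append(current)
    return steps

# election_timer_steps(1, 16) -> [2, 4, 8, 16]
# election_timer_steps(60, 4) -> [30, 15, 8, 4]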
+assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/x/charm-ovn-relay-k8s + issues: + - https://bugs.launchpad.net/charm-ovn-relay-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +actions: + get-southbound-db-url: + description: Southbound DB Connection URL for access to OVN via the relay. + +containers: + ovsdb-server: + resource: ovn-sb-db-server-image + +resources: + ovn-sb-db-server-image: + type: oci-image + description: OCI image for OVN Southbound Database Server + upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 + +requires: + ovsdb-cms: + interface: ovsdb-cms + certificates: + interface: tls-certificates + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + ovsdb-cms-relay: + interface: ovsdb-cms + +peers: + peers: + interface: ovn-relay-peer + parts: update-certificates: plugin: nil @@ -15,9 +62,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/ovn-relay-k8s/metadata.yaml b/charms/ovn-relay-k8s/metadata.yaml deleted file mode 100644 index 01c84ca5..00000000 --- a/charms/ovn-relay-k8s/metadata.yaml +++ /dev/null @@ -1,51 +0,0 @@ -name: ovn-relay-k8s -summary: Relay for Open Virtual Network for Open vSwitch -maintainer: OpenStack Charmers -description: | - The charm that deploys OVSDB Relay service. - - The OVSDB Relay handles all read-only requests and forwards - transactions that requires database modifications to ovsdb - server. -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- networking -source: https://opendev.org/x/charm-ovn-relay-k8s -issues: https://bugs.launchpad.net/charm-ovn-relay-k8s - -containers: - ovsdb-server: - resource: ovn-sb-db-server-image - -resources: - ovn-sb-db-server-image: - type: oci-image - description: OCI image for OVN Southbound Database Server - # ghcr.io/canonical/ovn-sb-db-server:24.03 - upstream-source: ghcr.io/canonical/ovn-consolidated:24.03 - -requires: - ovsdb-cms: - interface: ovsdb-cms - certificates: - interface: tls-certificates - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - ovsdb-cms-relay: - interface: ovsdb-cms - -peers: - peers: - interface: ovn-relay-peer diff --git a/charms/ovn-relay-k8s/tests/actions.yaml b/charms/ovn-relay-k8s/tests/actions.yaml deleted file mode 120000 index 9adaf92e..00000000 --- a/charms/ovn-relay-k8s/tests/actions.yaml +++ /dev/null @@ -1 +0,0 @@ -../actions.yaml \ No newline at end of file diff --git a/charms/placement-k8s/actions.yaml b/charms/placement-k8s/actions.yaml deleted file mode 100644 index 88e6195d..00000000 --- a/charms/placement-k8s/actions.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# NOTE: no actions yet! -{ } diff --git a/charms/placement-k8s/charmcraft.yaml b/charms/placement-k8s/charmcraft.yaml index f6459be6..20c0cb5e 100644 --- a/charms/placement-k8s/charmcraft.yaml +++ b/charms/placement-k8s/charmcraft.yaml @@ -1,11 +1,76 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" +type: charm +name: placement-k8s +summary: OpenStack placement service +description: | + OpenStack Placement provides an HTTP service for managing, selecting, + and claiming providers of classes of inventory representing available + resources in a cloud. 
+assumes: + - k8s-api + - juju >= 3.1 +links: + source: + - https://opendev.org/openstack/charm-placement-k8s + issues: + - https://bugs.launchpad.net/charm-placement-k8s + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + description: Enable debug logging. + type: boolean + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +containers: + placement-api: + resource: placement-api-image + +resources: + placement-api-image: + type: oci-image + description: OCI image for OpenStack Placement + upstream-source: ghcr.io/canonical/placement-api:2024.1 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + limit: 1 + ingress-public: + interface: ingress + optional: true + limit: 1 + receive-ca-cert: + interface: certificate_transfer + optional: true + logging: + interface: loki_push_api + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + placement: + interface: placement + +peers: + peers: + interface: placement-peer + parts: update-certificates: plugin: nil @@ -13,9 +78,9 @@ parts: apt update apt install -y ca-certificates update-ca-certificates - charm: - after: [update-certificates] + after: + - update-certificates build-packages: - git - libffi-dev diff --git a/charms/placement-k8s/config.yaml b/charms/placement-k8s/config.yaml deleted file mode 100644 index 45e55dcc..00000000 --- a/charms/placement-k8s/config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -options: - debug: - default: False - description: Enable debug logging. - type: boolean - region: - default: RegionOne - description: Name of the OpenStack region - type: string diff --git a/charms/placement-k8s/metadata.yaml b/charms/placement-k8s/metadata.yaml deleted file mode 100644 index 1938d18f..00000000 --- a/charms/placement-k8s/metadata.yaml +++ /dev/null @@ -1,60 +0,0 @@ -name: placement-k8s -summary: OpenStack placement service -maintainer: OpenStack Charmers -description: | - OpenStack Placement provides an HTTP service for managing, selecting, - and claiming providers of classes of inventory representing available - resources in a cloud. 
-version: 3 -bases: - - name: ubuntu - channel: 22.04/stable -assumes: - - k8s-api - - juju >= 3.1 -tags: -- openstack -source: https://opendev.org/openstack/charm-placement-k8s -issues: https://bugs.launchpad.net/charm-placement-k8s - -containers: - placement-api: - resource: placement-api-image - -resources: - placement-api-image: - type: oci-image - description: OCI image for OpenStack Placement - upstream-source: ghcr.io/canonical/placement-api:2024.1 - -requires: - database: - interface: mysql_client - limit: 1 - identity-service: - interface: keystone - ingress-internal: - interface: ingress - limit: 1 - ingress-public: - interface: ingress - optional: true - limit: 1 - receive-ca-cert: - interface: certificate_transfer - optional: true - logging: - interface: loki_push_api - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - placement: - interface: placement - -peers: - peers: - interface: placement-peer diff --git a/charms/placement-k8s/tests/config.yaml b/charms/placement-k8s/tests/config.yaml deleted file mode 120000 index e84e89a8..00000000 --- a/charms/placement-k8s/tests/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../config.yaml \ No newline at end of file diff --git a/charms/sunbeam-clusterd/charmcraft.yaml b/charms/sunbeam-clusterd/charmcraft.yaml index 28fe91af..8c2e9dc7 100644 --- a/charms/sunbeam-clusterd/charmcraft.yaml +++ b/charms/sunbeam-clusterd/charmcraft.yaml @@ -1,25 +1,40 @@ -# This file configures Charmcraft. -# See https://juju.is/docs/sdk/charmcraft-config for guidance. - -name: sunbeam-clusterd - type: charm - title: Sunbeam Clusterd - +name: sunbeam-clusterd summary: A juju charm to run sunbeam clusterd - description: | Manage sunbeam clusterd deployment -# (Required for 'charm' type) -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + snap-channel: + default: 2024.1/edge + type: string + debug: + default: false + type: boolean + +actions: + get-credentials: + description: | + Return information necessary to connect to clusterd. + +requires: + certificates: + interface: tls-certificates + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +peers: + peers: + interface: clusterd-peer parts: charm: @@ -34,30 +49,3 @@ parts: - cryptography - jsonschema - jinja2 - -peers: - peers: - interface: clusterd-peer - -actions: - get-credentials: - description: | - Return information necessary to connect to clusterd. - -config: - options: - snap-channel: - default: "2024.1/edge" - type: string - debug: - default: False - type: boolean - -requires: - certificates: - interface: tls-certificates - optional: True - tracing: - interface: tracing - optional: true - limit: 1 diff --git a/charms/sunbeam-machine/charmcraft.yaml b/charms/sunbeam-machine/charmcraft.yaml index 9a8d8b29..3bce0d33 100644 --- a/charms/sunbeam-machine/charmcraft.yaml +++ b/charms/sunbeam-machine/charmcraft.yaml @@ -1,14 +1,34 @@ -# This file configures Charmcraft. -# See https://juju.is/docs/sdk/charmcraft-config for guidance. 
- type: charm -bases: - - build-on: - - name: ubuntu - channel: "22.04" - run-on: - - name: ubuntu - channel: "22.04" +title: Sunbeam Machine +name: sunbeam-machine +summary: Manage sunbeam utilities in OpenStack deployment +description: | + Manage machine configuration for sunbeam deployment + +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + debug: + default: false + type: boolean + http_proxy: + description: Set HTTP_PROXY in /etc/environment + type: string + https_proxy: + description: Set HTTPS_PROXY in /etc/environment + type: string + no_proxy: + description: Set NO_PROXY in /etc/environment + type: string + +requires: + tracing: + interface: tracing + optional: true + limit: 1 parts: charm: diff --git a/charms/sunbeam-machine/config.yaml b/charms/sunbeam-machine/config.yaml deleted file mode 100644 index f8008c6e..00000000 --- a/charms/sunbeam-machine/config.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# This file defines charm config options, and populates the Configure tab on Charmhub. -# If your charm does not require configuration options, delete this file entirely. -# -# See https://juju.is/docs/config for guidance. -options: - debug: - default: False - type: boolean - http_proxy: - description: Set HTTP_PROXY in /etc/environment - type: string - https_proxy: - description: Set HTTPS_PROXY in /etc/environment - type: string - no_proxy: - description: Set NO_PROXY in /etc/environment - type: string diff --git a/charms/sunbeam-machine/metadata.yaml b/charms/sunbeam-machine/metadata.yaml deleted file mode 100644 index 6eceabdd..00000000 --- a/charms/sunbeam-machine/metadata.yaml +++ /dev/null @@ -1,17 +0,0 @@ -name: sunbeam-machine - -display-name: Sunbeam Machine - -summary: Manage sunbeam utilities in OpenStack deployment - -description: | - Manage machine configuration for sunbeam deployment - -# This charm has no peer relation by design. This charm needs to scale to -# hundreds of units and this is limited by the peer relation. 
- -requires: - tracing: - interface: tracing - optional: true - limit: 1 diff --git a/charms/sunbeam-machine/tests/unit/config.yaml b/charms/sunbeam-machine/tests/unit/config.yaml deleted file mode 120000 index 82800a03..00000000 --- a/charms/sunbeam-machine/tests/unit/config.yaml +++ /dev/null @@ -1 +0,0 @@ -../../config.yaml \ No newline at end of file diff --git a/charms/sunbeam-machine/tests/unit/test_charm.py b/charms/sunbeam-machine/tests/unit/test_charm.py index 7679240d..36d896f7 100644 --- a/charms/sunbeam-machine/tests/unit/test_charm.py +++ b/charms/sunbeam-machine/tests/unit/test_charm.py @@ -40,12 +40,9 @@ class TestCharm(test_utils.CharmTestCase): def setUp(self): """Setup Sunbeam machine tests.""" super().setUp(charm, self.PATCHES) - with open("config.yaml", "r") as f: - config_data = f.read() self.harness = test_utils.get_harness( _SunbeamMachineCharm, container_calls=self.container_calls, - charm_config=config_data, ) self.addCleanup(self.harness.cleanup) diff --git a/charms/tempest-k8s/charmcraft.yaml b/charms/tempest-k8s/charmcraft.yaml index 1f1b42f0..275379b5 100644 --- a/charms/tempest-k8s/charmcraft.yaml +++ b/charms/tempest-k8s/charmcraft.yaml @@ -1,34 +1,4 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" -parts: - update-certificates: - plugin: nil - override-build: | - apt update - apt install -y ca-certificates - update-ca-certificates - - charm: - after: [update-certificates] - build-packages: - - git - - libffi-dev - - libssl-dev - - rustc - - cargo - - pkg-config - charm-binary-python-packages: - - cryptography - - jsonschema - - pydantic - - jinja2 - +type: charm name: tempest-k8s summary: OpenStack integration test suite (tempest) description: | @@ -36,47 +6,17 @@ description: | or periodic fasion, against a live OpenStack cluster for OpenStack API validation, scenarios, and other specific tests useful in validating an OpenStack deployment. - assumes: - k8s-api - juju >= 3.1 - links: source: https://opendev.org/openstack/sunbeam-charms issues: https://bugs.launchpad.net/sunbeam-charms documentation: https://discourse.charmhub.io/t/tempest-k8s-docs-index/13258 -containers: - tempest: - resource: tempest-image - -resources: - tempest-image: - type: oci-image - description: OCI image for tempest - # ghcr.io/canonical/tempest:2024.1 - upstream-source: ghcr.io/canonical/tempest:2024.1 - -requires: - identity-ops: - interface: keystone-resources - logging: - interface: loki_push_api - receive-ca-cert: - interface: certificate_transfer - optional: true - tracing: - interface: tracing - optional: true - limit: 1 - -provides: - grafana-dashboard: - interface: grafana_dashboard - -peers: - peers: - interface: tempest-peer +base: ubuntu@22.04 +platforms: + amd64: config: options: @@ -86,7 +26,7 @@ config: description: Name of the OpenStack region schedule: type: string - default: "0 */1 * * *" + default: 0 */1 * * * description: | The cron schedule expression to define when to run tempest periodic checks. @@ -158,7 +98,60 @@ actions: default: "" description: | Use a predefined test list. See `get-lists` for available test lists. - get-lists: description: List existing test lists, to be used with validate action. 
additionalProperties: false + +containers: + tempest: + resource: tempest-image + +resources: + tempest-image: + type: oci-image + description: OCI image for tempest + upstream-source: ghcr.io/canonical/tempest:2024.1 + +requires: + identity-ops: + interface: keystone-resources + logging: + interface: loki_push_api + receive-ca-cert: + interface: certificate_transfer + optional: true + tracing: + interface: tracing + optional: true + limit: 1 + +provides: + grafana-dashboard: + interface: grafana_dashboard + +peers: + peers: + interface: tempest-peer + +parts: + update-certificates: + plugin: nil + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + charm: + after: + - update-certificates + build-packages: + - git + - libffi-dev + - libssl-dev + - rustc + - cargo + - pkg-config + charm-binary-python-packages: + - cryptography + - jsonschema + - pydantic + - jinja2 diff --git a/charms/watcher-k8s/charmcraft.yaml b/charms/watcher-k8s/charmcraft.yaml index e0ff27e5..a57e9ecf 100644 --- a/charms/watcher-k8s/charmcraft.yaml +++ b/charms/watcher-k8s/charmcraft.yaml @@ -1,36 +1,6 @@ -type: "charm" -bases: - - build-on: - - name: "ubuntu" - channel: "22.04" - run-on: - - name: "ubuntu" - channel: "22.04" -parts: - update-certificates: - plugin: nil - override-build: | - apt update - apt install -y ca-certificates - update-ca-certificates - - charm: - after: [update-certificates] - build-packages: - - git - - libffi-dev - - libssl-dev - - rustc - - cargo - - pkg-config - charm-binary-python-packages: - - cryptography - - jsonschema - - pydantic - - jinja2 - -name: watcher-k8s +type: charm title: OpenStack watcher service +name: watcher-k8s summary: Resource Optimization service for OpenStack description: | watcher-k8s is a charm for OpenStack Watcher service which @@ -39,14 +9,46 @@ description: | The charm handles instantiation, scaling, configuration and Day 2 operations for OpenStack Watcher services. - +assumes: + - k8s-api + - juju >= 3.1 links: source: https://opendev.org/openstack/sunbeam-charms issues: https://bugs.launchpad.net/sunbeam-charms -assumes: -- k8s-api -- juju >= 3.1 +base: ubuntu@22.04 +platforms: + amd64: + +config: + options: + collector-plugins: + default: compute + description: | + A comma separated list of cluster data model plugin names. + . + Available collector-plugins are: compute and storage. + type: string + debug: + default: false + description: Enable debug logging. + type: boolean + enable-telemetry-notifications: + default: false + description: Enable notifications to send to telemetry. + type: boolean + maas-api-key: + description: MAAS API authentication key + type: string + maas-url: + description: MAAS URL to connect + type: string + region: + default: RegionOne + description: Name of the OpenStack region + type: string + +actions: {} containers: watcher-api: @@ -99,32 +101,25 @@ peers: peers: interface: watcher-peer -config: - options: - collector-plugins: - default: compute - description: | - A comma separated list of cluster data model plugin names. - . - Available collector-plugins are: compute and storage. - type: string - debug: - default: false - description: Enable debug logging. - type: boolean - enable-telemetry-notifications: - default: false - description: Enable notifications to send to telemetry. 
- type: boolean - maas-api-key: - description: MAAS API authentication key - type: string - maas-url: - description: MAAS URL to connect - type: string - region: - default: RegionOne - description: Name of the OpenStack region - type: string - -actions: {} +parts: + update-certificates: + plugin: nil + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + charm: + after: + - update-certificates + build-packages: + - git + - libffi-dev + - libssl-dev + - rustc + - cargo + - pkg-config + charm-binary-python-packages: + - cryptography + - jsonschema + - pydantic + - jinja2 diff --git a/ops-sunbeam/ops_sunbeam/test_utils.py b/ops-sunbeam/ops_sunbeam/test_utils.py index 16f5a2f0..260f39b0 100644 --- a/ops-sunbeam/ops_sunbeam/test_utils.py +++ b/ops-sunbeam/ops_sunbeam/test_utils.py @@ -40,6 +40,7 @@ from unittest.mock import ( import ops import ops.storage +import yaml from ops_sunbeam.charm import ( OSBaseOperatorCharm, ) @@ -680,7 +681,7 @@ def set_remote_leader_ready( ) -def get_harness( +def get_harness( # noqa: C901 charm_class: type[OSBaseOperatorCharm], charm_metadata: str | None = None, container_calls: ContainerCalls | None = None, @@ -790,11 +791,20 @@ def get_harness( charm_dir = pathlib.Path(filename).parents[2] if not charm_metadata: - metadata_file = f"{charm_dir}/metadata.yaml" + metadata_file = f"{charm_dir}/charmcraft.yaml" if os.path.isfile(metadata_file): with open(metadata_file) as f: charm_metadata = f.read() + if charm_metadata: + loaded_metadata = yaml.safe_load(charm_metadata) + + if not charm_config and (config := loaded_metadata.get("config")): + charm_config = yaml.safe_dump(config) + + if not charm_actions and (actions := loaded_metadata.get("actions")): + charm_actions = yaml.safe_dump(actions) + os.environ["JUJU_VERSION"] = "3.4.4" harness = Harness( charm_class, diff --git a/ops-sunbeam/shared_code/config-ceph-options.yaml b/ops-sunbeam/shared_code/config-ceph-options.yaml index fa701b26..a11c136b 100644 --- a/ops-sunbeam/shared_code/config-ceph-options.yaml +++ b/ops-sunbeam/shared_code/config-ceph-options.yaml @@ -68,8 +68,8 @@ type: string default: replicated description: | - Ceph pool type to use for storage - valid values include ‘replicated’ - and ‘erasure-coded’. + Ceph pool type to use for storage - valid values include `replicated` + and `erasure-coded`. ec-profile-name: type: string default: @@ -151,9 +151,9 @@ description: | EC profile technique used for this applications pool - will be validated based on the plugin configured via ec-profile-plugin. - Supported techniques are ‘reed_sol_van’, ‘reed_sol_r6_op’, - ‘cauchy_orig’, ‘cauchy_good’, ‘liber8tion’ for jerasure, - ‘reed_sol_van’, ‘cauchy’ for isa and ‘single’, ‘multiple’ + Supported techniques are `reed_sol_van`, `reed_sol_r6_op`, + `cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure, + `reed_sol_van`, `cauchy` for isa and `single`, `multiple` for shec. 
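The ec-profile-technique option above is validated against the plugin selected via ec-profile-plugin. A small sanity-check sketch consistent with the combinations listed in that description (jerasure, isa and shec only); the table and helper are illustrative and not part of the charms themselves:

# Hypothetical helper: encodes only the plugin/technique combinations
# listed in the ec-profile-technique description above.
VALID_EC_TECHNIQUES = {
    "jerasure": {"reed_sol_van", "reed_sol_r6_op", "cauchy_orig",
                 "cauchy_good", "liber8tion"},
    "isa": {"reed_sol_van", "cauchy"},
    "shec": {"single", "multiple"},
}


def validate_ec_technique(plugin: str, technique: str) -> None:
    allowed = VALID_EC_TECHNIQUES.get(plugin)
    if allowed is None:
        raise ValueError(f"unknown ec-profile-plugin: {plugin!r}")
    if technique not in allowed:
        raise ValueError(f"{technique!r} is not a valid technique for {plugin!r}")

# validate_ec_technique("jerasure", "reed_sol_van") passes;
# validate_ec_technique("isa", "liber8tion") raises ValueError.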
ec-profile-device-class: type: string diff --git a/ops-sunbeam/shared_code/sunbeam_charm/{{cookiecutter.service_name}}/metadata.yaml b/ops-sunbeam/shared_code/sunbeam_charm/{{cookiecutter.service_name}}/metadata.yaml index 11836c1f..2b1774bd 100644 --- a/ops-sunbeam/shared_code/sunbeam_charm/{{cookiecutter.service_name}}/metadata.yaml +++ b/ops-sunbeam/shared_code/sunbeam_charm/{{cookiecutter.service_name}}/metadata.yaml @@ -5,7 +5,7 @@ description: | OpenStack {{ cookiecutter.service_name }} provides an HTTP service for managing, selecting, and claiming providers of classes of inventory representing available resources in a cloud. - . + version: 3 bases: - name: ubuntu diff --git a/test-requirements.txt b/test-requirements.txt index 206f937e..f57fe2b9 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -19,3 +19,5 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client # cinder-ceph-k8s requests-unixsocket # sunbeam-clusterd openstacksdk # tempest-k8s + +types-PyYAML # types stub for mypy diff --git a/zuul.d/zuul.yaml b/zuul.d/zuul.yaml index 65927433..b5e35c38 100644 --- a/zuul.d/zuul.yaml +++ b/zuul.d/zuul.yaml @@ -23,7 +23,7 @@ env_type: k8s k8s_channel: 1.30-classic/candidate k8s_classic_mode: true - charmcraft_channel: 2.x/stable + charmcraft_channel: 3.x/stable publish_charm: true publish_channels: keystone-k8s: 2024.1/edge
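The ops_sunbeam test_utils change above makes get_harness read the unified charmcraft.yaml and, when the caller does not pass charm_config or charm_actions, derive them from the config and actions sections of that file. A standalone sketch of the same extraction pattern, assuming PyYAML (now covered by the types-PyYAML stub added to test-requirements.txt); the function name is illustrative rather than an exact ops_sunbeam API:

import pathlib

import yaml


def load_unified_metadata(charm_dir: str) -> tuple[str, str | None, str | None]:
    """Return (metadata, config, actions) YAML strings from charmcraft.yaml.

    With the unified format, a single charmcraft.yaml carries what
    metadata.yaml, config.yaml and actions.yaml used to hold.
    """
    text = (pathlib.Path(charm_dir) / "charmcraft.yaml").read_text()
    loaded = yaml.safe_load(text)
    config = loaded.get("config")
    actions = loaded.get("actions")
    return (
        text,
        yaml.safe_dump(config) if config else None,
        yaml.safe_dump(actions) if actions else None,
    )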