Some files that can be shared between charms

commit 6bf5ab6ea1 (parent 4d4e12848e)
ops-sunbeam/shared_code/.stestr.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
[DEFAULT]
test_path=./unit_tests
top_dir=./
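These stestr settings point the `stestr run` command invoked from the tox.ini below at the unit_tests directory for test discovery.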
ops-sunbeam/shared_code/config-ceph-options.yaml (new file, 230 lines)
@@ -0,0 +1,230 @@
ceph-osd-replication-count:
  default: 3
  type: int
  description: |
    This value dictates the number of replicas ceph must make of any
    object it stores within the cinder rbd pool. Of course, this only
    applies if using Ceph as a backend store. Note that once the cinder
    rbd pool has been created, changing this value will not have any
    effect (although it can be changed in ceph by manually configuring
    your ceph cluster).
ceph-pool-weight:
  type: int
  default: 40
  description: |
    Defines a relative weighting of the pool as a percentage of the total
    amount of data in the Ceph cluster. This effectively weights the number
    of placement groups for the pool created to be appropriately portioned
    to the amount of data expected. For example, if the ephemeral volumes
    for the OpenStack compute instances are expected to take up 20% of the
    overall configuration then this value would be specified as 20. Note -
    it is important to choose an appropriate value for the pool weight as
    this directly affects the number of placement groups which will be
    created for the pool. The number of placement groups for a pool can
    only be increased, never decreased - so it is important to identify the
    percent of data that will likely reside in the pool.
volume-backend-name:
  default:
  type: string
  description: |
    Volume backend name for the backend. The default value is the
    application name in the Juju model, e.g. "cinder-ceph-mybackend"
    if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
    A common backend name can be set across multiple backends so that
    they can be treated as a single virtual backend associated with a
    single volume type.
backend-availability-zone:
  default:
  type: string
  description: |
    Availability zone name of this volume backend. If set, it will
    override the default availability zone. Supported for Pike or
    newer releases.
restrict-ceph-pools:
  default: False
  type: boolean
  description: |
    Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
  default:
  type: string
  description: |
    Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
  type: boolean
  default: False
  description: |
    Flatten volumes created from snapshots to remove dependency from
    volume to snapshot. Supported on Queens+.
rbd-mirroring-mode:
  type: string
  default: pool
  description: |
    The RBD mirroring mode used for the Ceph pool. This option is only used
    with 'replicated' pool type, as it's not supported for 'erasure-coded'
    pool type - valid values: 'pool' and 'image'.
pool-type:
  type: string
  default: replicated
  description: |
    Ceph pool type to use for storage - valid values include 'replicated'
    and 'erasure-coded'.
ec-profile-name:
  type: string
  default:
  description: |
    Name for the EC profile to be created for the EC pools. If not defined
    a profile name will be generated based on the name of the pool used by
    the application.
ec-rbd-metadata-pool:
  type: string
  default:
  description: |
    Name of the metadata pool to be created (for RBD use-cases). If not
    defined a metadata pool name will be generated based on the name of
    the data pool used by the application. The metadata pool is always
    replicated, not erasure coded.
ec-profile-k:
  type: int
  default: 1
  description: |
    Number of data chunks that will be used for EC data pool. K+M factors
    should never be greater than the number of available zones (or hosts)
    for balancing.
ec-profile-m:
  type: int
  default: 2
  description: |
    Number of coding chunks that will be used for EC data pool. K+M factors
    should never be greater than the number of available zones (or hosts)
    for balancing.
ec-profile-locality:
  type: int
  default:
  description: |
    (lrc plugin - l) Group the coding and data chunks into sets of size l.
    For instance, for k=4 and m=2, when l=3 two groups of three are created.
    Each set can be recovered without reading chunks from another set. Note
    that using the lrc plugin does incur more raw storage usage than isa or
    jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
  type: string
  default:
  description: |
    (lrc plugin) The type of the crush bucket in which each set of chunks
    defined by l will be stored. For instance, if it is set to rack, each
    group of l chunks will be placed in a different rack. It is used to
    create a CRUSH rule step such as step choose rack. If it is not set,
    no such grouping is done.
ec-profile-durability-estimator:
  type: int
  default:
  description: |
    (shec plugin - c) The number of parity chunks each of which includes
    each data chunk in its calculation range. The number is used as a
    durability estimator. For instance, if c=2, 2 OSDs can be down
    without losing data.
ec-profile-helper-chunks:
  type: int
  default:
  description: |
    (clay plugin - d) Number of OSDs requested to send data during
    recovery of a single chunk. d needs to be chosen such that
    k+1 <= d <= k+m-1. The larger the d, the better the savings.
ec-profile-scalar-mds:
  type: string
  default:
  description: |
    (clay plugin) specifies the plugin that is used as a building
    block in the layered construction. It can be one of jerasure,
    isa or shec (defaults to jerasure).
ec-profile-plugin:
  type: string
  default: jerasure
  description: |
    EC plugin to use for this application's pool. The following plugins
    are acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
  type: string
  default:
  description: |
    EC profile technique used for this application's pool - will be
    validated based on the plugin configured via ec-profile-plugin.
    Supported techniques are 'reed_sol_van', 'reed_sol_r6_op',
    'cauchy_orig', 'cauchy_good', 'liber8tion' for jerasure,
    'reed_sol_van', 'cauchy' for isa and 'single', 'multiple'
    for shec.
ec-profile-device-class:
  type: string
  default:
  description: |
    Device class from CRUSH map to use for placement groups for
    erasure profile - valid values: ssd, hdd or nvme (or leave
    unset to not use a device class).
bluestore-compression-algorithm:
  type: string
  default:
  description: |
    Compressor to use (if any) for pools requested by this charm.
    .
    NOTE: The ceph-osd charm sets a global default for this value (defaults
    to 'lz4' unless configured by the end user) which will be used unless
    specified for individual pools.
bluestore-compression-mode:
  type: string
  default:
  description: |
    Policy for using compression on pools requested by this charm.
    .
    'none' means never use compression.
    'passive' means use compression when clients hint that data is
    compressible.
    'aggressive' means use compression unless clients hint that
    data is not compressible.
    'force' means use compression under all circumstances even if the
    clients hint that the data is not compressible.
bluestore-compression-required-ratio:
  type: float
  default:
  description: |
    The ratio of the size of the data chunk after compression relative to the
    original size must be at least this small in order to store the
    compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
  type: int
  default:
  description: |
    Chunks smaller than this are never compressed on pools requested by
    this charm.
bluestore-compression-min-blob-size-hdd:
  type: int
  default:
  description: |
    Value of bluestore compression min blob size for rotational media on
    pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
  type: int
  default:
  description: |
    Value of bluestore compression min blob size for solid state media on
    pools requested by this charm.
bluestore-compression-max-blob-size:
  type: int
  default:
  description: |
    Chunks larger than this are broken into smaller blobs of at most
    bluestore compression max blob size before being compressed on pools
    requested by this charm.
bluestore-compression-max-blob-size-hdd:
  type: int
  default:
  description: |
    Value of bluestore compression max blob size for rotational media on
    pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
  type: int
  default:
  description: |
    Value of bluestore compression max blob size for solid state media on
    pools requested by this charm.
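At runtime a charm reads these options through the operator framework's config map. A minimal sketch of how they might be consumed, assuming the charm is built on ops' CharmBase (the class and handler names here are illustrative, not part of this commit):

from ops.charm import CharmBase


class CinderCephCharm(CharmBase):
    """Illustrative fragment only; the real charm class is not in this commit."""

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event):
        cfg = self.model.config
        replicas = cfg["ceph-osd-replication-count"]  # int, default 3
        weight = cfg["ceph-pool-weight"]              # percent of cluster data, default 40
        if cfg["pool-type"] == "erasure-coded":
            # K+M should not exceed the available failure domains: with
            # k=4, m=2 each object is stored as 6 chunks, so at least 6
            # hosts (or zones) are needed for balancing.
            k, m = cfg["ec-profile-k"], cfg["ec-profile-m"]
            usable_fraction = k / (k + m)  # 1/3 with the defaults k=1, m=2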
ops-sunbeam/shared_code/requirements.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
# ops >= 1.2.0
jinja2
git+https://github.com/canonical/operator@2875e73e#egg=ops
git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack
git+https://github.com/openstack-charmers/advanced-sunbeam-openstack#egg=advanced_sunbeam_openstack
lightkube
ops-sunbeam/shared_code/templates/ceph.conf.j2 (new file, 22 lines)
@@ -0,0 +1,22 @@
###############################################################################
# [ WARNING ]
# ceph configuration file maintained in aso
# local changes may be overwritten.
###############################################################################
[global]
{% if ceph.auth -%}
auth_supported = {{ ceph.auth }}
mon host = {{ ceph.mon_hosts }}
{% endif -%}
keyring = /etc/ceph/$cluster.$name.keyring
log to syslog = false
err to syslog = false
clog to syslog = false
{% if ceph.rbd_features %}
rbd default features = {{ ceph.rbd_features }}
{% endif %}

[client]
{% if ceph_config.rbd_default_data_pool -%}
rbd default data pool = {{ ceph_config.rbd_default_data_pool }}
{% endif %}
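This template is rendered with two context objects, ceph and ceph_config. A minimal render sketch using the jinja2 dependency from requirements.txt; the sample values are hypothetical:

import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
template = env.get_template("ceph.conf.j2")
print(template.render(
    ceph={"auth": "cephx",
          "mon_hosts": "10.0.0.1 10.0.0.2 10.0.0.3",
          "rbd_features": None},
    ceph_config={"rbd_default_data_pool": None},
))
# With this context, only the auth_supported/mon host lines and the static
# keyring/syslog settings are emitted; both optional blocks are skipped.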
ops-sunbeam/shared_code/templates/parts/section-database (new file, 7 lines)
@@ -0,0 +1,7 @@
[database]
{% if shared_db.database_host -%}
connection = {{ shared_db.database_type }}://{{ shared_db.database_user }}:{{ shared_db.database_password }}@{{ shared_db.database_host }}/{{ shared_db.database }}{% if shared_db.database_ssl_ca %}?ssl_ca={{ shared_db.database_ssl_ca }}{% if shared_db.database_ssl_cert %}&ssl_cert={{ shared_db.database_ssl_cert }}&ssl_key={{ shared_db.database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/cinder/cinder.db
{% endif -%}
connection_recycle_time = 200
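The nested conditionals assemble the SQLAlchemy connection URL piece by piece: SSL query parameters are appended only when a CA is configured, and the cert/key pair only when a client certificate is also present. A sketch of the expected result, with hypothetical values:

# Hypothetical shared_db context and the connection line it yields.
shared_db = {
    "database_type": "mysql+pymysql",
    "database_user": "cinder",
    "database_password": "secret",
    "database_host": "10.0.0.10",
    "database": "cinder",
    "database_ssl_ca": None,
}
# -> connection = mysql+pymysql://cinder:secret@10.0.0.10/cinder
# With database_ssl_ca set, "?ssl_ca=..." is appended; if database_ssl_cert
# is also set, "&ssl_cert=...&ssl_key=..." follow in the same query string.
# Without database_host the template falls back to the sqlite URL.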
ops-sunbeam/shared_code/templates/parts/section-federation (new file, 10 lines)
@@ -0,0 +1,10 @@
{% if trusted_dashboards %}
[federation]
{% for dashboard_url in trusted_dashboards -%}
trusted_dashboard = {{ dashboard_url }}
{% endfor -%}
{% endif %}
{% for sp in fid_sps -%}
[{{ sp['protocol-name'] }}]
remote_id_attribute = {{ sp['remote-id-attribute'] }}
{% endfor -%}
ops-sunbeam/shared_code/templates/parts/section-identity (new file, 11 lines)
@@ -0,0 +1,11 @@
[keystone_authtoken]
{% if identity_service.internal_host -%}
www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
auth_type = password
project_domain_name = {{ identity_service.service_domain_name }}
user_domain_name = {{ identity_service.service_domain_name }}
project_name = {{ identity_service.service_project_name }}
username = {{ identity_service.service_user_name }}
password = {{ identity_service.service_password }}
{% endif -%}
@@ -0,0 +1,6 @@
{% for section in sections -%}
[{{section}}]
{% for key, value in sections[section].items() -%}
{{ key }} = {{ value }}
{% endfor %}
{%- endfor %}
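This generic fragment turns a nested mapping of section name to options into INI-style output. A render sketch, with a hypothetical context:

import jinja2

tpl = jinja2.Template(
    "{% for section in sections -%}\n"
    "[{{section}}]\n"
    "{% for key, value in sections[section].items() -%}\n"
    "{{ key }} = {{ value }}\n"
    "{% endfor %}\n"
    "{%- endfor %}"
)
print(tpl.render(sections={"oslo_messaging_notifications": {"driver": "messagingv2"}}))
# Prints something like:
# [oslo_messaging_notifications]
# driver = messagingv2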
ops-sunbeam/shared_code/templates/parts/section-signing (new file, 15 lines)
@@ -0,0 +1,15 @@
{% if enable_signing -%}
[signing]
{% if certfile -%}
certfile = {{ certfile }}
{% endif -%}
{% if keyfile -%}
keyfile = {{ keyfile }}
{% endif -%}
{% if ca_certs -%}
ca_certs = {{ ca_certs }}
{% endif -%}
{% if ca_key -%}
ca_key = {{ ca_key }}
{% endif -%}
{% endif -%}
ops-sunbeam/shared_code/templates/wsgi-template.conf.j2 (new file, 24 lines)
@@ -0,0 +1,24 @@
Listen {{ wsgi_config.public_port }}
<VirtualHost *:{{ wsgi_config.public_port }}>
    WSGIDaemonProcess glance processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
                      display-name=%{GROUP}
    WSGIProcessGroup glance
    WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
        ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog {{ wsgi_config.error_log }}
    CustomLog {{ wsgi_config.custom_log }} combined

    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
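The vhost template consumes a single wsgi_config mapping; a sketch of the keys it references, with hypothetical values:

wsgi_config = {
    "public_port": 9292,
    "user": "glance",
    "group": "glance",
    "wsgi_public_script": "/usr/bin/glance-wsgi-api",
    "error_log": "/var/log/apache2/error.log",
    "custom_log": "/var/log/apache2/access.log",
}
# Note: the daemon process name is hard-coded to "glance" in the template,
# while user, group, port, script path and log paths come from this mapping.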
ops-sunbeam/shared_code/test-requirements.txt (new file, 17 lines)
@@ -0,0 +1,17 @@
# This file is managed centrally. If you find the need to modify this as a
# one-off, please don't. Instead, consult #openstack-charms and ask about
# requirements management in charms via bot-control. Thank you.
charm-tools>=2.4.4
coverage>=3.6
mock>=1.2
flake8>=2.2.4,<=2.4.1
pyflakes==2.1.1
stestr>=2.2.0
requests>=2.18.4
psutil
# oslo.i18n dropped py35 support
oslo.i18n<4.0.0
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
pytz  # workaround for 14.04 pip/tox
pyudev  # for ceph-* charm unit tests (not mocked?)
ops-sunbeam/shared_code/tox.ini (new file, 134 lines)
@@ -0,0 +1,134 @@
# Operator charm (with zaza): tox.ini

[tox]
envlist = pep8,py3
skipsdist = True
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
sitepackages = False
# NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False
# NOTES:
# * We avoid the new dependency resolver by pinning pip < 20.3, see
#   https://github.com/pypa/pip/issues/9187
# * Pinning dependencies requires tox >= 3.2.0, see
#   https://tox.readthedocs.io/en/latest/config.html#conf-requires
# * It is also necessary to pin virtualenv as a newer virtualenv would still
#   lead to fetching the latest pip in the func* tox targets, see
#   https://stackoverflow.com/a/38133283
requires = pip < 20.3
           virtualenv < 20.0
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
minversion = 3.2.0

[testenv]
setenv = VIRTUAL_ENV={envdir}
         PYTHONHASHSEED=0
         CHARM_DIR={envdir}
install_command =
  pip install {opts} {packages}
commands = stestr run --slowest {posargs}
whitelist_externals =
  git
  add-to-archive.py
  bash
  charmcraft
passenv = HOME TERM CS_* OS_* TEST_*
deps = -r{toxinidir}/test-requirements.txt

[testenv:py35]
basepython = python3.5
# python3.5 is irrelevant on a focal+ charm.
commands = /bin/true

[testenv:py36]
basepython = python3.6
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py37]
basepython = python3.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py38]
basepython = python3.8
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:pep8]
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands = flake8 {posargs} src unit_tests tests

[testenv:cover]
# Technique based heavily upon
# https://github.com/openstack/nova/blob/master/tox.ini
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
setenv =
    {[testenv]setenv}
    PYTHON=coverage run
commands =
    coverage erase
    stestr run --slowest {posargs}
    coverage combine
    coverage html -d cover
    coverage xml -o cover/coverage.xml
    coverage report

[coverage:run]
branch = True
concurrency = multiprocessing
parallel = True
source =
    .
omit =
    .tox/*
    */charmhelpers/*
    unit_tests/*

[testenv:venv]
basepython = python3
commands = {posargs}

[testenv:build]
basepython = python3
deps = -r{toxinidir}/build-requirements.txt
commands =
    charmcraft build

[testenv:func-noop]
basepython = python3
commands =
    functest-run-suite --help

[testenv:func]
basepython = python3
commands =
    functest-run-suite --keep-model

[testenv:func-smoke]
basepython = python3
commands =
    functest-run-suite --keep-model --smoke

[testenv:func-dev]
basepython = python3
commands =
    functest-run-suite --keep-model --dev

[testenv:func-target]
basepython = python3
commands =
    functest-run-suite --keep-model --bundle {posargs}

[flake8]
# Ignore E902 because the unit_tests directory is missing in the built charm.
ignore = E402,E226,E902
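Typical invocations against this tox.ini: `tox -e pep8` for lint, `tox -e py3` for unit tests, `tox -e cover` for a coverage report under cover/, `tox -e build` to run charmcraft build, and `tox -e func-smoke` for a zaza smoke run.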