Merge "Elastic: Move to elasticsearch helm charts at elastic/helm-charts"
commit 0f07b5a426
@@ -75,14 +75,16 @@ systemconfig.helm_plugins.platform_integ_apps =
     003_ceph-pools-audit = sysinv.helm.ceph_pools_audit:CephPoolsAuditHelm
 
 systemconfig.helm_plugins.stx_monitor =
-    001_elasticsearch = sysinv.helm.elasticsearch:ElasticsearchHelm
-    002_kibana = sysinv.helm.kibana:KibanaHelm
-    003_filebeat = sysinv.helm.filebeat:FilebeatHelm
-    004_metricbeat = sysinv.helm.metricbeat:MetricbeatHelm
-    005_kube-state-metrics = sysinv.helm.kube_state_metrics:KubeStateMetricsHelm
-    006_nginx-ingress = sysinv.helm.nginx_ingress:NginxIngressHelm
-    007_logstash = sysinv.helm.logstash:LogstashHelm
-    008_monitor_version_check = sysinv.helm.monitor_version_check:StxMonitorVersionCheckHelm
+    001_elasticsearch-master = sysinv.helm.elasticsearch_master:ElasticsearchMasterHelm
+    002_elasticsearch-data = sysinv.helm.elasticsearch_data:ElasticsearchDataHelm
+    003_elasticsearch-client = sysinv.helm.elasticsearch_client:ElasticsearchClientHelm
+    004_kibana = sysinv.helm.kibana:KibanaHelm
+    005_filebeat = sysinv.helm.filebeat:FilebeatHelm
+    006_metricbeat = sysinv.helm.metricbeat:MetricbeatHelm
+    007_kube-state-metrics = sysinv.helm.kube_state_metrics:KubeStateMetricsHelm
+    008_nginx-ingress = sysinv.helm.nginx_ingress:NginxIngressHelm
+    009_logstash = sysinv.helm.logstash:LogstashHelm
+    010_monitor_version_check = sysinv.helm.monitor_version_check:StxMonitorVersionCheckHelm
 
 systemconfig.helm_plugins.stx_openstack =
     001_ingress = sysinv.helm.ingress:IngressHelm
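Note on the hunk above: the stx_monitor plugin list is a set of setuptools entry points, one per helm override class, with a numeric prefix that fixes the ordering. A minimal sketch of how such a namespace can be enumerated with stevedore; the namespace string is taken from the hunk, while the loader function itself is illustrative and not sysinv code from this commit:

from stevedore import extension

def list_monitor_helm_plugins():
    # Entry-point names carry an ordering prefix (001_, 002_, ...),
    # so sorting the names reproduces the intended load order.
    mgr = extension.ExtensionManager(
        namespace='systemconfig.helm_plugins.stx_monitor',
        invoke_on_load=False)
    return sorted(mgr.names())

# expected: ['001_elasticsearch-master', '002_elasticsearch-data', ...]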
@@ -206,7 +206,7 @@ class KubeAppController(rest.RestController):
         elasticsearch_active = cutils.is_chart_enabled(
             pecan.request.dbapi,
             constants.HELM_APP_MONITOR,
-            helm_common.HELM_CHART_ELASTICSEARCH,
+            helm_common.HELM_CHART_ELASTICSEARCH_MASTER,
             helm_common.HELM_NS_MONITOR)
 
         if not elasticsearch_active and not logstash_active:
@@ -244,17 +244,19 @@ class KubeAppController(rest.RestController):
                     helm_common.LABEL_MONITOR_DATA: 1,
                     helm_common.LABEL_MONITOR_CLIENT: 1}
             else:
-                # For dual controller configs, we require the below.
+                # Dual controller configs
                 required_label_counts = {
                     helm_common.LABEL_MONITOR_DATA: 2,
-                    helm_common.LABEL_MONITOR_CLIENT: 2}
+                    helm_common.LABEL_MONITOR_CLIENT: 2,
+                    helm_common.LABEL_MONITOR_MASTER: 3}
 
-                if cutils.is_aio_duplex_system(pecan.request.dbapi):
+                # For AIO-DX without worker nodes, we only need 2
+                # hosts labelled as master.
+                if (cutils.is_aio_duplex_system(pecan.request.dbapi) and
+                        (pecan.request.dbapi.count_hosts_by_label(
+                            helm_common.LABEL_MONITOR_MASTER) < 3)):
                     required_label_counts[
                         helm_common.LABEL_MONITOR_MASTER] = 2
-                else:
-                    required_label_counts[
-                        helm_common.LABEL_MONITOR_MASTER] = 3
 
             if logstash_active:
                 good_label_counts[
@@ -280,12 +282,9 @@ class KubeAppController(rest.RestController):
         for label in labels:
             if label.label_key in required_label_counts:
                 if label.label_value == helm_common.LABEL_VALUE_ENABLED:
-                    label_counts[label.label_key] = \
-                        label_counts[label.label_key] + 1
-
+                    label_counts[label.label_key] += 1
                     if host_good:
-                        good_label_counts[label.label_key] = \
-                            good_label_counts[label.label_key] + 1
+                        good_label_counts[label.label_key] += 1
 
         # If we are short of labels on unlocked and enabled hosts
         # inform the user with a detailed message.
@@ -312,7 +311,7 @@ class KubeAppController(rest.RestController):
         hosts_to_label_check = pecan.request.dbapi.ihost_get_by_personality(
             constants.CONTROLLER)
 
-        if not cutils.is_aio_system(pecan.request.dbapi):
+        if not cutils.is_aio_simplex_system(pecan.request.dbapi):
             whosts = pecan.request.dbapi.ihost_get_by_personality(
                 constants.WORKER)
             hosts_to_label_check.extend(whosts)
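The three hunks above retarget the monitor app's host-label validation at the split charts. A condensed sketch of the resulting rule as a pure function; the counts mirror the hunks, while the function name, parameters, and the literal label keys (stand-ins for helm_common.LABEL_MONITOR_*) are illustrative:

def required_monitor_label_counts(single_controller, aio_duplex,
                                  master_labelled_hosts):
    if single_controller:
        # Single controller: one host carries the data and client roles.
        return {'elastic-data': 1, 'elastic-client': 1}
    # Dual controller configs.
    required = {'elastic-data': 2, 'elastic-client': 2, 'elastic-master': 3}
    if aio_duplex and master_labelled_hosts < 3:
        # AIO-DX without master-labelled workers: the two data pods are
        # made master-capable, so only 2 master-labelled hosts are needed.
        required['elastic-master'] = 2
    return required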
@@ -491,6 +491,7 @@ class AppOperator(object):
         for r, f in cutils.get_files_matching(path, 'values.yaml'):
             with open(os.path.join(r, f), 'r') as value_f:
                 try_image_tag_repo_format = False
+                try_image_imagetag_format = False
                 y = yaml.safe_load(value_f)
                 try:
                     ids = y["images"]["tags"].values()
@@ -503,7 +504,17 @@ class AppOperator(object):
                         y_image_tag = y_image['repository'] + ":" + y_image['tag']
                         ids = [y_image_tag]
                     except (AttributeError, TypeError, KeyError):
+                        try_image_imagetag_format = True
                         pass
 
+                if try_image_imagetag_format:
+                    try:
+                        y_image_tag = \
+                            y_image['image'] + ":" + y_image['imageTag']
+                        ids = [y_image_tag]
+                    except (AttributeError, TypeError, KeyError):
+                        pass
+
                 image_tags.extend(ids)
         return image_tags
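The two hunks above add a third values.yaml layout to the image mining: besides the armada-style images.tags map and the repository/tag pair, the elastic/helm-charts values use top-level image and imageTag keys. A self-contained sketch of the same cascade (illustrative names, not the commit's code); the same fallback order applies to the manifest-side hunks that follow:

import yaml

def extract_image_tags(values_yaml_text):
    y = yaml.safe_load(values_yaml_text)
    # 1. armada-style map: images.tags.{name: image}
    try:
        return list(y['images']['tags'].values())
    except (AttributeError, TypeError, KeyError):
        pass
    # 2. repository/tag pair: image.repository + image.tag
    try:
        return [y['image']['repository'] + ':' + y['image']['tag']]
    except (AttributeError, TypeError, KeyError):
        pass
    # 3. elastic/helm-charts style: top-level image + imageTag
    try:
        return [y['image'] + ':' + y['imageTag']]
    except (AttributeError, TypeError, KeyError):
        return []

# extract_image_tags("image: docker.elastic.co/elasticsearch/elasticsearch-oss\nimageTag: 7.2.0\n")
# -> ['docker.elastic.co/elasticsearch/elasticsearch-oss:7.2.0']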
@@ -563,6 +574,7 @@ class AppOperator(object):
 
         # Get the image tags from the armada manifest file
         try_image_tag_repo_format = False
+        try_image_imagetag_format = False
         try:
             images_manifest = chart_data['values']['images']['tags']
         except (TypeError, KeyError, AttributeError):
@@ -577,6 +589,16 @@ class AppOperator(object):
                     y_image_tag = \
                         y_image['repository'] + ":" + y_image['tag']
                     images_manifest = {chart_name: y_image_tag}
                 except (AttributeError, TypeError, KeyError):
+                    try_image_imagetag_format = True
                     pass
 
+            if try_image_imagetag_format:
+                try:
+                    y_image = chart_data['values']
+                    y_image_tag = \
+                        y_image['image'] + ":" + y_image['imageTag']
+                    images_manifest = {chart_name: y_image_tag}
+                except (AttributeError, TypeError, KeyError):
+                    pass
+
@@ -685,15 +707,38 @@ class AppOperator(object):
                     default_flow_style=False)
 
     def _save_images_list_by_charts(self, app):
+        from six.moves.urllib.parse import urlparse
+
         # Mine the images from values.yaml files in the charts directory.
         # The list of images for each chart are saved to the images file.
         images_by_charts = {}
         for chart in app.charts:
             images = {}
             chart_name = os.path.join(app.charts_dir, chart.name)
+
+            if not os.path.exists(chart_name):
+                # If the helm chart name is not the same as the armada
+                # chart name in the manifest, try using the source
+                # to find the chart directory.
+                try:
+                    # helm charts should be of the standard format:
+                    # <chartname>-X.X.X.tgz
+                    url_path = os.path.basename(urlparse(chart.location).path)
+                    # strip the .tgz
+                    chart_and_version = re.sub('\.tgz$', '', url_path)
+                    # strip the version
+                    chart_name_no_version = re.sub('-(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)',
+                                                   '', chart_and_version)
+                    chart_name = os.path.join(app.charts_dir, chart_name_no_version)
+                except Exception as e:
+                    LOG.info("Cannot parse chart path: %s" % e)
+                    pass
+
             chart_path = os.path.join(chart_name, 'values.yaml')
+
             try_image_tag_repo_format = False
+            try_image_imagetag_format = False
+
             if os.path.exists(chart_path):
                 with open(chart_path, 'r') as f:
                     y = yaml.safe_load(f)
@@ -711,6 +756,16 @@ class AppOperator(object):
                         images = {chart.name: y_image_tag}
                     except (AttributeError, TypeError, KeyError):
                         LOG.info("Chart %s has no image tags" % chart_name)
+                        try_image_imagetag_format = True
                         pass
 
+                if try_image_imagetag_format:
+                    try:
+                        y_image_tag = \
+                            y['image'] + ":" + y['imageTag']
+                        images = {chart.name: y_image_tag}
+                    except (AttributeError, TypeError, KeyError):
+                        LOG.info("Chart %s has no imageTag tags" % chart_name)
+                        pass
+
             if images:
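The new fallback above recovers the chart directory from the chart's source URL when the armada chart name differs from the helm chart name. A quick standalone check of that parsing; the sample URL is made up:

import os
import re
from six.moves.urllib.parse import urlparse

location = 'http://controller:8080/helm_charts/elasticsearch-master-7.2.0.tgz'
url_path = os.path.basename(urlparse(location).path)  # 'elasticsearch-master-7.2.0.tgz'
chart_and_version = re.sub(r'\.tgz$', '', url_path)   # 'elasticsearch-master-7.2.0'
chart_name = re.sub(r'-(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)',
                    '', chart_and_version)
print(chart_name)  # elasticsearch-master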
@@ -58,7 +58,9 @@ HELM_CHART_KEYSTONE_API_PROXY = 'keystone-api-proxy'
 HELM_CHART_SWIFT = 'ceph-rgw'
 HELM_CHART_NGINX_PORTS_CONTROL = "nginx-ports-control"
 
-HELM_CHART_ELASTICSEARCH = 'elasticsearch'
+HELM_CHART_ELASTICSEARCH_MASTER = 'elasticsearch-master'
+HELM_CHART_ELASTICSEARCH_DATA = 'elasticsearch-data'
+HELM_CHART_ELASTICSEARCH_CLIENT = 'elasticsearch-client'
 HELM_CHART_KIBANA = 'kibana'
 HELM_CHART_FILEBEAT = 'filebeat'
 HELM_CHART_METRICBEAT = 'metricbeat'
sysinv/sysinv/sysinv/sysinv/helm/elasticsearch.py (deleted, 114 lines)
@@ -1,114 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-from sysinv.common import exception
-from sysinv.common import utils
-from sysinv.helm import common
-from sysinv.helm import elastic
-
-
-class ElasticsearchHelm(elastic.ElasticBaseHelm):
-    """Class to encapsulate helm operations for elasticsearch"""
-
-    CHART = common.HELM_CHART_ELASTICSEARCH
-
-    def get_overrides(self, namespace=None):
-        overrides = {
-            common.HELM_NS_MONITOR: {
-                'cluster': self._get_cluster_overrides(),
-                'master': self._get_master_overrides(),
-                'data': self._get_data_overrides(),
-                'client': self._get_client_overrides(),
-            }
-        }
-
-        if namespace in self.SUPPORTED_NAMESPACES:
-            return overrides[namespace]
-        elif namespace:
-            raise exception.InvalidHelmNamespace(chart=self.CHART,
-                                                 namespace=namespace)
-        else:
-            return overrides
-
-    def _get_cluster_overrides(self):
-
-        env_vars = {'MINIMUM_MASTER_NODES': "1",
-                    'EXPECTED_MASTER_NODES': "1",
-                    'RECOVER_AFTER_MASTER_NODES': "1"}
-
-        conf = {
-            'env': env_vars,
-        }
-        return conf
-
-    def _get_master_overrides(self):
-        if utils.is_aio_system(self.dbapi):
-            heap_size = "256m"
-            replicas = 1
-        else:
-            heap_size = "512m"
-            replicas = 3
-
-        conf = {
-            'replicas': replicas,
-            'heapSize': heap_size,
-            'nodeSelector': {common.LABEL_MONITOR_MASTER: "enabled"},
-            'antiAffinity': "hard",
-        }
-        return conf
-
-    def _get_data_overrides(self):
-        # Note memory values are to be system engineered.
-
-        combined_data_and_master = False
-        replicas = 2
-        if utils.is_aio_system(self.dbapi):
-            heap_size = "512m"
-            memory_size = "512Mi"
-            if utils.is_aio_duplex_system(self.dbapi):
-                combined_data_and_master = True
-            else:
-                # AIO simplex, 1 replica
-                replicas = 1
-        else:
-            heap_size = "1536m"
-            memory_size = "1536Mi"
-
-        conf = {
-            'replicas': replicas,
-            'heapSize': heap_size,
-            'resources': {
-                'limits': {
-                    'cpu': "1"
-                },
-                'requests': {
-                    'cpu': "25m",
-                    'memory': memory_size,
-                }, },
-            'persistence': {'storageClass': 'general',
-                            'size': "100Gi"},
-            'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"},
-            'antiAffinity': "hard",
-            'combinedDataAndMaster': combined_data_and_master,
-        }
-        return conf
-
-    def _get_client_overrides(self):
-        replicas = 2
-        if utils.is_aio_system(self.dbapi):
-            heap_size = "256m"
-            if utils.is_aio_simplex_system(self.dbapi):
-                replicas = 1
-        else:
-            heap_size = "512m"
-
-        conf = {
-            'replicas': replicas,
-            'heapSize': heap_size,
-            'nodeSelector': {common.LABEL_MONITOR_CLIENT: "enabled"},
-            'antiAffinity': "hard",
-        }
-        return conf
sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_client.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from sysinv.common import exception
+from sysinv.common import utils
+from sysinv.helm import common
+from sysinv.helm import elastic
+
+
+class ElasticsearchClientHelm(elastic.ElasticBaseHelm):
+    """Class to encapsulate helm operations for elasticsearch client"""
+
+    CHART = common.HELM_CHART_ELASTICSEARCH_CLIENT
+
+    def get_overrides(self, namespace=None):
+        replicas = 2
+        if utils.is_aio_system(self.dbapi):
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx256m -Xms256m"
+            if utils.is_aio_simplex_system(self.dbapi):
+                replicas = 1
+        else:
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx512m -Xms512m"
+
+        overrides = {
+            common.HELM_NS_MONITOR: {
+                'replicas': replicas,
+                'esJavaOpts': esJavaOpts,
+                'nodeSelector': {common.LABEL_MONITOR_CLIENT: "enabled"},
+                'resources': {
+                    'limits': {
+                        'cpu': "1"
+                    },
+                    'requests': {
+                        'cpu': "25m",
+                        'memory': "512Mi",
+                    },
+                },
+                'persistence': {'enabled': False}
+            }
+        }
+
+        if namespace in self.SUPPORTED_NAMESPACES:
+            return overrides[namespace]
+        elif namespace:
+            raise exception.InvalidHelmNamespace(chart=self.CHART,
+                                                 namespace=namespace)
+        else:
+            return overrides
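A hedged usage sketch for the class above (and its data/master siblings below): the constructor argument and SUPPORTED_NAMESPACES come from the elastic.ElasticBaseHelm base class, which is not shown in this diff, so the call shape here is an assumption:

helm = ElasticsearchClientHelm(operator)  # 'operator' is a hypothetical handle
all_ns = helm.get_overrides()             # dict keyed by namespace
monitor = helm.get_overrides(namespace=common.HELM_NS_MONITOR)
assert monitor == all_ns[common.HELM_NS_MONITOR]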
sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_data.py (new file, 79 lines)
@@ -0,0 +1,79 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from sysinv.common import exception
+from sysinv.common import utils
+from sysinv.helm import common
+from sysinv.helm import elastic
+
+
+class ElasticsearchDataHelm(elastic.ElasticBaseHelm):
+    """Class to encapsulate helm operations for elasticsearch data"""
+
+    CHART = common.HELM_CHART_ELASTICSEARCH_DATA
+
+    def get_overrides(self, namespace=None):
+
+        # Note memory values are to be system engineered.
+
+        combined_data_and_master = False
+        replicas = 2
+        if utils.is_aio_system(self.dbapi):
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx512m -Xms512m"
+            memory_size = "512Mi"
+
+            if (utils.is_aio_duplex_system(self.dbapi) and
+                    self._count_hosts_by_label(
+                        common.LABEL_MONITOR_MASTER) < 3):
+                # For AIO-DX without master labelled worker nodes,
+                # configure elasticsearch data pods as master capable,
+                # so they will form a cluster of 3 masters with the single
+                # elasticsearch master pod.
+                combined_data_and_master = True
+
+            if utils.is_aio_simplex_system(self.dbapi):
+                replicas = 1
+        else:
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx1536m -Xms1536m"
+            memory_size = "1536Mi"
+
+        overrides = {
+            common.HELM_NS_MONITOR: {
+                'nodeGroup': 'data',
+                'replicas': replicas,
+                'esJavaOpts': esJavaOpts,
+                'resources': {
+                    'limits': {
+                        'cpu': "1"
+                    },
+                    'requests': {
+                        'cpu': "25m",
+                        'memory': memory_size,
+                    },
+                },
+                'volumeClaimTemplate': {
+                    'accessModes': ["ReadWriteOnce"],
+                    'resources': {
+                        'requests': {'storage': '100Gi'}
+                    },
+                    'storageClass': 'general'
+                },
+                'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"},
+                'antiAffinity': "hard",
+            }
+        }
+
+        if combined_data_and_master:
+            overrides[common.HELM_NS_MONITOR]['roles'] = {'master': 'true'}
+            overrides[common.HELM_NS_MONITOR]['minimumMasterNodes'] = 1
+
+        if namespace in self.SUPPORTED_NAMESPACES:
+            return overrides[namespace]
+        elif namespace:
+            raise exception.InvalidHelmNamespace(chart=self.CHART,
+                                                 namespace=namespace)
+        else:
+            return overrides
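Context for minimumMasterNodes above: pre-7.x Elasticsearch guards against split-brain by requiring a quorum of master-eligible nodes, conventionally floor(N / 2) + 1. A one-line sketch of that general rule (standard Elasticsearch guidance, not code from this commit):

def quorum(master_eligible_nodes):
    # e.g. 3 master-eligible nodes -> quorum of 2
    return master_eligible_nodes // 2 + 1

The chart overrides above pin the value explicitly per configuration instead of deriving it at runtime.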
sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_master.py (new file, 68 lines)
@@ -0,0 +1,68 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+from sysinv.common import exception
+from sysinv.common import utils
+from sysinv.helm import common
+from sysinv.helm import elastic
+
+
+class ElasticsearchMasterHelm(elastic.ElasticBaseHelm):
+    """Class to encapsulate helm operations for elasticsearch master"""
+
+    CHART = common.HELM_CHART_ELASTICSEARCH_MASTER
+
+    def get_overrides(self, namespace=None):
+
+        minimumMasterNodes = 1
+
+        replicas = 3
+        if utils.is_aio_system(self.dbapi):
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx256m -Xms256m"
+
+            if self._count_hosts_by_label(common.LABEL_MONITOR_MASTER) < 3:
+                # For AIO-SX, we will get here by definition, as there will
+                # only be 1 master labelled host.
+                # For AIO-DX without master labelled worker, we only
+                # need 1 elasticsearch master pod, as the 2 data
+                # pods will be master capable to form a cluster of 3 masters.
+                replicas = 1
+        else:
+            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx512m -Xms512m"
+
+        overrides = {
+            common.HELM_NS_MONITOR: {
+                'nodeGroup': 'master',
+                'replicas': replicas,
+                'esJavaOpts': esJavaOpts,
+                'minimumMasterNodes': minimumMasterNodes,
+                'nodeSelector': {common.LABEL_MONITOR_MASTER: "enabled"},
+                'resources': {
+                    'limits': {
+                        'cpu': "1"
+                    },
+                    'requests': {
+                        'cpu': "25m",
+                        'memory': "512Mi",
+                    },
+                },
+                'volumeClaimTemplate': {
+                    'accessModes': ["ReadWriteOnce"],
+                    'resources': {
+                        'requests': {'storage': '4Gi'}
+                    },
+                    'storageClass': 'general'
+                },
+            }
+        }
+
+        if namespace in self.SUPPORTED_NAMESPACES:
+            return overrides[namespace]
+        elif namespace:
+            raise exception.InvalidHelmNamespace(chart=self.CHART,
+                                                 namespace=namespace)
+        else:
+            return overrides
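Taken together, the three new override classes yield these replica counts per configuration; a summary sketch derived from the code above (function and parameter names are illustrative):

def monitor_cluster_shape(aio_simplex, aio_duplex, master_labelled_hosts):
    if aio_simplex:
        # Single host: one master, one data, one client pod.
        return {'master': 1, 'data': 1, 'client': 1}
    if aio_duplex and master_labelled_hosts < 3:
        # 2 master-capable data pods + 1 master pod = 3 masters total.
        return {'master': 1, 'data': 2, 'client': 2}
    return {'master': 3, 'data': 2, 'client': 2}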
@@ -13,7 +13,9 @@ from sysinv.common import constants
 from sysinv.helm import manifest_base as base
 from sysinv.helm.logstash import LogstashHelm
 from sysinv.helm.kibana import KibanaHelm
-from sysinv.helm.elasticsearch import ElasticsearchHelm
+from sysinv.helm.elasticsearch_master import ElasticsearchMasterHelm
+from sysinv.helm.elasticsearch_client import ElasticsearchClientHelm
+from sysinv.helm.elasticsearch_data import ElasticsearchDataHelm
 from sysinv.helm.filebeat import FilebeatHelm
 from sysinv.helm.metricbeat import MetricbeatHelm
 from sysinv.helm.nginx_ingress import NginxIngressHelm
@@ -35,7 +37,9 @@ class MonitorArmadaManifestOperator(base.ArmadaManifestOperator):
     CHART_GROUPS_LUT = {
         NginxIngressHelm.CHART: CHART_GROUP_NGINX,
         KibanaHelm.CHART: CHART_GROUP_KIBANA,
-        ElasticsearchHelm.CHART: CHART_GROUP_ELASTICSEARCH,
+        ElasticsearchMasterHelm.CHART: CHART_GROUP_ELASTICSEARCH,
+        ElasticsearchClientHelm.CHART: CHART_GROUP_ELASTICSEARCH,
+        ElasticsearchDataHelm.CHART: CHART_GROUP_ELASTICSEARCH,
         LogstashHelm.CHART: CHART_GROUP_LOGSTASH,
         FilebeatHelm.CHART: CHART_GROUP_FILEBEAT,
         MetricbeatHelm.CHART: CHART_GROUP_METRICBEAT,
@@ -45,7 +49,9 @@ class MonitorArmadaManifestOperator(base.ArmadaManifestOperator):
     CHARTS_LUT = {
         NginxIngressHelm.CHART: 'nginx-ingress',
         KibanaHelm.CHART: 'kibana',
-        ElasticsearchHelm.CHART: 'elasticsearch',
+        ElasticsearchMasterHelm.CHART: 'elasticsearch-master',
+        ElasticsearchClientHelm.CHART: 'elasticsearch-client',
+        ElasticsearchDataHelm.CHART: 'elasticsearch-data',
         LogstashHelm.CHART: 'logstash',
         FilebeatHelm.CHART: 'filebeat',
         MetricbeatHelm.CHART: 'metricbeat',
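A small sketch of what the updated LUTs feed: when the armada manifest is regenerated, each enabled chart resolves to a chart group and a manifest chart name, so CHART_GROUP_ELASTICSEARCH now collects three charts instead of one. An illustrative helper, not the operator's real API:

def charts_in_group(chart_groups_lut, enabled_charts, group):
    return [c for c in enabled_charts if chart_groups_lut.get(c) == group]

# charts_in_group(CHART_GROUPS_LUT,
#                 ['elasticsearch-master', 'elasticsearch-data',
#                  'elasticsearch-client', 'kibana'],
#                 CHART_GROUP_ELASTICSEARCH)
# -> ['elasticsearch-master', 'elasticsearch-data', 'elasticsearch-client']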