@Hung Please find my OpenSearch and OpenSearch Dashboards values.yml files below.
OpenSearch values.yml
---
clusterName: "opensearch-cluster"
nodeGroup: "master"
# If discovery.type in the opensearch configuration is set to "single-node",
# this should be set to "true"
# If "true", replicas will be forced to 1
singleNode: false
# The service that non-master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: "opensearch-cluster-master"
# OpenSearch roles that will be applied to this nodeGroup
# These will be set as environment variable "node.roles". E.g. node.roles=master,ingest,data,remote_cluster_client
roles:
- master
- ingest
- data
- remote_cluster_client
replicas: 3
# if not set, falls back to parsing .Values.imageTag, then .Chart.appVersion.
majorVersion: ""
global:
# Set if you want to change the default docker registry, e.g. a private one.
dockerRegistry: ""
opensearchHome: /usr/share/opensearch
# Allows you to add any config files in {{ .Values.opensearchHome }}/config,
# such as opensearch.yml and log4j2.properties
config:
# Values must be YAML literal style scalar / YAML multiline string.
# <filename>: |
# <formatted-value(s)>
# log4j2.properties: |
# status = error
#
# appender.console.type = Console
# appender.console.name = console
# appender.console.layout.type = PatternLayout
# appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
#
# rootLogger.level = info
# rootLogger.appenderRef.console.ref = console
opensearch.yml: |
cluster.name: opensearch-cluster
# Bind to all interfaces because we don't know what IP address Docker will assign to us.
network.host: 0.0.0.0
# Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again.
# Implicitly done if ".singleNode" is set to "true".
# discovery.type: single-node
    ######## Start OpenSearch Security Demo Configuration ########
# WARNING: revise all the lines below before you go into production
plugins:
security:
ssl:
transport:
pemcert_filepath: customSSL/esnode.pem
pemkey_filepath: customSSL/esnode-key.pem
pemtrustedcas_filepath: customSSL/root-ca.pem
enforce_hostname_verification: false
http:
enabled: true
pemcert_filepath: customSSL/esnode.pem
pemkey_filepath: customSSL/esnode-key.pem
pemtrustedcas_filepath: customSSL/root-ca.pem
allow_unsafe_democertificates: true
allow_default_init_securityindex: true
authcz:
admin_dn:
- CN=kirk,OU=client,O=client,L=test,C=de
audit.type: internal_opensearch
enable_snapshot_restore_privilege: true
check_snapshot_restore_write_privileges: true
restapi:
roles_enabled: ["all_access", "security_rest_api_access"]
system_indices:
enabled: true
indices:
[
".opendistro-alerting-config",
".opendistro-alerting-alert*",
".opendistro-anomaly-results*",
".opendistro-anomaly-detector*",
".opendistro-anomaly-checkpoints",
".opendistro-anomaly-detection-state",
".opendistro-reports-*",
".opendistro-notifications-*",
".opendistro-notebooks",
".opendistro-asynchronous-search-response*",
]
######## End OpenSearch Security Demo Configuration ########
# log4j2.properties:
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
# - name: MY_ENVIRONMENT_VAR
# value: the_value_goes_here
  # From chart version 2.18.0 (app version OpenSearch 2.12.0) onwards, a custom strong
  # password must be provided in order to set up the demo admin user.
  # The cluster will not spin up without it unless the demo config install is disabled.
- name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
value: Eliatra123
- name: DISABLE_INSTALL_DEMO_CONFIG
value: "true"
# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
# name: env-secret
# - configMapRef:
# name: config-map
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts:
- name: opensearch-certs
secretName: opensearch-certs
path: /usr/share/opensearch/config/customSSL
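# The opensearch-certs secret is assumed to exist before install; a minimal sketch of
# creating it from the PEM files referenced in opensearch.yml above (local filenames assumed):
# kubectl create secret generic opensearch-certs \
#   --from-file=esnode.pem --from-file=esnode-key.pem --from-file=root-ca.pem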
hostAliases: []
# - ip: "127.0.0.1"
# hostnames:
# - "foo.local"
# - "bar.local"
image:
repository: "opensearchproject/opensearch"
# override image tag, which is .Chart.AppVersion by default
tag: ""
pullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# OpenSearch Statefulset annotations
openSearchAnnotations: {}
# additionals labels
labels: {}
opensearchJavaOpts: "-Xmx512M -Xms512M"
resources:
requests:
cpu: "1000m"
memory: "100Mi"
initResources: {}
# limits:
# cpu: "25m"
# memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
sidecarResources: {}
# limits:
# cpu: "25m"
# memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
networkHost: "0.0.0.0"
rbac:
create: false
serviceAccountAnnotations: {}
serviceAccountName: ""
# Controls whether or not the Service Account token is automatically mounted to /var/run/secrets/kubernetes.io/serviceaccount
automountServiceAccountToken: false
podSecurityPolicy:
create: false
name: ""
spec:
privileged: true
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
- emptyDir
persistence:
enabled: true
# Set to false to disable the `fsgroup-volume` initContainer that will update permissions on the persistent disk.
enableInitChown: true
# override image, which is busybox by default
# image: busybox
# override image tag, which is latest by default
# imageTag:
labels:
# Add default labels for the volumeClaimTemplate of the StatefulSet
enabled: false
# Add custom labels for the volumeClaimTemplate of the StatefulSet
additionalLabels: {}
# OpenSearch Persistent Volume Storage Class
# If defined, storageClassName: <storageClass>
# If set to "-", storageClassName: "", which disables dynamic provisioning
# If undefined (the default) or set to null, no storageClassName spec is
# set, choosing the default provisioner. (gp2 on AWS, standard on
# GKE, AWS & OpenStack)
#
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
extraVolumes: []
# - name: extras
# emptyDir: {}
extraVolumeMounts: []
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
extraInitContainers: []
# - name: do-somethings
# image: busybox
# command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort".
# Setting this to custom will use what is passed into customAntiAffinity.
antiAffinity: "soft"
# Allows passing in custom anti-affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
# Using this parameter requires setting antiAffinity to custom.
customAntiAffinity: {}
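# A minimal sketch of a custom rule (requires antiAffinity: "custom"); the label
# selector is hypothetical and must match your release's actual pod labels:
# customAntiAffinity:
#   preferredDuringSchedulingIgnoredDuringExecution:
#     - weight: 100
#       podAffinityTerm:
#         topologyKey: topology.kubernetes.io/zone
#         labelSelector:
#           matchLabels:
#             app.kubernetes.io/name: opensearch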
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# This is the pod affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
podAffinity: {}
# This is the pod topology spread constraints
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
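# A sketch of spreading pods evenly across zones (label selector hypothetical,
# match it to your release's pod labels):
# topologySpreadConstraints:
#   - maxSkew: 1
#     topologyKey: topology.kubernetes.io/zone
#     whenUnsatisfiable: ScheduleAnyway
#     labelSelector:
#       matchLabels:
#         app.kubernetes.io/name: opensearch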
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
# The environment variables injected by service links are not used, but can lead to slow OpenSearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true
protocol: https
httpPort: 9200
transportPort: 9300
metricsPort: 9600
httpHostPort: ""
transportHostPort: ""
service:
labels: {}
labelsHeadless: {}
headless:
annotations: {}
type: ClusterIP
# The IP family and IP families options are to set the behaviour in a dual-stack environment
# Omitting these values will let the service fall back to whatever the CNI dictates the defaults
# should be
#
# ipFamilyPolicy: SingleStack
# ipFamilies:
# - IPv4
nodePort: ""
annotations: {}
httpPortName: http
transportPortName: transport
metricsPortName: metrics
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
securityContext:
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
securityConfig:
enabled: true
path: "/usr/share/opensearch/config/opensearch-security"
actionGroupsSecret:
configSecret:
internalUsersSecret:
rolesSecret:
rolesMappingSecret:
tenantsSecret:
# The following option simplifies securityConfig by using a single secret and
# specifying the config files as keys in the secret instead of creating
  # different secrets for each config file.
# Note that this is an alternative to the individual secret configuration
# above and shouldn't be used if the above secrets are used.
config:
# There are multiple ways to define the configuration here:
    # * If you define anything under data, the chart will automatically create
    #   a secret and mount it. This is the best option if you want to override all
    #   the existing yml files at once.
    # * If you define securityConfigSecret, the chart will assume this secret is
    #   created externally and mount it. This is the best option if you only intend
    #   to update a single yml file.
# * It is an error to define both data and securityConfigSecret.
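    # A sketch of the external-secret route (secret name hypothetical): create the
    # secret from the yml files first, e.g.
    #   kubectl create secret generic opensearch-security-config \
    #     --from-file=config.yml --from-file=internal_users.yml
    # then reference it below as securityConfigSecret: opensearch-security-config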
securityConfigSecret: ""
dataComplete: true
data:
config.yml: |-
_meta:
type: "config"
config_version: "2"
config:
dynamic:
kibana:
server_username: pablo
http:
anonymous_auth_enabled: false
authc:
basic_internal_auth_domain:
description: "Authenticate via HTTP Basic against internal users database"
http_enabled: true
transport_enabled: true
order: 0
http_authenticator:
type: basic
challenge: false
authentication_backend:
type: intern
saml_auth_domain:
order: 1
description: "SAML provider"
http_enabled: true
transport_enabled: false
http_authenticator:
type: saml
challenge: true
config:
idp:
metadata_url: https://login.microsoftonline.com/<tenant_id>/federationmetadata/2007-06/federationmetadata.xml?appid=<app_id>
entity_id: https://sts.windows.net/<tenant_id>/
sp:
entity_id: Azure_SAML
kibana_url: https://test.pablo.local/
                      exchange_key: "12345678901234567890123456789012"
roles_key: http://schemas.microsoft.com/ws/2008/06/identity/claims/groups
authentication_backend:
type: noop
internal_users.yml: |-
---
# This is the internal user database
# The hash value is a bcrypt hash and can be generated with plugin/tools/hash.sh
_meta:
type: "internalusers"
config_version: 2
# Define your internal users here
## Demo users
admin:
hash: "$2a$12$VcCDgh2NDk07JGN0rjGbM.Ad41qVR/YFJcgHp0UGns5JDymv..TOG"
reserved: true
backend_roles:
- "admin"
description: "Demo admin user"
anomalyadmin:
hash: "$2y$12$TRwAAJgnNo67w3rVUz4FIeLx9Dy/llB79zf9I15CKJ9vkM4ZzAd3."
reserved: false
opendistro_security_roles:
- "anomaly_full_access"
description: "Demo anomaly admin user, using internal role"
kibanaserver:
hash: "$2y$12$rHTC9pPb8G4/j1092CRpnuGuewcIBsmTEz0cHylLzX.jBoEYwJ/0i"
reserved: true
description: "Demo OpenSearch Dashboards user"
pablo:
hash: "$2y$12$rHTC9pPb8G4/j1092CRpnuGuewcIBsmTEz0cHylLzX.jBoEYwJ/0i"
reserved: true
description: "Demo OpenSearch Dashboards user"
kibanaro:
hash: "$2a$12$JJSXNfTowz7Uu5ttXfeYpeYE0arACvcwlPBStB1F.MI7f0U9Z4DGC"
reserved: false
backend_roles:
- "kibanauser"
- "readall"
attributes:
attribute1: "value1"
attribute2: "value2"
attribute3: "value3"
description: "Demo OpenSearch Dashboards read only user, using external role mapping"
logstash:
hash: "$2a$12$u1ShR4l4uBS3Uv59Pa2y5.1uQuZBrZtmNfqB3iM/.jL0XoV9sghS2"
reserved: false
backend_roles:
- "logstash"
description: "Demo logstash user, using external role mapping"
readall:
hash: "$2a$12$ae4ycwzwvLtZxwZ82RmiEunBbIPiAmGZduBAjKN0TXdwQFtCwARz2"
reserved: false
backend_roles:
- "readall"
description: "Demo readall user, using external role mapping"
snapshotrestore:
hash: "$2y$12$DpwmetHKwgYnorbgdvORCenv4NAK8cPUg8AI6pxLCuWf/ALc0.v7W"
reserved: false
backend_roles:
- "snapshotrestore"
description: "Demo snapshotrestore user, using external role mapping"
roles.yml: |-
---
_meta:
type: "roles"
config_version: 2
kibana_read_only:
reserved: true
security_rest_api_access:
reserved: true
alerting_read_access:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/alerting/alerts/get"
- "cluster:admin/opendistro/alerting/destination/get"
- "cluster:admin/opendistro/alerting/monitor/get"
- "cluster:admin/opendistro/alerting/monitor/search"
alerting_ack_alerts:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/alerting/alerts/*"
alerting_full_access:
reserved: true
cluster_permissions:
- "cluster_monitor"
- "cluster:admin/opendistro/alerting/*"
index_permissions:
- index_patterns:
- "*"
allowed_actions:
- "indices_monitor"
- "indices:admin/aliases/get"
- "indices:admin/mappings/get"
anomaly_read_access:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/ad/detector/info"
- "cluster:admin/opendistro/ad/detector/search"
- "cluster:admin/opendistro/ad/detectors/get"
- "cluster:admin/opendistro/ad/result/search"
anomaly_full_access:
reserved: true
cluster_permissions:
- "cluster_monitor"
- "cluster:admin/opendistro/ad/*"
index_permissions:
- index_patterns:
- "*"
allowed_actions:
- "indices_monitor"
- "indices:admin/aliases/get"
- "indices:admin/mappings/get"
reports_instances_read_access:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/reports/instance/list"
- "cluster:admin/opendistro/reports/instance/get"
- "cluster:admin/opendistro/reports/menu/download"
reports_read_access:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/reports/definition/get"
- "cluster:admin/opendistro/reports/definition/list"
- "cluster:admin/opendistro/reports/instance/list"
- "cluster:admin/opendistro/reports/instance/get"
- "cluster:admin/opendistro/reports/menu/download"
reports_full_access:
reserved: true
cluster_permissions:
- "cluster:admin/opendistro/reports/definition/create"
- "cluster:admin/opendistro/reports/definition/update"
- "cluster:admin/opendistro/reports/definition/on_demand"
- "cluster:admin/opendistro/reports/definition/delete"
- "cluster:admin/opendistro/reports/definition/get"
- "cluster:admin/opendistro/reports/definition/list"
- "cluster:admin/opendistro/reports/instance/list"
- "cluster:admin/opendistro/reports/instance/get"
- "cluster:admin/opendistro/reports/menu/download"
SYSTEME_ABC-XYZ:
reserved: false
hidden: false
cluster_permissions:
- 'cluster_composite_ops'
index_permissions:
- index_patterns:
- 'abc*logs-socles*'
fls: []
dls: ""
masked_fields: []
allowed_actions:
- 'read'
- 'kibana_all_read'
- index_patterns:
- 'abc*logs-2*'
fls: []
dls: ""
masked_fields: []
allowed_actions:
- 'read'
- 'kibana_all_read'
- index_patterns:
- '.kibana'
fls: []
dls: ""
masked_fields: []
allowed_actions:
- 'read'
- 'kibana_all_read'
roles_mapping.yml: |-
---
# In this file users, backendroles and hosts can be mapped to Security roles.
# Permissions for OpenSearch roles are configured in roles.yml
_meta:
type: "rolesmapping"
config_version: 2
# Define your roles mapping here
## Demo roles mapping
all_access:
reserved: false
backend_roles:
- "admin"
description: "Maps admin to all_access"
own_index:
reserved: false
users:
- "*"
description: "Allow full access to an index named like the username"
logstash:
reserved: false
backend_roles:
- "logstash"
kibana_user:
reserved: false
backend_roles:
- "kibanauser"
description: "Maps kibanauser to kibana_user"
readall:
reserved: false
backend_roles:
- "readall"
manage_snapshots:
reserved: false
backend_roles:
- "snapshotrestore"
kibana_server:
reserved: true
users:
- "kibanaserver"
- "pablo"
action_groups.yml: |-
_meta:
type: "actiongroups"
config_version: 2
audit.yml: |-
---
_meta:
type: "audit"
config_version: 2
config:
enabled: true
audit:
enable_rest: true
disabled_rest_categories:
- "AUTHENTICATED"
- "GRANTED_PRIVILEGES"
enable_transport: true
disabled_transport_categories:
- "AUTHENTICATED"
- "GRANTED_PRIVILEGES"
ignore_users:
- "kibanaserver"
ignore_requests: []
resolve_bulk_requests: false
log_request_body: true
resolve_indices: true
exclude_sensitive_headers: true
compliance:
enabled: true
internal_config: true
external_config: false
read_metadata_only: true
read_watched_fields: {}
read_ignore_users:
- "kibanaserver"
write_metadata_only: true
write_log_diffs: false
write_watched_indices: []
write_ignore_users:
- "kibanaserver"
tenants.yml: |-
---
_meta:
type: "tenants"
config_version: 2
# Define your tenants here
## Demo tenants
admin_tenant:
reserved: false
description: "Demo tenant for admin user"
# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
startupProbe:
tcpSocket:
port: 9200
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 30
livenessProbe: {}
# periodSeconds: 20
# timeoutSeconds: 5
# failureThreshold: 10
# successThreshold: 1
# initialDelaySeconds: 10
# tcpSocket:
# port: 9200
readinessProbe:
tcpSocket:
port: 9200
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publicly expose your OpenSearch instance.
# Only enable this if you have security enabled on your cluster.
ingress:
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
ingressLabels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
nameOverride: ""
fullnameOverride: ""
masterTerminationFix: false
opensearchLifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"]
# postStart:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command:
# - bash
# - -c
# - |
# #!/bin/bash
# # Add a template to adjust the number of shards/replicas
# TEMPLATE_NAME=my_template
# INDEX_PATTERN="logstash-*"
# SHARD_COUNT=8
# REPLICA_COUNT=1
# ES_URL=http://localhost:9200
# while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
# curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
keystore: []
# To add secrets to the keystore:
# - secretName: opensearch-encryption-key
networkPolicy:
create: false
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
## In order for a Pod to access OpenSearch, it needs to have the following label:
## {{ template "uname" . }}-client: "true"
## Example for default configuration to access HTTP port:
## opensearch-master-http-client: "true"
## Example for default configuration to access transport port:
## opensearch-master-transport-client: "true"
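## For the values above (uname = clusterName + "-" + nodeGroup), a client Pod would
## likely need the following label (sketch):
##   metadata:
##     labels:
##       opensearch-cluster-master-http-client: "true"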
http:
enabled: false
# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
## Set optimal sysctls through securityContext. This requires privilege. Can be disabled if
## the system has already been preconfigured. (Ex: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
## Also see: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
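## If nodes are preconfigured manually instead, the equivalent host-level setting
## (matching sysctlVmMaxMapCount above) would be: sysctl -w vm.max_map_count=262144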
sysctl:
enabled: false
## Set optimal sysctls through a privileged initContainer.
sysctlInit:
enabled: false
# override image, which is busybox by default
# image: busybox
# override image tag, which is latest by default
# imageTag:
## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearch image.
plugins:
enabled: false
installList: []
# - example-fake-plugin
removeList: []
# - example-fake-plugin
# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
# kind: SecretProviderClass
# metadata:
# name: argocd-secrets-store
# spec:
# provider: aws
# parameters:
# objects: |
# - objectName: "argocd"
# objectType: "secretsmanager"
# jmesPath:
# - path: "client_id"
# objectAlias: "client_id"
# - path: "client_secret"
# objectAlias: "client_secret"
# secretObjects:
# - data:
# - key: client_id
# objectName: client_id
# - key: client_secret
# objectName: client_secret
# secretName: argocd-secrets-store
# type: Opaque
# labels:
# app.kubernetes.io/part-of: argocd
# - |
# apiVersion: policy/v1
# kind: PodDisruptionBudget
# metadata:
# name: {{ template "opensearch.uname" . }}
# labels:
# {{- include "opensearch.labels" . | nindent 4 }}
# spec:
# minAvailable: 1
# selector:
# matchLabels:
# {{- include "opensearch.selectorLabels" . | nindent 6 }}
# ServiceMonitor Configuration for Prometheus
# Enabling this option will create a ServiceMonitor resource that allows Prometheus to scrape metrics from the OpenSearch service.
# This only creates the ServiceMonitor; for metrics to actually be served, install the
# prometheus-exporter plugin via the `.Values.plugins` value:
# plugins:
# enabled: true
# installList:
# - https://github.com/aiven/prometheus-exporter-plugin-for-opensearch/releases/download/x.x.x.x/prometheus-exporter-x.x.x.x.zip
serviceMonitor:
# Set to true to enable the ServiceMonitor resource
enabled: false
# HTTP path where metrics are exposed.
# Ensure this matches your OpenSearch service configuration.
path: /_prometheus/metrics
# Scheme to use for scraping.
scheme: http
# Frequency at which Prometheus will scrape metrics.
# Adjust based on your needs.
interval: 10s
# additional labels to be added to the ServiceMonitor
# labels:
# k8s.example.com/prometheus: kube-prometheus
labels: {}
# additional tlsConfig to be added to the ServiceMonitor
tlsConfig: {}
# Basic Auth configuration for the service monitor
# You can either use existingSecret, which expects a secret to be already present with data.username and data.password
# or set the credentials over the helm values, making helm create a secret for you
# basicAuth:
#   enabled: true
# existingSecret: my-secret
# username: my-username
# password: my-password
basicAuth:
enabled: false
OpenSearch Dashboards values.yml
# Copyright OpenSearch Contributors
# SPDX-License-Identifier: Apache-2.0
# Default values for opensearch-dashboards.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
opensearchHosts: "https://opensearch-cluster-master:9200"
replicaCount: 1
image:
repository: "opensearchproject/opensearch-dashboards"
# override image tag, which is .Chart.AppVersion by default
tag: ""
pullPolicy: "IfNotPresent"
startupProbe:
tcpSocket:
port: 5601
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 20
successThreshold: 1
initialDelaySeconds: 10
livenessProbe:
tcpSocket:
port: 5601
periodSeconds: 20
timeoutSeconds: 5
failureThreshold: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
tcpSocket:
port: 5601
periodSeconds: 20
timeoutSeconds: 5
failureThreshold: 10
successThreshold: 1
initialDelaySeconds: 10
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
rbac:
create: true
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
# - name: certs
# secretName: dashboard-certs
# path: /usr/share/dashboards/certs
podAnnotations: {}
# Deployment annotations
dashboardAnnotations: {}
extraEnvs: []
# - name: "NODE_OPTIONS"
# value: "--max-old-space-size=1800"
envFrom: []
extraVolumes: []
# - name: extras
# emptyDir: {}
extraVolumeMounts: []
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: ""
extraContainers: ""
podSecurityContext: {}
securityContext:
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
config: {}
# Default OpenSearch Dashboards configuration from the Dashboards docker image
# opensearch_dashboards.yml: |
# server:
# name: dashboards
# host: "{{ .Values.serverHost }}"
# opensearch_dashboards.yml:
# server:
# name: dashboards
# host: "{{ .Values.serverHost }}"
# Dashboards TLS Config (ensure the cert files are present before enabling SSL):
# ssl:
# enabled: true
# key: /usr/share/opensearch-dashboards/certs/dashboards-key.pem
# certificate: /usr/share/opensearch-dashboards/certs/dashboards-crt.pem
# Determines how dashboards will verify certificates (needs to be none for default opensearch certificates to work):
# opensearch:
#   ssl:
#     # If utilizing custom CA certs for the connection to opensearch, provide the CA here.
#     certificateAuthorities: /usr/share/opensearch-dashboards/certs/dashboards-root-ca.pem
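# A sketch of the Dashboards settings commonly paired with the SAML auth domain defined
# in the OpenSearch values above; verify paths and keys against the security plugin docs:
# config:
#   opensearch_dashboards.yml: |
#     opensearch_security.auth.type: "saml"
#     server.xsrf.allowlist:
#       - /_opendistro/_security/saml/acs
#       - /_opendistro/_security/saml/acs/idpinitiated
#       - /_opendistro/_security/saml/logout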
opensearchDashboardsYml:
defaultMode:
# value should be 0-0777
priorityClassName: ""
opensearchAccount:
secret: "pablo-auth"
keyPassphrase:
enabled: false
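# The referenced secret carries the Dashboards backend credentials; a sketch of
# creating it, assuming the chart's expected username/password keys:
# kubectl create secret generic pablo-auth \
#   --from-literal=username=pablo --from-literal=password=<password>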
labels: {}
hostAliases: []
# - ip: "127.0.0.1"
# hostnames:
# - "foo.local"
# - "bar.local"
serverHost: "0.0.0.0"
service:
type: ClusterIP
# The IP family and IP families options are to set the behaviour in a dual-stack environment
# Omitting these values will let the service fall back to whatever the CNI dictates the defaults
# should be
#
# ipFamilyPolicy: SingleStack
# ipFamilies:
# - IPv4
port: 5601
metricsPort: 9601
loadBalancerIP: ""
nodePort: ""
labels: {}
annotations: {}
loadBalancerSourceRanges: []
# 0.0.0.0/0
httpPortName: http
metricsPortName: metrics
ingress:
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: chart-example.local
paths:
- path: /
backend:
serviceName: ""
servicePort: ""
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
requests:
cpu: "100m"
memory: "512M"
limits:
cpu: "100m"
memory: "512M"
autoscaling:
  # This requires the metrics server to be installed; to install it, run:
  # kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# See https://github.com/kubernetes-sigs/metrics-server
enabled: false
minReplicas: 1
maxReplicas: 10
# The legacy `targetCPUUtilizationPercentage` key has been deprecated in favor of `targetCPU`
targetCPU: "80"
targetMemory: "80"
updateStrategy:
type: "Recreate"
nodeSelector: {}
tolerations: []
affinity: {}
# This is the pod topology spread constraints
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
# kind: SecretProviderClass
# metadata:
# name: argocd-secrets-store
# spec:
# provider: aws
# parameters:
# objects: |
# - objectName: "argocd"
# objectType: "secretsmanager"
# jmesPath:
# - path: "client_id"
# objectAlias: "client_id"
# - path: "client_secret"
# objectAlias: "client_secret"
# secretObjects:
# - data:
# - key: client_id
# objectName: client_id
# - key: client_secret
# objectName: client_secret
# secretName: argocd-secrets-store
# type: Opaque
# labels:
# app.kubernetes.io/part-of: argocd
# - |
# apiVersion: policy/v1
# kind: PodDisruptionBudget
# metadata:
# name: {{ template "opensearch-dashboards.fullname" . }}
# labels:
# {{- include "opensearch-dashboards.labels" . | nindent 4 }}
# spec:
# minAvailable: 1
# selector:
# matchLabels:
# {{- include "opensearch-dashboards.selectorLabels" . | nindent 6 }}
# pod lifecycle policies as outlined here:
# https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command:
# - bash
# - -c
# - |
# #!/bin/bash
# curl -I "http://admin:admin@127.0.0.1:5601/status" -H "kbn-xsrf: true" -H "Content-Type: application/json"
## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearch Dashboards image.
plugins:
enabled: false
installList: []
# - example-fake-plugin-downloadable-url
removeList: []
# - examplePluginName
# ServiceMonitor Configuration for Prometheus
# Enabling this option will create a ServiceMonitor resource that allows Prometheus to scrape metrics from the OpenSearch Dashboards service.
serviceMonitor:
# Set to true to enable the ServiceMonitor resource for OpenSearch Dashboards
enabled: false
# HTTP path where metrics are exposed by OpenSearch Dashboards.
# Ensure this path is correctly set in your service.
path: /_prometheus/metrics
# Frequency at which Prometheus will scrape metrics.
# Modify as needed for your monitoring requirements.
interval: 10s
# additional labels to be added to the ServiceMonitor
# labels:
# k8s.example.com/prometheus: kube-prometheus
labels: {}