Hi @Ghata, there already seems to be an issue for this and I just left a comment there.
I was not able to modify the values.yml file to avoid using initContainers with root privileges. However, one workaround is to edit the chart's statefulset.yaml template and manually remove the initContainers; a patch-based sketch of the same idea follows below. My values file looks like this:
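For anyone who would rather not edit the chart template by hand, the same removal can be scripted with a kustomize patch over the rendered manifests. This is only a sketch; the release name, chart reference, and file names are placeholders, not taken from my actual setup:

# Render the chart first:
#   helm template release-name opensearch/opensearch -f values.yml > rendered.yaml
# kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - rendered.yaml
patches:
  - target:
      kind: StatefulSet
      name: opensearch-cluster-master # matches the rendered name below
    patch: |-
      # the remove op errors out if the chart rendered no initContainers
      - op: remove
        path: /spec/template/spec/initContainers
# Build and apply:
#   kubectl kustomize . | kubectl apply -f -

Since podSecurityContext.fsGroup is set to 1000 in my values, the kubelet still fixes group ownership on volumes that support fsGroup, so dropping the chown initContainer should be safe.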
---
clusterName: "opensearch-cluster"
nodeGroup: "master"
# If discovery.type in the opensearch configuration is set to "single-node",
# this should be set to "true"
# If "true", replicas will be forced to 1
singleNode: false
# The service that non-master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: "opensearch-cluster-master"
# OpenSearch roles that will be applied to this nodeGroup
# These will be set as environment variable "node.roles". E.g. node.roles=master,ingest,data,remote_cluster_client
roles:
- master
- ingest
- data
- remote_cluster_client
replicas: 3
# if not set, falls back to parsing .Values.imageTag, then .Chart.appVersion.
majorVersion: ""
global:
# Set if you want to change the default docker registry, e.g. a private one.
dockerRegistry: ""
opensearchHome: /usr/share/opensearch
# Allows you to add any config files in {{ .Values.opensearchHome }}/config
# such as opensearch.yml and log4j2.properties
config:
# Values must be YAML literal style scalar / YAML multiline string.
# <filename>: |
# <formatted-value(s)>
# log4j2.properties: |
# status = error
#
# appender.console.type = Console
# appender.console.name = console
# appender.console.layout.type = PatternLayout
# appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
#
# rootLogger.level = info
# rootLogger.appenderRef.console.ref = console
opensearch.yml: |
cluster.name: opensearch-cluster
# Bind to all interfaces because we don't know what IP address Docker will assign to us.
network.host: 0.0.0.0
# Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again.
# Implicitly done if ".singleNode" is set to "true".
# discovery.type: single-node
# Start OpenSearch Security Demo Configuration
# WARNING: revise all the lines below before you go into production
plugins:
security:
ssl:
transport:
pemcert_filepath: esnode.pem
pemkey_filepath: esnode-key.pem
pemtrustedcas_filepath: root-ca.pem
enforce_hostname_verification: false
http:
enabled: true
pemcert_filepath: esnode.pem
pemkey_filepath: esnode-key.pem
pemtrustedcas_filepath: root-ca.pem
allow_unsafe_democertificates: true
allow_default_init_securityindex: false
authcz:
admin_dn:
- CN=kirk,OU=client,O=client,L=test,C=de
audit.type: internal_opensearch
enable_snapshot_restore_privilege: true
check_snapshot_restore_write_privileges: true
restapi:
roles_enabled: ["all_access", "security_rest_api_access"]
system_indices:
enabled: true
indices:
[
".opendistro-alerting-config",
".opendistro-alerting-alert*",
".opendistro-anomaly-results*",
".opendistro-anomaly-detector*",
".opendistro-anomaly-checkpoints",
".opendistro-anomaly-detection-state",
".opendistro-reports-*",
".opendistro-notifications-*",
".opendistro-notebooks",
".opendistro-asynchronous-search-response*",
]
######## End OpenSearch Security Demo Configuration ########
# log4j2.properties:
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
- name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
value: What1to1do!again
# - name: MY_ENVIRONMENT_VAR
# value: the_value_goes_here
# From chart version 2.18.0 (app version OpenSearch 2.12.0) onwards, a custom strong password needs to be provided in order to set up the demo admin user.
# The cluster will not spin up without it unless the demo config install is disabled.
# - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
# value: <strong-password>
# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
# name: env-secret
# - configMapRef:
# name: config-map
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts:
- name: opensearch-certificates
secretName: opensearch-certificates
path: /usr/share/opensearch/config/esnode.pem
subPath: esnode.pem
- name: opensearch-certificates-key
secretName: opensearch-certificates
path: /usr/share/opensearch/config/esnode-key.pem
subPath: esnode-key.pem
- name: opensearch-certificates-ca
secretName: opensearch-certificates
path: /usr/share/opensearch/config/root-ca.pem
subPath: root-ca.pem
hostAliases: []
# - ip: "127.0.0.1"
# hostnames:
# - "foo.local"
# - "bar.local"
image:
repository: "opensearchproject/opensearch"
# override image tag, which is .Chart.AppVersion by default
tag: ""
pullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# OpenSearch Statefulset annotations
openSearchAnnotations: {}
# additional labels
labels: {}
opensearchJavaOpts: "-Xmx512M -Xms512M"
resources:
requests:
cpu: "1000m"
memory: "100Mi"
initResources: {}
# limits:
# cpu: "25m"
# memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
sidecarResources: {}
# limits:
# cpu: "25m"
# memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
networkHost: "0.0.0.0"
rbac:
create: false
serviceAccountAnnotations: {}
serviceAccountName: ""
# Controls whether or not the Service Account token is automatically mounted to /var/run/secrets/kubernetes.io/serviceaccount
automountServiceAccountToken: false
podSecurityPolicy:
create: false
name: ""
spec:
privileged: true
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
- emptyDir
persistence:
enabled: true
# Set to false to disable the `fsgroup-volume` initContainer that will update permissions on the persistent disk.
enableInitChown: true
# override image, which is busybox by default
# image: busybox
# override image tag, which is latest by default
# imageTag:
labels:
# Add default labels for the volumeClaimTemplate of the StatefulSet
enabled: false
# Add custom labels for the volumeClaimTemplate of the StatefulSet
additionalLabels: {}
# OpenSearch Persistent Volume Storage Class
# If defined, storageClassName: <storageClass>
# If set to "-", storageClassName: "", which disables dynamic provisioning
# If undefined (the default) or set to null, no storageClassName spec is
# set, choosing the default provisioner. (gp2 on AWS, standard on
# GKE, AWS & OpenStack)
#
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
extraVolumes: []
extraVolumeMounts:
- name: config
mountPath: /usr/share/opensearch/config/opensearch.yml
subPath: opensearch.yml
extraContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
extraInitContainers: []
# - name: do-somethings
# image: busybox
# command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort".
# Setting this to custom will use what is passed into customAntiAffinity.
antiAffinity: "soft"
# Allows passing in custom anti-affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
# Using this parameter requires setting antiAffinity to custom.
customAntiAffinity: {}
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# This is the pod affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
podAffinity: {}
# This is the pod topology spread constraints
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
# The environment variables injected by service links are not used, but can lead to slow OpenSearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true
protocol: https
httpPort: 9200
transportPort: 9300
metricsPort: 9600
httpHostPort: ""
transportHostPort: ""
service:
labels: {}
labelsHeadless: {}
headless:
annotations: {}
type: ClusterIP
# The IP family and IP families options are to set the behaviour in a dual-stack environment
# Omitting these values will let the service fall back to whatever defaults the CNI dictates.
#
# ipFamilyPolicy: SingleStack
# ipFamilies:
# - IPv4
nodePort: ""
annotations: {}
httpPortName: http
transportPortName: transport
metricsPortName: metrics
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
securityContext:
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
securityConfig:
enabled: true
path: "/usr/share/opensearch/config/opensearch-security"
actionGroupsSecret:
configSecret:
internalUsersSecret:
rolesSecret:
rolesMappingSecret:
tenantsSecret:
# The following option simplifies securityConfig by using a single secret and
# specifying the config files as keys in the secret instead of creating
# different secrets for each config file.
# Note that this is an alternative to the individual secret configuration
# above and shouldn't be used if the above secrets are used.
config:
# There are multiple ways to define the configuration here:
# * If you define anything under data, the chart will automatically create
# a secret and mount it. This is the best option if you want to override all the
# existing yml files at once.
# * If you define securityConfigSecret, the chart will assume this secret is
# created externally and mount it. This is the best option if your intention is to
# only update a single yml file.
# * It is an error to define both data and securityConfigSecret.
securityConfigSecret: ""
dataComplete: true
data: {}
# config.yml: |-
# internal_users.yml: |-
# roles.yml: |-
# roles_mapping.yml: |-
# action_groups.yml: |-
# tenants.yml: |-
# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
startupProbe:
tcpSocket:
port: 9200
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 30
livenessProbe: {}
# periodSeconds: 20
# timeoutSeconds: 5
# failureThreshold: 10
# successThreshold: 1
# initialDelaySeconds: 10
# tcpSocket:
# port: 9200
readinessProbe:
tcpSocket:
port: 9200
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publicly expose your OpenSearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
enabled: false
# For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
ingressLabels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
nameOverride: ""
fullnameOverride: ""
masterTerminationFix: false
opensearchLifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"]
# postStart:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command:
# - bash
# - -c
# - |
# #!/bin/bash
# # Add a template to adjust number of shards/replicas
# TEMPLATE_NAME=my_template
# INDEX_PATTERN="logstash-*"
# SHARD_COUNT=8
# REPLICA_COUNT=1
# ES_URL=http://localhost:9200
# while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
# curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
keystore: []
# To add secrets to the keystore:
# - secretName: opensearch-encryption-key
networkPolicy:
create: false
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
## In order for a Pod to access OpenSearch, it needs to have the following label:
## {{ template "uname" . }}-client: "true"
## Example for default configuration to access HTTP port:
## opensearch-master-http-client: "true"
## Example for default configuration to access transport port:
## opensearch-master-transport-client: "true"
http:
enabled: false
# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
## Set optimal sysctl's through securityContext. This requires privilege. Can be disabled if
## the system has already been preconfigured. (Ex: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
## Also see: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
sysctl:
enabled: false
## Set optimal sysctl's through privileged initContainer.
sysctlInit:
enabled: false
# override image, which is busybox by default
# image: busybox
# override image tag, which is latest by default
# imageTag:
## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearch image.
plugins:
enabled: false
installList: []
# - example-fake-plugin
removeList: []
# - example-fake-plugin
# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
# kind: SecretProviderClass
# metadata:
# name: argocd-secrets-store
# spec:
# provider: aws
# parameters:
# objects: |
# - objectName: "argocd"
# objectType: "secretsmanager"
# jmesPath:
# - path: "client_id"
# objectAlias: "client_id"
# - path: "client_secret"
# objectAlias: "client_secret"
# secretObjects:
# - data:
# - key: client_id
# objectName: client_id
# - key: client_secret
# objectName: client_secret
# secretName: argocd-secrets-store
# type: Opaque
# labels:
# app.kubernetes.io/part-of: argocd
# - |
# apiVersion: policy/v1
# kind: PodDisruptionBudget
# metadata:
# name: {{ template "opensearch.uname" . }}
# labels:
# {{- include "opensearch.labels" . | nindent 4 }}
# spec:
# minAvailable: 1
# selector:
# matchLabels:
# {{- include "opensearch.selectorLabels" . | nindent 6 }}
# ServiceMonitor Configuration for Prometheus
# Enabling this option will create a ServiceMonitor resource that allows Prometheus to scrape metrics from the OpenSearch service.
# This only creates the ServiceMonitor; to actually get metrics, make sure to install the prometheus-exporter plugin
# that serves them, via the `.Values.plugins` value:
# plugins:
# enabled: true
# installList:
# - https://github.com/aiven/prometheus-exporter-plugin-for-opensearch/releases/download/x.x.x.x/prometheus-exporter-x.x.x.x.zip
serviceMonitor:
# Set to true to enable the ServiceMonitor resource
enabled: false
# HTTP path where metrics are exposed.
# Ensure this matches your OpenSearch service configuration.
path: /_prometheus/metrics
# Scheme to use for scraping.
scheme: http
# Frequency at which Prometheus will scrape metrics.
# Adjust based on your needs.
interval: 10s
# additional labels to be added to the ServiceMonitor
# labels:
# k8s.example.com/prometheus: kube-prometheus
labels: {}
# additional tlsConfig to be added to the ServiceMonitor
tlsConfig: {}
# Basic Auth configuration for the service monitor
# You can either use existingSecret, which expects a secret to be already present with data.username and data.password
# or set the credentials over the helm values, making helm create a secret for you
# basicAuth:
# enabled: true
# existingSecret: my-secret
# username: my-username
# password: my-password
basicAuth:
enabled: true
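One side note on the OPENSEARCH_INITIAL_ADMIN_PASSWORD entry above: since extraEnvs is rendered straight into the container's env block, the plain-text value can be swapped for a secretKeyRef. A minimal sketch, assuming a pre-created secret (the secret and key names are placeholders):

extraEnvs:
  - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
    valueFrom:
      secretKeyRef:
        name: opensearch-admin-credentials # placeholder secret
        key: password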
Running “helm template” against this values file produces the output below.
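An invocation along these lines generates it (release name and chart reference are placeholders):

helm template release-name opensearch/opensearch -f values.yml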
---
# Source: opensearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: "opensearch-cluster-master-pdb"
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
---
# Source: opensearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: opensearch-cluster-master-config
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
data:
opensearch.yml: |
cluster.name: opensearch-cluster
# Bind to all interfaces because we don't know what IP address Docker will assign to us.
network.host: 0.0.0.0
# Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again.
# Implicitly done if ".singleNode" is set to "true".
# discovery.type: single-node
# Start OpenSearch Security Demo Configuration
# WARNING: revise all the lines below before you go into production
plugins:
security:
ssl:
transport:
pemcert_filepath: esnode.pem
pemkey_filepath: esnode-key.pem
pemtrustedcas_filepath: root-ca.pem
enforce_hostname_verification: false
http:
enabled: true
pemcert_filepath: esnode.pem
pemkey_filepath: esnode-key.pem
pemtrustedcas_filepath: root-ca.pem
allow_unsafe_democertificates: true
allow_default_init_securityindex: false
authcz:
admin_dn:
- CN=kirk,OU=client,O=client,L=test,C=de
audit.type: internal_opensearch
enable_snapshot_restore_privilege: true
check_snapshot_restore_write_privileges: true
restapi:
roles_enabled: ["all_access", "security_rest_api_access"]
system_indices:
enabled: true
indices:
[
".opendistro-alerting-config",
".opendistro-alerting-alert*",
".opendistro-anomaly-results*",
".opendistro-anomaly-detector*",
".opendistro-anomaly-checkpoints",
".opendistro-anomaly-detection-state",
".opendistro-reports-*",
".opendistro-notifications-*",
".opendistro-notebooks",
".opendistro-asynchronous-search-response*",
]
######## End OpenSearch Security Demo Configuration ########
---
# Source: opensearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: opensearch-cluster-master
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
annotations:
{}
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
ports:
- name: http
protocol: TCP
port: 9200
- name: transport
protocol: TCP
port: 9300
- name: metrics
protocol: TCP
port: 9600
---
# Source: opensearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: opensearch-cluster-master-headless
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None # This is needed for statefulset hostnames like opensearch-0 to resolve
# Create endpoints also if the related pod isn't ready
publishNotReadyAddresses: true
selector:
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
ports:
- name: http
port: 9200
- name: transport
port: 9300
- name: metrics
port: 9600
---
# Source: opensearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: opensearch-cluster-master
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
annotations:
majorVersion: "2"
spec:
serviceName: opensearch-cluster-master-headless
selector:
matchLabels:
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
replicas: 3
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: opensearch-cluster-master
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "8Gi"
template:
metadata:
name: "opensearch-cluster-master"
labels:
helm.sh/chart: opensearch-2.31.0
app.kubernetes.io/name: opensearch
app.kubernetes.io/instance: release-name
app.kubernetes.io/version: "2.19.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: opensearch-cluster-master
annotations:
configchecksum: 14ba1b8475fa3842e2dac597890754f0e7ccf5fe939e16754d1e92da0c8f991
spec:
securityContext:
fsGroup: 1000
runAsUser: 1000
automountServiceAccountToken: false
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- release-name
- key: app.kubernetes.io/name
operator: In
values:
- opensearch
terminationGracePeriodSeconds: 120
volumes:
- name: config
configMap:
name: opensearch-cluster-master-config
- name: opensearch-certificates
secret:
secretName: opensearch-certificates
- name: opensearch-certificates-key
secret:
secretName: opensearch-certificates
- name: opensearch-certificates-ca
secret:
secretName: opensearch-certificates
enableServiceLinks: true
containers:
- name: "opensearch"
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
image: "opensearchproject/opensearch:2.19.0"
imagePullPolicy: "IfNotPresent"
readinessProbe:
failureThreshold: 3
periodSeconds: 5
tcpSocket:
port: 9200
timeoutSeconds: 3
startupProbe:
failureThreshold: 30
initialDelaySeconds: 5
periodSeconds: 10
tcpSocket:
port: 9200
timeoutSeconds: 3
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
- name: metrics
containerPort: 9600
resources:
requests:
cpu: 1000m
memory: 100Mi
env:
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: cluster.initial_master_nodes
value: "opensearch-cluster-master-0,opensearch-cluster-master-1,opensearch-cluster-master-2,"
- name: discovery.seed_hosts
value: "opensearch-cluster-master-headless"
- name: cluster.name
value: "opensearch-cluster"
- name: network.host
value: "0.0.0.0"
- name: OPENSEARCH_JAVA_OPTS
value: "-Xmx512M -Xms512M"
- name: node.roles
value: "master,ingest,data,remote_cluster_client,"
- name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
value: Something!Secret1
volumeMounts:
- name: "opensearch-cluster-master"
mountPath: /usr/share/opensearch/data
- name: opensearch-certificates
mountPath: /usr/share/opensearch/config/esnode.pem
subPath: esnode.pem
- name: opensearch-certificates-key
mountPath: /usr/share/opensearch/config/esnode-key.pem
subPath: esnode-key.pem
- name: opensearch-certificates-ca
mountPath: /usr/share/opensearch/config/root-ca.pem
subPath: root-ca.pem
# Currently some extra blocks accept strings
# to continue with backwards compatibility this is being kept
# whilst also allowing for yaml to be specified too.
- mountPath: /usr/share/opensearch/config/opensearch.yml
name: config
subPath: opensearch.yml
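Once deployed, a quick way to confirm that no initContainers made it into the StatefulSet and that the pods run as UID 1000 (the first command should print nothing):

kubectl get statefulset opensearch-cluster-master -o jsonpath='{.spec.template.spec.initContainers}'
kubectl get statefulset opensearch-cluster-master -o jsonpath='{.spec.template.spec.securityContext}'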
I can also provide the modified statefulset.yaml template if needed.
Hope this helps.