Unauthorized 401

Versions (relevant - OpenSearch/Dashboard/Server OS/Browser):
OpenSearch version: 2.15.0
Chart version: 2.21.0

Describe the issue:
I deployed OpenSearch and get a security error: 401 Unauthorized when I try to log in. I also couldn't change the config.yml located at /usr/share/opensearch/config/opensearch-security. I tried to change the configuration from inside the pod, but I couldn't edit the file with vim/vi. I think the problem comes from the challenge setting.
I'm using basic auth.
Also, I don't want to install the demo config, but it keeps getting created.
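
A minimal way to test basic auth directly against the REST API, bypassing Dashboards (the pod name follows the chart's clusterName/nodeGroup naming; the password placeholder and the https protocol are assumptions for this setup):

# -k skips TLS verification, which is only acceptable with the demo certificates.
kubectl exec -it opensearch-cluster-master-0 -- \
  curl -sk -u admin:<password> https://localhost:9200/_cluster/health
# A 401 here means the security plugin itself is rejecting the credentials.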

Configuration:

clusterName: "opensearch-cluster"
nodeGroup: "master"
 
singleNode: false
masterService: "opensearch-cluster-master"
 
# These will be set as environment variable "node.roles", e.g. node.roles=master,ingest,data,remote_cluster_client
roles:
  - master
  - ingest
  - data
  - remote_cluster_client
 
replicas: 3
 

majorVersion: ""
 
global:

  dockerRegistry: ""
 

opensearchHome: /usr/share/opensearch
 

config:

 
  opensearch.yml:
    cluster.name: opensearch-cluster

    plugins.security.disabled: true
 
 

extraEnvs:
 
   - name: DISABLE_INSTALL_DEMO_CONFIG
     value: "true"
   - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
     value: o6GxpCMnWWfZOLq
 
envFrom: []


secretMounts: []
 
hostAliases: []

 
image:
  repository: 
  tag: ""
  pullPolicy: "IfNotPresent"
 
podAnnotations: {}

 

openSearchAnnotations: {}
 

labels: {}
 
opensearchJavaOpts: "-Xmx512M -Xms512M"
 
resources:
  requests:
    cpu: "1000m"
    memory: "100Mi"
 
initResources: {}

 
sidecarResources: {}

 
networkHost: "0.0.0.0"
 
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""

  automountServiceAccountToken: false
 
podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir
 
persistence:
  enabled: true
  
  enableInitChown: true

  labels:

  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations: {}
 
extraVolumes: []

 
extraVolumeMounts: []
 
 
extraContainers: []

 
extraInitContainers:
 - name: sysctl
   image: "docker.io/bitnami/bitnami-shell:10-debian-10-r199"
   imagePullPolicy: "IfNotPresent"
   command:
      - /bin/bash
      - -ec
      - |
        CURRENT=`sysctl -n vm.max_map_count`;
        DESIRED="262144";
        if [ "$DESIRED" -gt "$CURRENT" ]; then
            sysctl -w vm.max_map_count=262144;
        fi;
        CURRENT=`sysctl -n fs.file-max`;
        DESIRED="65536";
        if [ "$DESIRED" -gt "$CURRENT" ]; then
            sysctl -w fs.file-max=65536;
        fi;
   securityContext:
      runAsUser: 0
      privileged: true

priorityClassName: ""
 

 

antiAffinity: "soft"

customAntiAffinity: {}

nodeAffinity: {}
 
podAffinity: {}

topologySpreadConstraints: []

podManagementPolicy: "Parallel"
 

enableServiceLinks: true
 
protocol: http
httpPort: 9200
transportPort: 9300
metricsPort: 9600
httpHostPort: ""
transportHostPort: ""
 
 
service:
  labels: {}
  labelsHeadless: {}
  headless:
    annotations: {}
  type: ClusterIP

  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  metricsPortName: metrics
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""
 
updateStrategy: RollingUpdate
 

maxUnavailable: 1
 
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
 
securityContext:
  capabilities:
    drop:
      - ALL

  runAsNonRoot: true
  runAsUser: 1000
 
securityConfig:
  enabled: true
  path: "/usr/share/opensearch/config/opensearch-security"
  actionGroupsSecret:
  configSecret:
  internalUsersSecret:
  rolesSecret:
  rolesMappingSecret:
  tenantsSecret:
  
  config:

    securityConfigSecret: ""
    dataComplete: true
    data:
      config.yml: |-
        http:
          anonymous_auth_enabled: false
          xff:
            enabled: false
            internalProxies: '192\.168\.0\.10|192\.168\.0\.11' # regex pattern
            #internalProxies: '.*' # trust all internal proxies, regex pattern
            #remoteIpHeader:  'x-forwarded-for'
            ###### see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html for regex help
            ###### more information about XFF https://en.wikipedia.org/wiki/X-Forwarded-For
            ###### and here https://tools.ietf.org/html/rfc7239
            ###### and https://tomcat.apache.org/tomcat-8.0-doc/config/valve.html#Remote_IP_Valve
        authc:
          kerberos_auth_domain:
            http_enabled: false
            transport_enabled: false
            order: 6
            http_authenticator:
              type: kerberos
              challenge: true
              config:
                # If true a lot of kerberos/security related debugging output will be logged to standard out
                krb_debug: false
                # If true then the realm will be stripped from the user name
                strip_realm_from_principal: true
            authentication_backend:
              type: noop
          basic_internal_auth_domain:
            description: "Authenticate via HTTP Basic against internal users database"
            http_enabled: true
            transport_enabled: true
            order: 4
            http_authenticator:
              type: basic
              challenge: true
            authentication_backend:
              type: intern
          proxy_auth_domain:
            description: "Authenticate via proxy"
            http_enabled: false
            transport_enabled: false
            order: 3
            http_authenticator:
              type: proxy
              challenge: false
              config:
                user_header: "x-proxy-user"
                roles_header: "x-proxy-roles"
            authentication_backend:
              type: noop
          jwt_auth_domain:
            description: "Authenticate via Json Web Token"
            http_enabled: false
            transport_enabled: false
            order: 0
            http_authenticator:
              type: jwt
              challenge: false
              config:
                signing_key: "base64 encoded HMAC key or public RSA/ECDSA pem key"
                jwt_header: "Authorization"
                jwt_url_parameter: null
                jwt_clock_skew_tolerance_seconds: 30
                roles_key: null
                subject_key: null
            authentication_backend:
              type: noop
          clientcert_auth_domain:
            description: "Authenticate via SSL client certificates"
            http_enabled: false
            transport_enabled: false
            order: 2
            http_authenticator:
              type: clientcert
              config:
                username_attribute: cn # optional, if omitted DN becomes username
              challenge: false
            authentication_backend:
              type: noop
          ldap:
            description: "Authenticate via LDAP or Active Directory"
            http_enabled: false
            transport_enabled: false
            order: 5
            http_authenticator:
              type: basic
              challenge: false
            authentication_backend:
              # LDAP authentication backend (authenticate users against a LDAP or Active Directory)
              type: ldap
              config:
                # enable ldaps
                enable_ssl: false
                # enable start tls, enable_ssl should be false
                enable_start_tls: false
                # send client certificate
                enable_ssl_client_auth: false
                # verify ldap hostname
                verify_hostnames: true
                hosts:
                - localhost:8389
                bind_dn: null
                password: null
                userbase: 'ou=people,dc=example,dc=com'
                # Filter to search for users (currently in the whole subtree beneath userbase)
                # {0} is substituted with the username
                usersearch: '(sAMAccountName={0})'
                # Use this attribute from the user as username (if not set then DN is used)
                username_attribute: null
        authz:
          roles_from_myldap:
            description: "Authorize via LDAP or Active Directory"
            http_enabled: false
            transport_enabled: false
            authorization_backend:
              # LDAP authorization backend (gather roles from a LDAP or Active Directory, you have to configure the above LDAP authentication backend settings too)
              type: ldap
              config:
                # enable ldaps
                enable_ssl: false
                # enable start tls, enable_ssl should be false
                enable_start_tls: false
                # send client certificate
                enable_ssl_client_auth: false
                # verify ldap hostname
                verify_hostnames: true
                hosts:
                - localhost:8389
                bind_dn: null
                password: null
                rolebase: 'ou=groups,dc=example,dc=com'
                # {2} is substituted with an attribute value from the directory entry of the
                # authenticated user. Use userroleattribute to specify the name of the attribute
                rolesearch: '(member={0})'
                # Specify the name of the attribute whose value should be used for substitution in {2} above
                userroleattribute: null
                userrolename: disabled
                rolename: cn
                resolve_nested_roles: true
                userbase: 'ou=people,dc=example,dc=com'
          roles_from_another_ldap:
            description: "Authorize via another Active Directory"
            http_enabled: false
            transport_enabled: false
            authorization_backend:
              type: ldap

      # internal_users.yml: |-
      # roles.yml: |-
      # roles_mapping.yml: |-
      # action_groups.yml: |-
      # tenants.yml: |-
 
# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120
 
sysctlVmMaxMapCount: 262144
 
startupProbe:
  tcpSocket:
    port: 9200
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 3
  failureThreshold: 30
 
livenessProbe: {}
  # periodSeconds: 20
  # timeoutSeconds: 5
  # failureThreshold: 10
  # successThreshold: 1
  # initialDelaySeconds: 10
  # tcpSocket:
  #   port: 9200
 
readinessProbe:
  tcpSocket:
    port: 9200
  periodSeconds: 5
  timeoutSeconds: 3
  failureThreshold: 3
 

schedulerName: ""
 
imagePullSecrets: []
nodeSelector: {}
tolerations: []

ingress:
  enabled: false

 
  annotations: {}
   
  ingressLabels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []

 
nameOverride: ""
fullnameOverride: ""
 
masterTerminationFix: false
 
opensearchLifecycle: {}

 
lifecycle: {}

keystore: []

networkPolicy:
  create: false

 
  http:
    enabled: true
 

fsGroup: ""
 

sysctl:
  enabled: false
 

sysctlInit:
  enabled: false

plugins:
  enabled: false
  installList: []

extraObjects: []
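
For reference, since the files under /usr/share/opensearch/config/opensearch-security can't be edited inside the running pod, the chart's securityConfig.config.securityConfigSecret option can supply the security config from a Kubernetes secret instead of in-pod edits. A minimal sketch, assuming a local config.yml and a secret name chosen here for illustration:

# Hypothetical secret; the chart mounts its keys as the security config files.
kubectl create secret generic opensearch-security-config \
  --from-file=config.yml=./config.yml
# Then set in values.yml:
#   securityConfig:
#     config:
#       securityConfigSecret: opensearch-security-config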

Relevant Logs or Screenshots:

Hi @riardomora10,

Would you mind sharing your OpenSearch values.yml file? Please use the preformatted text option to keep it readable (Ctrl + E).

thanks,
mj

---
clusterName: "opensearch-cluster"
nodeGroup: "master"
 
# If discovery.type in the opensearch configuration is set to "single-node",
# this should be set to "true"
# If "true", replicas will be forced to 1
singleNode: false
 
# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: "opensearch-cluster-master"
 
# OpenSearch roles that will be applied to this nodeGroup
# These will be set as environment variable "node.roles". E.g. node.roles=master,ingest,data,remote_cluster_client
roles:
  - master
  - ingest
  - data
  - remote_cluster_client
 
replicas: 3
 
# if not set, falls back to parsing .Values.imageTag, then .Chart.appVersion.
majorVersion: ""
 
global:
  # Set if you want to change the default docker registry, e.g. a private one.
  dockerRegistry: ""
 
# Allows you to add any config files in {{ .Values.opensearchHome }}/config
opensearchHome: /usr/share/opensearch
 
# such as opensearch.yml and log4j2.properties
config:
  # Values must be YAML literal style scalar / YAML multiline string.
  # <filename>: |
  #   <formatted-value(s)>
  # log4j2.properties: |
  #   status = error
  #
  #   appender.console.type = Console
  #   appender.console.name = console
  #   appender.console.layout.type = PatternLayout
  #   appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
  #
  #   rootLogger.level = info
  #   rootLogger.appenderRef.console.ref = console
 
  opensearch.yml:
    cluster.name: opensearch-cluster
 
    # Bind to all interfaces because we don't know what IP address Docker will assign to us.
    network.host: 0.0.0.0
 
    # Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again.
    # Implicitly done if ".singleNode" is set to "true".
    # discovery.type: single-node
 
    # Start OpenSearch Security Demo Configuration
    # WARNING: revise all the lines below before you go into production
    plugins:
      security:
        ssl:
          transport:
            pemcert_filepath: esnode.pem
            pemkey_filepath: esnode-key.pem
            pemtrustedcas_filepath: root-ca.pem
            enforce_hostname_verification: false
          http:
            enabled: true
            pemcert_filepath: esnode.pem
            pemkey_filepath: esnode-key.pem
            pemtrustedcas_filepath: root-ca.pem
        allow_unsafe_democertificates: true
        allow_default_init_securityindex: true
        authcz:
          admin_dn:
            - CN=kirk,OU=client,O=client,L=test,C=de
        audit.type: internal_opensearch
        enable_snapshot_restore_privilege: true
        check_snapshot_restore_write_privileges: true
        restapi:
          roles_enabled: ["all_access", "security_rest_api_access"]
        system_indices:
          enabled: true
          indices:
            [
              ".opendistro-alerting-config",
              ".opendistro-alerting-alert*",
              ".opendistro-anomaly-results*",
              ".opendistro-anomaly-detector*",
              ".opendistro-anomaly-checkpoints",
              ".opendistro-anomaly-detection-state",
              ".opendistro-reports-*",
              ".opendistro-notifications-*",
              ".opendistro-notebooks",
              ".opendistro-asynchronous-search-response*",
            ]
    ######## End OpenSearch Security Demo Configuration ########
 
   
 
# log4j2.properties:
 
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
 # - name: DISABLE_INSTALL_DEMO_CONFIG
 #   value: "true"
  - name: OPENSEARCH_INITIAL_ADMIN_PASSWORD
    value: o6GxpCMnWWfZOLq
 
# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map
 
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
 
hostAliases: []
# - ip: "127.0.0.1"
#   hostnames:
#   - "foo.local"
#   - "bar.local"
 
image:
  repository: asgcmnsbxscuscr.azurecr.io/opensearch-dashboards
  # override image tag, which is .Chart.AppVersion by default
  tag: ""
  pullPolicy: "IfNotPresent"
 
podAnnotations:
  {}
  # iam.amazonaws.com/role: es-cluster
 
# OpenSearch Statefulset annotations
openSearchAnnotations: {}
 
# additionals labels
labels: {}
 
opensearchJavaOpts: "-Xmx512M -Xms512M"
 
resources:
  requests:
    cpu: "1000m"
    memory: "100Mi"
 
initResources: {}
#  limits:
#     cpu: "25m"
#     memory: "128Mi"
#  requests:
#     cpu: "25m"
#     memory: "128Mi"
 
sidecarResources: {}
#   limits:
#     cpu: "25m"
#     memory: "128Mi"
#   requests:
#     cpu: "25m"
#     memory: "128Mi"
 
networkHost: "0.0.0.0"
 
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  # Controls whether or not the Service Account token is automatically mounted to /var/run/secrets/kubernetes.io/serviceaccount
  automountServiceAccountToken: false
 
podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir
 
persistence:
  enabled: true
  # Set to false to disable the `fsgroup-volume` initContainer that will update permissions on the persistent disk.
  enableInitChown: true
  # override image, which is busybox by default
  # image: busybox
  # override image tag, which is latest by default
  # imageTag:
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  # OpenSearch Persistent Volume Storage Class
  # If defined, storageClassName: <storageClass>
  # If set to "-", storageClassName: "", which disables dynamic provisioning
  # If undefined (the default) or set to null, no storageClassName spec is
  #   set, choosing the default provisioner.  (gp2 on AWS, standard on
  #   GKE, AWS & OpenStack)
  #
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations: {}
 
extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}
 
extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
 
extraContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']
 
extraInitContainers:
  - name: sysctl
    image: docker.io/bitnami/bitnami-shell:10-debian-10-r199
    imagePullPolicy: "IfNotPresent"
    command:
      - /bin/bash
      - -ec
      - |
        CURRENT=`sysctl -n vm.max_map_count`;
        DESIRED="262144";
        if [ "$DESIRED" -gt "$CURRENT" ]; then
            sysctl -w vm.max_map_count=262144;
        fi;
        CURRENT=`sysctl -n fs.file-max`;
        DESIRED="65536";
        if [ "$DESIRED" -gt "$CURRENT" ]; then
            sysctl -w fs.file-max=65536;
        fi;
    securityContext:
      runAsUser: 0
      privileged: true
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
 
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
 
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort".
# Setting this to custom will use what is passed into customAntiAffinity.
antiAffinity: "soft"
 
# Allows passing in custom anti-affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
# Using this parameter requires setting antiAffinity to custom.
customAntiAffinity: {}
 
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
 
# This is the pod affinity settings as defined in
# https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity
podAffinity: {}
 
# This is the pod topology spread constraints
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
 
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
 
# The environment variables injected by service links are not used, but can lead to slow OpenSearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true
 
protocol: http
httpPort: 9200
transportPort: 9300
metricsPort: 9600
httpHostPort: ""
transportHostPort: ""
 
service:
  labels: {}
  labelsHeadless: {}
  headless:
    annotations: {}
  type: ClusterIP
  # The IP family and IP families options are to set the behaviour in a dual-stack environment
  # Omitting these values will let the service fall back to whatever the CNI dictates the defaults
  # should be
  #
  # ipFamilyPolicy: SingleStack
  # ipFamilies:
  # - IPv4
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  metricsPortName: metrics
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""
 
updateStrategy: RollingUpdate
 
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
 
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
 
securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
 
securityConfig:
  enabled: true
  path: "/usr/share/opensearch/config/opensearch-security"
  actionGroupsSecret:
  configSecret:
  internalUsersSecret:
  rolesSecret:
  rolesMappingSecret:
  tenantsSecret:
  # The following option simplifies securityConfig by using a single secret and
  # specifying the config files as keys in the secret instead of creating
  # different secrets for for each config file.
  # Note that this is an alternative to the individual secret configuration
  # above and shouldn't be used if the above secrets are used.
  config:
 
    securityConfigSecret: ""
    dataComplete: true
    data:
      #config.yml: |-
      # internal_users.yml: |-
      # roles.yml: |-
      # roles_mapping.yml: |-
      # action_groups.yml: |-
      # tenants.yml: |-
 
# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120
 
sysctlVmMaxMapCount: 262144
 
startupProbe:
  tcpSocket:
    port: 9200
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 3
  failureThreshold: 30
 
livenessProbe:
  {}
  # periodSeconds: 20
  # timeoutSeconds: 5
  # failureThreshold: 10
  # successThreshold: 1
  # initialDelaySeconds: 10
  # tcpSocket:
  #   port: 9200
 
readinessProbe:
  tcpSocket:
    port: 9200
  periodSeconds: 5
  timeoutSeconds: 3
  failureThreshold: 3
 
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
 
imagePullSecrets: []
nodeSelector: {}
tolerations: []
 
# Enabling this will publically expose your OpenSearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
 
  annotations:
    {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  ingressLabels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
 
nameOverride: ""
fullnameOverride: ""
 
masterTerminationFix: false
 
opensearchLifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
 
lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command:
  #       - bash
  #       - -c
  #       - |
  #         #!/bin/bash
  #         # Add a template to adjust number of shards/replicas1
  #         TEMPLATE_NAME=my_template
  #         INDEX_PATTERN="logstash-*"
  #         SHARD_COUNT=8
  #         REPLICA_COUNT=1
  #         ES_URL=http://localhost:9200
  #         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
  #         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
 
keystore: []
# To add secrets to the keystore:
#  - secretName: opensearch-encryption-key
 
networkPolicy:
  create: false
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access OpenSearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## opensearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## opensearch-master-transport-client: "true"
 
  http:
    enabled: true
 
# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
 
## Set optimal sysctl's through securityContext. This requires privilege. Can be disabled if
## the system has already been preconfigured. (Ex: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
## Also see: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
sysctl:
  enabled: false
 
## Set optimal sysctl's through privileged initContainer.
sysctlInit:
  enabled: false
  # override image, which is busybox by default
  # image: busybox
  # override image tag, which is latest by default
  # imageTag:
 
## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearch image.
plugins:
  enabled: false
  installList: []
  # - example-fake-plugin
 
# -- Array of extra K8s manifests to deploy
extraObjects:
  []
  # - apiVersion: secrets-store.csi.x-k8s.io/v1
  #   kind: SecretProviderClass
  #   metadata:
  #     name: argocd-secrets-store
  #   spec:
  #     provider: aws
  #     parameters:
  #       objects: |
  #         - objectName: "argocd"
  #           objectType: "secretsmanager"
  #           jmesPath:
  #               - path: "client_id"
  #                 objectAlias: "client_id"
  #               - path: "client_secret"
  #                 objectAlias: "client_secret"
  #     secretObjects:
  #     - data:
  #       - key: client_id
  #         objectName: client_id
  #       - key: client_secret
  #         objectName: client_secret
  #       secretName: argocd-secrets-store
  #       type: Opaque
  #       labels:
  #         app.kubernetes.io/part-of: argocd
  # - |
  #    apiVersion: policy/v1
  #    kind: PodDisruptionBudget
  #    metadata:
  #      name: {{ template "opensearch.uname" . }}
  #      labels:
  #        {{- include "opensearch.labels" . | nindent 4 }}
  #    spec:
  #      minAvailable: 1
  #      selector:
  #        matchLabels:
  #          {{- include "opensearch.selectorLabels" . | nindent 6 }}

Hi @riardomora10,

Could you run the two commands below and compare the outputs (hashes)?

cat config/opensearch-security/internal_users.yml | grep -A1 admin:

plugins/opensearch-security/tools/hash.sh -p o6GxpCMnWWfZOLq
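
For context: hash.sh prints a bcrypt hash of the supplied password, and the grep shows the hash stored for the admin user in internal_users.yml. Note that bcrypt embeds a random salt, so two hashes of the same password won't be textually identical; the point is to confirm the stored admin entry is present and well-formed. If internal_users.yml is changed, the security index has to be reloaded. A hedged sketch, assuming the demo admin certificates (adjust paths to your own certs):

# Run from /usr/share/opensearch inside a node pod.
plugins/opensearch-security/tools/securityadmin.sh \
  -cd config/opensearch-security/ \
  -icl -nhnv \
  -cacert config/root-ca.pem \
  -cert config/kirk.pem \
  -key config/kirk-key.pem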

Let me know if any questions.

As a quick test, can you try "admin" as both the username and the password?

Best,
mj