Versions (relevant - OpenSearch/Dashboard/Server OS/Browser):
OpenSearch: 2.5.0
OpenSearch Dashboards: 2.5.0
Kubernetes: 1.23.9
Describe the issue:
SAML authentication via Azure AD is not working.
Accessing OpenSearch Dashboards directly returns:
{"statusCode":500,"error":"Internal Server Error","message":"Internal Error"}
Configuration:
opensearch.yaml
---
clusterName: "opensearch-cluster"
nodeGroup: "master"

# If discovery.type in the opensearch configuration is set to "single-node",
# this should be set to "true"
# If "true", replicas will be forced to 1
singleNode: false

# The service that non-master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: "opensearch-cluster-master"

# OpenSearch roles that will be applied to this nodeGroup
# These will be set as environment variable "node.roles". E.g. node.roles=master,ingest,data,remote_cluster_client
roles:
  - master
  - ingest
  - data
  - remote_cluster_client

replicas: 3

# if not set, falls back to parsing .Values.imageTag, then .Chart.appVersion.
majorVersion: ""

global:
  # Set if you want to change the default docker registry, e.g. a private one.
  dockerRegistry: ""

# Allows you to add any config files in {{ .Values.opensearchHome }}/config
opensearchHome: /usr/share/opensearch
# such as opensearch.yml and log4j2.properties
config:
  # Values must be YAML literal style scalar / YAML multiline string.
  # <filename>: |
  #   <formatted-value(s)>
  # log4j2.properties: |
  #   status = error
  #
  #   appender.console.type = Console
  #   appender.console.name = console
  #   appender.console.layout.type = PatternLayout
  #   appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
  #
  #   rootLogger.level = info
  #   rootLogger.appenderRef.console.ref = console
  opensearch.yml: |
    cluster.name: opensearch-cluster

    # Bind to all interfaces because we don't know what IP address Docker will assign to us.
    network.host: 0.0.0.0

    # Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again.
    # Implicitly done if ".singleNode" is set to "true".
    # discovery.type: single-node

    # Start OpenSearch Security Demo Configuration
    # WARNING: revise all the lines below before you go into production
    plugins:
      security:
        ssl:
          transport:
            pemcert_filepath: esnode.pem
            pemkey_filepath: esnode-key.pem
            pemtrustedcas_filepath: root-ca.pem
            enforce_hostname_verification: false
          http:
            enabled: true
            pemcert_filepath: esnode.pem
            pemkey_filepath: esnode-key.pem
            pemtrustedcas_filepath: root-ca.pem
        allow_unsafe_democertificates: true
        allow_default_init_securityindex: true
        authcz:
          admin_dn:
            - CN=kirk,OU=client,O=client,L=test,C=de
        audit.type: internal_opensearch
        enable_snapshot_restore_privilege: true
        check_snapshot_restore_write_privileges: true
        restapi:
          roles_enabled: ["all_access", "security_rest_api_access"]
        system_indices:
          enabled: true
          indices:
            [
              ".opendistro-alerting-config",
              ".opendistro-alerting-alert*",
              ".opendistro-anomaly-results*",
              ".opendistro-anomaly-detector*",
              ".opendistro-anomaly-checkpoints",
              ".opendistro-anomaly-detection-state",
              ".opendistro-reports-*",
              ".opendistro-notifications-*",
              ".opendistro-notebooks",
              ".opendistro-asynchronous-search-response*",
            ]
    ######## End OpenSearch Security Demo Configuration ########
  # log4j2.properties:

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []

hostAliases: []
# - ip: "127.0.0.1"
#   hostnames:
#   - "foo.local"
#   - "bar.local"

image:
  repository: "opensearchproject/opensearch"
  # override image tag, which is .Chart.AppVersion by default
  tag: ""
  pullPolicy: "IfNotPresent"

podAnnotations: {}
  # iam.amazonaws.com/role: es-cluster

# additional labels
labels: {}

opensearchJavaOpts: "-Xmx512M -Xms512M"

resources:
  requests:
    cpu: "1000m"
    memory: "100Mi"

initResources: {}
#  limits:
#    cpu: "25m"
#    memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

sidecarResources: {}
#  limits:
#    cpu: "25m"
#    memory: "128Mi"
#  requests:
#    cpu: "25m"
#    memory: "128Mi"

networkHost: "0.0.0.0"

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir

persistence:
  enabled: true
  # Set to false to disable the `fsgroup-volume` initContainer that will update permissions on the persistent disk.
  enableInitChown: true
  # override image, which is busybox by default
  # image: busybox
  # override image tag, which is latest by default
  # imageTag:
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  # OpenSearch Persistent Volume Storage Class
  # If defined, storageClassName: <storageClass>
  # If set to "-", storageClassName: "", which disables dynamic provisioning
  # If undefined (the default) or set to null, no storageClassName spec is
  #   set, choosing the default provisioner. (gp2 on AWS, standard on
  #   GKE, AWS & OpenStack)
  #
  # storageClass: "-"
  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations: {}

extraVolumes: []
# - name: extras
#   emptyDir: {}

extraVolumeMounts: []
# - name: extras
#   mountPath: /usr/share/extras
#   readOnly: true

extraContainers: []
# - name: do-something
#   image: busybox
#   command: ['do', 'something']

extraInitContainers: []
# - name: do-something
#   image: busybox
#   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "soft"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# This is the pod topology spread constraints
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow OpenSearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true

protocol: https
httpPort: 9200
transportPort: 9300
httpHostPort: ""
transportHostPort: ""

service:
  labels: {}
  labelsHeadless: {}
  headless:
    annotations: {}
  type: ClusterIP
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

securityConfig:
  enabled: true
  path: "/usr/share/opensearch/config/opensearch-security"
  actionGroupsSecret:
  configSecret:
  internalUsersSecret:
  rolesSecret:
  rolesMappingSecret:
  tenantsSecret:
  # The following option simplifies securityConfig by using a single secret and
  # specifying the config files as keys in the secret instead of creating
  # different secrets for each config file.
  # Note that this is an alternative to the individual secret configuration
  # above and shouldn't be used if the above secrets are used.
  config:
    # There are multiple ways to define the configuration here:
    # * If you define anything under data, the chart will automatically create
    #   a secret and mount it.
    # * If you define securityConfigSecret, the chart will assume this secret is
    #   created externally and mount it.
    # * It is an error to define both data and securityConfigSecret.
    securityConfigSecret: ""
    dataComplete: true
    data:
      config.yml: |-
        _meta:
          type: "config"
          config_version: "2"
        config:
          dynamic:
            http:
              anonymous_auth_enabled: false
            authc:
              basic_internal_auth_domain:
                description: "Authenticate via HTTP Basic against internal users database"
                http_enabled: true
                transport_enabled: true
                order: 0
                http_authenticator:
                  type: basic
                  challenge: false
                authentication_backend:
                  type: intern
              saml_auth_domain:
                order: 1
                description: "SAML provider"
                http_enabled: true
                transport_enabled: false
                http_authenticator:
                  type: saml
                  challenge: true
                  config:
                    idp:
                      metadata_url: https://login.microsoftonline.com/XXX/federationmetadata/2007-06/federationmetadata.xml?appid=XXX
                      entity_id: https://sts.windows.net/xxxxxxxxxxxx/
                    sp:
                      entity_id: https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/
                    kibana_url: https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/
                    exchange_key: "XXXXXXXXXXXXXX"
                    roles_key: http://schemas.microsoft.com/ws/2008/06/identity/claims/groups
                authentication_backend:
                  type: noop
      # internal_users.yml: |-
      # roles.yml: |-
      # roles_mapping.yml: |-
      # action_groups.yml: |-
      # tenants.yml: |-

# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

startupProbe:
  tcpSocket:
    port: 9200
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 3
  failureThreshold: 30

livenessProbe: {}
# periodSeconds: 20
# timeoutSeconds: 5
# failureThreshold: 10
# successThreshold: 1
# initialDelaySeconds: 10
# tcpSocket:
#   port: 9200

readinessProbe:
  tcpSocket:
    port: 9200
  periodSeconds: 5
  timeoutSeconds: 3
  failureThreshold: 3

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
nodeSelector: {}
tolerations: []

# Enabling this will publicly expose your OpenSearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local

nameOverride: ""
fullnameOverride: ""

masterTerminationFix: false

lifecycle: {}
# preStop:
#   exec:
#     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
#   exec:
#     command:
#       - bash
#       - -c
#       - |
#         #!/bin/bash
#         # Add a template to adjust number of shards/replicas
#         TEMPLATE_NAME=my_template
#         INDEX_PATTERN="logstash-*"
#         SHARD_COUNT=8
#         REPLICA_COUNT=1
#         ES_URL=http://localhost:9200
#         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
#         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'

keystore: []
# To add secrets to the keystore:
#  - secretName: opensearch-encryption-key

networkPolicy:
  create: false
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access OpenSearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## opensearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## opensearch-master-transport-client: "true"
  http:
    enabled: false

# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""

## Set optimal sysctl's through securityContext. This requires privilege. Can be disabled if
## the system has already been preconfigured. (Ex: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
## Also see: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
sysctl:
  enabled: false

## Set optimal sysctl's through privileged initContainer.
sysctlInit:
  enabled: false
  # override image, which is busybox by default
  # image: busybox
  # override image tag, which is latest by default
  # imageTag:

## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearch image.
plugins:
  enabled: false
  installList: []
  # - example-fake-plugin

# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
#   kind: SecretProviderClass
#   metadata:
#     name: argocd-secrets-store
#   spec:
#     provider: aws
#     parameters:
#       objects: |
#         - objectName: "argocd"
#           objectType: "secretsmanager"
#           jmesPath:
#             - path: "client_id"
#               objectAlias: "client_id"
#             - path: "client_secret"
#               objectAlias: "client_secret"
#     secretObjects:
#       - data:
#           - key: client_id
#             objectName: client_id
#           - key: client_secret
#             objectName: client_secret
#         secretName: argocd-secrets-store
#         type: Opaque
#         labels:
#           app.kubernetes.io/part-of: argocd
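As a sanity check that the security plugin can actually fetch the IdP metadata referenced by metadata_url above, this is a sketch I can run from one of the OpenSearch pods (pod name follows this chart's opensearch-cluster-master naming; the tenant/app IDs stay redacted):

kubectl exec -it opensearch-cluster-master-0 -- \
  curl -s -o /dev/null -w '%{http_code}\n' \
  "https://login.microsoftonline.com/XXX/federationmetadata/2007-06/federationmetadata.xml?appid=XXX"
# 200 means the Azure AD metadata is reachable from inside the cluster;
# anything else would point at egress/DNS rather than the YAML itself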
opensearch-dashboard.yaml
# Copyright OpenSearch Contributors
# SPDX-License-Identifier: Apache-2.0

# Default values for opensearch-dashboards.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
opensearchHosts: "https://opensearch-cluster-master:9200"
replicaCount: 1

image:
  repository: "opensearchproject/opensearch-dashboards"
  # override image tag, which is .Chart.AppVersion by default
  tag: ""
  pullPolicy: "IfNotPresent"

startupProbe:
  tcpSocket:
    port: 5601
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 20
  successThreshold: 1
  initialDelaySeconds: 10

livenessProbe:
  tcpSocket:
    port: 5601
  periodSeconds: 20
  timeoutSeconds: 5
  failureThreshold: 10
  successThreshold: 1
  initialDelaySeconds: 10

readinessProbe:
  tcpSocket:
    port: 5601
  periodSeconds: 20
  timeoutSeconds: 5
  failureThreshold: 10
  successThreshold: 1
  initialDelaySeconds: 10

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

rbac:
  create: true

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: []
# - name: certs
#   secretName: dashboard-certs
#   path: /usr/share/dashboards/certs

podAnnotations: {}

extraEnvs: []
# - name: "NODE_OPTIONS"
#   value: "--max-old-space-size=1800"

envFrom: []

extraVolumes: []
# - name: extras
#   emptyDir: {}

extraVolumeMounts: []
# - name: extras
#   mountPath: /usr/share/extras
#   readOnly: true

extraInitContainers: ""

extraContainers: ""

podSecurityContext: {}

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

config:
  # Default OpenSearch Dashboards configuration from docker image of Dashboards
  opensearch_dashboards.yml: |
    timelion:
      ui:
        enabled: "true"
    server:
      host: "0"
      ssl:
        enabled: "false"
      xsrf:
        allowlist: ["/_plugins/_security/api/authtoken", "/_opendistro/_security/api/authtoken", "/_opendistro/_security/saml/acs/idpinitiated", "/_opendistro/_security/saml/acs", "/_opendistro/_security/saml/logout", "/_plugins/_security/saml/acs/idpinitiated", "/_plugins/_security/saml/acs", "/_plugins/_security/saml/logout"]
    opensearch_security:
      multitenancy:
        enabled: "true"
        tenants:
          preferred: ["Private", "Global"]
      auth:
        type: ["basicauth", "saml"]
        multiple_auth_enabled: "true"
    opensearch:
      ssl:
        verificationMode: "none"
      hosts: ["https://opensearch-cluster-master:9200"]
      username: "admin"
      password: "admin"
      requestHeadersAllowlist: ["securitytenant", "security_tenant", "Authorization"]
  # opensearch_dashboards.yml: |
  #   server:
  #     name: dashboards
  #     host: "{{ .Values.serverHost }}"
  # opensearch_dashboards.yml:
  #   server:
  #     name: dashboards
  #     host: "{{ .Values.serverHost }}"
  #     # Dashboards TLS Config (Ensure the cert files are present before enabling SSL)
  #     ssl:
  #       enabled: true
  #       key: /usr/share/opensearch-dashboards/certs/dashboards-key.pem
  #       certificate: /usr/share/opensearch-dashboards/certs/dashboards-crt.pem
  #   # determines how dashboards will verify certificates (needs to be none for default opensearch certificates to work)
  #   opensearch:
  #     ssl:
  #       # if utilizing custom CA certs for connection to opensearch, provide the CA here
  #       certificateAuthorities: /usr/share/opensearch-dashboards/certs/dashboards-root-ca.pem

opensearchDashboardsYml:
  defaultMode:
  # value should be 0-0777

priorityClassName: ""

opensearchAccount:
  secret: ""
  keyPassphrase:
    enabled: false

labels: {}

hostAliases: []
# - ip: "127.0.0.1"
#   hostnames:
#   - "foo.local"
#   - "bar.local"

serverHost: "0.0.0.0"

service:
  type: ClusterIP
  port: 5601
  loadBalancerIP: ""
  nodePort: ""
  labels: {}
  annotations: {}
  loadBalancerSourceRanges: []
  # 0.0.0.0/0
  httpPortName: http

ingress:
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          backend:
            serviceName: ""
            servicePort: ""
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local

resources:
  requests:
    cpu: "100m"
    memory: "512M"
  limits:
    cpu: "100m"
    memory: "512M"

autoscaling:
  # This requires metrics server to be installed, to install use kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
  # See https://github.com/kubernetes-sigs/metrics-server
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80

updateStrategy:
  type: "Recreate"

nodeSelector: {}

tolerations: []

affinity: {}

# -- Array of extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
#   kind: SecretProviderClass
#   metadata:
#     name: argocd-secrets-store
#   spec:
#     provider: aws
#     parameters:
#       objects: |
#         - objectName: "argocd"
#           objectType: "secretsmanager"
#           jmesPath:
#             - path: "client_id"
#               objectAlias: "client_id"
#             - path: "client_secret"
#               objectAlias: "client_secret"
#     secretObjects:
#       - data:
#           - key: client_id
#             objectName: client_id
#           - key: client_secret
#             objectName: client_secret
#         secretName: argocd-secrets-store
#         type: Opaque
#         labels:
#           app.kubernetes.io/part-of: argocd

# pod lifecycle policies as outlined here:
# https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
lifecycle: {}
# preStop:
#   exec:
#     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
#   exec:
#     command:
#       - bash
#       - -c
#       - |
#         #!/bin/bash
#         curl -I "http://admin:admin@127.0.0.1:5601/status" -H "kbn-xsrf: true" -H "Content-Type: application/json"

## Enable to add 3rd Party / Custom plugins not offered in the default OpenSearchDashboards image.
plugins:
  enabled: false
  installList: []
  # - example-fake-plugin-downloadable-url
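To rule out the Dashboards-to-cluster connection itself, a minimal check from inside the Dashboards pod (a sketch assuming the demo admin:admin credentials configured above; <dashboards-pod> is a placeholder for the actual pod name):

kubectl exec -it <dashboards-pod> -- \
  curl -sk -u admin:admin "https://opensearch-cluster-master:9200/_plugins/_security/authinfo"
# a JSON response naming the admin user means basic auth against the cluster
# works, and only the SAML path is broken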
Relevant Logs or Screenshots:
OpenSearch Dashboards logs:
β {"type":"response","@timestamp":"2023-03-15T16:11:24Z","tags":[],"pid":1,"method":"get","statusCode":200,"req":{"url":"/auth/saml/captureUrlFragment.js","method":"get","headers":{"host":"kibana.qa.elastic- β
β XXXXXX.cloud.c3.xxx.xx","x-request-id":"3c43f70919eb29616b10397f1898e026","x-real-ip":"172.16.233.99","x-forwarded-for":"172.16.233.99","x-forwarded-host":"kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx","x-fo β
β rwarded-port":"443","x-forwarded-proto":"https","x-forwarded-scheme":"https","x-scheme":"https","sec-ch-ua":"\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"","dnt":"1","sec-ch β
β -ua-mobile":"?0","user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41","sec-ch-ua-platform":"\"Windows\"","accept" β
β :"*/*","sec-fetch-site":"same-origin","sec-fetch-mode":"no-cors","sec-fetch-dest":"script","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/captureUrlFragment?nextUrl=%2F","accept-enc β
β oding":"gzip, deflate, br","accept-language":"en,de;q=0.9,de-DE;q=0.8,en-GB;q=0.7,en-US;q=0.6"},"remoteAddress":"10.100.64.137","userAgent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KH β
β TML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/captureUrlFragment?nextUrl=%2F"},"res":{"statusCode":200,"responseT β
β ime":3,"contentLength":9},"message":"GET /auth/saml/captureUrlFragment.js 200 3ms - 9.0B"} β
β Error: failed parsing SAML config β
β at SecurityClient.getSamlHeader (/usr/share/opensearch-dashboards/plugins/securityDashboards/server/backend/opensearch_security_client.ts:177:15) β
β at processTicksAndRejections (internal/process/task_queues.js:95:5) β
β at /usr/share/opensearch-dashboards/plugins/securityDashboards/server/auth/types/saml/routes.ts:67:30 β
β at Router.handle (/usr/share/opensearch-dashboards/src/core/server/http/router/router.js:163:44) β
β at handler (/usr/share/opensearch-dashboards/src/core/server/http/router/router.js:124:50) β
β at exports.Manager.execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/toolkit.js:60:28) β
β at Object.internals.handler (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/handler.js:46:20) β
β at exports.execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/handler.js:31:20) β
β at Request._lifecycle (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/request.js:371:32) β
β at Request._execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/request.js:281:9)
{"type":"log","@timestamp":"2023-03-15T16:11:24Z","tags":["error","plugins","securityDashboards"],"pid":1,"message":"Failed to get saml header: Error: Error: failed parsing SAML config"} β
β{"type":"error","@timestamp":"2023-03-15T16:11:24Z","tags":[],"pid":1,"level":"error","error":{"message":"Internal Server Error","name":"Error","stack":"Error: Internal Server Error\n at HapiResponseAda β
β pter.toError (/usr/share/opensearch-dashboards/src/core/server/http/router/response_adapter.js:143:19)\n at HapiResponseAdapter.toHapiResponse (/usr/share/opensearch-dashboards/src/core/server/http/rout β
β er/response_adapter.js:97:19)\n at HapiResponseAdapter.handle (/usr/share/opensearch-dashboards/src/core/server/http/router/response_adapter.js:92:17)\n at Router.handle (/usr/share/opensearch-dashbo β
β ards/src/core/server/http/router/router.js:164:34)\n at processTicksAndRejections (internal/process/task_queues.js:95:5)\n at handler (/usr/share/opensearch-dashboards/src/core/server/http/router/rou β
β ter.js:124:50)\n at exports.Manager.execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/toolkit.js:60:28)\n at Object.internals.handler (/usr/share/opensearch-dashboards/node_module β
β s/@hapi/hapi/lib/handler.js:46:20)\n at exports.execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/handler.js:31:20)\n at Request._lifecycle (/usr/share/opensearch-dashboards/node_ β
β modules/@hapi/hapi/lib/request.js:371:32)\n at Request._execute (/usr/share/opensearch-dashboards/node_modules/@hapi/hapi/lib/request.js:281:9)"},"url":"http://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/ β
β auth/saml/login?nextUrl=%2F&redirectHash=false","message":"Internal Server Error"} β
β {"type":"response","@timestamp":"2023-03-15T16:11:24Z","tags":[],"pid":1,"method":"get","statusCode":500,"req":{"url":"/auth/saml/login?nextUrl=%2F&redirectHash=false","method":"get","headers":{"host":"kib β
β ana.qa.elastic-XXXXXX.cloud.c3.xxx.xx","x-request-id":"4a088ab52a38ee24096a6b3c34701b67","x-real-ip":"172.16.233.99","x-forwarded-for":"172.16.233.99","x-forwarded-host":"kibana.qa.elastic-XXXXXX.cloud.c β
β 3.xxx.xx","x-forwarded-port":"443","x-forwarded-proto":"https","x-forwarded-scheme":"https","x-scheme":"https","sec-ch-ua":"\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"","s β
β ec-ch-ua-mobile":"?0","sec-ch-ua-platform":"\"Windows\"","upgrade-insecure-requests":"1","dnt":"1","user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111. β
β 0.0.0 Safari/537.36 Edg/111.0.1661.41","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7","sec-fetch-site":"same-origin" β
β ,"sec-fetch-mode":"navigate","sec-fetch-dest":"document","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/captureUrlFragment?nextUrl=%2F","accept-encoding":"gzip, deflate, br","accept β
β -language":"en,de;q=0.9,de-DE;q=0.8,en-GB;q=0.7,en-US;q=0.6"},"remoteAddress":"10.100.64.137","userAgent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 β
β Safari/537.36 Edg/111.0.1661.41","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/captureUrlFragment?nextUrl=%2F"},"res":{"statusCode":500,"responseTime":519,"contentLength":9},"messa β
β ge":"GET /auth/saml/login?nextUrl=%2F&redirectHash=false 500 519ms - 9.0B"} β
β {"type":"response","@timestamp":"2023-03-15T16:11:24Z","tags":[],"pid":1,"method":"get","statusCode":401,"req":{"url":"/favicon.ico","method":"get","headers":{"host":"kibana.qa.elastic-XXXXXX.cloud.c3.xxx β
β .xx","x-request-id":"ee6fbd7d4040f9a5d39560a5d348eb1b","x-real-ip":"172.16.233.99","x-forwarded-for":"172.16.233.99","x-forwarded-host":"kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx","x-forwarded-port":"443", β
β "x-forwarded-proto":"https","x-forwarded-scheme":"https","x-scheme":"https","sec-ch-ua":"\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"","dnt":"1","sec-ch-ua-mobile":"?0","us β
β er-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41","sec-ch-ua-platform":"\"Windows\"","accept":"image/webp,image/a β
β png,image/svg+xml,image/*,*/*;q=0.8","sec-fetch-site":"same-origin","sec-fetch-mode":"no-cors","sec-fetch-dest":"image","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/login?nextUrl= β
β %2F&redirectHash=false","accept-encoding":"gzip, deflate, br","accept-language":"en,de;q=0.9,de-DE;q=0.8,en-GB;q=0.7,en-US;q=0.6"},"remoteAddress":"10.100.64.137","userAgent":"Mozilla/5.0 (Windows NT 10.0; β
β Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41","referer":"https://kibana.qa.elastic-XXXXXX.cloud.c3.xxx.xx/auth/saml/login?nextUrl=%2F&redirectHash=f β
β alse"},"res":{"statusCode":401,"responseTime":3,"contentLength":9},"message":"GET /favicon.ico 401 3ms - 9.0B"}