Hi community members!
I have installed the following to ship logs from my Kubernetes (k8s) cluster to OpenSearch:
Beats used: docker.elastic.co/beats/filebeat-oss:8.2.2
Logstash used: opensearchproject/logstash-oss-with-opensearch-output-plugin:latest
Filebeat manifests:
---
# Filebeat configuration: tail all container logs on the node, enrich with
# Kubernetes metadata, and ship to Logstash.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config-dev
  namespace: filebeat
  labels:
    k8s-app: filebeat-dev
data:
  filebeat.yml: |-
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"

    # Must be an address:port actually reachable from the filebeat pods
    # (with hostNetwork: true this is resolved from the node's network).
    output.logstash:
      hosts: ["<public ip>:5044"]
---
# Filebeat DaemonSet: one pod per node, reading /var/log/containers via hostPath.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-dev
  namespace: filebeat
  labels:
    k8s-app: filebeat-dev
spec:
  selector:
    matchLabels:
      k8s-app: filebeat-dev
  template:
    metadata:
      labels:
        k8s-app: filebeat-dev
    spec:
      serviceAccountName: filebeat-dev
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat-dev
          image: docker.elastic.co/beats/filebeat-oss:8.2.2
          args: ["-c", "/etc/filebeat.yml", "-e"]
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            # privileged: true
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 250m
              memory: 256Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            # Kubernetes expects an integer here; 0640 is YAML 1.1 octal.
            defaultMode: 0640
            name: filebeat-config-dev
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        # data folder stores a registry of read status for all files, so we
        # don't send everything again on a Filebeat pod restart
        - name: data
          hostPath:
            # When filebeat runs as non-root user, this directory needs to be
            # writable by group (g+w).
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
---
# Grants the filebeat ServiceAccount the cluster-wide read permissions defined
# in the filebeat-dev ClusterRole (needed by add_kubernetes_metadata).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat-dev
subjects:
  - kind: ServiceAccount
    name: filebeat-dev
    namespace: filebeat
roleRef:
  kind: ClusterRole
  name: filebeat-dev
  apiGroup: rbac.authorization.k8s.io
---
# Read-only access used by the add_kubernetes_metadata processor.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat-dev
  labels:
    k8s-app: filebeat-dev
rules:
  - apiGroups: [""]  # "" indicates the core API group
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["apps"]
    resources:
      - replicasets
    verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat-dev
  namespace: filebeat
  labels:
    k8s-app: filebeat-dev
---
Logstash manifests:
---
# Logstash deployment receiving beats input on 5044 and forwarding to OpenSearch.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-default
  namespace: filebeat
  labels:
    k8s-app: logstash-default
spec:
  # NOTE(review): with hostNetwork: true each replica binds port 5044 directly
  # on its node, so 4 replicas require at least 4 schedulable nodes — otherwise
  # extra pods stay Pending. Confirm cluster size, or drop hostNetwork.
  replicas: 4
  selector:
    matchLabels:
      k8s-app: logstash-default
  template:
    metadata:
      labels:
        k8s-app: logstash-default
    spec:
      serviceAccountName: logstash-default
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: logstash-default
          # NOTE(review): pin a specific image tag instead of "latest" for
          # reproducible rollouts.
          image: opensearchproject/logstash-oss-with-opensearch-output-plugin:latest
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            # privileged: true
          resources:
            limits:
              memory: "512Mi"
              cpu: "500m"
            requests:
              memory: "256Mi"
              cpu: "250m"
          ports:
            - containerPort: 5044
          volumeMounts:
            # NOTE(review): mounting a ConfigMap over /usr/share/logstash/config
            # replaces the whole directory (log4j2.properties etc. from the
            # image disappear). If that causes startup issues, mount
            # logstash.yml alone via subPath instead — TODO confirm.
            - name: config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
      # nodeName: aks-agentpool-34396291-vmss000005
      # nodeSelector:
      #   disktype: ssd
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap-default
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap-default
            items:
              - key: logstash.conf
                path: logstash.conf
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  # Renamed from "logstash-msql" (apparent copy-paste leftover) for
  # consistency with the other logstash-default resources.
  name: logstash-default
subjects:
  - kind: ServiceAccount
    name: logstash-default
    namespace: filebeat
roleRef:
  kind: ClusterRole
  name: logstash-default
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: logstash-default
  labels:
    k8s-app: logstash-default
rules:
  - apiGroups: [""]  # "" indicates the core API group
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["apps"]
    resources:
      - replicasets
    verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: logstash-default
  namespace: filebeat
  labels:
    k8s-app: logstash-default
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap-default
  namespace: filebeat
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
    pipeline.batch.size: 125
    pipeline.batch.delay: 50
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    filter {
      json {
        source => "message"
      }
      # NOTE(review): assumes each parsed event carries a "records" array and a
      # [records][time] field; events without them will be tagged with
      # _splittypefailure/_dateparsefailure — confirm against the actual log shape.
      split { field => 'records' }
      date { match => [ "[records][time]", "HH:mm:ss.SSS" ] }
    }
    output {
      opensearch {
        hosts => ["https://domain:443"]
        index => "logstash-mw-k8s-default-logs-%{+yyyy.MM.dd}"
        user => "****"
        password => "****"
      }
    }
Logstash Service manifest:
---
apiVersion: v1
kind: Service
metadata:
  name: logstash-service-k8s
  namespace: filebeat
spec:
  # BUG FIX: the selector used the key "app", but the Deployment's pod template
  # labels its pods with "k8s-app: logstash-default". With the wrong key the
  # Service matches zero pods, has no endpoints, and Filebeat's connection to
  # port 5044 times out — which matches the reported error.
  selector:
    k8s-app: logstash-default
  ports:
    - name: "http"
      port: 5044
      targetPort: 5044
  type: LoadBalancer
The error is that Filebeat cannot connect to the Logstash service:
2022-06-11T17:30:04.348Z INFO [publisher_pipeline_output] pipeline/output.go:143 Connecting to backoff(async(tcp://*.*.*.*:**))
2022-06-11T17:30:04.349Z INFO [publisher] pipeline/retry.go:219 retryer: send unwait signal to consumer
Could you provide any suggestions, or point out anything I am configuring wrongly here?
Thanks.