When upgrading an OpenSearch installation from Helm chart 1.0.4 to chart 1.4.2, I'm facing the following problem:
helm upgrade client opensearch/opensearch --version 1.4.2 -f client.yaml
The output is:
Error: UPGRADE FAILED: cannot patch "logs-corporativos-client" with kind StatefulSet: StatefulSet.apps "logs-corporativos-client" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden
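For context, Kubernetes only allows in-place updates to the replicas, template and updateStrategy fields of a StatefulSet, so the 1.4.2 chart is apparently changing some other, immutable field (typically the selector labels, serviceName or volumeClaimTemplates). A rough way to see exactly which field differs is to render both chart versions locally and diff them; this assumes the values file below is also accepted by the 1.0.4 chart, and the output file names are just placeholders:

helm template client opensearch/opensearch --version 1.0.4 -f client.yaml > rendered-1.0.4.yaml
helm template client opensearch/opensearch --version 1.4.2 -f client.yaml > rendered-1.4.2.yaml
diff rendered-1.0.4.yaml rendered-1.4.2.yaml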
My values file for the 1.4.2 OpenSearch chart is:
---
clusterName: "logs-corporativos"
nodeGroup: "client"
roles: []
replicas: 3
minimumMasterNodes: 1
majorVersion: ""
opensearchHome: /usr/share/opensearch
extraVolumes:
  - name: tls-opensearch
    secret:
      secretName: opensearch
  - name: tls-ca
    secret:
      secretName: logs-ca
  - name: tls-admin
    secret:
      secretName: logs-admin
  - name: snapshots
    persistentVolumeClaim:
      claimName: logs-snapshots-repository
extraVolumeMounts:
  - name: tls-opensearch
    mountPath: /usr/share/opensearch/data/pki/certs/opensearch
  - name: tls-ca
    mountPath: /usr/share/opensearch/data/pki/ca
  - name: tls-admin
    mountPath: /usr/share/opensearch/data/pki/certs/admin
  - name: snapshots
    mountPath: /mnt/snapshots
config:
  opensearch.yml: |
    indices.memory.index_buffer_size: 30%
    path.repo: ["/mnt/snapshots"]
    cluster.name: opensearch-cluster
    cluster.initial_master_nodes:
      - logs-corporativos-master-0
      - logs-corporativos-master-1
      - logs-corporativos-master-2
    network.host: 0.0.0.0
    plugins:
      security:
        nodes_dn:
          - 'CN=opensearch-node'
        ssl:
          transport:
            pemcert_filepath: /usr/share/opensearch/data/pki/certs/opensearch/tls.crt
            pemkey_filepath: /usr/share/opensearch/data/pki/certs/opensearch/tls.key
            pemtrustedcas_filepath: /usr/share/opensearch/data/pki/ca/tls.crt
            enforce_hostname_verification: false
          http:
            enabled: true
            pemcert_filepath: /usr/share/opensearch/data/pki/certs/opensearch/tls.crt
            pemkey_filepath: /usr/share/opensearch/data/pki/certs/opensearch/tls.key
            pemtrustedcas_filepath: /usr/share/opensearch/data/pki/ca/tls.crt
        allow_default_init_securityindex: true
        authcz:
          admin_dn:
            - CN=admin
        audit.type: internal_opensearch
        enable_snapshot_restore_privilege: true
        check_snapshot_restore_write_privileges: true
        restapi:
          roles_enabled: ["all_access", "security_rest_api_access"]
        system_indices:
          enabled: true
          indices:
            [
              ".opendistro-alerting-config",
              ".opendistro-alerting-alert*",
              ".opendistro-anomaly-results*",
              ".opendistro-anomaly-detector*",
              ".opendistro-anomaly-checkpoints",
              ".opendistro-anomaly-detection-state",
              ".opendistro-reports-*",
              ".opendistro-notifications-*",
              ".opendistro-notebooks",
              ".opendistro-asynchronous-search-response*"
            ]
extraEnvs:
  - name: "TZ"
    value: "America/Sao_Paulo"
secretMounts: []
hostAliases: []
image: "opensearchproject/opensearch"
imageTag: "1.1.0"
imagePullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# additional labels
labels: {}
opensearchJavaOpts: "-Xmx10G -Xms10G"
resources:
  requests:
    cpu: "6000m"
    memory: "18G"
  limits:
    cpu: "6000m"
    memory: "18G"
networkHost: "0.0.0.0"
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir
persistence:
  enabled: true
  labels:
    enabled: false
  storageClass: "local-storage-manual"
  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations: {}
priorityClassName: ""
nodeSelector:
  role-logs-client: "true"
podManagementPolicy: "Parallel"
enableServiceLinks: true
protocol: http
httpPort: 9200
transportPort: 9300
service:
  labels:
    haproxy.org/secure-crt-secret: opensearch
    haproxy.org/secure-verify-ca-secret: logs-ca-crt
  labelsHeadless:
    haproxy.org/secure-crt-secret: opensearch
    haproxy.org/secure-verify-ca-secret: logs-ca-crt
  type: ClusterIP
  nodePort: ""
  annotations: {}
  httpPortName: http
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: prd-interno
    haproxy.org/pod-maxconn: "256"
    haproxy.org/load-balance: leastconn
    haproxy.org/server-ssl: "true"
  path: /
  hosts:
    - opensearch.mydomain
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 2000
  tcpSocket:
    port: 9200
livenessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
  tcpSocket:
    port: 9200
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
securityConfig:
  enabled: true
  path: "/usr/share/opensearch/plugins/opensearch-security/securityconfig"
  actionGroupsSecret:
  configSecret:
  internalUsersSecret:
  rolesSecret:
  rolesMappingSecret:
  tenantsSecret:
  # The following option simplifies securityConfig by using a single secret whose keys are the
  # respective config files, instead of creating different secrets for config, internal users,
  # roles, roles mapping and tenants.
  # Note that this is an alternative to the above secrets and shouldn't be used if those are used.
  config:
    securityConfigSecret:
    data: {}
      # config.yml: |-
      # internal_users.yml: |-
      # roles.yml: |-
      # rolesMapping.yml: |-
      # tenants.yml: |-
# How long to wait for opensearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
tolerations: []
nameOverride: ""
fullnameOverride: ""
masterTerminationFix: false
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command:
# - bash
# - -c
# - |
# #!/bin/bash
# # Add a template to adjust the number of shards/replicas
# TEMPLATE_NAME=my_template
# INDEX_PATTERN="logstash-*"
# SHARD_COUNT=8
# REPLICA_COUNT=1
# ES_URL=http://localhost:9200
# while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
# curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
keystore: []
networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access OpenSearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## opensearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## opensearch-master-transport-client: "true"
  http:
    enabled: false
# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
## Set optimal sysctl's. This requires privilege. Can be disabled if
## the system has already been preconfigured. (Ex: https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
## Also see: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
sysctl:
  enabled: false
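If it helps, the StatefulSet that the 1.0.4 release actually created can also be dumped and compared against the 1.4.2 render above, which avoids assuming the old chart still accepts this values file (the output file name is just a placeholder):

kubectl get statefulset logs-corporativos-client -o yaml > live-statefulset.yaml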