Versions (relevant - OpenSearch/Dashboard/Server OS/Browser):
AWS OpenSearch Service 1.3 and 2.x (issue present); local OpenSearch Docker container (setting applies as expected)
Describe the issue:
Hi, I am unable to apply "knn.model.index.number_of_shards" in our production environment on AWS OpenSearch Service. I am able to apply this setting when running a local Docker image of OpenSearch.
Configuration:
Here is the request:
PUT /_cluster/settings
{
  "persistent": {
    "knn.model.index.number_of_shards": "22"
  }
}
This request succeeds on my local container but fails when run against AWS OpenSearch Service with the error:
✖ RestResponseError: HTTP 401: { Message: "Your request: '/_cluster/settings' payload is not allowed." }
PUT https://<domain>.com/_cluster/settings
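For reference, the failing call can be reproduced outside Dev Tools with a short script like the sketch below (the endpoint and credentials are placeholders, and it assumes basic-auth access to the domain; adjust for SigV4 signing if the domain only allows IAM auth):

import requests

# Minimal reproduction sketch; ENDPOINT and AUTH are placeholders.
ENDPOINT = "https://<domain>.com"
AUTH = ("username", "password")

resp = requests.put(
    f"{ENDPOINT}/_cluster/settings",
    json={"persistent": {"knn.model.index.number_of_shards": "22"}},
    auth=AUTH,
    timeout=30,
)

# Local container: HTTP 200 with {"acknowledged": true}
# AWS OpenSearch Service: HTTP 401 "Your request: '/_cluster/settings' payload is not allowed."
print(resp.status_code, resp.text)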
Oddly enough, I am able to apply all other cluster setting changes; for example, the following succeeds in both environments:
PUT /_cluster/settings
{
  "persistent": {
    "knn.plugin.enabled": true,
    "knn.algo_param.index_thread_qty": 20,
    "knn.cache.item.expiry.enabled": true
  }
}
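To narrow it down, a probe like this sketch (same placeholder endpoint and credentials as above) can apply each key on its own; only knn.model.index.number_of_shards comes back as rejected, and only on the AWS domain:

import requests

ENDPOINT = "https://<domain>.com"   # placeholder
AUTH = ("username", "password")     # placeholder

# Apply each candidate setting individually so the rejected key stands out.
candidates = {
    "knn.plugin.enabled": True,
    "knn.algo_param.index_thread_qty": 20,
    "knn.cache.item.expiry.enabled": True,
    "knn.model.index.number_of_shards": "22",
}

for key, value in candidates.items():
    resp = requests.put(
        f"{ENDPOINT}/_cluster/settings",
        json={"persistent": {key: value}},
        auth=AUTH,
        timeout=30,
    )
    print(f"{key}: HTTP {resp.status_code}")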
I have tested this on AWS OpenSearch 1.3 and 2.x.
Here is what GET /_cluster/settings returns on the instance where the setting cannot be applied and on the one where it can.
AWS OpenSearch (where it does not work):
{
"persistent" : {
"action" : {
"auto_create_index" : "false"
},
"cluster" : {
"routing" : {
"allocation" : {
"cluster_concurrent_rebalance" : "2",
"node_concurrent_recoveries" : "2",
"disk" : {
"watermark" : {
"low" : "7.355635070800781gb",
"flood_stage" : "2.451878356933594gb",
"high" : "4.903756713867188gb"
}
},
"node_initial_primaries_recoveries" : "4",
"awareness" : {
"force" : {
"zone" : {
"values" : "xx-xxxxx-xx,xx-xxxxx-xx"
}
}
},
"load_awareness" : {
"provisioned_capacity" : "2",
"skew_factor" : "50.0"
}
}
}
},
"indices" : {
"recovery" : {
"max_bytes_per_sec" : "20mb"
}
},
"knn" : {
"algo_param" : {
"index_thread_qty" : "20"
},
"cache" : {
"item" : {
"expiry" : {
"enabled" : "true"
}
}
},
"circuit_breaker" : {
"triggered" : "false"
},
"memory" : {
"circuit_breaker" : {
"limit" : "50%"
}
},
"plugin" : {
"enabled" : "true"
}
},
"plugins" : {
"index_state_management" : {
"metadata_migration" : {
"status" : "1"
},
"template_migration" : {
"control" : "-1"
}
}
}
},
"transient" : {
"cluster" : {
"routing" : {
"allocation" : {
"disk" : {
"watermark" : {
"low" : "7.355635070800781gb",
"flood_stage" : "2.451878356933594gb",
"high" : "4.903756713867188gb"
}
},
"node_initial_primaries_recoveries" : "4",
"awareness" : {
"force" : {
"zone" : {
"values" : "xx-xxxxx-xx,xx-xxxxx-xx"
}
}
},
"load_awareness" : {
"provisioned_capacity" : "2",
"skew_factor" : "50.0"
},
"cluster_concurrent_rebalance" : "2",
"node_concurrent_recoveries" : "2",
"exclude" : { }
}
},
"max_shards_per_node" : "5100"
},
"indices" : {
"recovery" : {
"max_bytes_per_sec" : "20mb"
}
}
}
}
Local container (where it does work):
{
"persistent" : {
"action" : {
"auto_create_index" : "false"
},
"knn" : {
"algo_param" : {
"index_thread_qty" : "20"
},
"cache" : {
"item" : {
"expiry" : {
"enabled" : "true"
}
}
},
"model" : {
"index" : {
"number_of_shards" : "22"
}
},
"plugin" : {
"enabled" : "true"
}
},
"plugins" : {
"index_state_management" : {
"template_migration" : {
"control" : "-1"
}
}
}
},
"transient" : {
"cluster" : {
"max_shards_per_node" : "5100"
}
}
}
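For a quicker comparison of just the knn settings on the two clusters, something like the sketch below works (endpoints and credentials are placeholders; flat_settings=true returns dotted keys, which makes the output easy to filter and diff):

import requests

CLUSTERS = {
    "aws": "https://<domain>.com",      # placeholder
    "local": "https://localhost:9200",  # placeholder; pass verify=False below if it uses a self-signed cert
}

for name, endpoint in CLUSTERS.items():
    resp = requests.get(
        f"{endpoint}/_cluster/settings",
        params={"flat_settings": "true"},
        auth=("username", "password"),  # placeholder
        timeout=30,
    )
    settings = resp.json()
    # Keep only the knn.* keys from each scope.
    for scope in ("persistent", "transient"):
        knn = {k: v for k, v in settings.get(scope, {}).items() if k.startswith("knn.")}
        print(name, scope, knn)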
Relevant Logs or Screenshots: