Error response when getting _cluster/stats

We are using OpenSearch 2.1 on OpenShift Container Platform (OCP).

I have a three-node OpenSearch cluster deployed in our OCP cluster. Details below:
[root@xxxx ~]# oc get pod |grep esnode
elasticsea-0ac3-ib-6fb9-es-server-esnodes-0 2/2 Running 0 17d
elasticsea-0ac3-ib-6fb9-es-server-esnodes-1 2/2 Running 0 17d
elasticsea-0ac3-ib-6fb9-es-server-esnodes-2 2/2 Running 0 17d

[root@xxxx ~]# host=https://xxx:443
[root@xxxx ~]# curl -s -k -XGET -u elastic:xxx $host/xxx:443/
{
  "name" : "elasticsea-0ac3-ib-6fb9-es-server-esnodes-0",
  "cluster_name" : "es-cluster",
  "cluster_uuid" : "g8XiHYqDRG6U2yfOOEBobg",
  "version" : {
    "distribution" : "opensearch",
    "number" : "2.1.1", → OpenSearch 2.1.1
    "build_type" : "tar",
    "build_hash" : "857112090fe90549ce95e380f9c2681b4a5e3f99",
    "build_date" : "2022-09-13T00:31:11.309805990Z",
    "build_snapshot" : false,
    "lucene_version" : "9.2.0",
    "minimum_wire_compatibility_version" : "7.10.0",
    "minimum_index_compatibility_version" : "7.0.0"
  },
  "tagline" : "The OpenSearch Project: xxxx"
}

[root@xxxx ~]# curl -s -k -XGET -u elastic:xxx $host/xxx:443/_cat/indices
green open index1 bR0nzWhSSwiUGU9mxWjUoA 10 2 0 0 6kb 2kb
green open index2 tdGiGg1jQ4i3Vi6WsrqyCg 3 1 1 0 361.9kb 180.9kb
green open index3 ex5RhevqRee1cP2gycIPdg 10 2 532 233 9.5mb 3.1mb

But when I try to GET _cluster/stats, it returns the error below:

[root@xxxx ~]# curl -s -k -XGET -u elastic:xxx $host/_cluster/stats?human | jq .
{
  "error": {
    "root_cause": [
      {
        "type": "illegal_argument_exception",
        "reason": "Values less than -1 bytes are not supported: -3559587840b"
      }
    ],
    "type": "illegal_argument_exception",
    "reason": "Values less than -1 bytes are not supported: -3559587840b",
    "suppressed": [
      {
        "type": "illegal_state_exception",
        "reason": "Failed to close the XContentBuilder",
        "caused_by": {
          "type": "i_o_exception",
          "reason": "Unclosed object or array found"
        }
      }
    ]
  },
  "status": 400
}
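The rejected value (-3559587840 bytes, roughly -3.3 GiB) looks like one of the per-node byte counters (filesystem or OS memory stats) going negative, or summing to a negative number, when the coordinating node aggregates them for the cluster-wide response. A minimal sketch for checking where the negative number comes from, assuming the same $host and elastic credentials as above (the jq paths follow the standard _nodes/stats response layout):

# Per-node filesystem stats: look for negative total/free/available bytes
curl -s -k -u elastic:xxx "$host/_nodes/stats/fs?human" | jq '.nodes[] | {name: .name, fs_total: .fs.total}'

# Per-node OS memory stats: another byte counter that feeds _cluster/stats
curl -s -k -u elastic:xxx "$host/_nodes/stats/os?human" | jq '.nodes[] | {name: .name, mem: .os.mem}'

# Disk usage as the allocator sees it, one row per node
curl -s -k -u elastic:xxx "$host/_cat/allocation?v"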

I've also tried to GET _cluster/stats in a single-node OpenSearch environment, and there it works correctly.

root@yyyyy1:~# curl -XGET http://localhost:9200/_cluster/stats
{"_nodes":{"total":1,"successful":1,"failed":0},"cluster_name":"esbox","cluster_uuid":"vJenEx1yRpemSQeKm9HyEA","timestamp":1666024587907,"status":"yellow","indices":{"count":9,"shards":{"total":49,"primaries":49,"replication":0.0,"index":{"shards":{"min":1,"max":10,"avg":5.444444444444445},"primaries":{"min":1,"max":10,"avg":5.444444444444445},"replication":{"min":0.0,"max":0.0,"avg":0.0}}},"docs":{"count":103,"deleted":157},"store":{"size_in_bytes":1668054,"reserved_in_bytes":0},"fielddata":{"memory_size_in_bytes":0,"evictions":0},"query_cache":{"memory_size_in_bytes":0,"total_count":0,"hit_count":0,"miss_count":0,"cache_size":0,"cache_count":0,"evictions":0},"completion":{"size_in_bytes":0},"segments":{"count":24,"memory_in_bytes":496080,"terms_memory_in_bytes":401408,"stored_fields_memory_in_bytes":11712,"term_vectors_memory_in_bytes":0,"norms_memory_in_bytes":47872,"points_memory_in_bytes":0,"doc_values_memory_in_bytes":35088,"index_writer_memory_in_bytes":0,"version_map_memory_in_bytes":0,"fixed_bit_set_memory_in_bytes":960,"max_unsafe_auto_id_timestamp":-1,"file_sizes":{}},"mappings":{"field_types":[{"name":"boolean","count":203,"index_count":4},{"name":"date","count":20,"index_count":2},{"name":"double","count":1,"index_count":1},{"name":"float","count":2,"index_count":2},{"name":"keyword","count":433,"index_count":7},{"name":"long","count":16,"index_count":6},{"name":"nested","count":2,"index_count":2},{"name":"object","count":157,"index_count":6},{"name":"text","count":401,"index_count":7}]},"analysis":{"char_filter_types":[],"tokenizer_types":[{"name":"ngram","count":2,"index_count":2}],"filter_types":[{"name":"stemmer","count":4,"index_count":2},{"name":"stop","count":2,"index_count":2},{"name":"truncate","count":4,"index_count":4}],"analyzer_types":[{"name":"custom","count":6,"index_count":4}],"built_in_char_filters":[],"built_in_tokenizers":[{"name":"standard","count":2,"index_count":2},{"name":"whitespace","count":2,"index_count":2}],"built_in_filters":[{"name":"asciifolding","count":6,"index_count":4},{"name":"lowercase","count":6,"index_count":4}],"built_in_analyzers":[{"name":"standard","count":2,"index_count":2}]}},"nodes":{"count":{"total":1,"coordinating_only":0,"data":1,"data_cold":1,"data_content":1,"data_hot":1,"data_warm":1,"ingest":1,"master":1,"ml":1,"remote_cluster_client":1,"transform":1,"voting_only":0},"versions":["7.10.2"],"os":{"available_processors":8,"allocated_processors":8,"names":[{"name":"Linux","count":1}],"pretty_names":[{"pretty_name":"Ubuntu 22.04 LTS","count":1}],"mem":{"total_in_bytes":16777928704,"free_in_bytes":12855758848,"used_in_bytes":3922169856,"free_percent":77,"used_percent":23}},"process":{"cpu":{"percent":0},"open_file_descriptors":{"min":481,"max":481,"avg":481}},"jvm":{"max_uptime_in_millis":943843187,"versions":[{"version":"15.0.1","vm_name":"OpenJDK 64-Bit Server VM","vm_version":"15.0.1+9","vm_vendor":"AdoptOpenJDK","bundled_jdk":true,"using_bundled_jdk":true,"count":1}],"mem":{"heap_used_in_bytes":148885904,"heap_max_in_bytes":1073741824},"threads":66},"fs":{"total_in_bytes":266153758720,"free_in_bytes":239139762176,"available_in_bytes":239139762176},"plugins":[],"network_types":{"transport_types":{"security4":1},"http_types":{"security4":1}},"discovery_types":{"zen":1},"packaging_types":[{"flavor":"default","type":"deb","count":1}],"ingest":{"number_of_pipelines":1,"processor_stats":{"gsub":{"count":0,"failed":0,"current":0,"time_in_millis":0},"script":{"count":0,"failed":0,"current":0,"time_in_millis":0}}}}}
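To narrow down which of the three nodes makes the cluster-wide call fail, the stats can also be requested for one node at a time with the node filter on the same endpoint. This is a sketch, assuming the node-filter form of _cluster/stats (present in Elasticsearch 7.10 and, as far as I know, kept in OpenSearch 2.x) and using the node names shown in the earlier output:

# Run cluster stats against each node in turn; the one that returns the
# illegal_argument_exception is the node reporting the bad byte value
curl -s -k -u elastic:xxx "$host/_cluster/stats/nodes/elasticsea-0ac3-ib-6fb9-es-server-esnodes-0?human" | jq .
curl -s -k -u elastic:xxx "$host/_cluster/stats/nodes/elasticsea-0ac3-ib-6fb9-es-server-esnodes-1?human" | jq .
curl -s -k -u elastic:xxx "$host/_cluster/stats/nodes/elasticsea-0ac3-ib-6fb9-es-server-esnodes-2?human" | jq .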

My questions here are:
Is this a bug in OpenSearch 2.1.1?
If not, how can I get the cluster stats correctly?
If yes, which version fixes this issue?

Thanks in advance.