Logstash pushes logs to AWS OpenSearch with a lag

Versions (relevant - logstash 8.17.2):

Describe the issue:
Logstash processed the event "2025-02-19T05:13:17,823 DEBUG [scheduler-TaskQueueEngine-thread-231] jdbc.sqlonly - select TenantId, Name, Uid, DefaultLocale, PartnerUID, Guid from [OracleTenant] where [OracleTenant].[TenantId]=(int)1\r" at 2025-02-19T10:22:58, i.e. more than five hours after the timestamp in the event itself.
Configuration:

input {
  file {
    path => "C:/Program Files (x86)/db/Server/logs/oracle.log"
    type => "localhost_access_log"
    start_position => "beginning"
    ignore_older => 86400   # ignore files older than 24 hours
    close_older => 86400    # free the resources
    sincedb_path => "C:\setup\tools\logstash\logstash-8.17.0\logs\oracle_log.sincedb"
  }
  file {
    path => "C:/Program Files (x86)/db/Server/logs/system.log"
    type => "system"
    codec => multiline {
      pattern => "^%{TIMESTAMP_ISO8601}"
      negate => true
      what => "previous"
      charset => "ISO-8859-1"
    }
    start_position => "beginning"
    ignore_older => 86400   # ignore files older than 24 hours
    close_older => 86400    # free the resources
    sincedb_path => "C:\setup\tools\logstash\logstash-8.17.0\logs\system.sincedb"
  }
}

filter {
  if [type] == "localhost_access_log" {
    grok {
      match => {"message" => "\[%{HTTPDATE:logtimeStamp}\] %{IP:hostip} %{URIPROTO:method} %{URIPATH:post-data} (?:%{NOTSPACE:queryparam}|-) %{NUMBER:useragent} %{NUMBER:responsestatus} \[%{GREEDYDATA:message}\] - %{NUMBER:time-taken:int}"}
      overwrite => [ "message"]
    }
    mutate {
      remove_field => [ "logtimeStamp" ]
    }
  }
  if [type] == "system" {
    mutate {
      gsub => [
        "message", "\[\] ", " ",
        "message", "\- ", " ",
        "message", "\s+", " "
      ]
    }
    mutate {
      strip => ["message"]
    }
    grok {
      match => {"message" => ["%{TIMESTAMP_ISO8601:logtimeStamp} %{WORD:loglevel} \[%{USERNAME:httpcall}] %{USERNAME:dbName} %{USERNAME:tenantGuid} %{INT:tenantId} %{INT:userId} %{USERNAME:sessionID} %{GREEDYDATA:message}",
                              "%{TIMESTAMP_ISO8601:logtimeStamp} %{WORD:loglevel} %{GREEDYDATA:message}" ]}
      overwrite => [ "message" ]
    }
    mutate {
      remove_field => [ "logtimeStamp" ]
    }
  }
}
output {
  opensearch {
    ecs_compatibility => disabled
    hosts => ["https://${opensearch_endpoint}:443"]
    ssl => true
    index => "${NODE_ROLE}-%{+YYYY.MM.dd}"
    document_id => "%{fingerprint}"
  }
}
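One thing to note about the output: document_id => "%{fingerprint}" refers to a fingerprint field, but none of the filters shown above create it. If the field never exists on the event, the literal text %{fingerprint} is used as the document ID, so every event would target the same document in each daily index. A minimal sketch, assuming the ID is meant to be derived from the message and is not already set in a pipeline that is not shown here:

filter {
  # Hypothetical addition: hash the message into a stable "fingerprint"
  # field so the opensearch output gets a unique, repeatable document_id.
  fingerprint {
    source => "message"
    target => "fingerprint"
    method => "SHA256"
  }
}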

Relevant Logs or Screenshots:

[2025-02-19T10:22:58,795][DEBUG][logstash.filters.grok    ][main][40e2ddf7bf9bfcdd3a98f5f146527cc76da83c1955901bbe12a47cbd6a078fa9] Event now:  {:event=>{"loglevel"=>"DEBUG", "host"=>{"name"=>"web-server"}, "@timestamp"=>2025-02-19T10:22:58.743645800Z, "nodeRole"=>"web-server-1", "event"=>{"original"=>"2025-02-19T05:13:17,823 DEBUG [scheduler-TaskQueueEngine-thread-231] jdbc.sqlonly       - select TenantId, Name, Uid, DefaultLocale, PartnerUID, Guid from [OracleTenant]  where [OracleTenant].[TenantId]=(int)1\r"}, "message"=>"[scheduler-TaskQueueEngine-thread-231] jdbc.sqlonly select TenantId, Name, Uid, DefaultLocale, PartnerUID, Guid from [OracleTenant] where [OracleTenant].[TenantId]=(int)1", "log"=>{"file"=>{"path"=>"C:/Program Files (x86)/McAfee/ePolicy Orchestrator/Server/logs/orion.log"}}

This is happening on a Windows instance. Why is Logstash processing the logs so slowly?
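One way to narrow down where the time goes (a hypothetical debugging step, not part of the configuration above) is to run the same inputs and filters with a plain stdout output and watch whether events appear promptly:

output {
  # Temporary debugging output: the dots codec prints one dot per event,
  # making it easy to eyeball throughput without involving OpenSearch.
  stdout { codec => dots }
}

If events stream out quickly here but lag behind once the opensearch output is enabled, the delay is more likely on the indexing side (network, bulk requests, cluster load) than in the file input or the grok filters.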

@devuser Did you check the CPU and RAM of the Logstash VM?
Do you have different timezones set in the source logs, Logstash or OpenSearch?
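On the timezone point: the filters shown above extract logtimeStamp and then remove it without ever running a date filter, so @timestamp is the moment Logstash read the line, not the moment the event happened. If the application also logs in local time while OpenSearch displays UTC, that alone can look like a multi-hour lag even when ingestion is keeping up. A minimal sketch for the system branch, assuming the log timestamps are written in the server's local timezone (the zone name below is a placeholder):

filter {
  if [type] == "system" {
    date {
      # Parse e.g. "2025-02-19T05:13:17,823" into @timestamp.
      # "America/New_York" is a placeholder - use the timezone the
      # application actually writes its log timestamps in.
      match    => [ "logtimeStamp", "yyyy-MM-dd'T'HH:mm:ss,SSS", "ISO8601" ]
      timezone => "America/New_York"
    }
  }
}

For this to take effect it has to run after the grok that extracts logtimeStamp and before the mutate that removes the field; the same idea applies to the access-log branch.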