@@ -42,8 +42,6 @@ type Plugin struct {
4242 batcher * pipeline.RetriableBatcher
4343 avgEventSize int
4444
45- begin []int
46-
4745 time string
4846 headerPrefix string
4947 cancel context.CancelFunc
@@ -173,6 +171,11 @@ type Config struct {
173171 // > **Experimental feature**
174172 FatalOnFailedInsert bool `json:"fatal_on_failed_insert" default:"false"` // *
175173
174+ // > @3@4@5@6
175+ // >
 176+ 	// > Enables splitting of big batches before sending.
177+ SplitEnabled bool `json:"split_enabled" default:"false"` // *
178+
176179 // > @3@4@5@6
177180 // >
178181 // > Retention milliseconds for retry to DB.
@@ -202,6 +205,7 @@ type KeepAliveConfig struct {
202205
// data is the per-worker scratch state, reused between batches so the
// buffers are allocated once and only reset on each flush.
type data struct {
	outBuf []byte // serialized NDJSON payload for the current batch
	begin  []int  // byte offset in outBuf where each event starts, plus one final end offset
}
206210
207211func init () {
@@ -223,7 +227,6 @@ func (p *Plugin) Start(config pipeline.AnyConfig, params *pipeline.OutputPluginP
223227 p .registerMetrics (params .MetricCtl )
224228 p .mu = & sync.Mutex {}
225229 p .headerPrefix = `{"` + p .config .BatchOpType + `":{"_index":"`
226- p .begin = make ([]int , 0 , p .config .BatchSize_ + 1 )
227230
228231 if len (p .config .IndexValues ) == 0 {
229232 p .config .IndexValues = append (p .config .IndexValues , "@time" )
@@ -341,6 +344,7 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
341344 if * workerData == nil {
342345 * workerData = & data {
343346 outBuf : make ([]byte , 0 , p .config .BatchSize_ * p .avgEventSize ),
347+ begin : make ([]int , 0 , p .config .BatchSize_ + 1 ),
344348 }
345349 }
346350
@@ -351,16 +355,24 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
351355 }
352356
353357 eventsCount := 0
354- p .begin = p .begin [:0 ]
358+ data .begin = data .begin [:0 ]
355359 data .outBuf = data .outBuf [:0 ]
356360 batch .ForEach (func (event * pipeline.Event ) {
357361 eventsCount ++
358- p .begin = append (p .begin , len (data .outBuf ))
362+ data .begin = append (data .begin , len (data .outBuf ))
359363 data .outBuf = p .appendEvent (data .outBuf , event )
360364 })
361- p .begin = append (p .begin , len (data .outBuf ))
365+ data .begin = append (data .begin , len (data .outBuf ))
366+
367+ var statusCode int
368+ var err error
369+
370+ if p .config .SplitEnabled {
371+ statusCode , err = p .saveOrSplit (0 , eventsCount , data .begin , data .outBuf )
372+ } else {
373+ statusCode , err = p .save (data .outBuf )
374+ }
362375
363- statusCode , err := p .saveOrSplit (0 , eventsCount , p .begin , data .outBuf )
364376 if err != nil {
365377 p .sendErrorMetric .WithLabelValues (strconv .Itoa (statusCode )).Inc ()
366378 switch statusCode {
@@ -381,6 +393,16 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
381393 return nil
382394}
383395
396+ func (p * Plugin ) save (data []byte ) (int , error ) {
397+ return p .client .DoTimeout (
398+ http .MethodPost ,
399+ NDJSONContentType ,
400+ data ,
401+ p .config .ConnectionTimeout_ ,
402+ p .reportESErrors ,
403+ )
404+ }
405+
384406func (p * Plugin ) saveOrSplit (left int , right int , begin []int , data []byte ) (int , error ) {
385407 if left == right {
386408 return http .StatusOK , nil
0 commit comments