@@ -42,8 +42,6 @@ type Plugin struct {
 	batcher      *pipeline.RetriableBatcher
 	avgEventSize int

-	begin []int
-
 	time         string
 	headerPrefix string
 	cancel       context.CancelFunc
@@ -173,6 +171,11 @@ type Config struct {
 	// > **Experimental feature**
 	FatalOnFailedInsert bool `json:"fatal_on_failed_insert" default:"false"` // *

+	// > @3@4@5@6
+	// >
+	// > Enable splitting of big batches
+	SplitEnabled bool `json:"split_enabled" default:"false"` // *
+
 	// > @3@4@5@6
 	// >
 	// > Retention milliseconds for retry to DB.
@@ -202,6 +205,7 @@ type KeepAliveConfig struct {

 type data struct {
 	outBuf []byte
+	begin  []int
 }

 func init() {
@@ -223,7 +227,6 @@ func (p *Plugin) Start(config pipeline.AnyConfig, params *pipeline.OutputPluginP
 	p.registerMetrics(params.MetricCtl)
 	p.mu = &sync.Mutex{}
 	p.headerPrefix = `{"` + p.config.BatchOpType + `":{"_index":"`
-	p.begin = make([]int, 0, p.config.BatchSize_+1)

 	if len(p.config.IndexValues) == 0 {
 		p.config.IndexValues = append(p.config.IndexValues, "@time")
@@ -341,6 +344,7 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
 	if *workerData == nil {
 		*workerData = &data{
 			outBuf: make([]byte, 0, p.config.BatchSize_*p.avgEventSize),
+			begin:  make([]int, 0, p.config.BatchSize_+1),
 		}
 	}

@@ -351,16 +355,24 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
 	}

 	eventsCount := 0
-	p.begin = p.begin[:0]
+	data.begin = data.begin[:0]
 	data.outBuf = data.outBuf[:0]
 	batch.ForEach(func(event *pipeline.Event) {
 		eventsCount++
-		p.begin = append(p.begin, len(data.outBuf))
+		data.begin = append(data.begin, len(data.outBuf))
 		data.outBuf = p.appendEvent(data.outBuf, event)
 	})
-	p.begin = append(p.begin, len(data.outBuf))
+	data.begin = append(data.begin, len(data.outBuf))
+
+	var statusCode int
+	var err error
+
+	if p.config.SplitEnabled {
+		statusCode, err = p.saveOrSplit(0, eventsCount, data.begin, data.outBuf)
+	} else {
+		statusCode, err = p.save(data.outBuf)
+	}

-	statusCode, err := p.saveOrSplit(0, eventsCount, p.begin, data.outBuf)
 	if err != nil {
 		p.sendErrorMetric.WithLabelValues(strconv.Itoa(statusCode)).Inc()
 		switch statusCode {
@@ -381,6 +393,16 @@ func (p *Plugin) out(workerData *pipeline.WorkerData, batch *pipeline.Batch) err
 	return nil
 }

+func (p *Plugin) save(data []byte) (int, error) {
+	return p.client.DoTimeout(
+		http.MethodPost,
+		NDJSONContentType,
+		data,
+		p.config.ConnectionTimeout_,
+		p.reportESErrors,
+	)
+}
+
 func (p *Plugin) saveOrSplit(left int, right int, begin []int, data []byte) (int, error) {
 	if left == right {
 		return http.StatusOK, nil
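The begin slice that this change moves from Plugin into the per-worker data struct stores one byte offset per serialized event plus a trailing sentinel, so event i of a batch occupies outBuf[begin[i]:begin[i+1]]. A minimal standalone sketch of that layout follows; the names and sample payloads are illustrative only, not the plugin's API.

// Illustration of the begin/outBuf layout built in out():
// an offset is appended before each event and once after the loop,
// so event i is outBuf[begin[i]:begin[i+1]].
package main

import "fmt"

func main() {
	events := [][]byte{
		[]byte(`{"index":{}}` + "\n" + `{"msg":"a"}` + "\n"),
		[]byte(`{"index":{}}` + "\n" + `{"msg":"b"}` + "\n"),
	}

	outBuf := make([]byte, 0, 64)
	begin := make([]int, 0, len(events)+1)

	for _, e := range events {
		begin = append(begin, len(outBuf)) // offset of this event's first byte
		outBuf = append(outBuf, e...)
	}
	begin = append(begin, len(outBuf)) // sentinel: end of the last event

	// Recover each event from the shared buffer.
	for i := 0; i+1 < len(begin); i++ {
		fmt.Printf("event %d: %q\n", i, outBuf[begin[i]:begin[i+1]])
	}
}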
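The hunk above shows only the head of saveOrSplit; the rest of its body is cut off in this diff. Assuming it posts the byte range delimited by begin[left] and begin[right] and falls back to sending each half separately when the payload is rejected as too large, a sketch could look like the following. saveFn and the 413 check are assumptions for illustration, not the confirmed implementation.

// Sketch of a recursive save-or-split strategy over event offsets.
// saveFn stands in for the plugin's HTTP call (p.save above); splitting
// on http.StatusRequestEntityTooLarge is assumed, not confirmed.
package essplit

import "net/http"

func saveOrSplit(left, right int, begin []int, data []byte,
	saveFn func([]byte) (int, error)) (int, error) {

	if left == right {
		return http.StatusOK, nil // empty range: nothing to send
	}

	statusCode, err := saveFn(data[begin[left]:begin[right]])
	if statusCode != http.StatusRequestEntityTooLarge || left+1 == right {
		// Success, a non-size error, or a single event that cannot be split further.
		return statusCode, err
	}

	// Payload too large: retry each half on its own, keeping event boundaries intact.
	mid := (left + right) / 2
	if code, err := saveOrSplit(left, mid, begin, data, saveFn); err != nil {
		return code, err
	}
	return saveOrSplit(mid, right, begin, data, saveFn)
}

Halving the index range rather than the raw byte slice keeps every retried request aligned to event boundaries, because begin only ever points at the start of a serialized event.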