@@ -20,6 +20,7 @@ import org.slf4j.LoggerFactory
20
20
21
21
// Scala
22
22
import scala .concurrent .ExecutionContext .Implicits .global
23
+ import scala .concurrent .Future
23
24
import scala .util .{Failure => SFailure , Success => SSuccess }
24
25
25
26
import org .elasticsearch .client .RestClient
@@ -43,7 +44,7 @@ import cats.syntax.validated._
43
44
44
45
import retry .implicits ._
45
46
import retry .{RetryDetails , RetryPolicy }
46
- import retry .CatsEffect . _
47
+ import retry ._
47
48
48
49
import com .snowplowanalytics .snowplow .scalatracker .Tracker
49
50
@@ -116,9 +117,11 @@ class ElasticsearchBulkSender(
116
117
117
118
override def send (records : List [EmitterJsonInput ]): List [EmitterJsonInput ] = {
118
119
val connectionAttemptStartTime = System .currentTimeMillis()
119
- implicit def onErrorHandler : (Throwable , RetryDetails ) => IO [Unit ] =
120
+ val onErrorHandler : (Throwable , RetryDetails ) => IO [Unit ] =
120
121
BulkSender .onError(log, tracker, connectionAttemptStartTime)
121
- implicit def retryPolicy : RetryPolicy [IO ] =
122
+ def onFailureHandler [A ](res : Response [A ], rd : RetryDetails ): IO [Unit ] =
123
+ onErrorHandler(res.error.asException, rd)
124
+ val retryPolicy : RetryPolicy [IO ] =
122
125
BulkSender .delayPolicy[IO ](maxAttempts, maxConnectionWaitTimeMs)
123
126
124
127
// oldFailures - failed at the transformation step
@@ -132,7 +135,12 @@ class ElasticsearchBulkSender(
132
135
val newFailures : List [EmitterJsonInput ] = if (actions.nonEmpty) {
133
136
BulkSender
134
137
.futureToTask(client.execute(bulk(actions)))
135
- .retryingOnSomeErrors(BulkSender .exPredicate)
138
+ .retryingOnFailuresAndAllErrors(
139
+ r => r.isSuccess,
140
+ retryPolicy,
141
+ onFailureHandler,
142
+ onErrorHandler
143
+ )
136
144
.map(extractResult(records))
137
145
.attempt
138
146
.unsafeRunSync() match {
/** Extracts the records that failed to be inserted from a bulk response.
  *
  * Each item of the bulk response is zipped positionally with the record that
  * produced it, so ordering between `records` and `response.result.items` is
  * assumed to match — TODO confirm elastic4s preserves request order.
  *
  * @param records  the inputs originally submitted in this bulk request
  * @param response the elastic4s response wrapper for the bulk call
  * @return the subset of `records` whose insertion failed; if the request
  *         failed at the HTTP level, all `records` are returned so the caller
  *         can retry them
  */
def extractResult(
  records: List[EmitterJsonInput]
)(response: Response[BulkResponse]): List[EmitterJsonInput] =
  // fold: when the whole request is an error, every record is considered
  // failed (default `records`); otherwise inspect per-item errors.
  response.fold(records) { result =>
    result.items
      .zip(records)
      .flatMap { case (bulkResponseItem, record) =>
        // An item-level error reason marks this single record as failed;
        // handleResponse decides whether the record is kept for retry.
        handleResponse(bulkResponseItem.error.map(_.reason), record)
      }
      .toList
  }
177
187
178
188
def composeObject (jsonRecord : JsonRecord ): ElasticsearchObject = {
179
189
val index = jsonRecord.shard match {
/** Logs the cluster health.
  *
  * Queries the cluster-health endpoint asynchronously and logs the reported
  * status at a level matching its severity: green -> info, yellow -> warn,
  * red -> error. A failed call (or an error-level elastic4s response) is
  * logged as an error. Fire-and-forget: the returned Future is not awaited.
  */
override def logHealth(): Unit =
  client
    .execute(clusterHealth)
    .flatMap { health =>
      // Surface an error-level elastic4s Response as a failed Future so the
      // onComplete handler below observes it as an SFailure.
      health.fold(failure => Future.failed(failure.error.asException), Future.successful(_))
    }
    .onComplete {
      case SSuccess(result) =>
        // NOTE(review): statuses other than green/yellow/red are silently
        // ignored by this match — confirm that is intended.
        result.status match {
          case "green"  => log.info("Cluster health is green")
          case "yellow" => log.warn("Cluster health is yellow")
          case "red"    => log.error("Cluster health is red")
        }
      case SFailure(e) => log.error("Couldn't retrieve cluster health", e)
    }
201
213
202
214
/**
203
215
* Handle the response given for a bulk request, by producing a failure if we failed to insert
0 commit comments