Skip to content

Commit bf51ecf

Browse files
robobario authored and ppatierno committed
Fix metadata order in response when producing records (#945)
* Add integration test checking result ordering

  Signed-off-by: Robert Young <[email protected]>

* fix: preserve record send order in response metadata

  This uses the send CompletionStages to keep the original ordering of records.
  kafka-clients Producer#send only guarantees that callbacks are executed in
  order per partition; the ordering of partitions is not guaranteed.

  Signed-off-by: Robert Young <[email protected]>

* Updated CHANGELOG with metadata fix description

  Signed-off-by: Paolo Patierno <[email protected]>

---------

Signed-off-by: Robert Young <[email protected]>
Signed-off-by: Paolo Patierno <[email protected]>
Co-authored-by: Paolo Patierno <[email protected]>
1 parent 38da89e commit bf51ecf

File tree

3 files changed

+74
-34
lines changed

3 files changed

+74
-34
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
* Dependency updates (Kafka 3.9.0, Vert.x 4.5.11, Netty 4.1.115.Final)
66
* Added support for creating a new topic via endpoint.
7+
* Fixed metadata order on the HTTP "offsets" response when producing records.
78

89
## 0.30.0
910

src/main/java/io/strimzi/kafka/bridge/http/HttpSourceBridgeEndpoint.java

Lines changed: 15 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
import java.util.UUID;
3636
import java.util.concurrent.CompletableFuture;
3737
import java.util.concurrent.CompletionStage;
38+
import java.util.stream.Collectors;
3839

3940
/**
4041
* Represents an HTTP bridge source endpoint for the Kafka producer operations
@@ -137,7 +138,6 @@ public void handle(RoutingContext routingContext, Handler<HttpBridgeEndpoint> ha
137138

138139
return;
139140
}
140-
List<HttpBridgeResult<?>> results = new ArrayList<>(records.size());
141141

142142
// fulfilling the request of sending (multiple) record(s) sequentially but in a separate thread
143143
// this will free the Vert.x event loop still in place
@@ -154,31 +154,27 @@ public void handle(RoutingContext routingContext, Handler<HttpBridgeEndpoint> ha
154154
return;
155155
}
156156

157-
@SuppressWarnings({ "rawtypes" })
158-
List<CompletableFuture> promises = new ArrayList<>(records.size());
157+
List<CompletableFuture<HttpBridgeResult<?>>> promises = new ArrayList<>(records.size());
159158
for (ProducerRecord<K, V> record : records) {
160-
CompletionStage<RecordMetadata> sendHandler =
161-
// inside send method, the callback which completes the promise is executed in the kafka-producer-network-thread
162-
// let's do the result handling in the same thread to keep the messages order delivery execution
163-
this.kafkaBridgeProducer.send(record).handle((metadata, ex) -> {
164-
LOGGER.trace("Handle thread {}", Thread.currentThread());
165-
if (ex == null) {
166-
LOGGER.debug("Delivered record {} to Kafka on topic {} at partition {} [{}]", record, metadata.topic(), metadata.partition(), metadata.offset());
167-
results.add(new HttpBridgeResult<>(metadata));
168-
} else {
169-
String msg = ex.getMessage();
170-
int code = handleError(ex);
171-
LOGGER.error("Failed to deliver record {}", record, ex);
172-
results.add(new HttpBridgeResult<>(new HttpBridgeError(code, msg)));
173-
}
174-
return metadata;
175-
});
159+
CompletionStage<HttpBridgeResult<?>> sendHandler = this.kafkaBridgeProducer.send(record).handle((metadata, ex) -> {
160+
LOGGER.trace("Handle thread {}", Thread.currentThread());
161+
if (ex == null) {
162+
LOGGER.debug("Delivered record {} to Kafka on topic {} at partition {} [{}]", record, metadata.topic(), metadata.partition(), metadata.offset());
163+
return new HttpBridgeResult<>(metadata);
164+
} else {
165+
String msg = ex.getMessage();
166+
int code = handleError(ex);
167+
LOGGER.error("Failed to deliver record {}", record, ex);
168+
return new HttpBridgeResult<>(new HttpBridgeError(code, msg));
169+
}
170+
});
176171
promises.add(sendHandler.toCompletableFuture());
177172
}
178173

179174
CompletableFuture.allOf(promises.toArray(new CompletableFuture[0]))
180175
// sending HTTP response asynchronously to free the kafka-producer-network-thread
181176
.whenCompleteAsync((v, t) -> {
177+
List<HttpBridgeResult<?>> results = promises.stream().map(CompletableFuture::join).collect(Collectors.toList());
182178
LOGGER.trace("All sent thread {}", Thread.currentThread());
183179
// always return OK, since failure cause is in the response, per message
184180
span.finish(HttpResponseStatus.OK.code());

src/test/java/io/strimzi/kafka/bridge/http/ProducerIT.java

Lines changed: 58 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -103,16 +103,67 @@ void sendSimpleMessage(VertxTestContext context) throws InterruptedException, Ex
103103
}
104104

105105
@Test
106-
void sendSimpleMessageToPartition(VertxTestContext context) throws InterruptedException, ExecutionException {
107-
KafkaFuture<Void> future = adminClientFacade.createTopic(topic, 2, 1);
106+
void sendMessagesToMultiplePartitions(VertxTestContext context) throws InterruptedException, ExecutionException {
107+
KafkaFuture<Void> future = adminClientFacade.createTopic(topic, 3, 1);
108108

109109
String value = "message-value";
110-
int partition = 1;
111110

112111
JsonArray records = new JsonArray();
112+
records.add(valuePartitionRecord(value, 0));
113+
114+
records.add(valuePartitionRecord(value, 1));
115+
116+
records.add(valuePartitionRecord(value, 2));
117+
118+
JsonObject root = new JsonObject();
119+
root.put("records", records);
120+
System.out.println(root);
121+
122+
future.get();
123+
124+
producerService()
125+
.sendRecordsRequest(topic, root, BridgeContentType.KAFKA_JSON_JSON)
126+
.sendJsonObject(root, ar ->
127+
context.verify(() -> {
128+
assertThat(ar.succeeded(), is(true));
129+
HttpResponse<JsonObject> response = ar.result();
130+
assertThat(response.statusCode(), is(HttpResponseStatus.OK.code()));
131+
JsonObject bridgeResponse = response.body();
132+
System.out.println(bridgeResponse);
133+
JsonArray offsets = bridgeResponse.getJsonArray("offsets");
134+
assertThat(offsets.size(), is(3));
135+
JsonObject metadata = offsets.getJsonObject(0);
136+
assertThat(metadata.getInteger("partition"), is(0));
137+
assertThat(metadata.getLong("offset"), is(0L));
138+
139+
JsonObject metadata2 = offsets.getJsonObject(1);
140+
assertThat(metadata2.getInteger("partition"), is(1));
141+
assertThat(metadata2.getLong("offset"), is(0L));
142+
143+
JsonObject metadata3 = offsets.getJsonObject(2);
144+
assertThat(metadata3.getInteger("partition"), is(2));
145+
assertThat(metadata3.getLong("offset"), is(0L));
146+
context.completeNow();
147+
}));
148+
assertThat(context.awaitCompletion(TEST_TIMEOUT, TimeUnit.SECONDS), is(true));
149+
}
150+
151+
private static JsonObject valuePartitionRecord(String value, int partition) {
113152
JsonObject json = new JsonObject();
114153
json.put("value", value);
115154
json.put("partition", partition);
155+
return json;
156+
}
157+
158+
@Test
159+
void sendSimpleMessageToPartition(VertxTestContext context) throws InterruptedException, ExecutionException {
160+
KafkaFuture<Void> future = adminClientFacade.createTopic(topic, 2, 1);
161+
162+
String value = "message-value";
163+
int partition = 1;
164+
165+
JsonArray records = new JsonArray();
166+
JsonObject json = valuePartitionRecord(value, partition);
116167
records.add(json);
117168

118169
JsonObject root = new JsonObject();
@@ -782,9 +833,7 @@ void sendToNonExistingPartitionsTest(VertxTestContext context) throws Interrupte
782833
int partition = 1000;
783834

784835
JsonArray records = new JsonArray();
785-
JsonObject json = new JsonObject();
786-
json.put("value", value);
787-
json.put("partition", partition);
836+
JsonObject json = valuePartitionRecord(value, partition);
788837
records.add(json);
789838

790839
JsonObject root = new JsonObject();
@@ -824,9 +873,7 @@ void sendToNonExistingTopicTest(VertxTestContext context) {
824873
int partition = 1;
825874

826875
JsonArray records = new JsonArray();
827-
JsonObject json = new JsonObject();
828-
json.put("value", value);
829-
json.put("partition", partition);
876+
JsonObject json = valuePartitionRecord(value, partition);
830877
records.add(json);
831878

832879
JsonObject root = new JsonObject();
@@ -1020,14 +1067,10 @@ void sendMultipleRecordsWithOneInvalidPartitionTest(VertxTestContext context) th
10201067
int partition = 1;
10211068

10221069
JsonArray records = new JsonArray();
1023-
JsonObject json = new JsonObject();
1024-
json.put("value", value);
1025-
json.put("partition", partition);
1070+
JsonObject json = valuePartitionRecord(value, partition);
10261071
records.add(json);
10271072

1028-
JsonObject json2 = new JsonObject();
1029-
json2.put("value", value + "invalid");
1030-
json2.put("partition", 500);
1073+
JsonObject json2 = valuePartitionRecord(value + "invalid", 500);
10311074
records.add(json2);
10321075

10331076
JsonObject root = new JsonObject();

0 commit comments

Comments (0)