Skip to content

[FLINK-32344][connectors/mongodb] Support unbounded streaming read via change stream feature. #11

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,16 @@
import org.apache.flink.connector.testframe.container.FlinkContainers;
import org.apache.flink.connector.testframe.container.FlinkContainersSettings;
import org.apache.flink.connector.testframe.container.TestcontainersSettings;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.test.resources.ResourceTestUtils;
import org.apache.flink.test.util.SQLJobSubmission;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.Updates;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.junit.jupiter.api.AfterAll;
Expand All @@ -47,11 +51,13 @@
import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import static org.apache.flink.connector.mongodb.testutils.MongoTestUtil.MONGODB_HOSTNAME;
import static org.apache.flink.connector.mongodb.testutils.MongoTestUtil.MONGO_4_0;
import static org.apache.flink.connector.mongodb.testutils.MongoTestUtil.MONGO_IMAGE_PREFIX;
import static org.assertj.core.api.Assertions.assertThat;

/** End-to-end test for the MongoDB connectors. */
Expand All @@ -65,11 +71,11 @@ class MongoE2ECase {
private static final Path SQL_CONNECTOR_MONGODB_JAR =
ResourceTestUtils.getResource(".*mongodb.jar");

private static final int TEST_ORDERS_COUNT = 5;
private static final int TEST_ORDERS_INITIAL_COUNT = 5;

@Container
static final MongoDBContainer MONGO_CONTAINER =
new MongoDBContainer(MONGO_4_0)
new MongoDBContainer(MONGO_IMAGE_PREFIX + MONGO_4_0)
.withLogConsumer(new Slf4jLogConsumer(LOG))
.withNetwork(NETWORK)
.withNetworkAliases(MONGODB_HOSTNAME);
Expand All @@ -85,7 +91,12 @@ class MongoE2ECase {
public static final FlinkContainers FLINK =
FlinkContainers.builder()
.withFlinkContainersSettings(
FlinkContainersSettings.builder().numTaskManagers(2).build())
FlinkContainersSettings.builder()
.setConfigOption(
ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL,
Duration.ofSeconds(1))
.numTaskManagers(2)
.build())
.withTestcontainersSettings(TESTCONTAINERS_SETTINGS)
.build();

Expand All @@ -107,12 +118,12 @@ static void teardown() {
public void testUpsertSink() throws Exception {
    // Verifies the upsert (primary-keyed) MongoDB sink: every source order must be
    // mirrored into the "orders_bak" collection, _id included.
    MongoDatabase db = mongoClient.getDatabase("test_upsert");

    List<Document> orders = generateOrders();
    db.getCollection("orders").insertMany(orders);

    executeSqlStatements(readSqlFile("e2e_upsert.sql"));

    // Wait until the sink has written all documents before comparing.
    List<Document> ordersBackup = readAllBackupOrders(db, orders.size());

    assertThat(ordersBackup).containsExactlyInAnyOrderElementsOf(orders);
}
Expand All @@ -121,43 +132,88 @@ public void testUpsertSink() throws Exception {
public void testAppendOnlySink() throws Exception {
    // Verifies the append-only MongoDB sink: documents are re-inserted with new
    // server-generated _id values, so comparison is done with _id stripped.
    MongoDatabase db = mongoClient.getDatabase("test_append_only");

    List<Document> orders = generateOrders();
    db.getCollection("orders").insertMany(orders);

    executeSqlStatements(readSqlFile("e2e_append_only.sql"));

    // Wait until the sink has written all documents before comparing.
    List<Document> ordersBackup = readAllBackupOrders(db, orders.size());

    // _id differs between source and sink in append-only mode; compare the payload only.
    List<Document> expected = removeIdField(orders);
    assertThat(removeIdField(ordersBackup)).containsExactlyInAnyOrderElementsOf(expected);
}

private static List<Document> readAllBackupOrders(MongoDatabase db) throws Exception {
Deadline deadline = Deadline.fromNow(Duration.ofSeconds(20));
List<Document> backupOrders;
do {
Thread.sleep(1000);
backupOrders = db.getCollection("orders_bak").find().into(new ArrayList<>());
} while (deadline.hasTimeLeft() && backupOrders.size() < TEST_ORDERS_COUNT);
@Test
public void testUnboundedSink() throws Exception {
    // Verifies unbounded reading: an initial snapshot scan followed by change-stream
    // capture of inserts, updates and replacements.
    MongoDatabase db = mongoClient.getDatabase("test_unbounded");
    MongoCollection<Document> coll = db.getCollection("orders");

    // -- scan phase: initial snapshot must be replicated --
    List<Document> orders = generateOrders();
    coll.insertMany(orders);

    executeSqlStatements(readSqlFile("e2e_unbounded.sql"));

    List<Document> ordersBackup = readAllBackupOrders(db, orders.size());
    assertThat(ordersBackup).containsExactlyInAnyOrderElementsOf(orders);

    // -- stream phase: insert 3 new records --
    List<Document> newOrders =
            Arrays.asList(generateOrder(6), generateOrder(7), generateOrder(8));
    coll.insertMany(newOrders);
    orders.addAll(newOrders);

    // assert inserted
    ordersBackup = readAllBackupOrders(db, orders.size());
    assertThat(ordersBackup).containsExactlyInAnyOrderElementsOf(orders);

    // update 1 record — mirror the change into the in-memory expectation as well;
    // the original only updated the database, so the final assertion compared the
    // backup against a stale expected document.
    Document updateOrder = orders.get(0);
    coll.updateOne(Filters.eq("_id", updateOrder.get("_id")), Updates.set("quantity", 1000L));
    updateOrder.put("quantity", 1000L);

    // replace 1 record, mirroring the new quantity in memory too
    Document replacement = Document.parse(orders.get(1).toJson());
    replacement.put("quantity", 1001L);
    coll.replaceOne(Filters.eq("_id", replacement.remove("_id")), replacement);
    orders.get(1).put("quantity", 1001L);

    // assert updated
    // NOTE(review): readAllBackupOrders waits on document count, which does not
    // change for updates/replacements — consider content-based polling to avoid
    // racing the sink. TODO confirm with the connector's checkpoint interval (1s).
    ordersBackup = readAllBackupOrders(db, orders.size());
    assertThat(ordersBackup).containsExactlyInAnyOrderElementsOf(orders);
}

private static List<Document> readAllBackupOrders(MongoDatabase db, int expectSize)
        throws Exception {
    // Poll the backup collection once per second until it holds at least
    // expectSize documents, or until the 30s deadline elapses; then return a
    // snapshot of whatever is present.
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    final MongoCollection<Document> backupColl = db.getCollection("orders_bak");
    while (backupColl.countDocuments() < expectSize && deadline.hasTimeLeft()) {
        Thread.sleep(1000L);
    }
    return backupColl.find().into(new ArrayList<>());
}

private static List<Document> removeIdField(List<Document> documents) {
    // Strips the "_id" field from each document IN PLACE and returns the same
    // documents as a new list. The original used Stream.peek for the removal;
    // peek is documented as a debugging aid whose action is not guaranteed to be
    // invoked for every element, so an explicit loop is used instead.
    List<Document> stripped = new ArrayList<>(documents.size());
    for (Document doc : documents) {
        doc.remove("_id");
        stripped.add(doc);
    }
    return stripped;
}

private static List<Document> mockOrders() {
private static List<Document> generateOrders() {
List<Document> orders = new ArrayList<>();
for (int i = 1; i <= TEST_ORDERS_COUNT; i++) {
orders.add(
new Document("_id", new ObjectId())
.append("code", "ORDER_" + i)
.append("quantity", i * 10L));
for (int i = 1; i <= TEST_ORDERS_INITIAL_COUNT; i++) {
orders.add(generateOrder(i));
}
return orders;
}

private static Document generateOrder(int index) {
    // Build a single order document keyed by a freshly generated ObjectId.
    Document order = new Document();
    order.put("_id", new ObjectId());
    order.put("code", "ORDER_" + index);
    order.put("quantity", index * 10L);
    return order;
}

private static List<String> readSqlFile(final String resourceName) throws Exception {
return Files.readAllLines(
Paths.get(MongoE2ECase.class.getResource("/" + resourceName).toURI()));
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
--/*
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements. See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership. The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */

-- Remove any table definitions left over from a previous run.
DROP TABLE IF EXISTS orders;
DROP TABLE IF EXISTS orders_bak;

-- Unbounded source: reads the initial snapshot of the collection, then keeps
-- consuming changes ('scan.startup.mode' = 'initial').
CREATE TABLE orders (
`_id` STRING,
`code` STRING,
`quantity` BIGINT,
PRIMARY KEY (_id) NOT ENFORCED
) WITH (
'connector' = 'mongodb',
'uri' = 'mongodb://mongodb:27017',
'database' = 'test_unbounded',
'collection' = 'orders',
'scan.startup.mode' = 'initial'
);

-- Upsert sink: the primary key lets inserts, updates and replacements from the
-- source be mirrored into the backup collection.
CREATE TABLE orders_bak (
`_id` STRING,
`code` STRING,
`quantity` BIGINT,
PRIMARY KEY (_id) NOT ENFORCED
) WITH (
'connector' = 'mongodb',
'uri' = 'mongodb://mongodb:27017',
'database' = 'test_unbounded',
'collection' = 'orders_bak'
);

-- Continuously replicate the source collection into the backup collection.
INSERT INTO orders_bak SELECT * FROM orders;
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,18 @@ public class MongoConstants {

public static final String DROPPED_FIELD = "dropped";

// Field names of MongoDB change-stream event documents — presumably matching the
// server's change-event schema (clusterTime, resumeToken, operationType,
// documentKey, fullDocument, fullDocumentBeforeChange); verify against the
// MongoDB change streams documentation for the supported server versions.
public static final String CLUSTER_TIME_FIELD = "clusterTime";

public static final String RESUME_TOKEN_FIELD = "resumeToken";

public static final String OPERATION_TYPE_FIELD = "operationType";

public static final String DOCUMENT_KEY_FIELD = "documentKey";

public static final String FULL_DOCUMENT_FIELD = "fullDocument";

public static final String FULL_DOCUMENT_BEFORE_CHANGE_FIELD = "fullDocumentBeforeChange";

public static final BsonValue BSON_MIN_KEY = new BsonMinKey();

public static final BsonValue BSON_MAX_KEY = new BsonMaxKey();
Expand All @@ -68,5 +80,13 @@ public class MongoConstants {
public static final JsonWriterSettings DEFAULT_JSON_WRITER_SETTINGS =
JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build();

// Numeric MongoDB server error codes — names suggest they mirror the server's
// error-code table (9 = FailedToParse, 13 = Unauthorized, 20 = IllegalOperation,
// 40415 = unknown field in a command document); confirm against the MongoDB
// error codes reference before relying on them.
public static final int FAILED_TO_PARSE_ERROR = 9;

public static final int UNAUTHORIZED_ERROR = 13;

public static final int ILLEGAL_OPERATION_ERROR = 20;

public static final int UNKNOWN_FIELD_ERROR = 40415;

private MongoConstants() {}
}
Loading